x64: Swap operand order of `cmp` in internal representation (#8408)

* x64: Rename fields of `CmpRmiR` and swap them

This commit is the start of work equivalent to #8362, but for
general-purpose comparison instructions. Here only the instruction
arguments are swapped and renamed. All callers preserve the
right-to-left ordering; a subsequent commit will swap them to
left-to-right.
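
As a rough illustration of the new convention, here is a minimal, self-contained Rust sketch (toy types, not Cranelift's actual `Inst` or `GprMemImm`): `src1` is the left-hand side, `src2` the right-hand side, and the flags describe `src1 - src2`.

```rust
// Toy model of the swapped operand order; names mirror the diff below,
// but the types are simplified stand-ins rather than Cranelift's.
enum RegMemImm {
    Reg(u64),
    Imm(u32),
}

struct Flags {
    zf: bool, // zero flag: src1 == src2
    cf: bool, // carry flag: unsigned borrow, i.e. src1 < src2
}

// `cmp` now reads left-to-right: the flags reflect `src1 - src2`.
fn cmp(src1: u64, src2: RegMemImm) -> Flags {
    let rhs = match src2 {
        RegMemImm::Reg(r) => r,
        // Immediates are sign-extended, as in the 64-bit encodings.
        RegMemImm::Imm(i) => i as i32 as i64 as u64,
    };
    Flags { zf: src1 == rhs, cf: src1 < rhs }
}

fn main() {
    let f = cmp(3, RegMemImm::Imm(5));
    assert!(!f.zf && f.cf); // 3 - 5 borrows, so CF is set
    let g = cmp(9, RegMemImm::Reg(3));
    assert!(!g.zf && !g.cf); // 9 > 3, no borrow
}
```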

* x64: Swap `test_rr` operands in Winch

* x64: Swap operands of `cmp_rr` in Winch

* x64: Swap operands of `cmp_ir` in Winch

* x64: Swap operands for the `cmp` masm method in Winch

This change additionally affects the `branch` method. The methods now
document which operand is the left-hand side and which is the
right-hand side.

Of note: the `branch` method previously used the terms "lhs" and "rhs"
incorrectly, since the left/right sense was swapped in the actual
comparison. This caused no issues, however, because `branch` was only
ever used when both operands were the same or the condition was
reflexive.
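
For illustration only (made-up types, not Winch's real `masm` interface), this sketch spells out the convention the methods now document: `lhs` is the left-hand side and `rhs` the right-hand side, so a `LtU` branch is taken when `lhs < rhs` unsigned; with identical operands and a reflexive condition, the old mislabeling could not change the outcome.

```rust
// Toy stand-in for Winch's IntCmpKind, restricted to a few variants.
enum IntCmpKind {
    Eq,
    Ne,
    LtU,
    GtU,
}

// Models whether a conditional branch with the documented operand order
// would be taken: the comparison is `lhs <op> rhs`.
fn branch_taken(kind: IntCmpKind, lhs: u64, rhs: u64) -> bool {
    match kind {
        IntCmpKind::Eq => lhs == rhs,
        IntCmpKind::Ne => lhs != rhs,
        IntCmpKind::LtU => lhs < rhs,
        IntCmpKind::GtU => lhs > rhs,
    }
}

fn main() {
    // Reflexive conditions on identical operands are insensitive to the
    // operand order, which is why the old naming was harmless in practice.
    assert!(branch_taken(IntCmpKind::Eq, 7, 7));
    assert!(!branch_taken(IntCmpKind::Ne, 7, 7));
    // With distinct operands the left/right order matters.
    assert!(branch_taken(IntCmpKind::LtU, 3, 9));
    assert!(!branch_taken(IntCmpKind::GtU, 3, 9));
}
```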

* x64: Swap operands for `Inst::cmp_rmi_r`

* x64: Swap operand order of `cmp_rmi_r` ISLE helper

Also update all callers to swap orders as well.

* x64: Swap operand order of `x64_test` helper

* Swap operand order of `x64_cmp_imm` helper

* x64: Swap operand order of `cmp`

* x64: Define `x64_cmp_imm` with `x64_cmp`

Minor refactoring which clarifies that this is just a normal `cmp`
except with a different signature.
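
A Rust analogue of what this refactoring expresses (hypothetical helper functions, not the actual ISLE definitions): the immediate variant simply forwards to the general `cmp` helper with its right-hand side wrapped as an immediate.

```rust
// Simplified stand-ins for the ISLE terms involved; for illustration only.
#[derive(Debug)]
enum GprMemImm {
    Gpr(u8),
    Imm(u32),
}

#[derive(Debug)]
struct ProducesFlags {
    src1: u8,        // left-hand side register
    src2: GprMemImm, // right-hand side register/memory/immediate
}

// General comparison helper, in the spirit of `x64_cmp`.
fn x64_cmp(src1: u8, src2: GprMemImm) -> ProducesFlags {
    ProducesFlags { src1, src2 }
}

// Immediate variant defined in terms of the general helper, mirroring the
// new `x64_cmp_imm` rule below.
fn x64_cmp_imm(src1: u8, imm: u32) -> ProducesFlags {
    x64_cmp(src1, GprMemImm::Imm(imm))
}

fn main() {
    println!("{:?}", x64_cmp_imm(0, 64));            // cmp reg0, $64
    println!("{:?}", x64_cmp(1, GprMemImm::Gpr(2))); // cmp reg1, reg2
}
```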

* x64: Use `x64_cmp_imm` in a few more locations

A bit easier on the eyes to read.
Alex Crichton · committed by GitHub · commit 1fa8de140f
Files changed:

  1. cranelift/codegen/src/isa/x64/abi.rs (2 lines changed)
  2. cranelift/codegen/src/isa/x64/inst.isle (37 lines changed)
  3. cranelift/codegen/src/isa/x64/inst/emit.rs (18 lines changed)
  4. cranelift/codegen/src/isa/x64/inst/emit_tests.rs (108 lines changed)
  5. cranelift/codegen/src/isa/x64/inst/mod.rs (27 lines changed)
  6. cranelift/codegen/src/isa/x64/lower.isle (34 lines changed)
  7. cranelift/codegen/src/isa/x64/pcc.rs (15 lines changed)
  8. winch/codegen/src/codegen/mod.rs (12 lines changed)
  9. winch/codegen/src/isa/aarch64/masm.rs (6 lines changed)
  10. winch/codegen/src/isa/x64/asm.rs (22 lines changed)
  11. winch/codegen/src/isa/x64/masm.rs (26 lines changed)
  12. winch/codegen/src/masm.rs (17 lines changed)
  13. winch/codegen/src/visitor.rs (2 lines changed)

cranelift/codegen/src/isa/x64/abi.rs (2 lines changed)

@ -469,7 +469,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Self::I> {
smallvec![
Inst::cmp_rmi_r(OperandSize::Size64, RegMemImm::reg(regs::rsp()), limit_reg),
Inst::cmp_rmi_r(OperandSize::Size64, limit_reg, RegMemImm::reg(regs::rsp())),
Inst::TrapIf {
// NBE == "> unsigned"; args above are reversed; this tests limit_reg > rsp.
cc: CC::NBE,

cranelift/codegen/src/isa/x64/inst.isle (37 lines changed)

@ -218,8 +218,8 @@
;; Integer comparisons/tests: cmp or test (b w l q) (reg addr imm) reg.
(CmpRmiR (size OperandSize) ;; 1, 2, 4, or 8
(opcode CmpOpcode)
(src GprMemImm)
(dst Gpr))
(src1 Gpr)
(src2 GprMemImm))
;; Materializes the requested condition code in the destination reg.
(Setcc (cc CC)
@ -2800,7 +2800,7 @@
dst))
;; Helper for creating `MInst.CmpRmiR` instructions.
(decl cmp_rmi_r (OperandSize CmpOpcode GprMemImm Gpr) ProducesFlags)
(decl cmp_rmi_r (OperandSize CmpOpcode Gpr GprMemImm) ProducesFlags)
(rule (cmp_rmi_r size opcode src1 src2)
(ProducesFlags.ProducesFlagsSideEffect
(MInst.CmpRmiR size
@ -2809,14 +2809,14 @@
src2)))
;; Helper for creating `cmp` instructions.
(decl x64_cmp (OperandSize GprMemImm Gpr) ProducesFlags)
(decl x64_cmp (OperandSize Gpr GprMemImm) ProducesFlags)
(rule (x64_cmp size src1 src2)
(cmp_rmi_r size (CmpOpcode.Cmp) src1 src2))
;; Helper for creating `cmp` instructions with an immediate.
(decl x64_cmp_imm (OperandSize u32 Gpr) ProducesFlags)
(decl x64_cmp_imm (OperandSize Gpr u32) ProducesFlags)
(rule (x64_cmp_imm size src1 src2)
(cmp_rmi_r size (CmpOpcode.Cmp) (RegMemImm.Imm src1) src2))
(x64_cmp size src1 (RegMemImm.Imm src2)))
;; Helper for creating `MInst.XmmCmpRmR` instructions.
(decl xmm_cmp_rm_r (SseOpcode Xmm XmmMem) ProducesFlags)
@ -2844,7 +2844,7 @@
(xmm_cmp_rm_r_vex (AvxOpcode.Vucomisd) src1 src2))
;; Helper for creating `test` instructions.
(decl x64_test (OperandSize GprMemImm Gpr) ProducesFlags)
(decl x64_test (OperandSize Gpr GprMemImm) ProducesFlags)
(rule (x64_test size src1 src2)
(cmp_rmi_r size (CmpOpcode.Test) src1 src2))
@ -4766,17 +4766,16 @@
;; For GPR-held values we only need to emit `CMP + SETCC`. We rely here on
;; Cranelift's verification that `a` and `b` are of the same type.
;; Unfortunately for clarity, the registers are flipped here (TODO).
(rule 0 (emit_cmp cc a @ (value_type ty) b)
(let ((size OperandSize (raw_operand_size_of_type ty)))
(icmp_cond_result (x64_cmp size b a) cc)))
(icmp_cond_result (x64_cmp size a b) cc)))
;; As a special case, swap the arguments to the comparison when the LHS is a
;; constant. This ensures that we avoid moving the constant into a register when
;; performing the comparison.
(rule 1 (emit_cmp cc (and (simm32_from_value a) (value_type ty)) b)
(let ((size OperandSize (raw_operand_size_of_type ty)))
(icmp_cond_result (x64_cmp size a b) (intcc_swap_args cc))))
(icmp_cond_result (x64_cmp size b a) (intcc_swap_args cc))))
;; Special case: use the test instruction for comparisons with 0.
(rule 2 (emit_cmp cc a @ (value_type ty) (u64_from_iconst 0))
@ -4796,8 +4795,8 @@
(a_hi Gpr (value_regs_get_gpr a 1))
(b_lo Gpr (value_regs_get_gpr b 0))
(b_hi Gpr (value_regs_get_gpr b 1))
(cmp_lo Reg (with_flags_reg (x64_cmp (OperandSize.Size64) b_lo a_lo) (x64_setcc (CC.Z))))
(cmp_hi Reg (with_flags_reg (x64_cmp (OperandSize.Size64) b_hi a_hi) (x64_setcc (CC.Z))))
(cmp_lo Reg (with_flags_reg (x64_cmp (OperandSize.Size64) a_lo b_lo) (x64_setcc (CC.Z))))
(cmp_hi Reg (with_flags_reg (x64_cmp (OperandSize.Size64) a_hi b_hi) (x64_setcc (CC.Z))))
;; At this point, `cmp_lo` and `cmp_hi` contain either 0 or 1 in the
;; lowest 8 bits--`SETcc` guarantees this. The upper bits may be
;; unchanged so we must compare against 1 below; this instruction
@ -4809,7 +4808,7 @@
;; the halves `AND`s to 0, they were not equal, therefore we `SETcc`
;; with `NZ`.
(icmp_cond_result
(x64_test (OperandSize.Size64) (RegMemImm.Imm 1) cmp)
(x64_test (OperandSize.Size64) cmp (RegMemImm.Imm 1))
(CC.NZ))))
(rule 5 (emit_cmp (IntCC.NotEqual) a @ (value_type $I128) b)
@ -4817,12 +4816,12 @@
(a_hi Gpr (value_regs_get_gpr a 1))
(b_lo Gpr (value_regs_get_gpr b 0))
(b_hi Gpr (value_regs_get_gpr b 1))
(cmp_lo Reg (with_flags_reg (x64_cmp (OperandSize.Size64) b_lo a_lo) (x64_setcc (CC.NZ))))
(cmp_hi Reg (with_flags_reg (x64_cmp (OperandSize.Size64) b_hi a_hi) (x64_setcc (CC.NZ))))
(cmp_lo Reg (with_flags_reg (x64_cmp (OperandSize.Size64) a_lo b_lo) (x64_setcc (CC.NZ))))
(cmp_hi Reg (with_flags_reg (x64_cmp (OperandSize.Size64) a_hi b_hi) (x64_setcc (CC.NZ))))
;; See comments for `IntCC.Equal`.
(cmp Reg (x64_or $I64 cmp_lo cmp_hi)))
(icmp_cond_result
(x64_test (OperandSize.Size64) (RegMemImm.Imm 1) cmp)
(x64_test (OperandSize.Size64) cmp (RegMemImm.Imm 1))
(CC.NZ))))
;; Result = (a_hi <> b_hi) ||
@ -4832,20 +4831,20 @@
(a_hi Gpr (value_regs_get_gpr a 1))
(b_lo Gpr (value_regs_get_gpr b 0))
(b_hi Gpr (value_regs_get_gpr b 1))
(cmp_hi ValueRegs (with_flags (x64_cmp (OperandSize.Size64) b_hi a_hi)
(cmp_hi ValueRegs (with_flags (x64_cmp (OperandSize.Size64) a_hi b_hi)
(consumes_flags_concat
(x64_setcc (intcc_without_eq cc))
(x64_setcc (CC.Z)))))
(cc_hi Reg (value_regs_get cmp_hi 0))
(eq_hi Reg (value_regs_get cmp_hi 1))
(cmp_lo Reg (with_flags_reg (x64_cmp (OperandSize.Size64) b_lo a_lo)
(cmp_lo Reg (with_flags_reg (x64_cmp (OperandSize.Size64) a_lo b_lo)
(x64_setcc (intcc_unsigned cc))))
(res_lo Reg (x64_and $I64 eq_hi cmp_lo))
(res Reg (x64_or $I64 cc_hi res_lo)))
(icmp_cond_result
(x64_test (OperandSize.Size64) (RegMemImm.Imm 1) res)
(x64_test (OperandSize.Size64) res (RegMemImm.Imm 1))
(CC.NZ))))
(type FcmpCondResult

cranelift/codegen/src/isa/x64/inst/emit.rs (18 lines changed)

@ -765,7 +765,7 @@ pub(crate) fn emit(
// Check if the divisor is -1, and if it isn't then immediately
// go to the `idiv`.
let inst = Inst::cmp_rmi_r(size, RegMemImm::imm(0xffffffff), divisor);
let inst = Inst::cmp_rmi_r(size, divisor, RegMemImm::imm(0xffffffff));
inst.emit(&[], sink, info, state);
one_way_jmp(sink, CC::NZ, do_op);
@ -1278,8 +1278,8 @@ pub(crate) fn emit(
Inst::CmpRmiR {
size,
src: src_e,
dst: reg_g,
src1: reg_g,
src2: src_e,
opcode,
} => {
let reg_g = allocs.next(reg_g.to_reg());
@ -1580,8 +1580,8 @@ pub(crate) fn emit(
// cmp rsp, tmp_reg
let inst = Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::reg(regs::rsp()),
tmp.to_reg(),
RegMemImm::reg(regs::rsp()),
);
inst.emit(&[], sink, info, state);
@ -3335,7 +3335,7 @@ pub(crate) fn emit(
// If x seen as a signed int64 is not negative, a signed-conversion will do the right
// thing.
// TODO use tst src, src here.
let inst = Inst::cmp_rmi_r(OperandSize::Size64, RegMemImm::imm(0), src);
let inst = Inst::cmp_rmi_r(OperandSize::Size64, src, RegMemImm::imm(0));
inst.emit(&[], sink, info, state);
one_way_jmp(sink, CC::L, handle_negative);
@ -3483,7 +3483,7 @@ pub(crate) fn emit(
inst.emit(&[], sink, info, state);
// Compare against 1, in case of overflow the dst operand was INT_MIN.
let inst = Inst::cmp_rmi_r(*dst_size, RegMemImm::imm(1), dst);
let inst = Inst::cmp_rmi_r(*dst_size, dst, RegMemImm::imm(1));
inst.emit(&[], sink, info, state);
one_way_jmp(sink, CC::NO, done); // no overflow => done
@ -3730,7 +3730,7 @@ pub(crate) fn emit(
let inst = Inst::xmm_to_gpr(trunc_op, src, Writable::from_reg(dst), *dst_size);
inst.emit(&[], sink, info, state);
let inst = Inst::cmp_rmi_r(*dst_size, RegMemImm::imm(0), dst);
let inst = Inst::cmp_rmi_r(*dst_size, dst, RegMemImm::imm(0));
inst.emit(&[], sink, info, state);
one_way_jmp(sink, CC::NL, done); // if dst >= 0, jump to done
@ -3767,7 +3767,7 @@ pub(crate) fn emit(
let inst = Inst::xmm_to_gpr(trunc_op, tmp_xmm2, Writable::from_reg(dst), *dst_size);
inst.emit(&[], sink, info, state);
let inst = Inst::cmp_rmi_r(*dst_size, RegMemImm::imm(0), dst);
let inst = Inst::cmp_rmi_r(*dst_size, dst, RegMemImm::imm(0));
inst.emit(&[], sink, info, state);
if *is_saturating {
@ -3969,8 +3969,8 @@ pub(crate) fn emit(
// cmp %r_temp, %r_operand
let i3 = Inst::cmp_rmi_r(
OperandSize::from_ty(*ty),
RegMemImm::reg(temp.to_reg()),
operand,
RegMemImm::reg(temp.to_reg()),
);
i3.emit(&[], sink, info, state);

cranelift/codegen/src/isa/x64/inst/emit_tests.rs (108 lines changed)

@ -3241,25 +3241,25 @@ fn test_x64_emit() {
// ========================================================
// CmpRMIR
insns.push((
Inst::cmp_rmi_r(OperandSize::Size64, RegMemImm::reg(r15), rdx),
Inst::cmp_rmi_r(OperandSize::Size64, rdx, RegMemImm::reg(r15)),
"4C39FA",
"cmpq %r15, %rdx",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size64, RegMemImm::reg(rcx), r8),
Inst::cmp_rmi_r(OperandSize::Size64, r8, RegMemImm::reg(rcx)),
"4939C8",
"cmpq %rcx, %r8",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size64, RegMemImm::reg(rcx), rsi),
Inst::cmp_rmi_r(OperandSize::Size64, rsi, RegMemImm::reg(rcx)),
"4839CE",
"cmpq %rcx, %rsi",
));
insns.push((
Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
rdx,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
),
"483B5763",
"cmpq 99(%rdi), %rdx",
@ -3267,8 +3267,8 @@ fn test_x64_emit() {
insns.push((
Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
r8,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
),
"4C3B4763",
"cmpq 99(%rdi), %r8",
@ -3276,48 +3276,48 @@ fn test_x64_emit() {
insns.push((
Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
rsi,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
),
"483B7763",
"cmpq 99(%rdi), %rsi",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size64, RegMemImm::imm(76543210), rdx),
Inst::cmp_rmi_r(OperandSize::Size64, rdx, RegMemImm::imm(76543210)),
"4881FAEAF48F04",
"cmpq $76543210, %rdx",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size64, RegMemImm::imm(-76543210i32 as u32), r8),
Inst::cmp_rmi_r(OperandSize::Size64, r8, RegMemImm::imm(-76543210i32 as u32)),
"4981F8160B70FB",
"cmpq $-76543210, %r8",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size64, RegMemImm::imm(76543210), rsi),
Inst::cmp_rmi_r(OperandSize::Size64, rsi, RegMemImm::imm(76543210)),
"4881FEEAF48F04",
"cmpq $76543210, %rsi",
));
//
insns.push((
Inst::cmp_rmi_r(OperandSize::Size32, RegMemImm::reg(r15), rdx),
Inst::cmp_rmi_r(OperandSize::Size32, rdx, RegMemImm::reg(r15)),
"4439FA",
"cmpl %r15d, %edx",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size32, RegMemImm::reg(rcx), r8),
Inst::cmp_rmi_r(OperandSize::Size32, r8, RegMemImm::reg(rcx)),
"4139C8",
"cmpl %ecx, %r8d",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size32, RegMemImm::reg(rcx), rsi),
Inst::cmp_rmi_r(OperandSize::Size32, rsi, RegMemImm::reg(rcx)),
"39CE",
"cmpl %ecx, %esi",
));
insns.push((
Inst::cmp_rmi_r(
OperandSize::Size32,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
rdx,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
),
"3B5763",
"cmpl 99(%rdi), %edx",
@ -3325,8 +3325,8 @@ fn test_x64_emit() {
insns.push((
Inst::cmp_rmi_r(
OperandSize::Size32,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
r8,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
),
"443B4763",
"cmpl 99(%rdi), %r8d",
@ -3334,48 +3334,48 @@ fn test_x64_emit() {
insns.push((
Inst::cmp_rmi_r(
OperandSize::Size32,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
rsi,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
),
"3B7763",
"cmpl 99(%rdi), %esi",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size32, RegMemImm::imm(76543210), rdx),
Inst::cmp_rmi_r(OperandSize::Size32, rdx, RegMemImm::imm(76543210)),
"81FAEAF48F04",
"cmpl $76543210, %edx",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size32, RegMemImm::imm(-76543210i32 as u32), r8),
Inst::cmp_rmi_r(OperandSize::Size32, r8, RegMemImm::imm(-76543210i32 as u32)),
"4181F8160B70FB",
"cmpl $-76543210, %r8d",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size32, RegMemImm::imm(76543210), rsi),
Inst::cmp_rmi_r(OperandSize::Size32, rsi, RegMemImm::imm(76543210)),
"81FEEAF48F04",
"cmpl $76543210, %esi",
));
//
insns.push((
Inst::cmp_rmi_r(OperandSize::Size16, RegMemImm::reg(r15), rdx),
Inst::cmp_rmi_r(OperandSize::Size16, rdx, RegMemImm::reg(r15)),
"664439FA",
"cmpw %r15w, %dx",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size16, RegMemImm::reg(rcx), r8),
Inst::cmp_rmi_r(OperandSize::Size16, r8, RegMemImm::reg(rcx)),
"664139C8",
"cmpw %cx, %r8w",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size16, RegMemImm::reg(rcx), rsi),
Inst::cmp_rmi_r(OperandSize::Size16, rsi, RegMemImm::reg(rcx)),
"6639CE",
"cmpw %cx, %si",
));
insns.push((
Inst::cmp_rmi_r(
OperandSize::Size16,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
rdx,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
),
"663B5763",
"cmpw 99(%rdi), %dx",
@ -3383,8 +3383,8 @@ fn test_x64_emit() {
insns.push((
Inst::cmp_rmi_r(
OperandSize::Size16,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
r8,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
),
"66443B4763",
"cmpw 99(%rdi), %r8w",
@ -3392,48 +3392,48 @@ fn test_x64_emit() {
insns.push((
Inst::cmp_rmi_r(
OperandSize::Size16,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
rsi,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
),
"663B7763",
"cmpw 99(%rdi), %si",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size16, RegMemImm::imm(23210), rdx),
Inst::cmp_rmi_r(OperandSize::Size16, rdx, RegMemImm::imm(23210)),
"6681FAAA5A",
"cmpw $23210, %dx",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size16, RegMemImm::imm(-7654i32 as u32), r8),
Inst::cmp_rmi_r(OperandSize::Size16, r8, RegMemImm::imm(-7654i32 as u32)),
"664181F81AE2",
"cmpw $-7654, %r8w",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size16, RegMemImm::imm(7654), rsi),
Inst::cmp_rmi_r(OperandSize::Size16, rsi, RegMemImm::imm(7654)),
"6681FEE61D",
"cmpw $7654, %si",
));
//
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(r15), rdx),
Inst::cmp_rmi_r(OperandSize::Size8, rdx, RegMemImm::reg(r15)),
"4438FA",
"cmpb %r15b, %dl",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(rcx), r8),
Inst::cmp_rmi_r(OperandSize::Size8, r8, RegMemImm::reg(rcx)),
"4138C8",
"cmpb %cl, %r8b",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(rcx), rsi),
Inst::cmp_rmi_r(OperandSize::Size8, rsi, RegMemImm::reg(rcx)),
"4038CE",
"cmpb %cl, %sil",
));
insns.push((
Inst::cmp_rmi_r(
OperandSize::Size8,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
rdx,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
),
"3A5763",
"cmpb 99(%rdi), %dl",
@ -3441,8 +3441,8 @@ fn test_x64_emit() {
insns.push((
Inst::cmp_rmi_r(
OperandSize::Size8,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
r8,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
),
"443A4763",
"cmpb 99(%rdi), %r8b",
@ -3450,115 +3450,115 @@ fn test_x64_emit() {
insns.push((
Inst::cmp_rmi_r(
OperandSize::Size8,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
rsi,
RegMemImm::mem(Amode::imm_reg(99, rdi)),
),
"403A7763",
"cmpb 99(%rdi), %sil",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::imm(70), rdx),
Inst::cmp_rmi_r(OperandSize::Size8, rdx, RegMemImm::imm(70)),
"80FA46",
"cmpb $70, %dl",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::imm(-76i32 as u32), r8),
Inst::cmp_rmi_r(OperandSize::Size8, r8, RegMemImm::imm(-76i32 as u32)),
"4180F8B4",
"cmpb $-76, %r8b",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::imm(76), rsi),
Inst::cmp_rmi_r(OperandSize::Size8, rsi, RegMemImm::imm(76)),
"4080FE4C",
"cmpb $76, %sil",
));
// Extra byte-cases (paranoia!) for cmp_rmi_r for first operand = R
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(rax), rbx),
Inst::cmp_rmi_r(OperandSize::Size8, rbx, RegMemImm::reg(rax)),
"38C3",
"cmpb %al, %bl",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(rbx), rax),
Inst::cmp_rmi_r(OperandSize::Size8, rax, RegMemImm::reg(rbx)),
"38D8",
"cmpb %bl, %al",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(rcx), rdx),
Inst::cmp_rmi_r(OperandSize::Size8, rdx, RegMemImm::reg(rcx)),
"38CA",
"cmpb %cl, %dl",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(rcx), rsi),
Inst::cmp_rmi_r(OperandSize::Size8, rsi, RegMemImm::reg(rcx)),
"4038CE",
"cmpb %cl, %sil",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(rcx), r10),
Inst::cmp_rmi_r(OperandSize::Size8, r10, RegMemImm::reg(rcx)),
"4138CA",
"cmpb %cl, %r10b",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(rcx), r14),
Inst::cmp_rmi_r(OperandSize::Size8, r14, RegMemImm::reg(rcx)),
"4138CE",
"cmpb %cl, %r14b",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(rbp), rdx),
Inst::cmp_rmi_r(OperandSize::Size8, rdx, RegMemImm::reg(rbp)),
"4038EA",
"cmpb %bpl, %dl",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(rbp), rsi),
Inst::cmp_rmi_r(OperandSize::Size8, rsi, RegMemImm::reg(rbp)),
"4038EE",
"cmpb %bpl, %sil",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(rbp), r10),
Inst::cmp_rmi_r(OperandSize::Size8, r10, RegMemImm::reg(rbp)),
"4138EA",
"cmpb %bpl, %r10b",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(rbp), r14),
Inst::cmp_rmi_r(OperandSize::Size8, r14, RegMemImm::reg(rbp)),
"4138EE",
"cmpb %bpl, %r14b",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(r9), rdx),
Inst::cmp_rmi_r(OperandSize::Size8, rdx, RegMemImm::reg(r9)),
"4438CA",
"cmpb %r9b, %dl",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(r9), rsi),
Inst::cmp_rmi_r(OperandSize::Size8, rsi, RegMemImm::reg(r9)),
"4438CE",
"cmpb %r9b, %sil",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(r9), r10),
Inst::cmp_rmi_r(OperandSize::Size8, r10, RegMemImm::reg(r9)),
"4538CA",
"cmpb %r9b, %r10b",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(r9), r14),
Inst::cmp_rmi_r(OperandSize::Size8, r14, RegMemImm::reg(r9)),
"4538CE",
"cmpb %r9b, %r14b",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(r13), rdx),
Inst::cmp_rmi_r(OperandSize::Size8, rdx, RegMemImm::reg(r13)),
"4438EA",
"cmpb %r13b, %dl",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(r13), rsi),
Inst::cmp_rmi_r(OperandSize::Size8, rsi, RegMemImm::reg(r13)),
"4438EE",
"cmpb %r13b, %sil",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(r13), r10),
Inst::cmp_rmi_r(OperandSize::Size8, r10, RegMemImm::reg(r13)),
"4538EA",
"cmpb %r13b, %r10b",
));
insns.push((
Inst::cmp_rmi_r(OperandSize::Size8, RegMemImm::reg(r13), r14),
Inst::cmp_rmi_r(OperandSize::Size8, r14, RegMemImm::reg(r13)),
"4538EE",
"cmpb %r13b, %r14b",
));

cranelift/codegen/src/isa/x64/inst/mod.rs (27 lines changed)

@ -490,13 +490,13 @@ impl Inst {
/// Does a comparison of dst - src for operands of size `size`, as stated by the machine
/// instruction semantics. Be careful with the order of parameters!
pub(crate) fn cmp_rmi_r(size: OperandSize, src: RegMemImm, dst: Reg) -> Inst {
src.assert_regclass_is(RegClass::Int);
debug_assert_eq!(dst.class(), RegClass::Int);
pub(crate) fn cmp_rmi_r(size: OperandSize, src1: Reg, src2: RegMemImm) -> Inst {
src2.assert_regclass_is(RegClass::Int);
debug_assert_eq!(src1.class(), RegClass::Int);
Inst::CmpRmiR {
size,
src: GprMemImm::new(src).unwrap(),
dst: Gpr::new(dst).unwrap(),
src1: Gpr::new(src1).unwrap(),
src2: GprMemImm::new(src2).unwrap(),
opcode: CmpOpcode::Cmp,
}
}
@ -1572,18 +1572,18 @@ impl PrettyPrint for Inst {
Inst::CmpRmiR {
size,
src,
dst,
src1,
src2,
opcode,
} => {
let dst = pretty_print_reg(dst.to_reg(), size.to_bytes(), allocs);
let src = src.pretty_print(size.to_bytes(), allocs);
let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes(), allocs);
let src2 = src2.pretty_print(size.to_bytes(), allocs);
let op = match opcode {
CmpOpcode::Cmp => "cmp",
CmpOpcode::Test => "test",
};
let op = ljustify2(op.to_string(), suffix_bwlq(*size));
format!("{op} {src}, {dst}")
format!("{op} {src2}, {src1}")
}
Inst::Setcc { cc, dst } => {
@ -2284,10 +2284,9 @@ fn x64_get_operands<F: Fn(VReg) -> VReg>(inst: &Inst, collector: &mut OperandCol
collector.reg_fixed_use(reg, regs::rcx());
}
}
Inst::CmpRmiR { src, dst, .. } => {
// N.B.: use, not def (cmp doesn't write its result).
collector.reg_use(dst.to_reg());
src.get_operands(collector);
Inst::CmpRmiR { src1, src2, .. } => {
collector.reg_use(src1.to_reg());
src2.get_operands(collector);
}
Inst::Setcc { dst, .. } => {
collector.reg_def(dst.to_writable_reg());

cranelift/codegen/src/isa/x64/lower.isle (34 lines changed)

@ -518,8 +518,8 @@
(zero Gpr (imm $I64 0))
;; Nullify the carry if we are shifting in by a multiple of 128.
(carry_ Gpr (with_flags_reg (x64_test (OperandSize.Size64)
(RegMemImm.Imm 127)
amt)
amt
(RegMemImm.Imm 127))
(cmove $I64
(CC.Z)
zero
@ -529,7 +529,7 @@
;; Combine the two shifted halves. However, if we are shifting by >= 64
;; (modulo 128), then the low bits are zero and the high bits are our
;; low bits.
(with_flags (x64_test (OperandSize.Size64) (RegMemImm.Imm 64) amt)
(with_flags (x64_test (OperandSize.Size64) amt (RegMemImm.Imm 64))
(consumes_flags_concat
(cmove $I64 (CC.Z) lo_shifted zero)
(cmove $I64 (CC.Z) hi_shifted_ lo_shifted)))))
@ -629,14 +629,14 @@
(zero Gpr (imm $I64 0))
;; Nullify the carry if we are shifting by a multiple of 128.
(carry_ Gpr (with_flags_reg (x64_test (OperandSize.Size64) (RegMemImm.Imm 127) amt)
(carry_ Gpr (with_flags_reg (x64_test (OperandSize.Size64) amt (RegMemImm.Imm 127))
(cmove $I64 (CC.Z) zero carry)))
;; Add the carry bits into the lo.
(lo_shifted_ Gpr (x64_or $I64 carry_ lo_shifted)))
;; Combine the two shifted halves. However, if we are shifting by >= 64
;; (modulo 128), then the hi bits are zero and the lo bits are what
;; would otherwise be our hi bits.
(with_flags (x64_test (OperandSize.Size64) (RegMemImm.Imm 64) amt)
(with_flags (x64_test (OperandSize.Size64) amt (RegMemImm.Imm 64))
(consumes_flags_concat
(cmove $I64 (CC.Z) lo_shifted_ hi_shifted)
(cmove $I64 (CC.Z) hi_shifted zero)))))
@ -739,7 +739,7 @@
(imm $I64 64)
amt)))
;; Nullify the carry if we are shifting by a multiple of 128.
(carry_ Gpr (with_flags_reg (x64_test (OperandSize.Size64) (RegMemImm.Imm 127) amt)
(carry_ Gpr (with_flags_reg (x64_test (OperandSize.Size64) amt (RegMemImm.Imm 127))
(cmove $I64 (CC.Z) (imm $I64 0) carry)))
;; Add the carry into the low half.
(lo_shifted_ Gpr (x64_or $I64 lo_shifted carry_))
@ -748,7 +748,7 @@
;; Combine the two shifted halves. However, if we are shifting by >= 64
;; (modulo 128), then the hi bits are all sign bits and the lo bits are
;; what would otherwise be our hi bits.
(with_flags (x64_test (OperandSize.Size64) (RegMemImm.Imm 64) amt)
(with_flags (x64_test (OperandSize.Size64) amt (RegMemImm.Imm 64))
(consumes_flags_concat
(cmove $I64 (CC.Z) lo_shifted_ hi_shifted)
(cmove $I64 (CC.Z) hi_shifted sign_bits)))))
@ -1659,7 +1659,7 @@
;; allow for load-op merging, here we cannot do that.
(x_reg Reg x)
(y_reg Reg y))
(with_flags_reg (x64_cmp size x_reg y_reg)
(with_flags_reg (x64_cmp size y_reg x_reg)
(cmove ty cc y_reg x_reg))))
(rule -1 (lower (has_type (fits_in_64 ty) (umin x y)))
@ -2154,7 +2154,7 @@
(RegMemImm.Imm 64)))
(result_lo Gpr
(with_flags_reg
(x64_cmp_imm (OperandSize.Size64) 64 upper)
(x64_cmp_imm (OperandSize.Size64) upper 64)
(cmove $I64 (CC.NZ) upper lower))))
(value_regs result_lo (imm $I64 0))))
@ -2191,7 +2191,7 @@
(RegMemImm.Imm 64)))
(result_lo Gpr
(with_flags_reg
(x64_cmp_imm (OperandSize.Size64) 64 lower)
(x64_cmp_imm (OperandSize.Size64) lower 64)
(cmove $I64 (CC.Z) upper lower))))
(value_regs result_lo (imm $I64 0))))
@ -2452,7 +2452,7 @@
;; Null references are represented by the constant value `0`.
(rule (lower (is_null src @ (value_type $R64)))
(with_flags
(x64_cmp_imm (OperandSize.Size64) 0 src)
(x64_cmp_imm (OperandSize.Size64) src 0)
(x64_setcc (CC.Z))))
;; Rules for `is_invalid` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@ -2460,7 +2460,7 @@
;; Invalid references are represented by the constant value `-1`.
(rule (lower (is_invalid src @ (value_type $R64)))
(with_flags
(x64_cmp_imm (OperandSize.Size64) 0xffffffff src) ;; simm32 0xffff_ffff is sign-extended to -1.
(x64_cmp_imm (OperandSize.Size64) src 0xffffffff) ;; simm32 0xffff_ffff is sign-extended to -1.
(x64_setcc (CC.Z))))
@ -3332,11 +3332,11 @@
(rule (cmp_zero_i128 (cc_nz_or_z cc) val)
(let ((lo Gpr (value_regs_get_gpr val 0))
(hi Gpr (value_regs_get_gpr val 1))
(lo_z Gpr (with_flags_reg (x64_cmp (OperandSize.Size64) (RegMemImm.Imm 0) lo)
(lo_z Gpr (with_flags_reg (x64_cmp_imm (OperandSize.Size64) lo 0)
(x64_setcc (CC.Z))))
(hi_z Gpr (with_flags_reg (x64_cmp (OperandSize.Size64) (RegMemImm.Imm 0) hi)
(hi_z Gpr (with_flags_reg (x64_cmp_imm (OperandSize.Size64) hi 0)
(x64_setcc (CC.Z)))))
(icmp_cond_result (x64_test (OperandSize.Size8) lo_z hi_z) cc)))
(icmp_cond_result (x64_test (OperandSize.Size8) hi_z lo_z) cc)))
(decl cmp_zero_int_bool_ref (Value) ProducesFlags)
@ -3353,7 +3353,7 @@
(size_reg Reg (imm ty (u32_as_u64 jt_size)))
(idx_reg Gpr (extend_to_gpr idx $I64 (ExtendKind.Zero)))
(clamped_idx Reg (with_flags_reg
(x64_cmp size size_reg idx_reg)
(x64_cmp size idx_reg size_reg)
(cmove ty (CC.B) idx_reg size_reg))))
(emit_side_effect (jmp_table_seq ty clamped_idx default_target jt_targets))))
@ -4639,7 +4639,7 @@
(any_byte_zero Xmm (x64_pcmpeqb val (xmm_zero $I8X16)))
(mask Gpr (x64_pmovmskb (OperandSize.Size32) any_byte_zero))
)
(with_flags (x64_cmp (OperandSize.Size32) (RegMemImm.Imm 0xffff) mask)
(with_flags (x64_cmp_imm (OperandSize.Size32) mask 0xffff)
(x64_setcc (CC.NZ)))))
;; Rules for `vall_true` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

cranelift/codegen/src/isa/x64/pcc.rs (15 lines changed)

@ -438,15 +438,18 @@ pub(crate) fn check(
}
Inst::CmpRmiR {
size, dst, ref src, ..
} => match <&RegMemImm>::from(src) {
size,
src1,
ref src2,
..
} => match <&RegMemImm>::from(src2) {
RegMemImm::Mem {
addr: SyntheticAmode::ConstantOffset(k),
} => {
match vcode.constants.get(*k) {
VCodeConstantData::U64(bytes) => {
let value = u64::from_le_bytes(*bytes);
let lhs = get_fact_or_default(vcode, dst.to_reg(), 64);
let lhs = get_fact_or_default(vcode, src1.to_reg(), 64);
let rhs = Fact::constant(64, value);
state.cmp_flags = Some((lhs, rhs));
}
@ -456,19 +459,19 @@ pub(crate) fn check(
}
RegMemImm::Mem { ref addr } => {
if let Some(rhs) = check_load(ctx, None, addr, vcode, size.to_type(), 64)? {
let lhs = get_fact_or_default(vcode, dst.to_reg(), 64);
let lhs = get_fact_or_default(vcode, src1.to_reg(), 64);
state.cmp_flags = Some((lhs, rhs));
}
Ok(())
}
RegMemImm::Reg { reg } => {
let rhs = get_fact_or_default(vcode, *reg, 64);
let lhs = get_fact_or_default(vcode, dst.to_reg(), 64);
let lhs = get_fact_or_default(vcode, src1.to_reg(), 64);
state.cmp_flags = Some((lhs, rhs));
Ok(())
}
RegMemImm::Imm { simm32 } => {
let lhs = get_fact_or_default(vcode, dst.to_reg(), 64);
let lhs = get_fact_or_default(vcode, src1.to_reg(), 64);
let rhs = Fact::constant(64, (*simm32 as i32) as i64 as u64);
state.cmp_flags = Some((lhs, rhs));
Ok(())

winch/codegen/src/codegen/mod.rs (12 lines changed)

@ -356,7 +356,7 @@ where
);
// Typecheck.
self.masm.cmp(callee_id.into(), caller_id, OperandSize::S32);
self.masm.cmp(caller_id, callee_id.into(), OperandSize::S32);
self.masm.trapif(IntCmpKind::Ne, TrapCode::BadSignature);
self.context.free_reg(callee_id);
self.context.free_reg(caller_id);
@ -492,8 +492,8 @@ where
self.masm.branch(
IntCmpKind::Ne,
elem_value.into(),
elem_value,
elem_value.into(),
defined,
ptr_type.into(),
);
@ -613,8 +613,8 @@ where
|masm, bounds, _| {
let bounds_reg = bounds.as_typed_reg().reg;
masm.cmp(
bounds_reg.into(),
index_offset_and_access_size.into(),
bounds_reg.into(),
heap.ty.into(),
);
IntCmpKind::GtU
@ -697,8 +697,8 @@ where
let adjusted_bounds = bounds.as_u64() - offset_with_access_size;
let index_reg = index.as_typed_reg().reg;
masm.cmp(
RegImm::i64(adjusted_bounds as i64),
index_reg,
RegImm::i64(adjusted_bounds as i64),
heap.ty.into(),
);
IntCmpKind::GtU
@ -776,7 +776,7 @@ where
.address_at_reg(base, table_data.current_elems_offset);
let bound_size = table_data.current_elements_size;
self.masm.load(bound_addr, bound, bound_size.into());
self.masm.cmp(bound.into(), index, bound_size);
self.masm.cmp(index, bound.into(), bound_size);
self.masm
.trapif(IntCmpKind::GeU, TrapCode::TableOutOfBounds);
@ -801,7 +801,7 @@ where
if self.env.table_access_spectre_mitigation() {
// Perform a bounds check and override the value of the
// table element address in case the index is out of bounds.
self.masm.cmp(bound.into(), index, OperandSize::S32);
self.masm.cmp(index, bound.into(), OperandSize::S32);
self.masm.cmov(tmp, base, IntCmpKind::GeU, ptr_size);
}
self.context.free_reg(bound);

winch/codegen/src/isa/aarch64/masm.rs (6 lines changed)

@ -452,7 +452,7 @@ impl Masm for MacroAssembler {
todo!()
}
fn cmp(&mut self, _src: RegImm, _dest: Reg, _size: OperandSize) {
fn cmp(&mut self, _src1: Reg, _src2: RegImm, _size: OperandSize) {
todo!()
}
@ -495,8 +495,8 @@ impl Masm for MacroAssembler {
fn branch(
&mut self,
_kind: IntCmpKind,
_lhs: RegImm,
_rhs: Reg,
_lhs: Reg,
_rhs: RegImm,
_taken: MachLabel,
_size: OperandSize,
) {

winch/codegen/src/isa/x64/asm.rs (22 lines changed)

@ -835,8 +835,8 @@ impl Assembler {
DivKind::Signed => {
self.emit(Inst::CmpRmiR {
size: size.into(),
src: GprMemImm::new(RegMemImm::imm(0)).unwrap(),
dst: divisor.into(),
src1: divisor.into(),
src2: GprMemImm::new(RegMemImm::imm(0)).unwrap(),
opcode: CmpOpcode::Cmp,
});
self.emit(Inst::TrapIf {
@ -975,23 +975,23 @@ impl Assembler {
});
}
pub fn cmp_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) {
pub fn cmp_ir(&mut self, src1: Reg, imm: i32, size: OperandSize) {
let imm = RegMemImm::imm(imm as u32);
self.emit(Inst::CmpRmiR {
size: size.into(),
opcode: CmpOpcode::Cmp,
src: GprMemImm::new(imm).expect("valid immediate"),
dst: dst.into(),
src1: src1.into(),
src2: GprMemImm::new(imm).expect("valid immediate"),
});
}
pub fn cmp_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) {
pub fn cmp_rr(&mut self, src1: Reg, src2: Reg, size: OperandSize) {
self.emit(Inst::CmpRmiR {
size: size.into(),
opcode: CmpOpcode::Cmp,
src: src.into(),
dst: dst.into(),
src1: src1.into(),
src2: src2.into(),
});
}
@ -1025,12 +1025,12 @@ impl Assembler {
}
/// Emit a test instruction with two register operands.
pub fn test_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) {
pub fn test_rr(&mut self, src1: Reg, src2: Reg, size: OperandSize) {
self.emit(Inst::CmpRmiR {
size: size.into(),
opcode: CmpOpcode::Test,
src: src.into(),
dst: dst.into(),
src1: src1.into(),
src2: src2.into(),
})
}

winch/codegen/src/isa/x64/masm.rs (26 lines changed)

@ -93,7 +93,7 @@ impl Masm for MacroAssembler {
self.add_stack_max(scratch);
self.asm.cmp_rr(regs::rsp(), scratch, self.ptr_size);
self.asm.cmp_rr(scratch, regs::rsp(), self.ptr_size);
self.asm.trapif(IntCmpKind::GtU, TrapCode::StackOverflow);
// Emit unwind info.
@ -627,25 +627,25 @@ impl Masm for MacroAssembler {
Address::offset(reg, offset)
}
fn cmp(&mut self, src: RegImm, dst: Reg, size: OperandSize) {
match src {
fn cmp(&mut self, src1: Reg, src2: RegImm, size: OperandSize) {
match src2 {
RegImm::Imm(imm) => {
if let Some(v) = imm.to_i32() {
self.asm.cmp_ir(v, dst, size);
self.asm.cmp_ir(src1, v, size);
} else {
let scratch = regs::scratch();
self.load_constant(&imm, scratch, size);
self.asm.cmp_rr(scratch, dst, size);
self.asm.cmp_rr(src1, scratch, size);
}
}
RegImm::Reg(src) => {
self.asm.cmp_rr(src, dst, size);
RegImm::Reg(src2) => {
self.asm.cmp_rr(src1, src2, size);
}
}
}
fn cmp_with_set(&mut self, src: RegImm, dst: Reg, kind: IntCmpKind, size: OperandSize) {
self.cmp(src, dst, size);
self.cmp(dst, src, size);
self.asm.setcc(kind, dst);
}
@ -748,20 +748,20 @@ impl Masm for MacroAssembler {
fn branch(
&mut self,
kind: IntCmpKind,
lhs: RegImm,
rhs: Reg,
lhs: Reg,
rhs: RegImm,
taken: MachLabel,
size: OperandSize,
) {
use IntCmpKind::*;
match &(lhs, rhs) {
(RegImm::Reg(rlhs), rrhs) => {
(rlhs, RegImm::Reg(rrhs)) => {
// If the comparison kind is zero or not zero and both operands
// are the same register, emit a test instruction. Else we emit
// a normal comparison.
if (kind == Eq || kind == Ne) && (rlhs == rrhs) {
self.asm.test_rr(*rrhs, *rlhs, size);
self.asm.test_rr(*rlhs, *rrhs, size);
} else {
self.cmp(lhs, rhs, size);
}
@ -959,7 +959,7 @@ impl Masm for MacroAssembler {
let max = default_index;
let size = OperandSize::S32;
self.asm.mov_ir(max as u64, tmp, size);
self.asm.cmp_rr(index, tmp, size);
self.asm.cmp_rr(tmp, index, size);
self.asm.cmov(tmp, index, IntCmpKind::LtU, size);
let default = targets[default_index];

winch/codegen/src/masm.rs (17 lines changed)

@ -717,11 +717,20 @@ pub(crate) trait MacroAssembler {
/// Calculate remainder.
fn rem(&mut self, context: &mut CodeGenContext, kind: RemKind, size: OperandSize);
/// Compare src and dst and put the result in dst.
fn cmp(&mut self, src: RegImm, dest: Reg, size: OperandSize);
/// Compares `src1` against `src2` for the side effect of setting processor
/// flags.
///
/// Note that `src1` is the left-hand-side of the comparison and `src2` is
/// the right-hand-side, so if testing `a < b` then `src1 == a` and
/// `src2 == b`
fn cmp(&mut self, src1: Reg, src2: RegImm, size: OperandSize);
/// Compare src and dst and put the result in dst.
/// This function will potentially emit a series of instructions.
///
/// The initial value in `dst` is the left-hand-side of the comparison and
/// the initial value in `src` is the right-hand-side of the comparison.
/// That means for `a < b` then `dst == a` and `src == b`.
fn cmp_with_set(&mut self, src: RegImm, dst: Reg, kind: IntCmpKind, size: OperandSize);
/// Compare floats in src1 and src2 and put the result in dst.
@ -881,8 +890,8 @@ pub(crate) trait MacroAssembler {
fn branch(
&mut self,
kind: IntCmpKind,
lhs: RegImm,
rhs: Reg,
lhs: Reg,
rhs: RegImm,
taken: MachLabel,
size: OperandSize,
);

winch/codegen/src/visitor.rs (2 lines changed)

@ -1885,7 +1885,7 @@ where
let val2 = self.context.pop_to_reg(self.masm, None);
let val1 = self.context.pop_to_reg(self.masm, None);
self.masm
.cmp(RegImm::i32(0), cond.reg.into(), OperandSize::S32);
.cmp(cond.reg.into(), RegImm::i32(0), OperandSize::S32);
// Conditionally move val1 to val2 if the comparison is
// not zero.
self.masm
