
cranelift: Use `x64_` prefix to disambiguate with clif in ISLE

This replaces the `m_` prefix we used before, which was short for "mach inst"
but was neither obvious nor clear.
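In short, the x64 backend's instruction-constructor helpers are now named after the backend rather than after "mach inst". A minimal before/after sketch, with the declarations and one lowering rule taken from the hunks below:

    ;; Before: `m_` ("mach inst") helpers, easy to confuse with the ISLE
    ;; built-in `and` operator and with clif opcodes such as `rotl`/`rotr`.
    (decl m_and (Type Reg RegMemImm) Reg)
    (decl m_rotl (Type Reg Imm8Reg) Reg)

    ;; After: the `x64_` prefix names the backend that emits the machine inst.
    (decl x64_and (Type Reg RegMemImm) Reg)
    (decl x64_rotl (Type Reg Imm8Reg) Reg)

    ;; Call sites in lower.isle change accordingly, e.g. the register-register
    ;; `band` lowering:
    (rule (lower (has_type (fits_in_64 ty) (band x y)))
          (value_reg (x64_and ty
                              (put_in_reg x)
                              (RegMemImm.Reg (put_in_reg y)))))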
pull/3690/head
Nick Fitzgerald, 3 years ago
commit b78731839b
  1. cranelift/codegen/src/isa/x64/inst.isle (27 lines changed)
  2. cranelift/codegen/src/isa/x64/lower.isle (68 lines changed)

cranelift/codegen/src/isa/x64/inst.isle (27 lines changed)

@@ -73,8 +73,7 @@
 (src Reg)
 (dst WritableReg))
 (LoadEffectiveAddress (addr SyntheticAmode)
-(dst WritableReg))
-))
+(dst WritableReg))))

 (type OperandSize extern
 (enum Size8
@@ -534,9 +533,9 @@
 (let ((wr WritableReg (temp_writable_reg ty))
 (r Reg (writable_reg_to_reg wr))
 (_ Unit (emit (MInst.XmmRmR (sse_cmp_op $I32X4)
-r
-(RegMem.Reg r)
-wr))))
+r
+(RegMem.Reg r)
+wr))))
 r))

 ;; Helper for creating an SSE register holding an `i64x2` from two `i64` values.
@@ -697,8 +696,8 @@
 ;;
 ;; Use `m_` prefix (short for "mach inst") to disambiguate with the ISLE-builtin
 ;; `and` operator.
-(decl m_and (Type Reg RegMemImm) Reg)
-(rule (m_and ty src1 src2)
+(decl x64_and (Type Reg RegMemImm) Reg)
+(rule (x64_and ty src1 src2)
 (alu_rmi_r ty
 (AluRmiROpcode.And)
 src1
@@ -766,9 +765,9 @@
 (let ((wr WritableReg (temp_writable_reg ty))
 (r Reg (writable_reg_to_reg wr))
 (_ Unit (emit (MInst.XmmRmR (sse_xor_op ty)
-r
-(RegMem.Reg r)
-wr))))
+r
+(RegMem.Reg r)
+wr))))
 r))

 ;; Special case for `f32` zero immediates to use `xorps`.
@@ -807,14 +806,14 @@
 ;; Helper for creating `rotl` instructions (prefixed with "m_", short for "mach
 ;; inst", to disambiguate this from clif's `rotl`).
-(decl m_rotl (Type Reg Imm8Reg) Reg)
-(rule (m_rotl ty src1 src2)
+(decl x64_rotl (Type Reg Imm8Reg) Reg)
+(rule (x64_rotl ty src1 src2)
 (shift_r ty (ShiftKind.RotateLeft) src1 src2))

 ;; Helper for creating `rotr` instructions (prefixed with "m_", short for "mach
 ;; inst", to disambiguate this from clif's `rotr`).
-(decl m_rotr (Type Reg Imm8Reg) Reg)
-(rule (m_rotr ty src1 src2)
+(decl x64_rotr (Type Reg Imm8Reg) Reg)
+(rule (x64_rotr ty src1 src2)
 (shift_r ty (ShiftKind.RotateRight) src1 src2))

 ;; Helper for creating `shl` instructions.

cranelift/codegen/src/isa/x64/lower.isle (68 lines changed)

@@ -326,37 +326,37 @@
 ;; And two registers.
 (rule (lower (has_type (fits_in_64 ty) (band x y)))
-(value_reg (m_and ty
-(put_in_reg x)
-(RegMemImm.Reg (put_in_reg y)))))
+(value_reg (x64_and ty
+(put_in_reg x)
+(RegMemImm.Reg (put_in_reg y)))))

 ;; And with a memory operand.
 (rule (lower (has_type (fits_in_64 ty)
 (band x (sinkable_load y))))
-(value_reg (m_and ty
-(put_in_reg x)
-(sink_load y))))
+(value_reg (x64_and ty
+(put_in_reg x)
+(sink_load y))))

 (rule (lower (has_type (fits_in_64 ty)
 (band (sinkable_load x) y)))
-(value_reg (m_and ty
-(put_in_reg y)
-(sink_load x))))
+(value_reg (x64_and ty
+(put_in_reg y)
+(sink_load x))))

 ;; And with an immediate.
 (rule (lower (has_type (fits_in_64 ty)
 (band x (simm32_from_value y))))
-(value_reg (m_and ty
-(put_in_reg x)
-y)))
+(value_reg (x64_and ty
+(put_in_reg x)
+y)))

 (rule (lower (has_type (fits_in_64 ty)
 (band (simm32_from_value x) y)))
-(value_reg (m_and ty
-(put_in_reg y)
-x)))
+(value_reg (x64_and ty
+(put_in_reg y)
+x)))

 ;; SSE.
@@ -378,8 +378,8 @@
 (y_regs ValueRegs (put_in_regs y))
 (y_lo Reg (value_regs_get y_regs 0))
 (y_hi Reg (value_regs_get y_regs 1)))
-(value_regs (m_and $I64 x_lo (RegMemImm.Reg y_lo))
-(m_and $I64 x_hi (RegMemImm.Reg y_hi)))))
+(value_regs (x64_and $I64 x_lo (RegMemImm.Reg y_lo))
+(x64_and $I64 x_hi (RegMemImm.Reg y_hi)))))

 (rule (lower (has_type $B128 (band x y)))
 ;; Booleans are always `0` or `1`, so we only need to do the `and` on the
@@ -389,7 +389,7 @@
 (x_lo Reg (value_regs_get x_regs 0))
 (x_hi Reg (value_regs_get x_regs 1))
 (y_lo Reg (lo_reg y)))
-(value_regs (m_and $I64 x_lo (RegMemImm.Reg y_lo))
+(value_regs (x64_and $I64 x_lo (RegMemImm.Reg y_lo))
 x_hi)))

 ;;;; Rules for `bor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -832,13 +832,13 @@
 (rule (lower (has_type (ty_8_or_16 ty) (rotl src amt)))
 (let ((amt_ Reg (extend_to_reg amt $I32 (ExtendKind.Zero))))
-(value_reg (m_rotl ty (put_in_reg src) (Imm8Reg.Reg amt_)))))
+(value_reg (x64_rotl ty (put_in_reg src) (Imm8Reg.Reg amt_)))))

 (rule (lower (has_type (ty_8_or_16 ty)
 (rotl src (u64_from_iconst amt))))
-(value_reg (m_rotl ty
-(put_in_reg src)
-(const_to_type_masked_imm8 amt ty))))
+(value_reg (x64_rotl ty
+(put_in_reg src)
+(const_to_type_masked_imm8 amt ty))))

 ;; `i64` and `i32`: we can rely on x86's rotate-amount masking since
 ;; we operate on the whole register.
@@ -847,13 +847,13 @@
 ;; NB: Only the low bits of `amt` matter since we logically mask the
 ;; shift amount to the value's bit width.
 (let ((amt_ Reg (lo_reg amt)))
-(value_reg (m_rotl ty (put_in_reg src) (Imm8Reg.Reg amt_)))))
+(value_reg (x64_rotl ty (put_in_reg src) (Imm8Reg.Reg amt_)))))

 (rule (lower (has_type (ty_32_or_64 ty)
 (rotl src (u64_from_iconst amt))))
-(value_reg (m_rotl ty
-(put_in_reg src)
-(const_to_type_masked_imm8 amt ty))))
+(value_reg (x64_rotl ty
+(put_in_reg src)
+(const_to_type_masked_imm8 amt ty))))

 ;; `i128`.
@@ -872,13 +872,13 @@
 (rule (lower (has_type (ty_8_or_16 ty) (rotr src amt)))
 (let ((amt_ Reg (extend_to_reg amt $I32 (ExtendKind.Zero))))
-(value_reg (m_rotr ty (put_in_reg src) (Imm8Reg.Reg amt_)))))
+(value_reg (x64_rotr ty (put_in_reg src) (Imm8Reg.Reg amt_)))))

 (rule (lower (has_type (ty_8_or_16 ty)
 (rotr src (u64_from_iconst amt))))
-(value_reg (m_rotr ty
-(put_in_reg src)
-(const_to_type_masked_imm8 amt ty))))
+(value_reg (x64_rotr ty
+(put_in_reg src)
+(const_to_type_masked_imm8 amt ty))))

 ;; `i64` and `i32`: we can rely on x86's rotate-amount masking since
 ;; we operate on the whole register.
@@ -887,13 +887,13 @@
 ;; NB: Only the low bits of `amt` matter since we logically mask the
 ;; shift amount to the value's bit width.
 (let ((amt_ Reg (lo_reg amt)))
-(value_reg (m_rotr ty (put_in_reg src) (Imm8Reg.Reg amt_)))))
+(value_reg (x64_rotr ty (put_in_reg src) (Imm8Reg.Reg amt_)))))

 (rule (lower (has_type (ty_32_or_64 ty)
 (rotr src (u64_from_iconst amt))))
-(value_reg (m_rotr ty
-(put_in_reg src)
-(const_to_type_masked_imm8 amt ty))))
+(value_reg (x64_rotr ty
+(put_in_reg src)
+(const_to_type_masked_imm8 amt ty))))

 ;; `i128`.
