
MachInst isel and aarch64 backend: docs / clarity improvements.

Following discussion with Julian and Ben, this PR makes a few
documentation- and naming-level changes (no functional change):

- Document that the `LowerCtx`-provided output register can be used as a
  scratch register during the lowered instruction sequence before
  placing the final result in it (see the sketch after this list).

- Rename `input_to_*` helpers in the AArch64 backend to
  `put_input_in_*`, emphasizing that these are side-effecting helpers
  that potentially generate code (e.g., sign/zero-extensions) to ensure
  an input value is in a register.
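
To make the scratch-register contract concrete, here is a minimal,
self-contained sketch in the spirit of this change. The `Inst` shapes and
the `lower_add_const` helper are invented for illustration and are not
Cranelift's actual API:

// Toy model of the "output register may be used as scratch" contract.
// `Inst` and `lower_add_const` are illustrative names, not Cranelift's
// real types.
#[derive(Debug)]
enum Inst {
    MovImm { rd: u32, imm: u64 },      // rd := imm
    Add { rd: u32, rn: u32, rm: u32 }, // rd := rn + rm
}

/// Lower "x + constant": materialize the constant into the output
/// register first (scratch use), then write the final sum into it.
fn lower_add_const(rd: u32, rn: u32, imm: u64, out: &mut Vec<Inst>) {
    out.push(Inst::MovImm { rd, imm });     // rd used as a scratch here
    out.push(Inst::Add { rd, rn, rm: rd }); // final result lands in rd
}

fn main() {
    let mut seq = Vec::new();
    lower_add_const(0, 1, 42, &mut seq);
    println!("{:?}", seq);
}

The second instruction reads the intermediate back out of `rd`; that is
only sound because the documented contract guarantees the output register
never overlaps an input.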
Chris Fallin · 4 years ago · commit 492000e945 (pull/1900/head)
Changed files:

  1. cranelift/codegen/src/isa/aarch64/lower.rs (56 changed lines)
  2. cranelift/codegen/src/isa/aarch64/lower_inst.rs (260 changed lines)
  3. cranelift/codegen/src/machinst/lower.rs (6 changed lines)

cranelift/codegen/src/isa/aarch64/lower.rs (56 changed lines)

@@ -168,7 +168,7 @@ pub(crate) fn output_to_const_f128<C: LowerCtx<I = Inst>>(
}
/// How to handle narrow values loaded into registers; see note on `narrow_mode`
-/// parameter to `input_to_*` below.
+/// parameter to `put_input_in_*` below.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum NarrowValueMode {
None,
@@ -193,7 +193,7 @@ impl NarrowValueMode {
}
/// Allocate a register for an instruction output and return it.
-pub(crate) fn output_to_reg<C: LowerCtx<I = Inst>>(ctx: &mut C, out: InsnOutput) -> Writable<Reg> {
+pub(crate) fn get_output_reg<C: LowerCtx<I = Inst>>(ctx: &mut C, out: InsnOutput) -> Writable<Reg> {
ctx.get_output(out.insn, out.output)
}
@@ -202,12 +202,12 @@ pub(crate) fn output_to_reg<C: LowerCtx<I = Inst>>(ctx: &mut C, out: InsnOutput)
/// The given register will be extended appropriately, according to
/// `narrow_mode` and the input's type. If extended, the value is
/// always extended to 64 bits, for simplicity.
-pub(crate) fn input_to_reg<C: LowerCtx<I = Inst>>(
+pub(crate) fn put_input_in_reg<C: LowerCtx<I = Inst>>(
ctx: &mut C,
input: InsnInput,
narrow_mode: NarrowValueMode,
) -> Reg {
-debug!("input_to_reg: input {:?}", input);
+debug!("put_input_in_reg: input {:?}", input);
let ty = ctx.input_ty(input.insn, input.input);
let from_bits = ty_bits(ty) as u8;
let inputs = ctx.get_input(input.insn, input.input);
@@ -302,7 +302,7 @@ pub(crate) fn input_to_reg<C: LowerCtx<I = Inst>>(
/// divide or a right-shift or a compare-to-zero), `narrow_mode` should be
/// set to `ZeroExtend` or `SignExtend` as appropriate, and the resulting
/// register will be provided the extended value.
-fn input_to_rs<C: LowerCtx<I = Inst>>(
+fn put_input_in_rs<C: LowerCtx<I = Inst>>(
ctx: &mut C,
input: InsnInput,
narrow_mode: NarrowValueMode,
@@ -317,21 +317,21 @@ fn input_to_rs<C: LowerCtx<I = Inst>>(
// Can we get the shift amount as an immediate?
if let Some(shiftimm) = input_to_shiftimm(ctx, shift_amt) {
-let reg = input_to_reg(ctx, shiftee, narrow_mode);
+let reg = put_input_in_reg(ctx, shiftee, narrow_mode);
return ResultRS::RegShift(reg, ShiftOpAndAmt::new(ShiftOp::LSL, shiftimm));
}
}
}
-ResultRS::Reg(input_to_reg(ctx, input, narrow_mode))
+ResultRS::Reg(put_input_in_reg(ctx, input, narrow_mode))
}
/// Lower an instruction input to a reg or reg/shift, or reg/extend operand.
/// This does not actually codegen the source instruction; it just uses the
/// vreg into which the source instruction will generate its value.
///
-/// See note on `input_to_rs` for a description of `narrow_mode`.
-fn input_to_rse<C: LowerCtx<I = Inst>>(
+/// See note on `put_input_in_rs` for a description of `narrow_mode`.
+fn put_input_in_rse<C: LowerCtx<I = Inst>>(
ctx: &mut C,
input: InsnInput,
narrow_mode: NarrowValueMode,
@@ -349,7 +349,7 @@ fn input_to_rse<C: LowerCtx<I = Inst>>(
&& ((narrow_mode.is_32bit() && out_bits < 32)
|| (!narrow_mode.is_32bit() && out_bits < 64))
{
-let reg = input_to_reg(ctx, InsnInput { insn, input: 0 }, NarrowValueMode::None);
+let reg = put_input_in_reg(ctx, InsnInput { insn, input: 0 }, NarrowValueMode::None);
let extendop = match (narrow_mode, out_bits) {
(NarrowValueMode::SignExtend32, 1) | (NarrowValueMode::SignExtend64, 1) => {
ExtendOp::SXTB
@@ -394,15 +394,15 @@ fn input_to_rse<C: LowerCtx<I = Inst>>(
(false, 32) => ExtendOp::UXTW,
_ => unreachable!(),
};
-let reg = input_to_reg(ctx, InsnInput { insn, input: 0 }, NarrowValueMode::None);
+let reg = put_input_in_reg(ctx, InsnInput { insn, input: 0 }, NarrowValueMode::None);
return ResultRSE::RegExtend(reg, extendop);
}
}
-ResultRSE::from_rs(input_to_rs(ctx, input, narrow_mode))
+ResultRSE::from_rs(put_input_in_rs(ctx, input, narrow_mode))
}
-pub(crate) fn input_to_rse_imm12<C: LowerCtx<I = Inst>>(
+pub(crate) fn put_input_in_rse_imm12<C: LowerCtx<I = Inst>>(
ctx: &mut C,
input: InsnInput,
narrow_mode: NarrowValueMode,
@@ -413,10 +413,10 @@ pub(crate) fn input_to_rse_imm12<C: LowerCtx<I = Inst>>(
}
}
-ResultRSEImm12::from_rse(input_to_rse(ctx, input, narrow_mode))
+ResultRSEImm12::from_rse(put_input_in_rse(ctx, input, narrow_mode))
}
-pub(crate) fn input_to_rs_immlogic<C: LowerCtx<I = Inst>>(
+pub(crate) fn put_input_in_rs_immlogic<C: LowerCtx<I = Inst>>(
ctx: &mut C,
input: InsnInput,
narrow_mode: NarrowValueMode,
@@ -429,10 +429,10 @@ pub(crate) fn input_to_rs_immlogic<C: LowerCtx<I = Inst>>(
}
}
-ResultRSImmLogic::from_rs(input_to_rs(ctx, input, narrow_mode))
+ResultRSImmLogic::from_rs(put_input_in_rs(ctx, input, narrow_mode))
}
-pub(crate) fn input_to_reg_immshift<C: LowerCtx<I = Inst>>(
+pub(crate) fn put_input_in_reg_immshift<C: LowerCtx<I = Inst>>(
ctx: &mut C,
input: InsnInput,
) -> ResultRegImmShift {
@@ -442,7 +442,7 @@ pub(crate) fn input_to_reg_immshift<C: LowerCtx<I = Inst>>(
}
}
-ResultRegImmShift::Reg(input_to_reg(ctx, input, NarrowValueMode::None))
+ResultRegImmShift::Reg(put_input_in_reg(ctx, input, NarrowValueMode::None))
}
//============================================================================
@@ -546,7 +546,7 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
// Handle one reg and offset.
if addends.len() == 1 {
-let reg = input_to_reg(ctx, addends[0], NarrowValueMode::ZeroExtend64);
+let reg = put_input_in_reg(ctx, addends[0], NarrowValueMode::ZeroExtend64);
return MemArg::RegOffset(reg, offset as i64, elem_ty);
}
@@ -560,9 +560,9 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
maybe_input_insn_multi(ctx, addends[i], &[Opcode::Uextend, Opcode::Sextend])
{
// Non-extended addend.
-let r1 = input_to_reg(ctx, addends[1 - i], NarrowValueMode::ZeroExtend64);
+let r1 = put_input_in_reg(ctx, addends[1 - i], NarrowValueMode::ZeroExtend64);
// Extended addend.
-let r2 = input_to_reg(
+let r2 = put_input_in_reg(
ctx,
InsnInput {
insn: ext_insn,
@@ -596,8 +596,8 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
// Handle two regs and a zero offset in the general case, if possible.
if addends.len() == 2 && offset == 0 {
-let ra = input_to_reg(ctx, addends[0], NarrowValueMode::ZeroExtend64);
-let rb = input_to_reg(ctx, addends[1], NarrowValueMode::ZeroExtend64);
+let ra = put_input_in_reg(ctx, addends[0], NarrowValueMode::ZeroExtend64);
+let rb = put_input_in_reg(ctx, addends[1], NarrowValueMode::ZeroExtend64);
return MemArg::reg_plus_reg(ra, rb);
}
@@ -609,7 +609,7 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
// Add each addend to the address.
for addend in addends {
-let reg = input_to_reg(ctx, *addend, NarrowValueMode::ZeroExtend64);
+let reg = put_input_in_reg(ctx, *addend, NarrowValueMode::ZeroExtend64);
// In an addition, the stack register is the zero register, so divert it to another
// register just before doing the actual add.
@@ -997,8 +997,8 @@ pub(crate) fn lower_icmp_or_ifcmp_to_flags<C: LowerCtx<I = Inst>>(
},
];
let ty = ctx.input_ty(insn, 0);
-let rn = input_to_reg(ctx, inputs[0], narrow_mode);
-let rm = input_to_rse_imm12(ctx, inputs[1], narrow_mode);
+let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
+let rm = put_input_in_rse_imm12(ctx, inputs[1], narrow_mode);
debug!("lower_icmp_or_ifcmp_to_flags: rn = {:?} rm = {:?}", rn, rm);
let alu_op = choose_32_64(ty, ALUOp::SubS32, ALUOp::SubS64);
let rd = writable_zero_reg();
@@ -1018,8 +1018,8 @@ pub(crate) fn lower_fcmp_or_ffcmp_to_flags<C: LowerCtx<I = Inst>>(ctx: &mut C, i
input: 1,
},
];
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
match bits {
32 => {
ctx.emit(Inst::FpuCmp32 { rn, rm });

cranelift/codegen/src/isa/aarch64/lower_inst.rs (260 changed lines)

@@ -42,31 +42,31 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
match op {
Opcode::Iconst | Opcode::Bconst | Opcode::Null => {
let value = ctx.get_constant(insn).unwrap();
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
lower_constant_u64(ctx, rd, value);
}
Opcode::F32const => {
let value = f32::from_bits(ctx.get_constant(insn).unwrap() as u32);
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
lower_constant_f32(ctx, rd, value);
}
Opcode::F64const => {
let value = f64::from_bits(ctx.get_constant(insn).unwrap());
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
lower_constant_f64(ctx, rd, value);
}
Opcode::Iadd => {
-let rd = output_to_reg(ctx, outputs[0]);
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rm = input_to_rse_imm12(ctx, inputs[1], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_rse_imm12(ctx, inputs[1], NarrowValueMode::None);
let ty = ty.unwrap();
let alu_op = choose_32_64(ty, ALUOp::Add32, ALUOp::Add64);
ctx.emit(alu_inst_imm12(alu_op, rd, rn, rm));
}
Opcode::Isub => {
-let rd = output_to_reg(ctx, outputs[0]);
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rm = input_to_rse_imm12(ctx, inputs[1], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_rse_imm12(ctx, inputs[1], NarrowValueMode::None);
let ty = ty.unwrap();
let alu_op = choose_32_64(ty, ALUOp::Sub32, ALUOp::Sub64);
ctx.emit(alu_inst_imm12(alu_op, rd, rn, rm));
@@ -87,9 +87,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
};
let va = ctx.alloc_tmp(RegClass::V128, I128);
let vb = ctx.alloc_tmp(RegClass::V128, I128);
-let ra = input_to_reg(ctx, inputs[0], narrow_mode);
-let rb = input_to_reg(ctx, inputs[1], narrow_mode);
-let rd = output_to_reg(ctx, outputs[0]);
+let ra = put_input_in_reg(ctx, inputs[0], narrow_mode);
+let rb = put_input_in_reg(ctx, inputs[1], narrow_mode);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::MovToVec64 { rd: va, rn: ra });
ctx.emit(Inst::MovToVec64 { rd: vb, rn: rb });
ctx.emit(Inst::VecRRR {
@@ -121,9 +121,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
};
let va = ctx.alloc_tmp(RegClass::V128, I128);
let vb = ctx.alloc_tmp(RegClass::V128, I128);
-let ra = input_to_reg(ctx, inputs[0], narrow_mode);
-let rb = input_to_reg(ctx, inputs[1], narrow_mode);
-let rd = output_to_reg(ctx, outputs[0]);
+let ra = put_input_in_reg(ctx, inputs[0], narrow_mode);
+let rb = put_input_in_reg(ctx, inputs[1], narrow_mode);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::MovToVec64 { rd: va, rn: ra });
ctx.emit(Inst::MovToVec64 { rd: vb, rn: rb });
ctx.emit(Inst::VecRRR {
@@ -142,18 +142,18 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode::Ineg => {
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
let rn = zero_reg();
-let rm = input_to_rse_imm12(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_rse_imm12(ctx, inputs[0], NarrowValueMode::None);
let ty = ty.unwrap();
let alu_op = choose_32_64(ty, ALUOp::Sub32, ALUOp::Sub64);
ctx.emit(alu_inst_imm12(alu_op, rd, rn, rm));
}
Opcode::Imul => {
-let rd = output_to_reg(ctx, outputs[0]);
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
let ty = ty.unwrap();
let alu_op = choose_32_64(ty, ALUOp::MAdd32, ALUOp::MAdd64);
ctx.emit(Inst::AluRRRR {
@@ -166,7 +166,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode::Umulhi | Opcode::Smulhi => {
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
let is_signed = op == Opcode::Smulhi;
let input_ty = ctx.input_ty(insn, 0);
assert!(ctx.input_ty(insn, 1) == input_ty);
@@ -174,8 +174,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
match input_ty {
I64 => {
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
let ra = zero_reg();
let alu_op = if is_signed {
ALUOp::SMulH
@@ -196,8 +196,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} else {
NarrowValueMode::ZeroExtend64
};
-let rn = input_to_reg(ctx, inputs[0], narrow_mode);
-let rm = input_to_reg(ctx, inputs[1], narrow_mode);
+let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
+let rm = put_input_in_reg(ctx, inputs[1], narrow_mode);
let ra = zero_reg();
ctx.emit(Inst::AluRRRR {
alu_op: ALUOp::MAdd64,
@@ -254,9 +254,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
ALUOp::UDiv64
};
-let rd = output_to_reg(ctx, outputs[0]);
-let rn = input_to_reg(ctx, inputs[0], narrow_mode);
-let rm = input_to_reg(ctx, inputs[1], narrow_mode);
+let rd = get_output_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
+let rm = put_input_in_reg(ctx, inputs[1], narrow_mode);
// The div instruction does not trap on divide by zero or signed overflow
// so checks are inserted below.
//
@@ -372,8 +372,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// If we reach this point, we weren't able to incorporate the extend as
// a register-mode on another instruction, so we have a 'None'
// narrow-value/extend mode here, and we emit the explicit instruction.
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::Extend {
rd,
rn,
@@ -385,15 +385,15 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode::Bnot => {
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap();
if ty_bits(ty) < 128 {
-let rm = input_to_rs_immlogic(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_rs_immlogic(ctx, inputs[0], NarrowValueMode::None);
let alu_op = choose_32_64(ty, ALUOp::OrrNot32, ALUOp::OrrNot64);
// NOT rd, rm ==> ORR_NOT rd, zero, rm
ctx.emit(alu_inst_immlogic(alu_op, rd, zero_reg(), rm));
} else {
-let rm = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
ctx.emit(Inst::VecMisc {
op: VecMisc2::Not,
rd,
@@ -409,11 +409,11 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
| Opcode::BandNot
| Opcode::BorNot
| Opcode::BxorNot => {
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap();
if ty_bits(ty) < 128 {
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rm = input_to_rs_immlogic(ctx, inputs[1], NarrowValueMode::None);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_rs_immlogic(ctx, inputs[1], NarrowValueMode::None);
let alu_op = match op {
Opcode::Band => choose_32_64(ty, ALUOp::And32, ALUOp::And64),
Opcode::Bor => choose_32_64(ty, ALUOp::Orr32, ALUOp::Orr64),
@@ -433,9 +433,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
_ => unreachable!(),
};
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::VecRRR {
alu_op,
@@ -458,9 +458,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
(Opcode::Sshr, InstSize::Size32) => NarrowValueMode::SignExtend32,
_ => unreachable!(),
};
-let rd = output_to_reg(ctx, outputs[0]);
-let rn = input_to_reg(ctx, inputs[0], narrow_mode);
-let rm = input_to_reg_immshift(ctx, inputs[1]);
+let rd = get_output_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
+let rm = put_input_in_reg_immshift(ctx, inputs[1]);
let alu_op = match op {
Opcode::Ishl => choose_32_64(ty, ALUOp::Lsl32, ALUOp::Lsl64),
Opcode::Ushr => choose_32_64(ty, ALUOp::Lsr32, ALUOp::Lsr64),
@@ -503,8 +503,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let ty = ty.unwrap();
let ty_bits_size = ty_bits(ty) as u8;
-let rd = output_to_reg(ctx, outputs[0]);
-let rn = input_to_reg(
+let rd = get_output_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(
ctx,
inputs[0],
if ty_bits_size <= 32 {
@@ -513,7 +513,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
NarrowValueMode::ZeroExtend64
},
);
-let rm = input_to_reg_immshift(ctx, inputs[1]);
+let rm = put_input_in_reg_immshift(ctx, inputs[1]);
if ty_bits_size == 32 || ty_bits_size == 64 {
let alu_op = choose_32_64(ty, ALUOp::RotR32, ALUOp::RotR64);
@@ -652,7 +652,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode::Bitrev | Opcode::Clz | Opcode::Cls | Opcode::Ctz => {
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
let needs_zext = match op {
Opcode::Bitrev | Opcode::Ctz => false,
Opcode::Clz | Opcode::Cls => true,
@@ -666,7 +666,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} else {
NarrowValueMode::None
};
-let rn = input_to_reg(ctx, inputs[0], narrow_mode);
+let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
let op_ty = match ty {
I8 | I16 | I32 => I32,
I64 => I64,
@@ -722,11 +722,11 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// x += x << 32
// x >> 56
let ty = ty.unwrap();
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
// FIXME(#1537): zero-extend 8/16/32-bit operands only to 32 bits,
// and fix the sequence below to work properly for this.
let narrow_mode = NarrowValueMode::ZeroExtend64;
-let rn = input_to_reg(ctx, inputs[0], narrow_mode);
+let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
let tmp = ctx.alloc_tmp(RegClass::I64, I64);
// If this is a 32-bit Popcnt, use Lsr32 to clear the top 32 bits of the register, then
@@ -903,7 +903,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let is_float = ty_is_float(elem_ty);
let mem = lower_address(ctx, elem_ty, &inputs[..], off);
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
let memflags = ctx.memflags(insn).expect("memory flags");
let srcloc = if !memflags.notrap() {
@@ -967,7 +967,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let is_float = ty_is_float(elem_ty);
let mem = lower_address(ctx, elem_ty, &inputs[1..], off);
-let rd = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
+let rd = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
let memflags = ctx.memflags(insn).expect("memory flags");
let srcloc = if !memflags.notrap() {
@@ -997,7 +997,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} => (stack_slot, offset),
_ => unreachable!(),
};
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
let offset: i32 = offset.into();
let inst = ctx
.abi()
@@ -1031,7 +1031,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
(ALUOp::SubS32, NarrowValueMode::ZeroExtend32)
};
-let rcond = input_to_reg(ctx, inputs[0], narrow_mode);
+let rcond = put_input_in_reg(ctx, inputs[0], narrow_mode);
// cmp rcond, #0
ctx.emit(Inst::AluRRR {
alu_op: cmp_op,
@@ -1052,9 +1052,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
};
// csel.COND rd, rn, rm
-let rd = output_to_reg(ctx, outputs[0]);
-let rn = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
-let rm = input_to_reg(ctx, inputs[2], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
let ty = ctx.output_ty(insn, 0);
let bits = ty_bits(ty);
if ty_is_float(ty) && bits == 32 {
@@ -1070,10 +1070,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let ty = ty.unwrap();
if ty_bits(ty) < 128 {
let tmp = ctx.alloc_tmp(RegClass::I64, I64);
-let rd = output_to_reg(ctx, outputs[0]);
-let rcond = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rn = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
-let rm = input_to_reg(ctx, inputs[2], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
+let rcond = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
// AND rTmp, rn, rcond
ctx.emit(Inst::AluRRR {
alu_op: ALUOp::And64,
@@ -1096,10 +1096,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
rm: tmp.to_reg(),
});
} else {
-let rcond = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rn = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
-let rm = input_to_reg(ctx, inputs[2], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rcond = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gen_move(rd, rcond, ty));
ctx.emit(Inst::VecRRR {
@@ -1120,7 +1120,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// single-def ifcmp.
let ifcmp_insn = maybe_input_insn(ctx, inputs[0], Opcode::Ifcmp).unwrap();
lower_icmp_or_ifcmp_to_flags(ctx, ifcmp_insn, is_signed);
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::CSet { rd, cond });
}
@@ -1129,7 +1129,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let cond = lower_fp_condcode(condcode);
let ffcmp_insn = maybe_input_insn(ctx, inputs[0], Opcode::Ffcmp).unwrap();
lower_fcmp_or_ffcmp_to_flags(ctx, ffcmp_insn);
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::CSet { rd, cond });
}
@@ -1138,8 +1138,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode::Copy => {
-let rd = output_to_reg(ctx, outputs[0]);
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
let ty = ctx.input_ty(insn, 0);
ctx.emit(Inst::gen_move(rd, rn, ty));
}
@@ -1157,16 +1157,16 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// - Ireduce: changing width of an integer. Smaller ints are stored
// with undefined high-order bits, so we can simply do a copy.
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend64);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend64);
+let rd = get_output_reg(ctx, outputs[0]);
let ty = ctx.input_ty(insn, 0);
ctx.emit(Inst::gen_move(rd, rn, ty));
}
Opcode::Bmask => {
// Bool is {0, 1}, so we can subtract from 0 to get all-1s.
-let rd = output_to_reg(ctx, outputs[0]);
-let rm = input_to_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend64);
+let rd = get_output_reg(ctx, outputs[0]);
+let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend64);
ctx.emit(Inst::AluRRR {
alu_op: ALUOp::Sub64,
rd,
@@ -1176,7 +1176,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode::Bitcast => {
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
let ity = ctx.input_ty(insn, 0);
let oty = ctx.output_ty(insn, 0);
match (ty_is_float(ity), ty_is_float(oty)) {
@@ -1186,19 +1186,19 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} else {
NarrowValueMode::ZeroExtend64
};
-let rm = input_to_reg(ctx, inputs[0], narrow_mode);
+let rm = put_input_in_reg(ctx, inputs[0], narrow_mode);
ctx.emit(Inst::gen_move(rd, rm, oty));
}
(false, false) => {
-let rm = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
ctx.emit(Inst::gen_move(rd, rm, oty));
}
(false, true) => {
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend64);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend64);
ctx.emit(Inst::MovToVec64 { rd, rn });
}
(true, false) => {
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
ctx.emit(Inst::MovFromVec {
rd,
rn,
@@ -1214,7 +1214,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// N.B.: according to the AArch64 ABI, the top bits of a register
// (above the bits for the value's type) are undefined, so we
// need not extend the return values.
-let reg = input_to_reg(ctx, *input, NarrowValueMode::None);
+let reg = put_input_in_reg(ctx, *input, NarrowValueMode::None);
let retval_reg = ctx.retval(i);
let ty = ctx.input_ty(insn, i);
ctx.emit(Inst::gen_move(retval_reg, reg, ty));
@@ -1234,7 +1234,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let condcode = inst_condcode(ctx.data(insn)).unwrap();
let cond = lower_condcode(condcode);
let is_signed = condcode_is_signed(condcode);
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
let ty = ctx.input_ty(insn, 0);
let bits = ty_bits(ty);
let narrow_mode = match (bits <= 32, is_signed) {
@@ -1243,15 +1243,15 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
(false, true) => NarrowValueMode::SignExtend64,
(false, false) => NarrowValueMode::ZeroExtend64,
};
-let rn = input_to_reg(ctx, inputs[0], narrow_mode);
+let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
if ty_bits(ty) < 128 {
let alu_op = choose_32_64(ty, ALUOp::SubS32, ALUOp::SubS64);
-let rm = input_to_rse_imm12(ctx, inputs[1], narrow_mode);
+let rm = put_input_in_rse_imm12(ctx, inputs[1], narrow_mode);
ctx.emit(alu_inst_imm12(alu_op, writable_zero_reg(), rn, rm));
ctx.emit(Inst::CondSet { cond, rd });
} else {
-let rm = input_to_reg(ctx, inputs[1], narrow_mode);
+let rm = put_input_in_reg(ctx, inputs[1], narrow_mode);
lower_vector_compare(ctx, rd, rn, rm, ty, cond)?;
}
}
@@ -1260,9 +1260,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let condcode = inst_fp_condcode(ctx.data(insn)).unwrap();
let cond = lower_fp_condcode(condcode);
let ty = ctx.input_ty(insn, 0);
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
if ty_bits(ty) < 128 {
match ty_bits(ty) {
@@ -1344,7 +1344,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode::FuncAddr => {
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
let (extname, _) = ctx.call_target(insn).unwrap();
let extname = extname.clone();
let loc = ctx.srcloc(insn);
@@ -1361,7 +1361,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode::SymbolValue => {
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
let (extname, _, offset) = ctx.symbol_value(insn).unwrap();
let extname = extname.clone();
let loc = ctx.srcloc(insn);
@@ -1388,7 +1388,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
)
}
Opcode::CallIndirect => {
-let ptr = input_to_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend64);
+let ptr = put_input_in_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend64);
let sig = ctx.call_sig(insn).unwrap();
assert!(inputs.len() - 1 == sig.params.len());
assert!(outputs.len() == sig.returns.len());
@@ -1400,24 +1400,24 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
abi.emit_stack_pre_adjust(ctx);
assert!(inputs.len() == abi.num_args());
for (i, input) in inputs.iter().enumerate() {
-let arg_reg = input_to_reg(ctx, *input, NarrowValueMode::None);
+let arg_reg = put_input_in_reg(ctx, *input, NarrowValueMode::None);
abi.emit_copy_reg_to_arg(ctx, i, arg_reg);
}
abi.emit_call(ctx);
for (i, output) in outputs.iter().enumerate() {
-let retval_reg = output_to_reg(ctx, *output);
+let retval_reg = get_output_reg(ctx, *output);
abi.emit_copy_retval_to_reg(ctx, i, retval_reg);
}
abi.emit_stack_post_adjust(ctx);
}
Opcode::GetPinnedReg => {
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::mov(rd, xreg(PINNED_REG)));
}
Opcode::SetPinnedReg => {
-let rm = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
ctx.emit(Inst::mov(writable_xreg(PINNED_REG), rm));
}
@@ -1451,13 +1451,13 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::Vconst => {
let value = output_to_const_f128(ctx, outputs[0]).unwrap();
-let rd = output_to_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]);
lower_constant_f128(ctx, rd, value);
}
Opcode::RawBitcast => {
-let rm = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
let ty = ctx.input_ty(insn, 0);
ctx.emit(Inst::gen_move(rd, rm, ty));
}
@@ -1465,8 +1465,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::Extractlane => {
if let InstructionData::BinaryImm8 { imm, .. } = ctx.data(insn) {
let idx = *imm;
-let rd = output_to_reg(ctx, outputs[0]);
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
let ty = ty.unwrap();
if ty_is_int(ty) {
@@ -1483,8 +1483,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode::Splat => {
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
let ty = ctx.input_ty(insn, 0);
let inst = if ty_is_int(ty) {
Inst::VecDup { rd, rn, ty }
@@ -1495,8 +1495,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode::VanyTrue | Opcode::VallTrue => {
-let rd = output_to_reg(ctx, outputs[0]);
-let rm = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
+let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
let tmp = ctx.alloc_tmp(RegClass::V128, ty.unwrap());
// This operation is implemented by using umaxp or uminv to
@@ -1581,9 +1581,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
(Opcode::Fmax, 64) => FPUOp2::Max64,
_ => panic!("Unknown op/bits combination"),
};
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::FpuRRR { fpu_op, rd, rn, rm });
}
@@ -1602,8 +1602,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
(Opcode::Fdemote, 64) => panic!("Cannot demote to 64 bits"),
_ => panic!("Unknown op/bits combination"),
};
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::FpuRR { fpu_op, rd, rn });
}
@@ -1620,8 +1620,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
(Opcode::Nearest, 64) => FpuRoundMode::Nearest64,
_ => panic!("Unknown op/bits combination"),
};
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::FpuRound { op, rd, rn });
}
@@ -1632,10 +1632,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
64 => FPUOp3::MAdd64,
_ => panic!("Unknown op size"),
};
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
-let ra = input_to_reg(ctx, inputs[2], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
+let ra = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::FpuRRRR {
fpu_op,
rn,
@@ -1658,9 +1658,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let ty = ctx.output_ty(insn, 0);
let bits = ty_bits(ty) as u8;
assert!(bits == 32 || bits == 64);
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
let tmp = ctx.alloc_tmp(RegClass::V128, F64);
// Copy LHS to rd.
@@ -1699,8 +1699,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
_ => panic!("Unknown input/output-bits combination"),
};
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
// First, check the output: it's important to carry the NaN conversion before the
// in-bounds conversion, per wasm semantics.
@@ -1842,8 +1842,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
(true, 64) => NarrowValueMode::SignExtend64,
_ => panic!("Unknown input size"),
};
-let rn = input_to_reg(ctx, inputs[0], narrow_mode);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
+let rd = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::IntToFpu { op, rd, rn });
}
@@ -1853,8 +1853,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let out_ty = ctx.output_ty(insn, 0);
let out_bits = ty_bits(out_ty);
let out_signed = op == Opcode::FcvtToSintSat;
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = output_to_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
// FIMM Vtmp1, u32::MAX or u64::MAX or i32::MAX or i64::MAX
// FMIN Vtmp2, Vin, Vtmp1
@@ -1991,9 +1991,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// Now handle the iadd as above, except use an AddS opcode that sets
// flags.
-let rd = output_to_reg(ctx, outputs[0]);
-let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
-let rm = input_to_rse_imm12(ctx, inputs[1], NarrowValueMode::None);
+let rd = get_output_reg(ctx, outputs[0]);
+let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+let rm = put_input_in_rse_imm12(ctx, inputs[1], NarrowValueMode::None);
let ty = ty.unwrap();
let alu_op = choose_32_64(ty, ALUOp::AddS32, ALUOp::AddS64);
ctx.emit(alu_inst_imm12(alu_op, rd, rn, rm));
@@ -2139,7 +2139,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
kind: CondBrKind::Cond(cond),
});
} else {
-let rt = input_to_reg(
+let rt = put_input_in_reg(
ctx,
InsnInput {
insn: branches[0],
@@ -2173,7 +2173,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
(false, true) => NarrowValueMode::SignExtend64,
(false, false) => NarrowValueMode::ZeroExtend64,
};
-let rn = input_to_reg(
+let rn = put_input_in_reg(
ctx,
InsnInput {
insn: branches[0],
@@ -2181,7 +2181,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
},
narrow_mode,
);
-let rm = input_to_rse_imm12(
+let rm = put_input_in_rse_imm12(
ctx,
InsnInput {
insn: branches[0],
@@ -2220,7 +2220,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
} else {
// If the ifcmp result is actually placed in a
// register, we need to move it back into the flags.
-let rn = input_to_reg(ctx, flag_input, NarrowValueMode::None);
+let rn = put_input_in_reg(ctx, flag_input, NarrowValueMode::None);
ctx.emit(Inst::MovToNZCV { rn });
ctx.emit(Inst::CondBr {
taken,
@@ -2248,7 +2248,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
} else {
// If the ffcmp result is actually placed in a
// register, we need to move it back into the flags.
-let rn = input_to_reg(ctx, flag_input, NarrowValueMode::None);
+let rn = put_input_in_reg(ctx, flag_input, NarrowValueMode::None);
ctx.emit(Inst::MovToNZCV { rn });
ctx.emit(Inst::CondBr {
taken,
@@ -2294,7 +2294,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
needed_space: 4 * (6 + jt_size) as CodeOffset,
});
-let ridx = input_to_reg(
+let ridx = put_input_in_reg(
ctx,
InsnInput {
insn: branches[0],

cranelift/codegen/src/machinst/lower.rs (6 changed lines)

@@ -132,7 +132,11 @@ pub trait LowerCtx {
fn get_input(&self, ir_inst: Inst, idx: usize) -> LowerInput;
/// Get the `idx`th output register of the given IR instruction. When
/// `backend.lower_inst_to_regs(ctx, inst)` is called, it is expected that
-/// the backend will write results to these output register(s).
+/// the backend will write results to these output register(s). This
+/// register will always be "fresh"; it is guaranteed not to overlap with
+/// any of the inputs, and can be freely used as a scratch register within
+/// the lowered instruction sequence, as long as its final value is the
+/// result of the computation.
fn get_output(&self, ir_inst: Inst, idx: usize) -> Writable<Reg>;
// Codegen primitives: allocate temps, emit instructions, set result registers,

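To close, a hedged illustration of the second bullet in the commit
message: the renamed `put_input_in_*` helpers are side-effecting and may
emit code (for example, a zero-extension requested via `NarrowValueMode`)
before returning a register. The sketch below models that behavior with
invented names (`Ctx`, `put_input_in_reg_zext64`); it is not the real
Cranelift implementation:

// Toy model of a side-effecting "put input in register" helper: if the
// value is narrower than the consumer needs, emit a zero-extend into a
// fresh temp and hand that back. All names are invented for this sketch.
#[derive(Debug)]
enum Inst {
    ZeroExtend { rd: u32, rn: u32, from_bits: u8 }, // rd := zext(rn)
}

struct Ctx {
    emitted: Vec<Inst>,
    next_tmp: u32,
}

impl Ctx {
    fn alloc_tmp(&mut self) -> u32 {
        let r = self.next_tmp;
        self.next_tmp += 1;
        r
    }
}

/// Return a register holding `input_reg` zero-extended to 64 bits,
/// emitting an extend instruction only when the input is narrower.
fn put_input_in_reg_zext64(ctx: &mut Ctx, input_reg: u32, from_bits: u8) -> u32 {
    if from_bits < 64 {
        let rd = ctx.alloc_tmp();
        ctx.emitted.push(Inst::ZeroExtend { rd, rn: input_reg, from_bits });
        rd // caller sees the extended value
    } else {
        input_reg // already full-width: no code emitted
    }
}

fn main() {
    let mut ctx = Ctx { emitted: Vec::new(), next_tmp: 100 };
    let r = put_input_in_reg_zext64(&mut ctx, 7, 32);
    println!("reg {} via {:?}", r, ctx.emitted);
}

This is the rename's rationale in miniature: a helper that can append
instructions to the lowering buffer reads better with an imperative name
(`put_input_in_*`) than with a conversion-style one (`input_to_*`).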