
cranelift: Delete more unused regalloc-related stuff (#8604)

Part of the ongoing saga of #8524, #8566, #8581, and #8592
Branch: pull/8568/merge
Jamey Sharp authored 6 months ago, committed by GitHub
Commit: 7d7031910b
15 changed files (changed-line counts in parentheses):
  1. cranelift/codegen/src/isa/aarch64/inst/emit.rs (107)
  2. cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs (2)
  3. cranelift/codegen/src/isa/riscv64/inst/emit.rs (271)
  4. cranelift/codegen/src/isa/riscv64/inst/emit_tests.rs (4)
  5. cranelift/codegen/src/isa/s390x/inst/emit.rs (47)
  6. cranelift/codegen/src/isa/s390x/inst/emit_tests.rs (2)
  7. cranelift/codegen/src/isa/s390x/inst/mod.rs (11)
  8. cranelift/codegen/src/isa/x64/inst/emit.rs (178)
  9. cranelift/codegen/src/isa/x64/inst/emit_tests.rs (4)
  10. cranelift/codegen/src/isa/x64/inst/mod.rs (18)
  11. cranelift/codegen/src/machinst/buffer.rs (70)
  12. cranelift/codegen/src/machinst/mod.rs (12)
  13. cranelift/codegen/src/machinst/vcode.rs (22)
  14. winch/codegen/src/isa/aarch64/asm.rs (2)
  15. winch/codegen/src/isa/x64/asm.rs (2)
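
Before the per-file diffs, here is a condensed view of the core API change, assembled from the hunks below. This is an abbreviated sketch of the `MachInstEmit` methods as implemented by each backend's `Inst`, not the full trait definition:

    // Before: emission and pretty-printing threaded through a regalloc2
    // allocation slice that nothing read any more.
    fn emit(
        &self,
        _allocs: &[regalloc2::Allocation],
        sink: &mut MachBuffer<Inst>,
        emit_info: &Self::Info,
        state: &mut EmitState,
    );
    fn pretty_print_inst(&self, _allocs: &[regalloc2::Allocation], state: &mut Self::State) -> String;

    // After: the dead parameter is gone...
    fn emit(&self, sink: &mut MachBuffer<Inst>, emit_info: &Self::Info, state: &mut EmitState);
    fn pretty_print_inst(&self, state: &mut Self::State) -> String;

    // ...and every internal call site simply drops its empty `&[]` argument:
    inst.emit(&[], sink, emit_info, state); // before
    inst.emit(sink, emit_info, state);      // after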

cranelift/codegen/src/isa/aarch64/inst/emit.rs (107 changed lines)

@ -1,7 +1,6 @@
//! AArch64 ISA: binary code emission.
use cranelift_control::ControlPlane;
use regalloc2::Allocation;
use crate::binemit::StackMap;
use crate::ir::{self, types::*};
@ -716,13 +715,7 @@ impl MachInstEmit for Inst {
type State = EmitState;
type Info = EmitInfo;
fn emit(
&self,
_allocs: &[Allocation],
sink: &mut MachBuffer<Inst>,
emit_info: &Self::Info,
state: &mut EmitState,
) {
fn emit(&self, sink: &mut MachBuffer<Inst>, emit_info: &Self::Info, state: &mut EmitState) {
// N.B.: we *must* not exceed the "worst-case size" used to compute
// where to insert islands, except when islands are explicitly triggered
// (with an `EmitIsland`). We check this in debug builds. This is `mut`
@ -966,7 +959,7 @@ impl MachInstEmit for Inst {
let (mem_insts, mem) = mem_finalize(Some(sink), &mem, access_ty, state);
for inst in mem_insts.into_iter() {
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
// ldst encoding helpers take Reg, not Writable<Reg>.
@ -1106,7 +1099,7 @@ impl MachInstEmit for Inst {
let (mem_insts, mem) = mem_finalize(Some(sink), &mem, access_ty, state);
for inst in mem_insts.into_iter() {
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
let op = match self {
@ -1379,7 +1372,7 @@ impl MachInstEmit for Inst {
assert!(rm.class() == RegClass::Int);
assert!(rd.to_reg().class() == rm.class());
let size = OperandSize::Size64;
Inst::Mov { size, rd, rm }.emit(&[], sink, emit_info, state);
Inst::Mov { size, rd, rm }.emit(sink, emit_info, state);
}
&Inst::MovToPReg { rd, rm } => {
let rd: Writable<Reg> = Writable::from_reg(rd.into());
@ -1393,7 +1386,7 @@ impl MachInstEmit for Inst {
assert!(rd.to_reg().class() == RegClass::Int);
assert!(rm.class() == rd.to_reg().class());
let size = OperandSize::Size64;
Inst::Mov { size, rd, rm }.emit(&[], sink, emit_info, state);
Inst::Mov { size, rd, rm }.emit(sink, emit_info, state);
}
&Inst::MovWide { op, rd, imm, size } => {
sink.put4(enc_move_wide(op, rd, imm, size));
@ -1505,7 +1498,7 @@ impl MachInstEmit for Inst {
from_bits,
to_bits: size.bits(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
match op {
@ -1521,7 +1514,7 @@ impl MachInstEmit for Inst {
rn: x27,
rm: x26,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::AluRRR {
alu_op: ALUOp::OrrNot,
@ -1530,7 +1523,7 @@ impl MachInstEmit for Inst {
rn: xzr,
rm: x28,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
AtomicRMWLoopOp::Umin
| AtomicRMWLoopOp::Umax
@ -1557,7 +1550,7 @@ impl MachInstEmit for Inst {
rm: x26,
extendop,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
} else {
Inst::AluRRR {
alu_op: ALUOp::SubS,
@ -1566,7 +1559,7 @@ impl MachInstEmit for Inst {
rn: x27,
rm: x26,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
Inst::CSel {
@ -1575,7 +1568,7 @@ impl MachInstEmit for Inst {
rn: x27,
rm: x26,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
_ => {
// add/sub/and/orr/eor x28, x27, x26
@ -1600,7 +1593,7 @@ impl MachInstEmit for Inst {
rn: x27,
rm: x26,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
}
@ -2871,7 +2864,7 @@ impl MachInstEmit for Inst {
rn,
imml,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
&Inst::Extend {
rd,
@ -2885,7 +2878,7 @@ impl MachInstEmit for Inst {
rd,
rm: rn,
};
mov.emit(&[], sink, emit_info, state);
mov.emit(sink, emit_info, state);
}
&Inst::Extend {
rd,
@ -2928,7 +2921,7 @@ impl MachInstEmit for Inst {
if is_hint {
sink.put4(key.enc_auti_hint());
Inst::Ret {}.emit(&[], sink, emit_info, state);
Inst::Ret {}.emit(sink, emit_info, state);
} else {
sink.put4(0xd65f0bff | (op2 << 9)); // reta{key}
}
@ -2992,7 +2985,7 @@ impl MachInstEmit for Inst {
rn: callee,
targets: vec![],
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
sink.add_call_site(ir::Opcode::ReturnCallIndirect);
// `emit_return_call_common_sequence` emits an island if
@ -3119,13 +3112,13 @@ impl MachInstEmit for Inst {
rn: zero_reg(),
rm: ridx,
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
// Prevent any data value speculation.
Inst::Csdb.emit(&[], sink, emit_info, state);
Inst::Csdb.emit(sink, emit_info, state);
// Load address of jump table
let inst = Inst::Adr { rd: rtmp1, off: 16 };
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
// Load value out of jump table
let inst = Inst::SLoad32 {
rd: rtmp2,
@ -3136,7 +3129,7 @@ impl MachInstEmit for Inst {
),
flags: MemFlags::trusted(),
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
// Add base of jump table to jump-table-sourced block offset
let inst = Inst::AluRRR {
alu_op: ALUOp::Add,
@ -3145,14 +3138,14 @@ impl MachInstEmit for Inst {
rn: rtmp1.to_reg(),
rm: rtmp2.to_reg(),
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
// Branch to computed address. (`targets` here is only used for successor queries
// and is not needed for emission.)
let inst = Inst::IndirectBr {
rn: rtmp1.to_reg(),
targets: vec![],
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
// Emit jump table (table of 32-bit offsets).
let jt_off = sink.cur_offset();
for &target in targets.iter() {
@ -3186,7 +3179,7 @@ impl MachInstEmit for Inst {
// adrp rd, symbol
sink.add_reloc(Reloc::Aarch64AdrGotPage21, &**name, 0);
let inst = Inst::Adrp { rd, off: 0 };
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
// ldr rd, [rd, :got_lo12:X]
sink.add_reloc(Reloc::Aarch64Ld64GotLo12Nc, &**name, 0);
@ -3195,7 +3188,7 @@ impl MachInstEmit for Inst {
mem: AMode::reg(rd.to_reg()),
flags: MemFlags::trusted(),
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
} else {
// With absolute offsets we set up a load from a preallocated space, and then jump
// over it.
@ -3212,11 +3205,11 @@ impl MachInstEmit for Inst {
},
flags: MemFlags::trusted(),
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
let inst = Inst::Jump {
dest: BranchTarget::ResolvedOffset(12),
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
sink.add_reloc(Reloc::Abs8, &**name, offset);
sink.put8(0);
}
@ -3225,7 +3218,7 @@ impl MachInstEmit for Inst {
let mem = mem.clone();
let (mem_insts, mem) = mem_finalize(Some(sink), &mem, I8, state);
for inst in mem_insts.into_iter() {
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
let (reg, index_reg, offset) = match mem {
@ -3260,7 +3253,7 @@ impl MachInstEmit for Inst {
extendop,
};
add.emit(&[], sink, emit_info, state);
add.emit(sink, emit_info, state);
} else if offset == 0 {
if reg != rd.to_reg() {
let mov = Inst::Mov {
@ -3269,7 +3262,7 @@ impl MachInstEmit for Inst {
rm: reg,
};
mov.emit(&[], sink, emit_info, state);
mov.emit(sink, emit_info, state);
}
} else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) {
let add = Inst::AluRRImm12 {
@ -3279,7 +3272,7 @@ impl MachInstEmit for Inst {
rn: reg,
imm12,
};
add.emit(&[], sink, emit_info, state);
add.emit(sink, emit_info, state);
} else {
// Use `tmp2` here: `reg` may be `spilltmp` if the `AMode` on this instruction
// was initially an `SPOffset`. Assert that `tmp2` is truly free to use. Note
@ -3290,7 +3283,7 @@ impl MachInstEmit for Inst {
debug_assert!(reg != tmp2_reg());
let tmp = writable_tmp2_reg();
for insn in Inst::load_constant(tmp, abs_offset, &mut |_| tmp).into_iter() {
insn.emit(&[], sink, emit_info, state);
insn.emit(sink, emit_info, state);
}
let add = Inst::AluRRR {
alu_op,
@ -3299,7 +3292,7 @@ impl MachInstEmit for Inst {
rn: reg,
rm: tmp.to_reg(),
};
add.emit(&[], sink, emit_info, state);
add.emit(sink, emit_info, state);
}
}
&Inst::Paci { key } => {
@ -3337,7 +3330,7 @@ impl MachInstEmit for Inst {
let jmp = Inst::Jump {
dest: BranchTarget::Label(jump_around_label),
};
jmp.emit(&[], sink, emit_info, state);
jmp.emit(sink, emit_info, state);
sink.emit_island(needed_space + 4, &mut state.ctrl_plane);
sink.bind_label(jump_around_label, &mut state.ctrl_plane);
}
@ -3366,7 +3359,7 @@ impl MachInstEmit for Inst {
// adrp x0, :tlsdesc:tlsvar
sink.add_reloc(Reloc::Aarch64TlsDescAdrPage21, &**symbol, 0);
Inst::Adrp { rd, off: 0 }.emit(&[], sink, emit_info, state);
Inst::Adrp { rd, off: 0 }.emit(sink, emit_info, state);
// ldr tmp, [x0, :tlsdesc_lo12:tlsvar]
sink.add_reloc(Reloc::Aarch64TlsDescLd64Lo12, &**symbol, 0);
@ -3375,7 +3368,7 @@ impl MachInstEmit for Inst {
mem: AMode::reg(rd.to_reg()),
flags: MemFlags::trusted(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// add x0, x0, :tlsdesc_lo12:tlsvar
sink.add_reloc(Reloc::Aarch64TlsDescAddLo12, &**symbol, 0);
@ -3386,7 +3379,7 @@ impl MachInstEmit for Inst {
rn: rd.to_reg(),
imm12: Imm12::maybe_from_u64(0).unwrap(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// blr tmp
sink.add_reloc(Reloc::Aarch64TlsDescCall, &**symbol, 0);
@ -3402,7 +3395,7 @@ impl MachInstEmit for Inst {
callee_pop_size: 0,
}),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// mrs tmp, tpidr_el0
sink.put4(0xd53bd040 | machreg_to_gpr(tmp.to_reg()));
@ -3415,7 +3408,7 @@ impl MachInstEmit for Inst {
rn: rd.to_reg(),
rm: tmp.to_reg(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
&Inst::MachOTlsGetAddr { ref symbol, rd } => {
@ -3448,7 +3441,7 @@ impl MachInstEmit for Inst {
mem: AMode::reg(rd.to_reg()),
flags: MemFlags::trusted(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// call function pointer in temp register
Inst::CallInd {
@ -3463,7 +3456,7 @@ impl MachInstEmit for Inst {
callee_pop_size: 0,
}),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
&Inst::Unwind { ref inst } => {
@ -3507,7 +3500,7 @@ impl MachInstEmit for Inst {
rn: start.to_reg(),
imm12: step,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::Store32 {
rd: regs::zero_reg(),
mem: AMode::RegReg {
@ -3516,7 +3509,7 @@ impl MachInstEmit for Inst {
},
flags: MemFlags::trusted(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::AluRRR {
alu_op: ALUOp::AddS,
size: OperandSize::Size64,
@ -3524,7 +3517,7 @@ impl MachInstEmit for Inst {
rn: start.to_reg(),
rm: end,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
let loop_end = sink.get_label();
Inst::CondBr {
@ -3532,7 +3525,7 @@ impl MachInstEmit for Inst {
not_taken: BranchTarget::Label(loop_end),
kind: CondBrKind::Cond(Cond::Gt),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
sink.bind_label(loop_end, &mut state.ctrl_plane);
}
}
@ -3549,7 +3542,7 @@ impl MachInstEmit for Inst {
state.clear_post_insn();
}
fn pretty_print_inst(&self, _allocs: &[Allocation], state: &mut Self::State) -> String {
fn pretty_print_inst(&self, state: &mut Self::State) -> String {
self.print_with_state(state)
}
}
@ -3560,14 +3553,10 @@ fn emit_return_call_common_sequence(
state: &mut EmitState,
info: &ReturnCallInfo,
) {
for u in info.uses.iter() {
let _ = u.vreg;
}
for inst in
AArch64MachineDeps::gen_clobber_restore(CallConv::Tail, &emit_info.0, state.frame_layout())
{
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
let setup_area_size = state.frame_layout().setup_area_size;
@ -3588,7 +3577,7 @@ fn emit_return_call_common_sequence(
},
flags: MemFlags::trusted(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
// Adjust SP to account for the possible over-allocation in the prologue.
@ -3597,7 +3586,7 @@ fn emit_return_call_common_sequence(
for inst in
AArch64MachineDeps::gen_sp_reg_adjust(i32::try_from(incoming_args_diff).unwrap())
{
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
}

cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs (2 changed lines)

@ -7867,7 +7867,7 @@ fn test_aarch64_binemit() {
assert_eq!(expected_printing, actual_printing);
let mut buffer = MachBuffer::new();
insn.emit(&[], &mut buffer, &emit_info, &mut Default::default());
insn.emit(&mut buffer, &emit_info, &mut Default::default());
let buffer = buffer.finish(&Default::default(), &mut Default::default());
let actual_encoding = &buffer.stringify_code_bytes();
assert_eq!(expected_encoding, actual_encoding);

cranelift/codegen/src/isa/riscv64/inst/emit.rs (271 changed lines)

@ -8,7 +8,6 @@ use crate::isa::riscv64::lower::isle::generated_code::{
};
use crate::trace;
use cranelift_control::ControlPlane;
use regalloc2::Allocation;
pub struct EmitInfo {
shared_flag: settings::Flags,
@ -222,13 +221,7 @@ impl MachInstEmit for Inst {
type State = EmitState;
type Info = EmitInfo;
fn emit(
&self,
_allocs: &[Allocation],
sink: &mut MachBuffer<Inst>,
emit_info: &Self::Info,
state: &mut EmitState,
) {
fn emit(&self, sink: &mut MachBuffer<Inst>, emit_info: &Self::Info, state: &mut EmitState) {
// Check if we need to update the vector state before emitting this instruction
if let Some(expected) = self.expected_vstate() {
if state.vstate != EmitVState::Known(expected.clone()) {
@ -237,7 +230,7 @@ impl MachInstEmit for Inst {
rd: writable_zero_reg(),
vstate: expected.clone(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
}
@ -265,7 +258,7 @@ impl MachInstEmit for Inst {
);
}
fn pretty_print_inst(&self, _allocs: &[Allocation], state: &mut Self::State) -> String {
fn pretty_print_inst(&self, state: &mut Self::State) -> String {
self.print_with_state(state)
}
}
@ -860,7 +853,7 @@ impl Inst {
rs: zero_reg(),
imm12: Imm12::ZERO,
};
x.emit(&[], sink, emit_info, state)
x.emit(sink, emit_info, state)
}
&Inst::RawData { ref data } => {
// Right now we only put a u32 or u64 in this instruction.
@ -891,14 +884,14 @@ impl Inst {
flags: MemFlags::new(),
from: AMode::Label(label_data),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// Jump over the inline pool
Inst::gen_jump(label_end).emit(&[], sink, emit_info, state);
Inst::gen_jump(label_end).emit(sink, emit_info, state);
// Emit the inline data
sink.bind_label(label_data, &mut state.ctrl_plane);
Inst::RawData { data: data.into() }.emit(&[], sink, emit_info, state);
Inst::RawData { data: data.into() }.emit(sink, emit_info, state);
sink.bind_label(label_end, &mut state.ctrl_plane);
}
@ -1017,7 +1010,7 @@ impl Inst {
// register and load from that.
(Some(_), None, None) => {
let tmp = writable_spilltmp_reg();
Inst::LoadAddr { rd: tmp, mem: from }.emit(&[], sink, emit_info, state);
Inst::LoadAddr { rd: tmp, mem: from }.emit(sink, emit_info, state);
(tmp.to_reg(), Imm12::ZERO)
}
@ -1070,7 +1063,7 @@ impl Inst {
// Otherwise load the address it into a reg and load from it.
_ => {
let tmp = writable_spilltmp_reg();
Inst::LoadAddr { rd: tmp, mem: to }.emit(&[], sink, emit_info, state);
Inst::LoadAddr { rd: tmp, mem: to }.emit(sink, emit_info, state);
(tmp.to_reg(), Imm12::ZERO)
}
};
@ -1094,7 +1087,7 @@ impl Inst {
base: link_reg(),
offset: Imm12::ZERO,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
&Inst::Extend {
@ -1135,7 +1128,7 @@ impl Inst {
}
insts
.into_iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
}
&Inst::Call { ref info } => {
@ -1165,7 +1158,7 @@ impl Inst {
base: info.rn,
offset: Imm12::ZERO,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
if let Some(s) = state.take_stack_map() {
sink.add_stack_map(StackMapExtent::StartedAtOffset(start_offset), s);
@ -1204,7 +1197,7 @@ impl Inst {
base: callee,
offset: Imm12::ZERO,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
&Inst::Jal { label } => {
sink.use_label_at_offset(*start_off, label, LabelUse::Jal20);
@ -1229,7 +1222,7 @@ impl Inst {
match not_taken {
CondBrTarget::Label(label) => {
Inst::gen_jump(label).emit(&[], sink, emit_info, state)
Inst::gen_jump(label).emit(sink, emit_info, state)
}
CondBrTarget::Fallthrough => {}
};
@ -1270,11 +1263,11 @@ impl Inst {
vstate: VState::from_type(ty),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
&Inst::MovFromPReg { rd, rm } => {
Inst::gen_move(rd, Reg::from(rm), I64).emit(&[], sink, emit_info, state);
Inst::gen_move(rd, Reg::from(rm), I64).emit(sink, emit_info, state);
}
&Inst::BrTable {
@ -1301,7 +1294,7 @@ impl Inst {
let distance = (inst_count * Inst::UNCOMPRESSED_INSTRUCTION_SIZE as usize) as u32;
if sink.island_needed(distance) {
let jump_around_label = sink.get_label();
Inst::gen_jump(jump_around_label).emit(&[], sink, emit_info, state);
Inst::gen_jump(jump_around_label).emit(sink, emit_info, state);
sink.emit_island(distance + 4, &mut state.ctrl_plane);
sink.bind_label(jump_around_label, &mut state.ctrl_plane);
}
@ -1342,7 +1335,7 @@ impl Inst {
from_bits: 32,
to_bits: 64,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// Bounds check.
//
@ -1351,7 +1344,7 @@ impl Inst {
// default block.
Inst::load_constant_u32(tmp2, targets.len() as u64)
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
Inst::CondBr {
taken: CondBrTarget::Label(label_compute_target),
not_taken: CondBrTarget::Fallthrough,
@ -1361,7 +1354,7 @@ impl Inst {
rs2: tmp2.to_reg(),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
sink.use_label_at_offset(sink.cur_offset(), default_target, LabelUse::PCRel32);
Inst::construct_auipc_and_jalr(None, tmp2, 0)
@ -1486,7 +1479,7 @@ impl Inst {
rs,
imm12,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
(_, Some(rs), None) => {
let mut insts = Inst::load_constant_u64(rd, offset as u64);
@ -1498,7 +1491,7 @@ impl Inst {
});
insts
.into_iter()
.for_each(|inst| inst.emit(&[], sink, emit_info, state));
.for_each(|inst| inst.emit(sink, emit_info, state));
}
(AMode::Const(addr), None, _) => {
// Get an address label for the constant and recurse.
@ -1507,7 +1500,7 @@ impl Inst {
rd,
mem: AMode::Label(label),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
(AMode::Label(label), None, _) => {
// Get the current PC.
@ -1549,18 +1542,18 @@ impl Inst {
not_taken: CondBrTarget::Label(label_false),
kind: condition,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
sink.bind_label(label_true, &mut state.ctrl_plane);
// here is the true
// select the first value
for i in gen_moves(dst.regs(), x.regs()) {
i.emit(&[], sink, emit_info, state);
i.emit(sink, emit_info, state);
}
Inst::gen_jump(label_end).emit(&[], sink, emit_info, state);
Inst::gen_jump(label_end).emit(sink, emit_info, state);
sink.bind_label(label_false, &mut state.ctrl_plane);
for i in gen_moves(dst.regs(), y.regs()) {
i.emit(&[], sink, emit_info, state);
i.emit(sink, emit_info, state);
}
sink.bind_label(label_end, &mut state.ctrl_plane);
@ -1600,11 +1593,11 @@ impl Inst {
src: zero_reg(),
amo: AMO::SeqCst,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
if ty.bits() < 32 {
AtomicOP::extract(dst, offset, dst.to_reg(), ty)
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
} else if ty.bits() == 32 {
Inst::Extend {
rd: dst,
@ -1613,7 +1606,7 @@ impl Inst {
from_bits: 32,
to_bits: 64,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
Inst::CondBr {
taken: CondBrTarget::Label(fail_label),
@ -1624,7 +1617,7 @@ impl Inst {
rs2: dst.to_reg(),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
let store_value = if ty.bits() < 32 {
// reload value to t0.
Inst::Atomic {
@ -1634,11 +1627,11 @@ impl Inst {
src: zero_reg(),
amo: AMO::SeqCst,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// set reset part.
AtomicOP::merge(t0, writable_spilltmp_reg(), offset, v, ty)
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
t0.to_reg()
} else {
v
@ -1650,7 +1643,7 @@ impl Inst {
src: store_value,
amo: AMO::SeqCst,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// check is our value stored.
Inst::CondBr {
taken: CondBrTarget::Label(cas_lebel),
@ -1661,7 +1654,7 @@ impl Inst {
rs2: zero_reg(),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
sink.bind_label(fail_label, &mut state.ctrl_plane);
}
&Inst::AtomicRmwLoop {
@ -1683,7 +1676,7 @@ impl Inst {
src: zero_reg(),
amo: AMO::SeqCst,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
//
let store_value: Reg = match op {
@ -1694,7 +1687,7 @@ impl Inst {
| crate::ir::AtomicRmwOp::Xor => {
AtomicOP::extract(dst, offset, dst.to_reg(), ty)
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
Inst::AluRRR {
alu_op: match op {
crate::ir::AtomicRmwOp::Add => AluOPRRR::Add,
@ -1708,7 +1701,7 @@ impl Inst {
rs1: dst.to_reg(),
rs2: x,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::Atomic {
op: AtomicOP::load_op(ty),
rd: writable_spilltmp_reg2(),
@ -1716,7 +1709,7 @@ impl Inst {
src: zero_reg(),
amo: AMO::SeqCst,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
AtomicOP::merge(
writable_spilltmp_reg2(),
writable_spilltmp_reg(),
@ -1725,14 +1718,14 @@ impl Inst {
ty,
)
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
spilltmp_reg2()
}
crate::ir::AtomicRmwOp::Nand => {
if ty.bits() < 32 {
AtomicOP::extract(dst, offset, dst.to_reg(), ty)
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
}
Inst::AluRRR {
alu_op: AluOPRRR::And,
@ -1740,8 +1733,8 @@ impl Inst {
rs1: x,
rs2: dst.to_reg(),
}
.emit(&[], sink, emit_info, state);
Inst::construct_bit_not(t0, t0.to_reg()).emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::construct_bit_not(t0, t0.to_reg()).emit(sink, emit_info, state);
if ty.bits() < 32 {
Inst::Atomic {
op: AtomicOP::load_op(ty),
@ -1750,7 +1743,7 @@ impl Inst {
src: zero_reg(),
amo: AMO::SeqCst,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
AtomicOP::merge(
writable_spilltmp_reg2(),
writable_spilltmp_reg(),
@ -1759,7 +1752,7 @@ impl Inst {
ty,
)
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
spilltmp_reg2()
} else {
t0.to_reg()
@ -1779,7 +1772,7 @@ impl Inst {
AtomicOP::extract_sext(dst, offset, dst.to_reg(), ty)
}
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
Inst::CondBr {
taken: CondBrTarget::Label(label_select_dst),
@ -1796,12 +1789,12 @@ impl Inst {
rs2: x,
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// here we select x.
Inst::gen_move(t0, x, I64).emit(&[], sink, emit_info, state);
Inst::gen_jump(label_select_done).emit(&[], sink, emit_info, state);
Inst::gen_move(t0, x, I64).emit(sink, emit_info, state);
Inst::gen_jump(label_select_done).emit(sink, emit_info, state);
sink.bind_label(label_select_dst, &mut state.ctrl_plane);
Inst::gen_move(t0, dst.to_reg(), I64).emit(&[], sink, emit_info, state);
Inst::gen_move(t0, dst.to_reg(), I64).emit(sink, emit_info, state);
sink.bind_label(label_select_done, &mut state.ctrl_plane);
Inst::Atomic {
op: AtomicOP::load_op(ty),
@ -1810,7 +1803,7 @@ impl Inst {
src: zero_reg(),
amo: AMO::SeqCst,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
AtomicOP::merge(
writable_spilltmp_reg2(),
writable_spilltmp_reg(),
@ -1819,13 +1812,13 @@ impl Inst {
ty,
)
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
spilltmp_reg2()
}
crate::ir::AtomicRmwOp::Xchg => {
AtomicOP::extract(dst, offset, dst.to_reg(), ty)
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
Inst::Atomic {
op: AtomicOP::load_op(ty),
rd: writable_spilltmp_reg2(),
@ -1833,7 +1826,7 @@ impl Inst {
src: zero_reg(),
amo: AMO::SeqCst,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
AtomicOP::merge(
writable_spilltmp_reg2(),
writable_spilltmp_reg(),
@ -1842,7 +1835,7 @@ impl Inst {
ty,
)
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
spilltmp_reg2()
}
};
@ -1854,7 +1847,7 @@ impl Inst {
src: store_value,
amo: AMO::SeqCst,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// if store is not ok,retry.
Inst::CondBr {
@ -1866,7 +1859,7 @@ impl Inst {
rs2: zero_reg(),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
&Inst::LoadExtName {
@ -1928,10 +1921,10 @@ impl Inst {
flags: MemFlags::trusted(),
from: AMode::Label(label_data),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// Jump over the data
Inst::gen_jump(label_end).emit(&[], sink, emit_info, state);
Inst::gen_jump(label_end).emit(sink, emit_info, state);
sink.bind_label(label_data, &mut state.ctrl_plane);
sink.add_reloc(Reloc::Abs8, name.as_ref(), offset);
@ -2009,8 +2002,8 @@ impl Inst {
not_taken: CondBrTarget::Fallthrough,
kind: cond.inverse(),
}
.emit(&[], sink, emit_info, state);
Inst::Udf { trap_code }.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::Udf { trap_code }.emit(sink, emit_info, state);
sink.bind_label(label_end, &mut state.ctrl_plane);
}
@ -2030,7 +2023,7 @@ impl Inst {
pred: Inst::FENCE_REQ_R | Inst::FENCE_REQ_W,
succ: Inst::FENCE_REQ_R | Inst::FENCE_REQ_W,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// load.
Inst::Load {
rd: rd,
@ -2038,26 +2031,26 @@ impl Inst {
flags: MemFlags::new(),
from: AMode::RegOffset(p, 0),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::Fence {
pred: Inst::FENCE_REQ_R,
succ: Inst::FENCE_REQ_R | Inst::FENCE_REQ_W,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
&Inst::AtomicStore { src, ty, p } => {
Inst::Fence {
pred: Inst::FENCE_REQ_R | Inst::FENCE_REQ_W,
succ: Inst::FENCE_REQ_W,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::Store {
to: AMode::RegOffset(p, 0),
op: StoreOP::from_type(ty),
flags: MemFlags::new(),
src,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
&Inst::Popcnt {
@ -2068,23 +2061,19 @@ impl Inst {
ty,
} => {
// load 0 to sum , init.
Inst::gen_move(sum, zero_reg(), I64).emit(&[], sink, emit_info, state);
Inst::gen_move(sum, zero_reg(), I64).emit(sink, emit_info, state);
// load
Inst::load_imm12(step, Imm12::from_i16(ty.bits() as i16)).emit(
&[],
sink,
emit_info,
state,
);
Inst::load_imm12(step, Imm12::from_i16(ty.bits() as i16))
.emit(sink, emit_info, state);
//
Inst::load_imm12(tmp, Imm12::ONE).emit(&[], sink, emit_info, state);
Inst::load_imm12(tmp, Imm12::ONE).emit(sink, emit_info, state);
Inst::AluRRImm12 {
alu_op: AluOPRRI::Slli,
rd: tmp,
rs: tmp.to_reg(),
imm12: Imm12::from_i16((ty.bits() - 1) as i16),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
let label_done = sink.get_label();
let label_loop = sink.get_label();
sink.bind_label(label_loop, &mut state.ctrl_plane);
@ -2097,7 +2086,7 @@ impl Inst {
rs2: zero_reg(),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// test and add sum.
{
Inst::AluRRR {
@ -2106,7 +2095,7 @@ impl Inst {
rs1: tmp.to_reg(),
rs2: rs,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
let label_over = sink.get_label();
Inst::CondBr {
taken: CondBrTarget::Label(label_over),
@ -2117,14 +2106,14 @@ impl Inst {
rs2: spilltmp_reg2(),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::AluRRImm12 {
alu_op: AluOPRRI::Addi,
rd: sum,
rs: sum.to_reg(),
imm12: Imm12::ONE,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
sink.bind_label(label_over, &mut state.ctrl_plane);
}
// set step and tmp.
@ -2135,15 +2124,15 @@ impl Inst {
rs: step.to_reg(),
imm12: Imm12::from_i16(-1),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::AluRRImm12 {
alu_op: AluOPRRI::Srli,
rd: tmp,
rs: tmp.to_reg(),
imm12: Imm12::ONE,
}
.emit(&[], sink, emit_info, state);
Inst::gen_jump(label_loop).emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::gen_jump(label_loop).emit(sink, emit_info, state);
}
sink.bind_label(label_done, &mut state.ctrl_plane);
}
@ -2156,16 +2145,12 @@ impl Inst {
ty,
} => {
// load 0 to sum , init.
Inst::gen_move(sum, zero_reg(), I64).emit(&[], sink, emit_info, state);
Inst::gen_move(sum, zero_reg(), I64).emit(sink, emit_info, state);
// load
Inst::load_imm12(step, Imm12::from_i16(ty.bits() as i16)).emit(
&[],
sink,
emit_info,
state,
);
Inst::load_imm12(step, Imm12::from_i16(ty.bits() as i16))
.emit(sink, emit_info, state);
//
Inst::load_imm12(tmp, Imm12::ONE).emit(&[], sink, emit_info, state);
Inst::load_imm12(tmp, Imm12::ONE).emit(sink, emit_info, state);
if leading {
Inst::AluRRImm12 {
alu_op: AluOPRRI::Slli,
@ -2173,7 +2158,7 @@ impl Inst {
rs: tmp.to_reg(),
imm12: Imm12::from_i16((ty.bits() - 1) as i16),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
let label_done = sink.get_label();
let label_loop = sink.get_label();
@ -2187,7 +2172,7 @@ impl Inst {
rs2: zero_reg(),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// test and add sum.
{
Inst::AluRRR {
@ -2196,7 +2181,7 @@ impl Inst {
rs1: tmp.to_reg(),
rs2: rs,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::CondBr {
taken: CondBrTarget::Label(label_done),
not_taken: CondBrTarget::Fallthrough,
@ -2206,14 +2191,14 @@ impl Inst {
rs2: spilltmp_reg2(),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::AluRRImm12 {
alu_op: AluOPRRI::Addi,
rd: sum,
rs: sum.to_reg(),
imm12: Imm12::ONE,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
// set step and tmp.
{
@ -2223,7 +2208,7 @@ impl Inst {
rs: step.to_reg(),
imm12: Imm12::from_i16(-1),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::AluRRImm12 {
alu_op: if leading {
AluOPRRI::Srli
@ -2234,8 +2219,8 @@ impl Inst {
rs: tmp.to_reg(),
imm12: Imm12::ONE,
}
.emit(&[], sink, emit_info, state);
Inst::gen_jump(label_loop).emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::gen_jump(label_loop).emit(sink, emit_info, state);
}
sink.bind_label(label_done, &mut state.ctrl_plane);
}
@ -2247,30 +2232,26 @@ impl Inst {
tmp2,
rd,
} => {
Inst::gen_move(rd, zero_reg(), I64).emit(&[], sink, emit_info, state);
Inst::load_imm12(step, Imm12::from_i16(ty.bits() as i16)).emit(
&[],
sink,
emit_info,
state,
);
Inst::gen_move(rd, zero_reg(), I64).emit(sink, emit_info, state);
Inst::load_imm12(step, Imm12::from_i16(ty.bits() as i16))
.emit(sink, emit_info, state);
//
Inst::load_imm12(tmp, Imm12::ONE).emit(&[], sink, emit_info, state);
Inst::load_imm12(tmp, Imm12::ONE).emit(sink, emit_info, state);
Inst::AluRRImm12 {
alu_op: AluOPRRI::Slli,
rd: tmp,
rs: tmp.to_reg(),
imm12: Imm12::from_i16((ty.bits() - 1) as i16),
}
.emit(&[], sink, emit_info, state);
Inst::load_imm12(tmp2, Imm12::ONE).emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::load_imm12(tmp2, Imm12::ONE).emit(sink, emit_info, state);
Inst::AluRRImm12 {
alu_op: AluOPRRI::Slli,
rd: tmp2,
rs: tmp2.to_reg(),
imm12: Imm12::from_i16((ty.bits() - 8) as i16),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
let label_done = sink.get_label();
let label_loop = sink.get_label();
@ -2284,7 +2265,7 @@ impl Inst {
rs2: zero_reg(),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// test and set bit.
{
Inst::AluRRR {
@ -2293,7 +2274,7 @@ impl Inst {
rs1: tmp.to_reg(),
rs2: rs,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
let label_over = sink.get_label();
Inst::CondBr {
taken: CondBrTarget::Label(label_over),
@ -2304,14 +2285,14 @@ impl Inst {
rs2: spilltmp_reg2(),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::AluRRR {
alu_op: AluOPRRR::Or,
rd: rd,
rs1: rd.to_reg(),
rs2: tmp2.to_reg(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
sink.bind_label(label_over, &mut state.ctrl_plane);
}
// set step and tmp.
@ -2322,33 +2303,29 @@ impl Inst {
rs: step.to_reg(),
imm12: Imm12::from_i16(-1),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::AluRRImm12 {
alu_op: AluOPRRI::Srli,
rd: tmp,
rs: tmp.to_reg(),
imm12: Imm12::ONE,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
{
// reset tmp2
// if (step %=8 == 0) then tmp2 = tmp2 >> 15
// if (step %=8 != 0) then tmp2 = tmp2 << 1
let label_over = sink.get_label();
let label_sll_1 = sink.get_label();
Inst::load_imm12(writable_spilltmp_reg2(), Imm12::from_i16(8)).emit(
&[],
sink,
emit_info,
state,
);
Inst::load_imm12(writable_spilltmp_reg2(), Imm12::from_i16(8))
.emit(sink, emit_info, state);
Inst::AluRRR {
alu_op: AluOPRRR::Rem,
rd: writable_spilltmp_reg2(),
rs1: step.to_reg(),
rs2: spilltmp_reg2(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::CondBr {
taken: CondBrTarget::Label(label_sll_1),
not_taken: CondBrTarget::Fallthrough,
@ -2358,15 +2335,15 @@ impl Inst {
rs2: zero_reg(),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::AluRRImm12 {
alu_op: AluOPRRI::Srli,
rd: tmp2,
rs: tmp2.to_reg(),
imm12: Imm12::from_i16(15),
}
.emit(&[], sink, emit_info, state);
Inst::gen_jump(label_over).emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::gen_jump(label_over).emit(sink, emit_info, state);
sink.bind_label(label_sll_1, &mut state.ctrl_plane);
Inst::AluRRImm12 {
alu_op: AluOPRRI::Slli,
@ -2374,10 +2351,10 @@ impl Inst {
rs: tmp2.to_reg(),
imm12: Imm12::ONE,
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
sink.bind_label(label_over, &mut state.ctrl_plane);
}
Inst::gen_jump(label_loop).emit(&[], sink, emit_info, state);
Inst::gen_jump(label_loop).emit(sink, emit_info, state);
}
sink.bind_label(label_done, &mut state.ctrl_plane);
}
@ -2389,10 +2366,10 @@ impl Inst {
let step = writable_spilltmp_reg();
Inst::load_constant_u64(step, (guard_size as u64) * (probe_count as u64))
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
Inst::load_constant_u64(guard_size_tmp, guard_size as u64)
.iter()
.for_each(|i| i.emit(&[], sink, emit_info, state));
.for_each(|i| i.emit(sink, emit_info, state));
let loop_start = sink.get_label();
let label_done = sink.get_label();
@ -2406,7 +2383,7 @@ impl Inst {
rs2: guard_size_tmp.to_reg(),
},
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// compute address.
Inst::AluRRR {
alu_op: AluOPRRR::Sub,
@ -2414,14 +2391,14 @@ impl Inst {
rs1: stack_reg(),
rs2: step.to_reg(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::Store {
to: AMode::RegOffset(spilltmp_reg2(), 0),
op: StoreOP::Sb,
flags: MemFlags::new(),
src: zero_reg(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
// reset step.
Inst::AluRRR {
alu_op: AluOPRRR::Sub,
@ -2429,8 +2406,8 @@ impl Inst {
rs1: step.to_reg(),
rs2: guard_size_tmp.to_reg(),
}
.emit(&[], sink, emit_info, state);
Inst::gen_jump(loop_start).emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::gen_jump(loop_start).emit(sink, emit_info, state);
sink.bind_label(label_done, &mut state.ctrl_plane);
}
&Inst::VecAluRRRImm5 {
@ -2533,7 +2510,7 @@ impl Inst {
rd: tmp,
mem: base.clone(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
tmp.to_reg()
}
}
@ -2580,7 +2557,7 @@ impl Inst {
rd: tmp,
mem: base.clone(),
}
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
tmp.to_reg()
}
}
@ -2636,7 +2613,7 @@ fn emit_return_call_common_sequence(
ty,
MemFlags::trusted(),
)
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
clobber_offset -= 8
}
@ -2650,7 +2627,7 @@ fn emit_return_call_common_sequence(
I64,
MemFlags::trusted(),
)
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
Inst::gen_load(
writable_fp_reg(),
@ -2658,7 +2635,7 @@ fn emit_return_call_common_sequence(
I64,
MemFlags::trusted(),
)
.emit(&[], sink, emit_info, state);
.emit(sink, emit_info, state);
}
// If we over-allocated the incoming args area in the prologue, resize down to what the callee
@ -2670,7 +2647,7 @@ fn emit_return_call_common_sequence(
let sp_increment = sp_to_fp_offset + setup_area_size + incoming_args_diff;
if sp_increment > 0 {
for inst in Riscv64MachineDeps::gen_sp_reg_adjust(i32::try_from(sp_increment).unwrap()) {
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
}
}

cranelift/codegen/src/isa/riscv64/inst/emit_tests.rs (4 changed lines)

@ -2114,7 +2114,7 @@ fn test_riscv64_binemit() {
assert_eq!(unit.assembly, actual_printing);
let mut buffer = MachBuffer::new();
unit.inst
.emit(&[], &mut buffer, &emit_info, &mut Default::default());
.emit(&mut buffer, &emit_info, &mut Default::default());
let buffer = buffer.finish(&Default::default(), &mut Default::default());
let actual_encoding = buffer.stringify_code_bytes();
@ -2201,7 +2201,7 @@ fn riscv64_worst_case_instruction_size() {
let mut max: (u32, MInst) = (0, Inst::Nop0);
for i in candidates {
let mut buffer = MachBuffer::new();
i.emit(&[], &mut buffer, &emit_info, &mut Default::default());
i.emit(&mut buffer, &emit_info, &mut Default::default());
let buffer = buffer.finish(&Default::default(), &mut Default::default());
let length = buffer.data().len() as u32;
if length > max.0 {

cranelift/codegen/src/isa/s390x/inst/emit.rs (47 changed lines)

@ -6,7 +6,6 @@ use crate::isa::s390x::inst::*;
use crate::isa::s390x::settings as s390x_settings;
use crate::trace;
use cranelift_control::ControlPlane;
use regalloc2::Allocation;
/// Debug macro for testing that a regpair is valid: that the high register is even, and the low
/// register is one higher than the high register.
@ -181,7 +180,7 @@ pub fn mem_emit(
},
);
for inst in mem_insts.into_iter() {
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
if add_trap {
@ -244,7 +243,7 @@ pub fn mem_rs_emit(
},
);
for inst in mem_insts.into_iter() {
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
if add_trap {
@ -295,7 +294,7 @@ pub fn mem_imm8_emit(
},
);
for inst in mem_insts.into_iter() {
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
if add_trap {
@ -342,7 +341,7 @@ pub fn mem_imm16_emit(
},
);
for inst in mem_insts.into_iter() {
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
if add_trap {
@ -412,7 +411,7 @@ pub fn mem_vrx_emit(
},
);
for inst in mem_insts.into_iter() {
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
if add_trap {
@ -1364,17 +1363,11 @@ impl MachInstEmit for Inst {
type State = EmitState;
type Info = EmitInfo;
fn emit(
&self,
_allocs: &[Allocation],
sink: &mut MachBuffer<Inst>,
emit_info: &Self::Info,
state: &mut EmitState,
) {
fn emit(&self, sink: &mut MachBuffer<Inst>, emit_info: &Self::Info, state: &mut EmitState) {
self.emit_with_alloc_consumer(sink, emit_info, state)
}
fn pretty_print_inst(&self, _allocs: &[Allocation], state: &mut EmitState) -> String {
fn pretty_print_inst(&self, state: &mut EmitState) -> String {
self.print_with_state(state)
}
}
@ -1450,7 +1443,7 @@ impl Inst {
ri: rn,
rm,
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
} else {
put(sink, &enc_rrf_ab(opcode, rd.to_reg(), rn, rm, 0));
}
@ -1468,7 +1461,7 @@ impl Inst {
ri: rn,
imm,
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
} else {
let opcode = match alu_op {
ALUOp::Add32 => 0xecd8, // AHIK
@ -2053,7 +2046,7 @@ impl Inst {
target: loop_label,
cond,
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
// Emit label at the end of the loop.
sink.bind_label(done_label, &mut state.ctrl_plane);
@ -2240,7 +2233,7 @@ impl Inst {
&Inst::MovPReg { rd, rm } => {
let rm: Reg = rm.into();
debug_assert!([regs::gpr(0), regs::gpr(14), regs::gpr(15)].contains(&rm));
Inst::Mov64 { rd, rm }.emit(&[], sink, emit_info, state);
Inst::Mov64 { rd, rm }.emit(sink, emit_info, state);
}
&Inst::Mov32 { rd, rm } => {
let opcode = 0x18; // LR
@ -2360,7 +2353,7 @@ impl Inst {
rd,
mem: MemArg::reg(reg, MemFlags::trusted()),
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
&Inst::FpuMove32 { rd, rn } => {
@ -2422,7 +2415,7 @@ impl Inst {
mem: MemArg::reg(reg, MemFlags::trusted()),
lane_imm: 0,
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
&Inst::LoadFpuConst64 { rd, const_data } => {
let opcode = 0xa75; // BRAS
@ -2435,7 +2428,7 @@ impl Inst {
mem: MemArg::reg(reg, MemFlags::trusted()),
lane_imm: 0,
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
&Inst::FpuRR { fpu_op, rd, rn } => {
let (opcode, m3, m4, m5, opcode_fpr) = match fpu_op {
@ -2846,7 +2839,7 @@ impl Inst {
rn,
rm,
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
&Inst::VecLoad { rd, ref mem }
@ -2939,7 +2932,7 @@ impl Inst {
rd,
mem: MemArg::reg(reg, MemFlags::trusted()),
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
&Inst::VecLoadConstReplicate {
size,
@ -2957,7 +2950,7 @@ impl Inst {
rd,
mem: MemArg::reg(reg, MemFlags::trusted()),
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
}
&Inst::VecImmByteMask { rd, mask } => {
let opcode = 0xe744; // VGBM
@ -3356,7 +3349,7 @@ impl Inst {
target: table_label,
},
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
// Set temp to target address by adding the value of the jump table entry.
let inst = Inst::AluRX {
@ -3365,7 +3358,7 @@ impl Inst {
ri: rtmp.to_reg(),
mem: MemArg::reg_plus_reg(rtmp.to_reg(), ridx, MemFlags::trusted()),
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
// Branch to computed address. (`targets` here is only used for successor queries
// and is not needed for emission.)
@ -3373,7 +3366,7 @@ impl Inst {
rn: rtmp.to_reg(),
targets: vec![],
};
inst.emit(&[], sink, emit_info, state);
inst.emit(sink, emit_info, state);
// Emit jump table (table of 32-bit offsets).
sink.bind_label(table_label, &mut state.ctrl_plane);

cranelift/codegen/src/isa/s390x/inst/emit_tests.rs (2 changed lines)

@ -13375,7 +13375,7 @@ fn test_s390x_binemit() {
buffer.bind_label(label0, ctrl_plane);
// Emit the instruction.
insn.emit(&[], &mut buffer, &emit_info, &mut Default::default());
insn.emit(&mut buffer, &emit_info, &mut Default::default());
// Label 1 after the instruction.
let label1 = buffer.get_label();

cranelift/codegen/src/isa/s390x/inst/mod.rs (11 changed lines)

@ -2096,9 +2096,8 @@ impl Inst {
format!("vlr {}, {}", rd, rn)
}
}
&Inst::FpuCMov32 { rd, cond, ri, rm } => {
&Inst::FpuCMov32 { rd, cond, rm, .. } => {
let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
let _ri = ri;
let (rm, rm_fpr) = pretty_print_fpr(rm);
if rd_fpr.is_some() && rm_fpr.is_some() {
let cond = cond.invert().pretty_print_default();
@ -2108,9 +2107,8 @@ impl Inst {
format!("j{} 10 ; vlr {}, {}", cond, rd, rm)
}
}
&Inst::FpuCMov64 { rd, cond, ri, rm } => {
&Inst::FpuCMov64 { rd, cond, rm, .. } => {
let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
let _ri = ri;
let (rm, rm_fpr) = pretty_print_fpr(rm);
if rd_fpr.is_some() && rm_fpr.is_some() {
let cond = cond.invert().pretty_print_default();
@ -2822,16 +2820,16 @@ impl Inst {
&Inst::VecLoadLane {
size,
rd,
ri,
ref mem,
lane_imm,
..
}
| &Inst::VecLoadLaneRev {
size,
rd,
ri,
ref mem,
lane_imm,
..
} => {
let opcode_vrx = match (self, size) {
(&Inst::VecLoadLane { .. }, 8) => "vleb",
@ -2845,7 +2843,6 @@ impl Inst {
};
let (rd, _) = pretty_print_fpr(rd.to_reg());
let _ri = ri;
let mem = mem.clone();
let (mem_str, mem) = mem_finalize_for_show(
&mem,
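
The same cleanup shows up in the s390x pretty-printer above: pattern bindings for fields that only existed for register allocation are replaced with `..`, which also lets the `let _ri = ri;` suppression lines go away. A condensed before/after, assembled from the hunks in this file (abbreviated, not the full match arms):

    // Before: the unused `ri` field had to be bound and then explicitly discarded.
    &Inst::FpuCMov32 { rd, cond, ri, rm } => {
        let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
        let _ri = ri;
        let (rm, rm_fpr) = pretty_print_fpr(rm);
        // ...
    }

    // After: `..` skips the field, so no dummy binding is needed.
    &Inst::FpuCMov32 { rd, cond, rm, .. } => {
        let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
        let (rm, rm_fpr) = pretty_print_fpr(rm);
        // ...
    }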

cranelift/codegen/src/isa/x64/inst/emit.rs (178 changed lines)

@ -36,7 +36,7 @@ fn emit_signed_cvt(
src2: GprMem::new(RegMem::reg(src)).unwrap(),
src2_size: OperandSize::Size64,
}
.emit(&[], sink, info, state);
.emit(sink, info, state);
}
/// Emits a one way conditional jump if CC is set (true).
@ -764,7 +764,7 @@ pub(crate) fn emit(
// Check if the divisor is -1, and if it isn't then immediately
// go to the `idiv`.
let inst = Inst::cmp_rmi_r(size, divisor, RegMemImm::imm(0xffffffff));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
one_way_jmp(sink, CC::NZ, do_op);
// ... otherwise the divisor is -1 and the result is always 0. This
@ -777,9 +777,9 @@ pub(crate) fn emit(
// output but for srem only the `dst_remainder` output is used for
// now.
let inst = Inst::imm(OperandSize::Size64, 0, Writable::from_reg(dst));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::jmp_known(done_label);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// Here the `idiv` is executed, which is different depending on the
// size
@ -803,7 +803,7 @@ pub(crate) fn emit(
Writable::from_reg(Gpr::new(regs::rdx()).unwrap()),
),
};
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
sink.bind_label(done_label, state.ctrl_plane_mut());
}
@ -888,7 +888,7 @@ pub(crate) fn emit(
let src = Gpr::new(src).unwrap();
let size = OperandSize::Size64;
let dst = WritableGpr::from_writable_reg(dst.to_writable_reg()).unwrap();
Inst::MovRR { size, src, dst }.emit(&[], sink, info, state);
Inst::MovRR { size, src, dst }.emit(sink, info, state);
}
Inst::MovToPReg { src, dst } => {
@ -898,7 +898,7 @@ pub(crate) fn emit(
debug_assert!([regs::rsp(), regs::rbp(), regs::pinned_reg()].contains(&dst));
let dst = WritableGpr::from_writable_reg(Writable::from_reg(dst)).unwrap();
let size = OperandSize::Size64;
Inst::MovRR { size, src, dst }.emit(&[], sink, info, state);
Inst::MovRR { size, src, dst }.emit(sink, info, state);
}
Inst::MovzxRmR { ext_mode, src, dst } => {
@ -1023,7 +1023,7 @@ pub(crate) fn emit(
RegMemImm::imm(simm32 as u32),
Writable::from_reg(dst),
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
// If the offset is 0 and the shift is 0 (meaning multiplication
// by 1) then:
@ -1050,7 +1050,7 @@ pub(crate) fn emit(
RegMemImm::reg(operand.to_reg()),
Writable::from_reg(dst.to_reg()),
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
// If `lea`'s 3-operand mode is leveraged by regalloc, or if
@ -1437,7 +1437,7 @@ pub(crate) fn emit(
}
};
let inst = Inst::xmm_unary_rm_r(op, consequent.into(), dst);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
sink.bind_label(next, state.ctrl_plane_mut());
}
@ -1530,7 +1530,7 @@ pub(crate) fn emit(
// Create the guard bound register
// mov tmp_reg, rsp
let inst = Inst::gen_move(tmp, regs::rsp(), types::I64);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// sub tmp_reg, GUARD_SIZE * probe_count
let inst = Inst::alu_rmi_r(
@ -1539,7 +1539,7 @@ pub(crate) fn emit(
RegMemImm::imm(guard_size * probe_count),
tmp,
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// Emit the main loop!
let loop_start = sink.get_label();
@ -1552,7 +1552,7 @@ pub(crate) fn emit(
RegMemImm::imm(*guard_size),
Writable::from_reg(regs::rsp()),
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// TODO: `mov [rsp], 0` would be better, but we don't have that instruction
// Probe the stack! We don't use Inst::gen_store_stack here because we need a predictable
@ -1563,7 +1563,7 @@ pub(crate) fn emit(
regs::rsp(),
SyntheticAmode::Real(Amode::imm_reg(0, regs::rsp())),
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// Compare and jump if we are not done yet
// cmp rsp, tmp_reg
@ -1572,7 +1572,7 @@ pub(crate) fn emit(
tmp.to_reg(),
RegMemImm::reg(regs::rsp()),
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// jne .loop_start
// TODO: Encoding the JmpIf as a short jump saves us 4 bytes here.
@ -1591,7 +1591,7 @@ pub(crate) fn emit(
RegMemImm::imm(guard_size * probe_count),
Writable::from_reg(regs::rsp()),
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
Inst::CallKnown { dest, opcode, info } => {
@ -1643,7 +1643,7 @@ pub(crate) fn emit(
Inst::JmpUnknown {
target: RegMem::reg(callee),
}
.emit(&[], sink, info, state);
.emit(sink, info, state);
sink.add_call_site(ir::Opcode::ReturnCallIndirect);
}
@ -1824,7 +1824,7 @@ pub(crate) fn emit(
// Load base address of jump table.
let start_of_jumptable = sink.get_label();
let inst = Inst::lea(Amode::rip_relative(start_of_jumptable), tmp1);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// Load value out of the jump table. It's a relative offset to the target block, so it
// might be negative; use a sign-extension.
@ -1838,7 +1838,7 @@ pub(crate) fn emit(
)),
tmp2,
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// Add base of jump table to jump-table-sourced block offset.
let inst = Inst::alu_rmi_r(
@ -1847,11 +1847,11 @@ pub(crate) fn emit(
RegMemImm::reg(tmp2.to_reg()),
tmp1,
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// Branch to computed address.
let inst = Inst::jmp_unknown(RegMem::reg(tmp1.to_reg()));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// Emit jump table (table of 32-bit offsets).
sink.bind_label(start_of_jumptable, state.ctrl_plane_mut());
@ -2977,7 +2977,7 @@ pub(crate) fn emit(
};
let inst = Inst::xmm_cmp_rm_r(cmp_op, dst.to_reg(), RegMem::reg(lhs));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
one_way_jmp(sink, CC::NZ, do_min_max);
one_way_jmp(sink, CC::P, propagate_nan);
@ -2987,24 +2987,24 @@ pub(crate) fn emit(
// case, and are no-ops otherwise.
let op = if *is_min { or_op } else { and_op };
let inst = Inst::xmm_rm_r(op, RegMem::reg(lhs), dst);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::jmp_known(done);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// x86's min/max are not symmetric; if either operand is a NaN, they return the
// read-only operand: perform an addition between the two operands, which has the
// desired NaN propagation effects.
sink.bind_label(propagate_nan, state.ctrl_plane_mut());
let inst = Inst::xmm_rm_r(add_op, RegMem::reg(lhs), dst);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
one_way_jmp(sink, CC::P, done);
sink.bind_label(do_min_max, state.ctrl_plane_mut());
let inst = Inst::xmm_rm_r(min_max_op, RegMem::reg(lhs), dst);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
sink.bind_label(done, state.ctrl_plane_mut());
}
@ -3318,7 +3318,7 @@ pub(crate) fn emit(
// thing.
// TODO use tst src, src here.
let inst = Inst::cmp_rmi_r(OperandSize::Size64, src, RegMemImm::imm(0));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
one_way_jmp(sink, CC::L, handle_negative);
@ -3334,14 +3334,14 @@ pub(crate) fn emit(
);
let inst = Inst::jmp_known(done);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
sink.bind_label(handle_negative, state.ctrl_plane_mut());
// Divide x by two to get it in range for the signed conversion, keep the LSB, and
// scale it back up on the FP side.
let inst = Inst::gen_move(tmp_gpr1, src, types::I64);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// tmp_gpr1 := src >> 1
let inst = Inst::shift_r(
@ -3351,10 +3351,10 @@ pub(crate) fn emit(
tmp_gpr1.to_reg(),
tmp_gpr1,
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::gen_move(tmp_gpr2, src, types::I64);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::alu_rmi_r(
OperandSize::Size64,
@ -3362,7 +3362,7 @@ pub(crate) fn emit(
RegMemImm::imm(1),
tmp_gpr2,
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::alu_rmi_r(
OperandSize::Size64,
@ -3370,7 +3370,7 @@ pub(crate) fn emit(
RegMemImm::reg(tmp_gpr1.to_reg()),
tmp_gpr2,
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
emit_signed_cvt(
sink,
@ -3387,7 +3387,7 @@ pub(crate) fn emit(
SseOpcode::Addss
};
let inst = Inst::xmm_rm_r(add_op, RegMem::reg(dst.to_reg()), dst);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
sink.bind_label(done, state.ctrl_plane_mut());
}
@ -3462,18 +3462,18 @@ pub(crate) fn emit(
// The truncation.
let inst = Inst::xmm_to_gpr(trunc_op, src, dst, *dst_size);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// Compare against 1, in case of overflow the dst operand was INT_MIN.
let inst = Inst::cmp_rmi_r(*dst_size, dst.to_reg(), RegMemImm::imm(1));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
one_way_jmp(sink, CC::NO, done); // no overflow => done
// Check for NaN.
let inst = Inst::xmm_cmp_rm_r(cmp_op, src, RegMem::reg(src));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
if *is_saturating {
let not_nan = sink.get_label();
@ -3486,10 +3486,10 @@ pub(crate) fn emit(
RegMemImm::reg(dst.to_reg()),
dst,
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::jmp_known(done);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
sink.bind_label(not_nan, state.ctrl_plane_mut());
@ -3497,10 +3497,10 @@ pub(crate) fn emit(
// Zero out tmp_xmm.
let inst = Inst::xmm_rm_r(SseOpcode::Xorpd, RegMem::reg(tmp_xmm.to_reg()), tmp_xmm);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::xmm_cmp_rm_r(cmp_op, tmp_xmm.to_reg(), RegMem::reg(src));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// Jump if >= to done.
one_way_jmp(sink, CC::NB, done);
@ -3508,14 +3508,14 @@ pub(crate) fn emit(
// Otherwise, put INT_MAX.
if *dst_size == OperandSize::Size64 {
let inst = Inst::imm(OperandSize::Size64, 0x7fffffffffffffff, dst);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
} else {
let inst = Inst::imm(OperandSize::Size32, 0x7fffffff, dst);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
} else {
let inst = Inst::trap_if(CC::P, TrapCode::BadConversionToInteger);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// Check if INT_MIN was the correct result: determine the smallest floating point
// number that would convert to INT_MIN, put it in a temporary register, and compare
@ -3529,7 +3529,7 @@ pub(crate) fn emit(
OperandSize::Size32 => {
let cst = Ieee32::pow2(output_bits - 1).neg().bits();
let inst = Inst::imm(OperandSize::Size32, cst as u64, tmp_gpr);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
OperandSize::Size64 => {
// An f64 can represent `i32::min_value() - 1` exactly with precision to spare,
@ -3541,34 +3541,34 @@ pub(crate) fn emit(
Ieee64::pow2(output_bits - 1).neg()
};
let inst = Inst::imm(OperandSize::Size64, cst.bits(), tmp_gpr);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
_ => unreachable!(),
}
let inst =
Inst::gpr_to_xmm(cast_op, RegMem::reg(tmp_gpr.to_reg()), *src_size, tmp_xmm);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::xmm_cmp_rm_r(cmp_op, src, RegMem::reg(tmp_xmm.to_reg()));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// no trap if src >= or > threshold
let inst = Inst::trap_if(no_overflow_cc.invert(), TrapCode::IntegerOverflow);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// If positive, it was a real overflow.
// Zero out the tmp_xmm register.
let inst = Inst::xmm_rm_r(SseOpcode::Xorpd, RegMem::reg(tmp_xmm.to_reg()), tmp_xmm);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::xmm_cmp_rm_r(cmp_op, tmp_xmm.to_reg(), RegMem::reg(src));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
// no trap if 0 >= src
let inst = Inst::trap_if(CC::B, TrapCode::IntegerOverflow);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
sink.bind_label(done, state.ctrl_plane_mut());
@ -3651,13 +3651,13 @@ pub(crate) fn emit(
};
let inst = Inst::imm(*src_size, cst, tmp_gpr);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::gpr_to_xmm(cast_op, RegMem::reg(tmp_gpr.to_reg()), *src_size, tmp_xmm);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::xmm_cmp_rm_r(cmp_op, src, RegMem::reg(tmp_xmm.to_reg()));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let handle_large = sink.get_label();
one_way_jmp(sink, CC::NB, handle_large); // jump to handle_large if src >= large_threshold
@ -3672,25 +3672,25 @@ pub(crate) fn emit(
RegMemImm::reg(dst.to_reg()),
dst,
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::jmp_known(done);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
sink.bind_label(not_nan, state.ctrl_plane_mut());
} else {
// Trap.
let inst = Inst::trap_if(CC::P, TrapCode::BadConversionToInteger);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
// Actual truncation for small inputs: if the result is not positive, then we had an
// overflow.
let inst = Inst::xmm_to_gpr(trunc_op, src, dst, *dst_size);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::cmp_rmi_r(*dst_size, dst.to_reg(), RegMemImm::imm(0));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
one_way_jmp(sink, CC::NL, done); // if dst >= 0, jump to done
@ -3703,14 +3703,14 @@ pub(crate) fn emit(
RegMemImm::reg(dst.to_reg()),
dst,
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::jmp_known(done);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
} else {
// Trap.
let inst = Inst::trap(TrapCode::IntegerOverflow);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
// Now handle large inputs.
@ -3718,16 +3718,16 @@ pub(crate) fn emit(
sink.bind_label(handle_large, state.ctrl_plane_mut());
let inst = Inst::gen_move(tmp_xmm2, src, types::F64);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::xmm_rm_r(sub_op, RegMem::reg(tmp_xmm.to_reg()), tmp_xmm2);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::xmm_to_gpr(trunc_op, tmp_xmm2.to_reg(), dst, *dst_size);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::cmp_rmi_r(*dst_size, dst.to_reg(), RegMemImm::imm(0));
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
if *is_saturating {
let next_is_large = sink.get_label();
@ -3744,19 +3744,19 @@ pub(crate) fn emit(
},
dst,
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::jmp_known(done);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
sink.bind_label(next_is_large, state.ctrl_plane_mut());
} else {
let inst = Inst::trap_if(CC::L, TrapCode::IntegerOverflow);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
if *dst_size == OperandSize::Size64 {
let inst = Inst::imm(OperandSize::Size64, 1 << 63, tmp_gpr);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
let inst = Inst::alu_rmi_r(
OperandSize::Size64,
@ -3764,7 +3764,7 @@ pub(crate) fn emit(
RegMemImm::reg(tmp_gpr.to_reg()),
dst,
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
} else {
let inst = Inst::alu_rmi_r(
OperandSize::Size32,
@ -3772,7 +3772,7 @@ pub(crate) fn emit(
RegMemImm::imm(1 << 31),
dst,
);
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
sink.bind_label(done, state.ctrl_plane_mut());
@ -3897,14 +3897,14 @@ pub(crate) fn emit(
// mov{zbq,zwq,zlq,q} (%r_address), %rax
// No need to call `add_trap` here, since the `i1` emit will do that.
let i1 = Inst::load(*ty, mem.clone(), dst_old, ExtKind::ZeroExtend);
i1.emit(&[], sink, info, state);
i1.emit(sink, info, state);
// again:
sink.bind_label(again_label, state.ctrl_plane_mut());
// movq %rax, %r_temp
let i2 = Inst::mov_r_r(OperandSize::Size64, dst_old.to_reg(), temp);
i2.emit(&[], sink, info, state);
i2.emit(sink, info, state);
let operand_rmi = RegMemImm::reg(operand);
use inst_common::MachAtomicRmwOp as RmwOp;
@ -3912,17 +3912,17 @@ pub(crate) fn emit(
RmwOp::Xchg => {
// movq %r_operand, %r_temp
let i3 = Inst::mov_r_r(OperandSize::Size64, operand, temp);
i3.emit(&[], sink, info, state);
i3.emit(sink, info, state);
}
RmwOp::Nand => {
// andq %r_operand, %r_temp
let i3 =
Inst::alu_rmi_r(OperandSize::Size64, AluRmiROpcode::And, operand_rmi, temp);
i3.emit(&[], sink, info, state);
i3.emit(sink, info, state);
// notq %r_temp
let i4 = Inst::not(OperandSize::Size64, temp);
i4.emit(&[], sink, info, state);
i4.emit(sink, info, state);
}
RmwOp::Umin | RmwOp::Umax | RmwOp::Smin | RmwOp::Smax => {
// cmp %r_temp, %r_operand
@ -3931,7 +3931,7 @@ pub(crate) fn emit(
operand,
RegMemImm::reg(temp.to_reg()),
);
i3.emit(&[], sink, info, state);
i3.emit(sink, info, state);
// cmovcc %r_operand, %r_temp
let cc = match op {
@ -3942,7 +3942,7 @@ pub(crate) fn emit(
_ => unreachable!(),
};
let i4 = Inst::cmove(OperandSize::Size64, cc, RegMem::reg(operand), temp);
i4.emit(&[], sink, info, state);
i4.emit(sink, info, state);
}
_ => {
// opq %r_operand, %r_temp
@ -3960,7 +3960,7 @@ pub(crate) fn emit(
| RmwOp::Smax => unreachable!(),
};
let i3 = Inst::alu_rmi_r(OperandSize::Size64, alu_op, operand_rmi, temp);
i3.emit(&[], sink, info, state);
i3.emit(sink, info, state);
}
}
@ -3973,7 +3973,7 @@ pub(crate) fn emit(
mem: mem.into(),
dst_old,
};
i4.emit(&[], sink, info, state);
i4.emit(sink, info, state);
// jnz again
one_way_jmp(sink, CC::NZ, again_label);
@ -4227,14 +4227,10 @@ fn emit_return_call_common_sequence(
let tmp = call_info.tmp.to_writable_reg();
for u in call_info.uses.iter() {
let _ = u.vreg;
}
for inst in
X64ABIMachineSpec::gen_clobber_restore(CallConv::Tail, &info.flags, state.frame_layout())
{
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
for inst in X64ABIMachineSpec::gen_epilogue_frame_restore(
@ -4243,19 +4239,19 @@ fn emit_return_call_common_sequence(
&info.isa_flags,
state.frame_layout(),
) {
inst.emit(&[], sink, info, state);
inst.emit(sink, info, state);
}
let incoming_args_diff = state.frame_layout().tail_args_size - call_info.new_stack_arg_size;
if incoming_args_diff > 0 {
// Move the saved return address up by `incoming_args_diff`
Inst::mov64_m_r(Amode::imm_reg(0, regs::rsp()), tmp).emit(&[], sink, info, state);
Inst::mov64_m_r(Amode::imm_reg(0, regs::rsp()), tmp).emit(sink, info, state);
Inst::mov_r_m(
OperandSize::Size64,
tmp.to_reg(),
Amode::imm_reg(i32::try_from(incoming_args_diff).unwrap(), regs::rsp()),
)
.emit(&[], sink, info, state);
.emit(sink, info, state);
// Increment the stack pointer to shrink the argument area for the new call.
Inst::alu_rmi_r(
@ -4264,6 +4260,6 @@ fn emit_return_call_common_sequence(
RegMemImm::imm(incoming_args_diff),
Writable::from_reg(regs::rsp()),
)
.emit(&[], sink, info, state);
.emit(sink, info, state);
}
}

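Aside from dropping the now-unused `&[]` argument, the x64 hunks above pass through the atomic read-modify-write expansion, whose comments spell out the retry loop: load the old value ("mov (%addr), %rax"), compute the new value in a temporary ("andq" + "notq" for Nand), attempt the store, and "jnz again" on failure. As a rough, purely illustrative analogy written against Rust's standard atomics (not Cranelift code, and only a software model of what the emitted sequence does at runtime):

use std::sync::atomic::{AtomicU64, Ordering};

// Software analogy of the emitted sequence for RmwOp::Nand: load the old value,
// compute !(old & operand) in a temporary, then try to publish it with a
// compare-and-swap; on failure, take the freshly observed value and retry.
fn atomic_nand(cell: &AtomicU64, operand: u64) -> u64 {
    let mut old = cell.load(Ordering::Relaxed);      // mov (%addr), %rax
    loop {
        let new = !(old & operand);                  // andq + notq into %r_temp
        match cell.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
            Ok(prev) => return prev,                 // cmpxchg succeeded
            Err(prev) => old = prev,                 // jnz again: retry with the new old value
        }
    }
}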
4
cranelift/codegen/src/isa/x64/inst/emit_tests.rs

@ -5154,11 +5154,11 @@ fn test_x64_emit() {
let emit_info = EmitInfo::new(flags, isa_flags);
for (insn, expected_encoding, expected_printing) in insns {
// Check the printed text is as expected.
let actual_printing = insn.pretty_print_inst(&[], &mut Default::default());
let actual_printing = insn.pretty_print_inst(&mut Default::default());
assert_eq!(expected_printing, actual_printing);
let mut buffer = MachBuffer::new();
insn.emit(&[], &mut buffer, &emit_info, &mut Default::default());
insn.emit(&mut buffer, &emit_info, &mut Default::default());
// Allow one label just after the instruction (so the offset is 0).
let label = buffer.get_label();

18
cranelift/codegen/src/isa/x64/inst/mod.rs

@ -11,7 +11,7 @@ use crate::isa::{CallConv, FunctionAlignment};
use crate::{machinst::*, trace};
use crate::{settings, CodegenError, CodegenResult};
use alloc::boxed::Box;
use regalloc2::{Allocation, PRegSet};
use regalloc2::PRegSet;
use smallvec::{smallvec, SmallVec};
use std::fmt::{self, Write};
use std::string::{String, ToString};
@ -1885,11 +1885,7 @@ impl PrettyPrint for Inst {
impl fmt::Debug for Inst {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(
fmt,
"{}",
self.pretty_print_inst(&[], &mut Default::default())
)
write!(fmt, "{}", self.pretty_print_inst(&mut Default::default()))
}
}
@ -2734,17 +2730,11 @@ impl MachInstEmit for Inst {
type State = EmitState;
type Info = EmitInfo;
fn emit(
&self,
_allocs: &[Allocation],
sink: &mut MachBuffer<Inst>,
info: &Self::Info,
state: &mut Self::State,
) {
fn emit(&self, sink: &mut MachBuffer<Inst>, info: &Self::Info, state: &mut Self::State) {
emit::emit(self, sink, info, state);
}
fn pretty_print_inst(&self, _allocs: &[Allocation], _: &mut Self::State) -> String {
fn pretty_print_inst(&self, _: &mut Self::State) -> String {
PrettyPrint::pretty_print(self, 0)
}
}

70
cranelift/codegen/src/machinst/buffer.rs

@ -2099,7 +2099,7 @@ mod test {
buf.reserve_labels_for_blocks(2);
buf.bind_label(label(0), state.ctrl_plane_mut());
let inst = Inst::Jump { dest: target(1) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(1), state.ctrl_plane_mut());
let buf = buf.finish(&constants, state.ctrl_plane_mut());
assert_eq!(0, buf.total_size());
@ -2120,15 +2120,15 @@ mod test {
taken: target(1),
not_taken: target(2),
};
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(1), state.ctrl_plane_mut());
let inst = Inst::Jump { dest: target(3) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(2), state.ctrl_plane_mut());
let inst = Inst::Jump { dest: target(3) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(3), state.ctrl_plane_mut());
@ -2151,17 +2151,17 @@ mod test {
taken: target(1),
not_taken: target(2),
};
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(1), state.ctrl_plane_mut());
let inst = Inst::Nop4;
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(2), state.ctrl_plane_mut());
let inst = Inst::Udf {
trap_code: TrapCode::Interrupt,
};
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(3), state.ctrl_plane_mut());
@ -2173,9 +2173,9 @@ mod test {
kind: CondBrKind::NotZero(xreg(0)),
trap_code: TrapCode::Interrupt,
};
inst.emit(&[], &mut buf2, &info, &mut state);
inst.emit(&mut buf2, &info, &mut state);
let inst = Inst::Nop4;
inst.emit(&[], &mut buf2, &info, &mut state);
inst.emit(&mut buf2, &info, &mut state);
let buf2 = buf2.finish(&constants, state.ctrl_plane_mut());
@ -2197,7 +2197,7 @@ mod test {
taken: target(2),
not_taken: target(3),
};
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(1), state.ctrl_plane_mut());
while buf.cur_offset() < 2000000 {
@ -2205,16 +2205,16 @@ mod test {
buf.emit_island(0, state.ctrl_plane_mut());
}
let inst = Inst::Nop4;
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
}
buf.bind_label(label(2), state.ctrl_plane_mut());
let inst = Inst::Nop4;
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(3), state.ctrl_plane_mut());
let inst = Inst::Nop4;
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
let buf = buf.finish(&constants, state.ctrl_plane_mut());
@ -2244,7 +2244,7 @@ mod test {
// go directly to the target.
not_taken: BranchTarget::ResolvedOffset(2000000 + 4 - 4),
};
inst.emit(&[], &mut buf2, &info, &mut state);
inst.emit(&mut buf2, &info, &mut state);
let buf2 = buf2.finish(&constants, state.ctrl_plane_mut());
@ -2262,16 +2262,16 @@ mod test {
buf.bind_label(label(0), state.ctrl_plane_mut());
let inst = Inst::Nop4;
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(1), state.ctrl_plane_mut());
let inst = Inst::Nop4;
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(2), state.ctrl_plane_mut());
while buf.cur_offset() < 2000000 {
let inst = Inst::Nop4;
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
}
buf.bind_label(label(3), state.ctrl_plane_mut());
@ -2280,7 +2280,7 @@ mod test {
taken: target(0),
not_taken: target(1),
};
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
let buf = buf.finish(&constants, state.ctrl_plane_mut());
@ -2293,11 +2293,11 @@ mod test {
taken: BranchTarget::ResolvedOffset(8),
not_taken: BranchTarget::ResolvedOffset(4 - (2000000 + 4)),
};
inst.emit(&[], &mut buf2, &info, &mut state);
inst.emit(&mut buf2, &info, &mut state);
let inst = Inst::Jump {
dest: BranchTarget::ResolvedOffset(-(2000000 + 8)),
};
inst.emit(&[], &mut buf2, &info, &mut state);
inst.emit(&mut buf2, &info, &mut state);
let buf2 = buf2.finish(&constants, state.ctrl_plane_mut());
@ -2352,38 +2352,38 @@ mod test {
taken: target(1),
not_taken: target(2),
};
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(1), state.ctrl_plane_mut());
let inst = Inst::Jump { dest: target(3) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(2), state.ctrl_plane_mut());
let inst = Inst::Nop4;
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
let inst = Inst::Jump { dest: target(0) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(3), state.ctrl_plane_mut());
let inst = Inst::Jump { dest: target(4) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(4), state.ctrl_plane_mut());
let inst = Inst::Jump { dest: target(5) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(5), state.ctrl_plane_mut());
let inst = Inst::Jump { dest: target(7) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(6), state.ctrl_plane_mut());
let inst = Inst::Nop4;
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(7), state.ctrl_plane_mut());
let inst = Inst::Ret {};
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
let buf = buf.finish(&constants, state.ctrl_plane_mut());
@ -2425,23 +2425,23 @@ mod test {
buf.bind_label(label(0), state.ctrl_plane_mut());
let inst = Inst::Jump { dest: target(1) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(1), state.ctrl_plane_mut());
let inst = Inst::Jump { dest: target(2) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(2), state.ctrl_plane_mut());
let inst = Inst::Jump { dest: target(3) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(3), state.ctrl_plane_mut());
let inst = Inst::Jump { dest: target(4) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
buf.bind_label(label(4), state.ctrl_plane_mut());
let inst = Inst::Jump { dest: target(1) };
inst.emit(&[], &mut buf, &info, &mut state);
inst.emit(&mut buf, &info, &mut state);
let buf = buf.finish(&constants, state.ctrl_plane_mut());

12
cranelift/codegen/src/machinst/mod.rs

@ -56,7 +56,7 @@ use alloc::vec::Vec;
use core::fmt::Debug;
use cranelift_control::ControlPlane;
use cranelift_entity::PrimaryMap;
use regalloc2::{Allocation, VReg};
use regalloc2::VReg;
use smallvec::{smallvec, SmallVec};
use std::string::String;
@ -287,15 +287,9 @@ pub trait MachInstEmit: MachInst {
/// Constant information used in `emit` invocations.
type Info;
/// Emit the instruction.
fn emit(
&self,
allocs: &[Allocation],
code: &mut MachBuffer<Self>,
info: &Self::Info,
state: &mut Self::State,
);
fn emit(&self, code: &mut MachBuffer<Self>, info: &Self::Info, state: &mut Self::State);
/// Pretty-print the instruction.
fn pretty_print_inst(&self, allocs: &[Allocation], state: &mut Self::State) -> String;
fn pretty_print_inst(&self, state: &mut Self::State) -> String;
}
/// A trait describing the emission state carried between MachInsts when

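The signature change above is the core of this commit: `emit` and `pretty_print_inst` no longer receive an `allocs: &[Allocation]` slice. A minimal, self-contained sketch of what an implementor looks like against the new shape, using hypothetical `Buffer`/`Info`/`State` stand-ins rather than the real Cranelift types:

// Illustrative only: a stripped-down stand-in for the trimmed trait, showing
// that the allocation slice is gone from both methods.
struct Buffer(Vec<u8>);
struct Info;
#[derive(Default)]
struct State;

trait EmitLike {
    fn emit(&self, code: &mut Buffer, info: &Info, state: &mut State);
    fn pretty_print_inst(&self, state: &mut State) -> String;
}

struct Nop4;

impl EmitLike for Nop4 {
    fn emit(&self, code: &mut Buffer, _info: &Info, _state: &mut State) {
        // Encode an AArch64 `nop` (0xd503201f) as an example payload.
        code.0.extend_from_slice(&0xd503201f_u32.to_le_bytes());
    }
    fn pretty_print_inst(&self, _state: &mut State) -> String {
        "nop".to_string()
    }
}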
22
cranelift/codegen/src/machinst/vcode.rs

@ -775,20 +775,19 @@ impl<I: VCodeInst> VCode<I> {
while new_offset > buffer.cur_offset() {
// Pad with NOPs up to the aligned block offset.
let nop = I::gen_nop((new_offset - buffer.cur_offset()) as usize);
nop.emit(&[], &mut buffer, &self.emit_info, &mut Default::default());
nop.emit(&mut buffer, &self.emit_info, &mut Default::default());
}
assert_eq!(buffer.cur_offset(), new_offset);
let do_emit = |inst: &I,
allocs: &[Allocation],
disasm: &mut String,
buffer: &mut MachBuffer<I>,
state: &mut I::State| {
if want_disasm && !inst.is_args() {
let mut s = state.clone();
writeln!(disasm, " {}", inst.pretty_print_inst(allocs, &mut s)).unwrap();
writeln!(disasm, " {}", inst.pretty_print_inst(&mut s)).unwrap();
}
inst.emit(allocs, buffer, &self.emit_info, state);
inst.emit(buffer, &self.emit_info, state);
};
// Is this the first block? Emit the prologue directly if so.
@ -796,7 +795,7 @@ impl<I: VCodeInst> VCode<I> {
trace!(" -> entry block");
buffer.start_srcloc(Default::default());
for inst in &self.abi.gen_prologue() {
do_emit(&inst, &[], &mut disasm, &mut buffer, &mut state);
do_emit(&inst, &mut disasm, &mut buffer, &mut state);
}
buffer.end_srcloc();
}
@ -829,7 +828,7 @@ impl<I: VCodeInst> VCode<I> {
self.block_order.is_indirect_branch_target(block),
is_forward_edge_cfi_enabled,
) {
do_emit(&block_start, &[], &mut disasm, &mut buffer, &mut state);
do_emit(&block_start, &mut disasm, &mut buffer, &mut state);
}
for inst_or_edit in regalloc.block_insts_and_edits(&self, block) {
@ -903,7 +902,7 @@ impl<I: VCodeInst> VCode<I> {
// epilogue will contain it).
if self.insts[iix.index()].is_term() == MachTerminator::Ret {
for inst in self.abi.gen_epilogue() {
do_emit(&inst, &[], &mut disasm, &mut buffer, &mut state);
do_emit(&inst, &mut disasm, &mut buffer, &mut state);
}
} else {
// Update the operands for this inst using the
@ -929,7 +928,6 @@ impl<I: VCodeInst> VCode<I> {
// Emit the instruction!
do_emit(
&self.insts[iix.index()],
&[],
&mut disasm,
&mut buffer,
&mut state,
@ -948,21 +946,21 @@ impl<I: VCodeInst> VCode<I> {
debug_assert_eq!(from.class(), to.class());
let ty = I::canonical_type_for_rc(from.class());
let mv = I::gen_move(to_rreg, from_rreg, ty);
do_emit(&mv, &[], &mut disasm, &mut buffer, &mut state);
do_emit(&mv, &mut disasm, &mut buffer, &mut state);
}
(Some(from), None) => {
// Spill from register to spillslot.
let to = to.as_stack().unwrap();
let from_rreg = RealReg::from(from);
let spill = self.abi.gen_spill(to, from_rreg);
do_emit(&spill, &[], &mut disasm, &mut buffer, &mut state);
do_emit(&spill, &mut disasm, &mut buffer, &mut state);
}
(None, Some(to)) => {
// Load from spillslot to register.
let from = from.as_stack().unwrap();
let to_rreg = Writable::from_reg(RealReg::from(to));
let reload = self.abi.gen_reload(to_rreg, from);
do_emit(&reload, &[], &mut disasm, &mut buffer, &mut state);
do_emit(&reload, &mut disasm, &mut buffer, &mut state);
}
(None, None) => {
panic!("regalloc2 should have eliminated stack-to-stack moves!");
@ -1374,7 +1372,7 @@ impl<I: VCodeInst> fmt::Debug for VCode<I> {
f,
" Inst {}: {}",
inst,
self.insts[inst].pretty_print_inst(&[], &mut state)
self.insts[inst].pretty_print_inst(&mut state)
)?;
if !self.operands.is_empty() {
for operand in self.inst_operands(InsnIndex::new(inst)) {

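With the allocation slice removed, the `do_emit` helper above reduces to two steps: optionally record the pretty-printed instruction for the disassembly listing, then emit it into the buffer. A stripped-down sketch of that pattern with hypothetical toy types (not the real `VCode` machinery):

use std::fmt::Write;

struct Buffer(Vec<u8>);

trait Emit {
    fn emit(&self, buf: &mut Buffer);
    fn pretty_print(&self) -> String;
}

// Mirrors the shape of `do_emit`: append a line to the disassembly only when
// requested, then always emit the instruction into the buffer.
fn do_emit<I: Emit>(inst: &I, want_disasm: bool, disasm: &mut String, buf: &mut Buffer) {
    if want_disasm {
        writeln!(disasm, "  {}", inst.pretty_print()).unwrap();
    }
    inst.emit(buf);
}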
2
winch/codegen/src/isa/aarch64/asm.rs

@ -72,7 +72,7 @@ impl Assembler {
}
fn emit(&mut self, inst: Inst) {
inst.emit(&[], &mut self.buffer, &self.emit_info, &mut self.emit_state);
inst.emit(&mut self.buffer, &self.emit_info, &mut self.emit_state);
}
/// Load a constant into a register.

2
winch/codegen/src/isa/x64/asm.rs

@ -220,7 +220,7 @@ impl Assembler {
}
fn emit(&mut self, inst: Inst) {
inst.emit(&[], &mut self.buffer, &self.emit_info, &mut self.emit_state);
inst.emit(&mut self.buffer, &self.emit_info, &mut self.emit_state);
}
fn to_synthetic_amode(
