
Honour the emit_all_ones_funcaddrs() setting when creating unpatched locations.

Branch: pull/1538/head
Author: Benjamin Bouvier, 5 years ago
Commit: a7ca37e493
  1. cranelift/codegen/src/isa/aarch64/inst/emit.rs (57 changed lines)
  2. cranelift/codegen/src/machinst/mod.rs (2 changed lines)
  3. cranelift/codegen/src/machinst/vcode.rs (11 changed lines)
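For context, a minimal sketch of how a client might turn this setting on before compiling (hypothetical usage; the helper name is made up, and it assumes the standard cranelift-codegen settings builder with the Configurable trait, the same Flags::new(settings::builder()) pattern used in the test below):

use cranelift_codegen::settings::{self, Configurable};

fn flags_with_all_ones_funcaddrs() -> settings::Flags {
    // Assumption: `emit_all_ones_funcaddrs` is a boolean setting toggled by
    // name on the settings builder; when enabled, unpatched function-address
    // slots are emitted as all-ones instead of zero.
    let mut builder = settings::builder();
    builder
        .enable("emit_all_ones_funcaddrs")
        .expect("setting should exist");
    settings::Flags::new(builder)
}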

cranelift/codegen/src/isa/aarch64/inst/emit.rs (57 changed lines)

@@ -315,7 +315,7 @@ fn enc_fround(top22: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
}
impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
-fn emit(&self, sink: &mut O) {
+fn emit(&self, sink: &mut O, flags: &settings::Flags) {
match self {
&Inst::AluRRR { alu_op, rd, rn, rm } => {
let top11 = match alu_op {
@@ -582,7 +582,7 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
let (mem_insts, mem) = mem_finalize(sink.cur_offset_from_start(), mem);
for inst in mem_insts.into_iter() {
-inst.emit(sink);
+inst.emit(sink, flags);
}
// ldst encoding helpers take Reg, not Writable<Reg>.
@@ -725,7 +725,7 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
let (mem_insts, mem) = mem_finalize(sink.cur_offset_from_start(), mem);
for inst in mem_insts.into_iter() {
-inst.emit(sink);
+inst.emit(sink, flags);
}
let op = match self {
@@ -949,11 +949,11 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
mem: MemArg::Label(MemLabel::PCRel(8)),
srcloc: None,
};
-inst.emit(sink);
+inst.emit(sink, flags);
let inst = Inst::Jump {
dest: BranchTarget::ResolvedOffset(8),
};
-inst.emit(sink);
+inst.emit(sink, flags);
sink.put4(const_data.to_bits());
}
&Inst::LoadFpuConst64 { rd, const_data } => {
@@ -962,11 +962,11 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
mem: MemArg::Label(MemLabel::PCRel(8)),
srcloc: None,
};
-inst.emit(sink);
+inst.emit(sink, flags);
let inst = Inst::Jump {
dest: BranchTarget::ResolvedOffset(12),
};
-inst.emit(sink);
+inst.emit(sink, flags);
sink.put8(const_data.to_bits());
}
&Inst::FpuCSel32 { rd, rn, rm, cond } => {
@@ -1053,7 +1053,7 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
if top22 != 0 {
sink.put4(enc_extend(top22, rd, rn));
} else {
-Inst::mov32(rd, rn).emit(sink);
+Inst::mov32(rd, rn).emit(sink, flags);
}
}
&Inst::Extend {
@@ -1076,7 +1076,7 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
rn: zero_reg(),
rm: rd.to_reg(),
};
-sub_inst.emit(sink);
+sub_inst.emit(sink, flags);
}
&Inst::Extend {
rd,
@@ -1217,13 +1217,13 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
// Save index in a tmp (the live range of ridx only goes to start of this
// sequence; rtmp1 or rtmp2 may overwrite it).
let inst = Inst::gen_move(rtmp2, ridx, I64);
-inst.emit(sink);
+inst.emit(sink, flags);
// Load address of jump table
let inst = Inst::Adr {
rd: rtmp1,
label: MemLabel::PCRel(16),
};
-inst.emit(sink);
+inst.emit(sink, flags);
// Load value out of jump table
let inst = Inst::SLoad32 {
rd: rtmp2,
@@ -1235,7 +1235,7 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
),
srcloc: None, // can't cause a user trap.
};
-inst.emit(sink);
+inst.emit(sink, flags);
// Add base of jump table to jump-table-sourced block offset
let inst = Inst::AluRRR {
alu_op: ALUOp::Add64,
@@ -1243,14 +1243,14 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
rn: rtmp1.to_reg(),
rm: rtmp2.to_reg(),
};
-inst.emit(sink);
+inst.emit(sink, flags);
// Branch to computed address. (`targets` here is only used for successor queries
// and is not needed for emission.)
let inst = Inst::IndirectBr {
rn: rtmp1.to_reg(),
targets: vec![],
};
-inst.emit(sink);
+inst.emit(sink, flags);
// Emit jump table (table of 32-bit offsets).
for target in targets {
let off = target.as_offset_words() * 4;
@@ -1266,11 +1266,11 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
mem: MemArg::Label(MemLabel::PCRel(8)),
srcloc: None, // can't cause a user trap.
};
-inst.emit(sink);
+inst.emit(sink, flags);
let inst = Inst::Jump {
dest: BranchTarget::ResolvedOffset(12),
};
-inst.emit(sink);
+inst.emit(sink, flags);
sink.put8(const_data);
}
&Inst::LoadExtName {
@@ -1284,13 +1284,17 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
mem: MemArg::Label(MemLabel::PCRel(8)),
srcloc: None, // can't cause a user trap.
};
-inst.emit(sink);
+inst.emit(sink, flags);
let inst = Inst::Jump {
dest: BranchTarget::ResolvedOffset(12),
};
-inst.emit(sink);
+inst.emit(sink, flags);
sink.add_reloc(srcloc, Reloc::Abs8, name, offset);
-sink.put8(0);
+if flags.emit_all_ones_funcaddrs() {
+    sink.put8(u64::max_value());
+} else {
+    sink.put8(0);
+}
}
&Inst::LoadAddr { rd, ref mem } => match *mem {
MemArg::FPOffset(fp_off) => {
@@ -1307,12 +1311,12 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
imm12,
rn: fp_reg(),
};
-inst.emit(sink);
+inst.emit(sink, flags);
} else {
let const_insts =
Inst::load_constant(rd, u64::try_from(fp_off.abs()).unwrap());
for inst in const_insts {
-inst.emit(sink);
+inst.emit(sink, flags);
}
let inst = Inst::AluRRR {
alu_op,
@ -1320,7 +1324,7 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
rn: fp_reg(),
rm: rd.to_reg(),
};
-inst.emit(sink);
+inst.emit(sink, flags);
}
}
_ => unimplemented!("{:?}", mem),
@@ -1330,14 +1334,14 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
rd,
rm: xreg(PINNED_REG),
};
-inst.emit(sink);
+inst.emit(sink, flags);
}
&Inst::SetPinnedReg { rm } => {
let inst = Inst::Mov {
rd: Writable::from_reg(xreg(PINNED_REG)),
rm,
};
-inst.emit(sink);
+inst.emit(sink, flags);
}
}
}
@@ -1351,6 +1355,7 @@ mod test {
#[test]
fn test_aarch64_binemit() {
+let flags = settings::Flags::new(settings::builder());
let mut insns = Vec::<(Inst, &str, &str)>::new();
// N.B.: the architecture is little-endian, so when transcribing the 32-bit
@@ -4165,7 +4170,7 @@ mod test {
// Check the encoding is as expected.
let text_size = {
let mut code_sec = MachSectionSize::new(0);
-insn.emit(&mut code_sec);
+insn.emit(&mut code_sec, &flags);
code_sec.size()
};
@@ -4173,7 +4178,7 @@ mod test {
let mut sections = MachSections::new();
let code_idx = sections.add_section(0, text_size);
let code_sec = sections.get_section(code_idx);
-insn.emit(code_sec);
+insn.emit(code_sec, &flags);
sections.emit(&mut sink);
let actual_encoding = &sink.stringify();
assert_eq!(expected_encoding, actual_encoding);

cranelift/codegen/src/machinst/mod.rs (2 changed lines)

@@ -218,7 +218,7 @@ pub enum MachTerminator<'a> {
/// A trait describing the ability to encode a MachInst into binary machine code.
pub trait MachInstEmit<O: MachSectionOutput> {
/// Emit the instruction.
-fn emit(&self, code: &mut O);
+fn emit(&self, code: &mut O, flags: &Flags);
}
/// The result of a `MachBackend::compile_function()` call. Contains machine

cranelift/codegen/src/machinst/vcode.rs (11 changed lines)

@@ -468,6 +468,8 @@ impl<I: VCodeInst> VCode<I> {
}
}
+let flags = self.abi.flags();
// Compute block offsets.
let mut code_section = MachSectionSize::new(0);
let mut block_offsets = vec![0; self.num_blocks()];
@@ -476,7 +478,7 @@ impl<I: VCodeInst> VCode<I> {
block_offsets[block as usize] = code_section.offset;
let (start, end) = self.block_ranges[block as usize];
for iix in start..end {
-self.insts[iix as usize].emit(&mut code_section);
+self.insts[iix as usize].emit(&mut code_section, flags);
}
}
@@ -495,7 +497,7 @@ impl<I: VCodeInst> VCode<I> {
for iix in start..end {
self.insts[iix as usize]
.with_block_offsets(code_section.offset, &self.final_block_offsets[..]);
-self.insts[iix as usize].emit(&mut code_section);
+self.insts[iix as usize].emit(&mut code_section, flags);
}
}
}
@@ -509,18 +511,19 @@ impl<I: VCodeInst> VCode<I> {
let code_idx = sections.add_section(0, self.code_size);
let code_section = sections.get_section(code_idx);
+let flags = self.abi.flags();
for &block in &self.final_block_order {
let new_offset = I::align_basic_block(code_section.cur_offset_from_start());
while new_offset > code_section.cur_offset_from_start() {
// Pad with NOPs up to the aligned block offset.
let nop = I::gen_nop((new_offset - code_section.cur_offset_from_start()) as usize);
-nop.emit(code_section);
+nop.emit(code_section, flags);
}
assert_eq!(code_section.cur_offset_from_start(), new_offset);
let (start, end) = self.block_ranges[block as usize];
for iix in start..end {
-self.insts[iix as usize].emit(code_section);
+self.insts[iix as usize].emit(code_section, flags);
}
}
