Browse Source

cranelift: Introduce a feature to enable `trace` logs (#4484)

* Don't use `log::trace` directly but a feature-enabled `trace` macro
* Don't emit disassembly based on the log level
pull/4560/head
Benjamin Bouvier 2 years ago
committed by GitHub
parent
commit
8d0224341c
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
  1. 3
      cranelift/codegen/Cargo.toml
  2. 19
      cranelift/codegen/src/alias_analysis.rs
  3. 4
      cranelift/codegen/src/ir/layout.rs
  4. 5
      cranelift/codegen/src/isa/aarch64/inst/emit.rs
  5. 14
      cranelift/codegen/src/isa/aarch64/lower.rs
  6. 1
      cranelift/codegen/src/isa/aarch64/mod.rs
  7. 3
      cranelift/codegen/src/isa/s390x/inst/emit.rs
  8. 1
      cranelift/codegen/src/isa/s390x/mod.rs
  9. 2
      cranelift/codegen/src/isa/x64/inst/emit.rs
  10. 4
      cranelift/codegen/src/isa/x64/inst/mod.rs
  11. 3
      cranelift/codegen/src/isa/x64/lower.rs
  12. 1
      cranelift/codegen/src/isa/x64/mod.rs
  13. 11
      cranelift/codegen/src/lib.rs
  14. 18
      cranelift/codegen/src/machinst/abi_impl.rs
  15. 6
      cranelift/codegen/src/machinst/blockorder.rs
  16. 2
      cranelift/codegen/src/machinst/buffer.rs
  17. 3
      cranelift/codegen/src/machinst/compile.rs
  18. 62
      cranelift/codegen/src/machinst/lower.rs
  19. 7
      cranelift/codegen/src/machinst/vcode.rs
  20. 6
      cranelift/codegen/src/unreachable_code.rs

3
cranelift/codegen/Cargo.toml

@ -54,6 +54,9 @@ core = ["hashbrown"]
# can significantly increase the size of the library.
testing_hooks = []
# Enables detailed logging which can be somewhat expensive.
trace-log = []
# This enables unwind info generation functionality.
unwind = ["gimli"]

19
cranelift/codegen/src/alias_analysis.rs

@ -69,6 +69,7 @@ use crate::{
has_memory_fence_semantics, inst_addr_offset_type, inst_store_data, visit_block_succs,
},
ir::{immediates::Offset32, Block, Function, Inst, Opcode, Type, Value},
trace,
};
use cranelift_entity::{packed_option::PackedOption, EntityRef};
@ -198,7 +199,7 @@ pub struct AliasAnalysis<'a> {
impl<'a> AliasAnalysis<'a> {
/// Perform an alias analysis pass.
pub fn new(func: &'a mut Function, domtree: &'a DominatorTree) -> AliasAnalysis<'a> {
log::trace!("alias analysis: input is:\n{:?}", func);
trace!("alias analysis: input is:\n{:?}", func);
let mut analysis = AliasAnalysis {
func,
domtree,
@ -225,7 +226,7 @@ impl<'a> AliasAnalysis<'a> {
.or_insert_with(|| LastStores::default())
.clone();
log::trace!(
trace!(
"alias analysis: input to block{} is {:?}",
block.index(),
state
@ -233,7 +234,7 @@ impl<'a> AliasAnalysis<'a> {
for inst in self.func.layout.block_insts(block) {
state.update(self.func, inst);
log::trace!("after inst{}: state is {:?}", inst.index(), state);
trace!("after inst{}: state is {:?}", inst.index(), state);
}
visit_block_succs(self.func, block, |_inst, succ| {
@ -279,7 +280,7 @@ impl<'a> AliasAnalysis<'a> {
.unwrap_or_else(|| LastStores::default());
while let Some(inst) = pos.next_inst() {
log::trace!(
trace!(
"alias analysis: scanning at inst{} with state {:?} ({:?})",
inst.index(),
state,
@ -300,7 +301,7 @@ impl<'a> AliasAnalysis<'a> {
ty,
extending_opcode: get_ext_opcode(opcode),
};
log::trace!(
trace!(
"alias analysis: at inst{}: store with data v{} at loc {:?}",
inst.index(),
store_data.index(),
@ -317,7 +318,7 @@ impl<'a> AliasAnalysis<'a> {
ty,
extending_opcode: get_ext_opcode(opcode),
};
log::trace!(
trace!(
"alias analysis: at inst{}: load with last_store inst{} at loc {:?}",
inst.index(),
last_store.map(|inst| inst.index()).unwrap_or(usize::MAX),
@ -337,13 +338,13 @@ impl<'a> AliasAnalysis<'a> {
let aliased = if let Some((def_inst, value)) =
self.mem_values.get(&mem_loc).cloned()
{
log::trace!(
trace!(
" -> sees known value v{} from inst{}",
value.index(),
def_inst.index()
);
if self.domtree.dominates(def_inst, inst, &pos.func.layout) {
log::trace!(
trace!(
" -> dominates; value equiv from v{} to v{} inserted",
load_result.index(),
value.index()
@ -363,7 +364,7 @@ impl<'a> AliasAnalysis<'a> {
// Otherwise, we can keep *this* load around
// as a new equivalent value.
if !aliased {
log::trace!(
trace!(
" -> inserting load result v{} at loc {:?}",
load_result.index(),
mem_loc

4
cranelift/codegen/src/ir/layout.rs

@ -8,7 +8,7 @@ use crate::ir::dfg::DataFlowGraph;
use crate::ir::progpoint::{ExpandedProgramPoint, ProgramOrder};
use crate::ir::{Block, Inst};
use crate::packed_option::PackedOption;
use crate::timing;
use crate::{timing, trace};
use core::cmp;
use core::iter::{IntoIterator, Iterator};
@ -327,7 +327,7 @@ impl Layout {
next_inst = self.insts[inst].next.expand();
}
}
log::trace!("Renumbered {} program points", seq / MAJOR_STRIDE);
trace!("Renumbered {} program points", seq / MAJOR_STRIDE);
}
}

5
cranelift/codegen/src/isa/aarch64/inst/emit.rs

@ -7,6 +7,7 @@ use crate::ir::types::*;
use crate::ir::{LibCall, MemFlags, TrapCode};
use crate::isa::aarch64::inst::*;
use crate::machinst::{ty_bits, Reg, RegClass, Writable};
use crate::trace;
use core::convert::TryFrom;
/// Memory label/reference finalization: convert a MemLabel to a PC-relative
@ -39,7 +40,7 @@ pub fn mem_finalize(
};
let adj = match mem {
&AMode::NominalSPOffset(..) => {
log::trace!(
trace!(
"mem_finalize: nominal SP offset {} + adj {} -> {}",
off,
state.virtual_sp_offset,
@ -3068,7 +3069,7 @@ impl MachInstEmit for Inst {
}
}
&Inst::VirtualSPOffsetAdj { offset } => {
log::trace!(
trace!(
"virtual sp offset adjusted by {} -> {}",
offset,
state.virtual_sp_offset + offset,

14
cranelift/codegen/src/isa/aarch64/lower.rs

@ -16,8 +16,8 @@ use crate::ir::{Opcode, Type, Value};
use crate::isa::aarch64::inst::*;
use crate::isa::aarch64::AArch64Backend;
use crate::machinst::lower::*;
use crate::machinst::*;
use crate::machinst::{Reg, Writable};
use crate::{machinst::*, trace};
use crate::{CodegenError, CodegenResult};
use smallvec::SmallVec;
use std::cmp;
@ -236,7 +236,7 @@ fn lower_value_to_regs<C: LowerCtx<I = Inst>>(
ctx: &mut C,
value: Value,
) -> (ValueRegs<Reg>, Type, bool) {
log::trace!("lower_value_to_regs: value {:?}", value);
trace!("lower_value_to_regs: value {:?}", value);
let ty = ctx.value_ty(value);
let inputs = ctx.get_value_as_source_or_const(value);
let is_const = inputs.constant.is_some();
@ -608,7 +608,7 @@ pub(crate) fn lower_pair_address<C: LowerCtx<I = Inst>>(
let (mut addends64, mut addends32, args_offset) = collect_address_addends(ctx, roots);
let offset = args_offset + (offset as i64);
log::trace!(
trace!(
"lower_pair_address: addends64 {:?}, addends32 {:?}, offset {}",
addends64,
addends32,
@ -669,7 +669,7 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
let (mut addends64, mut addends32, args_offset) = collect_address_addends(ctx, roots);
let mut offset = args_offset + (offset as i64);
log::trace!(
trace!(
"lower_address: addends64 {:?}, addends32 {:?}, offset {}",
addends64,
addends32,
@ -1120,7 +1120,7 @@ pub(crate) fn maybe_input_insn<C: LowerCtx<I = Inst>>(
op: Opcode,
) -> Option<IRInst> {
let inputs = c.get_input_as_source_or_const(input.insn, input.input);
log::trace!(
trace!(
"maybe_input_insn: input {:?} has options {:?}; looking for op {:?}",
input,
inputs,
@ -1128,7 +1128,7 @@ pub(crate) fn maybe_input_insn<C: LowerCtx<I = Inst>>(
);
if let Some((src_inst, _)) = inputs.inst.as_inst() {
let data = c.data(src_inst);
log::trace!(" -> input inst {:?}", data);
trace!(" -> input inst {:?}", data);
if data.opcode() == op {
return Some(src_inst);
}
@ -1228,7 +1228,7 @@ pub(crate) fn lower_icmp<C: LowerCtx<I = Inst>>(
condcode: IntCC,
output: IcmpOutput,
) -> CodegenResult<IcmpResult> {
log::trace!(
trace!(
"lower_icmp: insn {}, condcode: {}, output: {:?}",
insn,
condcode,

1
cranelift/codegen/src/isa/aarch64/mod.rs

@ -73,7 +73,6 @@ impl TargetIsa for AArch64Backend {
let flags = self.flags();
let (vcode, regalloc_result) = self.compile_vcode(func, flags.clone())?;
let want_disasm = want_disasm || log::log_enabled!(log::Level::Debug);
let emit_result = vcode.emit(&regalloc_result, want_disasm, flags.machine_code_cfg_info());
let frame_size = emit_result.frame_size;
let value_labels_ranges = emit_result.value_labels_ranges;

3
cranelift/codegen/src/isa/s390x/inst/emit.rs

@ -7,6 +7,7 @@ use crate::isa::s390x::inst::*;
use crate::isa::s390x::settings as s390x_settings;
use crate::machinst::reg::count_operands;
use crate::machinst::{Reg, RegClass};
use crate::trace;
use core::convert::TryFrom;
use regalloc2::Allocation;
@ -3239,7 +3240,7 @@ impl MachInstEmit for Inst {
}
&Inst::VirtualSPOffsetAdj { offset } => {
log::trace!(
trace!(
"virtual sp offset adjusted by {} -> {}",
offset,
state.virtual_sp_offset + offset

1
cranelift/codegen/src/isa/s390x/mod.rs

@ -72,7 +72,6 @@ impl TargetIsa for S390xBackend {
let flags = self.flags();
let (vcode, regalloc_result) = self.compile_vcode(func, flags.clone())?;
let want_disasm = want_disasm || log::log_enabled!(log::Level::Debug);
let emit_result = vcode.emit(&regalloc_result, want_disasm, flags.machine_code_cfg_info());
let frame_size = emit_result.frame_size;
let value_labels_ranges = emit_result.value_labels_ranges;

2
cranelift/codegen/src/isa/x64/inst/emit.rs

@ -2789,7 +2789,7 @@ pub(crate) fn emit(
}
Inst::VirtualSPOffsetAdj { offset } => {
log::trace!(
trace!(
"virtual sp offset adjusted by {} -> {}",
offset,
state.virtual_sp_offset + offset

4
cranelift/codegen/src/isa/x64/inst/mod.rs

@ -6,7 +6,7 @@ use crate::isa::x64::abi::X64ABIMachineSpec;
use crate::isa::x64::inst::regs::pretty_print_reg;
use crate::isa::x64::settings as x64_settings;
use crate::isa::CallConv;
use crate::machinst::*;
use crate::{machinst::*, trace};
use crate::{settings, CodegenError, CodegenResult};
use alloc::boxed::Box;
use alloc::vec::Vec;
@ -2236,7 +2236,7 @@ impl MachInst for Inst {
}
fn gen_move(dst_reg: Writable<Reg>, src_reg: Reg, ty: Type) -> Inst {
log::trace!(
trace!(
"Inst::gen_move {:?} -> {:?} (type: {:?})",
src_reg,
dst_reg.to_reg(),

3
cranelift/codegen/src/isa/x64/lower.rs

@ -14,11 +14,10 @@ use crate::isa::x64::inst::args::*;
use crate::isa::x64::inst::*;
use crate::isa::{x64::settings as x64_settings, x64::X64Backend, CallConv};
use crate::machinst::lower::*;
use crate::machinst::*;
use crate::result::CodegenResult;
use crate::settings::{Flags, TlsModel};
use crate::{machinst::*, trace};
use alloc::boxed::Box;
use log::trace;
use smallvec::SmallVec;
use std::convert::TryFrom;
use target_lexicon::Triple;

1
cranelift/codegen/src/isa/x64/mod.rs

@ -67,7 +67,6 @@ impl TargetIsa for X64Backend {
let flags = self.flags();
let (vcode, regalloc_result) = self.compile_vcode(func, flags.clone())?;
let want_disasm = want_disasm || log::log_enabled!(log::Level::Debug);
let emit_result = vcode.emit(&regalloc_result, want_disasm, flags.machine_code_cfg_info());
let frame_size = emit_result.frame_size;
let value_labels_ranges = emit_result.value_labels_ranges;

11
cranelift/codegen/src/lib.rs

@ -116,4 +116,15 @@ mod souper_harvest;
pub use crate::result::{CodegenError, CodegenResult};
/// A feature-gated wrapper around `log::trace!`.
///
/// Even when trace logging is disabled at runtime, the trace macro has a
/// significant performance cost (its arguments are still evaluated and
/// formatted), so we disable it by default and only compile the call in
/// when the `trace-log` cargo feature is enabled.
#[macro_export]
macro_rules! trace {
    ($($tt:tt)*) => {
        // `cfg!` expands to a compile-time constant `true`/`false`, so when
        // the `trace-log` feature is off this branch is statically dead and
        // the entire call — including argument evaluation — is optimized away.
        if cfg!(feature = "trace-log") {
            ::log::trace!($($tt)*);
        }
    };
}
include!(concat!(env!("OUT_DIR"), "/version.rs"));

18
cranelift/codegen/src/machinst/abi_impl.rs

@ -128,10 +128,10 @@ use crate::binemit::StackMap;
use crate::ir::types::*;
use crate::ir::{ArgumentExtension, ArgumentPurpose, DynamicStackSlot, Signature, StackSlot};
use crate::isa::TargetIsa;
use crate::machinst::*;
use crate::settings;
use crate::CodegenResult;
use crate::{ir, isa};
use crate::{machinst::*, trace};
use alloc::vec::Vec;
use regalloc2::{PReg, PRegSet};
use smallvec::{smallvec, SmallVec};
@ -596,7 +596,7 @@ impl ABISig {
}
}
log::trace!(
trace!(
"ABISig: sig {:?} => args = {:?} rets = {:?} arg stack = {} ret stack = {} stack_ret_arg = {:?} copy_to_arg_order = {:?}",
sig,
args,
@ -791,7 +791,7 @@ fn get_special_purpose_param_register(
impl<M: ABIMachineSpec> ABICalleeImpl<M> {
/// Create a new body ABI instance.
pub fn new(f: &ir::Function, isa: &dyn TargetIsa) -> CodegenResult<Self> {
log::trace!("ABI: func signature {:?}", f.signature);
trace!("ABI: func signature {:?}", f.signature);
let flags = isa.flags().clone();
let ir_sig = ensure_struct_return_ptr_is_returned(&f.signature);
@ -1265,14 +1265,14 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
if let Some(i) = self.sig.stack_ret_arg {
let insts = self.gen_copy_arg_to_regs(i, ValueRegs::one(self.ret_area_ptr.unwrap()));
let inst = insts.into_iter().next().unwrap();
log::trace!(
trace!(
"gen_retval_area_setup: inst {:?}; ptr reg is {:?}",
inst,
self.ret_area_ptr.unwrap().to_reg()
);
Some(inst)
} else {
log::trace!("gen_retval_area_setup: not needed");
trace!("gen_retval_area_setup: not needed");
None
}
}
@ -1347,7 +1347,7 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
let islot = slot.index() as i64;
let spill_off = islot * M::word_bytes() as i64;
let sp_off = self.stackslots_size as i64 + spill_off;
log::trace!("load_spillslot: slot {:?} -> sp_off {}", slot, sp_off);
trace!("load_spillslot: slot {:?} -> sp_off {}", slot, sp_off);
gen_load_stack_multi::<M>(StackAMode::NominalSPOffset(sp_off, ty), into_regs, ty)
}
@ -1363,7 +1363,7 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
let islot = slot.index() as i64;
let spill_off = islot * M::word_bytes() as i64;
let sp_off = self.stackslots_size as i64 + spill_off;
log::trace!("store_spillslot: slot {:?} -> sp_off {}", slot, sp_off);
trace!("store_spillslot: slot {:?} -> sp_off {}", slot, sp_off);
gen_store_stack_multi::<M>(StackAMode::NominalSPOffset(sp_off, ty), from_regs, ty)
}
@ -1376,7 +1376,7 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
let virtual_sp_offset = M::get_virtual_sp_offset_from_state(state);
let nominal_sp_to_fp = M::get_nominal_sp_to_fp(state);
assert!(virtual_sp_offset >= 0);
log::trace!(
trace!(
"spillslots_to_stackmap: slots = {:?}, state = {:?}",
slots,
state
@ -1508,7 +1508,7 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
insts.push(M::gen_ret(vec![]));
}
log::trace!("Epilogue: {:?}", insts);
trace!("Epilogue: {:?}", insts);
insts
}

6
cranelift/codegen/src/machinst/blockorder.rs

@ -73,7 +73,7 @@ use crate::entity::SecondaryMap;
use crate::fx::{FxHashMap, FxHashSet};
use crate::inst_predicates::visit_block_succs;
use crate::ir::{Block, Function, Inst, Opcode};
use crate::machinst::*;
use crate::{machinst::*, trace};
use smallvec::SmallVec;
@ -214,7 +214,7 @@ impl LoweredBlock {
impl BlockLoweringOrder {
/// Compute and return a lowered block order for `f`.
pub fn new(f: &Function) -> BlockLoweringOrder {
log::trace!("BlockLoweringOrder: function body {:?}", f);
trace!("BlockLoweringOrder: function body {:?}", f);
// Step 1: compute the in-edge and out-edge count of every block.
let mut block_in_count = SecondaryMap::with_default(0);
@ -477,7 +477,7 @@ impl BlockLoweringOrder {
orig_map,
cold_blocks,
};
log::trace!("BlockLoweringOrder: {:?}", result);
trace!("BlockLoweringOrder: {:?}", result);
result
}

2
cranelift/codegen/src/machinst/buffer.rs

@ -147,8 +147,8 @@ use crate::machinst::{
BlockIndex, MachInstLabelUse, TextSectionBuilder, VCodeConstant, VCodeConstants, VCodeInst,
};
use crate::timing;
use crate::trace;
use cranelift_entity::{entity_impl, SecondaryMap};
use log::trace;
use smallvec::SmallVec;
use std::convert::TryFrom;
use std::mem;

3
cranelift/codegen/src/machinst/compile.rs

@ -4,6 +4,7 @@ use crate::ir::Function;
use crate::isa::TargetIsa;
use crate::machinst::*;
use crate::timing;
use crate::trace;
use regalloc2::RegallocOptions;
use regalloc2::{self, MachineEnv};
@ -27,7 +28,7 @@ pub fn compile<B: LowerBackend + TargetIsa>(
lower.lower(b)?
};
log::trace!("vcode from lowering: \n{:?}", vcode);
trace!("vcode from lowering: \n{:?}", vcode);
// Perform register allocation.
let regalloc_result = {

62
cranelift/codegen/src/machinst/lower.rs

@ -20,7 +20,7 @@ use crate::machinst::{
LoweredBlock, MachLabel, Reg, VCode, VCodeBuilder, VCodeConstant, VCodeConstantData,
VCodeConstants, VCodeInst, ValueRegs, Writable,
};
use crate::CodegenResult;
use crate::{trace, CodegenResult};
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::convert::TryInto;
@ -506,7 +506,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
if value_regs[param].is_invalid() {
let regs = alloc_vregs(ty, &mut next_vreg, &mut vcode)?;
value_regs[param] = regs;
log::trace!("bb {} param {}: regs {:?}", bb, param, regs);
trace!("bb {} param {}: regs {:?}", bb, param, regs);
}
}
for inst in f.layout.block_insts(bb) {
@ -515,7 +515,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
if value_regs[result].is_invalid() {
let regs = alloc_vregs(ty, &mut next_vreg, &mut vcode)?;
value_regs[result] = regs;
log::trace!(
trace!(
"bb {} inst {} ({:?}): result {} regs {:?}",
bb,
inst,
@ -543,7 +543,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
for ret in &vcode.abi().signature().returns.clone() {
let regs = alloc_vregs(ret.value_type, &mut next_vreg, &mut vcode)?;
retval_regs.push(regs);
log::trace!("retval gets regs {:?}", regs);
trace!("retval gets regs {:?}", regs);
}
// Compute instruction colors, find constant instructions, and find instructions with
@ -557,16 +557,16 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
for inst in f.layout.block_insts(bb) {
let side_effect = has_lowering_side_effect(f, inst);
log::trace!("bb {} inst {} has color {}", bb, inst, cur_color);
trace!("bb {} inst {} has color {}", bb, inst, cur_color);
if side_effect {
side_effect_inst_entry_colors.insert(inst, InstColor::new(cur_color));
log::trace!(" -> side-effecting; incrementing color for next inst");
trace!(" -> side-effecting; incrementing color for next inst");
cur_color += 1;
}
// Determine if this is a constant; if so, add to the table.
if let Some(c) = is_constant_64bit(f, inst) {
log::trace!(" -> constant: {}", c);
trace!(" -> constant: {}", c);
inst_constants.insert(inst, c);
}
}
@ -627,7 +627,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
// Push args for a given inst onto the DFS stack.
let push_args_on_stack = |stack: &mut StackVec<'a>, value| {
log::trace!(" -> pushing args for {} onto stack", value);
trace!(" -> pushing args for {} onto stack", value);
if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
stack.push(f.dfg.inst_args(src_inst).iter());
}
@ -640,7 +640,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
while let Some(iter) = stack.last_mut() {
if let Some(&value) = iter.next() {
let value = f.dfg.resolve_aliases(value);
log::trace!(" -> DFS reaches {}", value);
trace!(" -> DFS reaches {}", value);
if value_ir_uses[value] == ValueUseState::Multiple {
// Truncate DFS here: no need to go further,
// as whole subtree must already be Multiple.
@ -658,7 +658,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
continue;
}
value_ir_uses[value] = ValueUseState::Multiple;
log::trace!(" -> became Multiple");
trace!(" -> became Multiple");
push_args_on_stack(stack, value);
} else {
// Empty iterator, discard.
@ -683,7 +683,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
let arg = f.dfg.resolve_aliases(arg);
let old = value_ir_uses[arg];
if force_multiple {
log::trace!(
trace!(
"forcing arg {} to Multiple because of multiple results of user inst",
arg
);
@ -692,7 +692,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
value_ir_uses[arg].inc();
}
let new = value_ir_uses[arg];
log::trace!("arg {} used, old state {:?}, new {:?}", arg, old, new,);
trace!("arg {} used, old state {:?}, new {:?}", arg, old, new,);
// On transition to Multiple, do DFS.
if old != ValueUseState::Multiple && new == ValueUseState::Multiple {
push_args_on_stack(&mut stack, arg);
@ -706,7 +706,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
fn gen_arg_setup(&mut self) {
if let Some(entry_bb) = self.f.layout.entry_block() {
log::trace!(
trace!(
"gen_arg_setup: entry BB {} args are:\n{:?}",
entry_bb,
self.f.dfg.block_params(entry_bb)
@ -830,7 +830,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
}
// Are any outputs used at least once?
let value_needed = self.is_any_inst_result_needed(inst);
log::trace!(
trace!(
"lower_clif_block: block {} inst {} ({:?}) is_branch {} side_effect {} value_needed {}",
block,
inst,
@ -860,7 +860,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
// Normal instruction: codegen if the instruction is side-effecting
// or any of its outputs its used.
if has_side_effect || value_needed {
log::trace!("lowering: inst {}: {:?}", inst, self.f.dfg[inst]);
trace!("lowering: inst {}: {:?}", inst, self.f.dfg[inst]);
backend.lower(self, inst)?;
}
if data.opcode().is_return() {
@ -905,7 +905,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
fn get_value_labels<'a>(&'a self, val: Value, depth: usize) -> Option<&'a [ValueLabelStart]> {
if let Some(ref values_labels) = self.f.dfg.values_labels {
log::trace!(
trace!(
"get_value_labels: val {} -> {} -> {:?}",
val,
self.f.dfg.resolve_aliases(val),
@ -937,7 +937,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
.map(|&ValueLabelStart { label, .. }| label)
.collect::<FxHashSet<_>>();
for label in labels {
log::trace!(
trace!(
"value labeling: defines val {:?} -> reg {:?} -> label {:?}",
val,
reg,
@ -953,7 +953,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
return;
}
log::trace!(
trace!(
"value labeling: srcloc {}: inst {}",
self.srcloc(inst),
inst
@ -968,7 +968,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
return;
}
log::trace!("value labeling: block {}", block);
trace!("value labeling: block {}", block);
for &arg in self.f.dfg.block_params(block) {
self.emit_value_label_marks_for_value(arg);
}
@ -999,7 +999,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
branches: &SmallVec<[Inst; 2]>,
targets: &SmallVec<[MachLabel; 2]>,
) -> CodegenResult<()> {
log::trace!(
trace!(
"lower_clif_branches: block {} branches {:?} targets {:?}",
block,
branches,
@ -1061,7 +1061,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
/// Lower the function.
pub fn lower<B: LowerBackend<MInst = I>>(mut self, backend: &B) -> CodegenResult<VCode<I>> {
log::trace!("about to lower function: {:?}", self.f);
trace!("about to lower function: {:?}", self.f);
// Initialize the ABI object, giving it a temp if requested.
let maybe_tmp = if let Some(temp_ty) = self.vcode.abi().temp_needed() {
@ -1152,7 +1152,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
// Now that we've emitted all instructions into the
// VCodeBuilder, let's build the VCode.
let vcode = self.vcode.build();
log::trace!("built vcode: {:?}", vcode);
trace!("built vcode: {:?}", vcode);
Ok(vcode)
}
@ -1287,7 +1287,7 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
}
fn get_value_as_source_or_const(&self, val: Value) -> NonRegInput {
log::trace!(
trace!(
"get_input_for_val: val {} at cur_inst {:?} cur_scan_entry_color {:?}",
val,
self.cur_inst,
@ -1317,8 +1317,8 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
// prior to the sunk instruction) to sink.
ValueDef::Result(src_inst, result_idx) => {
let src_side_effect = has_lowering_side_effect(self.f, src_inst);
log::trace!(" -> src inst {}", src_inst);
log::trace!(" -> has lowering side effect: {}", src_side_effect);
trace!(" -> src inst {}", src_inst);
trace!(" -> has lowering side effect: {}", src_side_effect);
if !src_side_effect {
// Pure instruction: always possible to
// sink. Let's determine whether we are the only
@ -1332,7 +1332,7 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
// Side-effect: test whether this is the only use of the
// only result of the instruction, and whether colors allow
// the code-motion.
log::trace!(
trace!(
" -> side-effecting op {} for val {}: use state {:?}",
src_inst,
val,
@ -1369,7 +1369,7 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
let val = self.f.dfg.resolve_aliases(val);
log::trace!("put_value_in_regs: val {}", val);
trace!("put_value_in_regs: val {}", val);
// Assert that the value is not `iflags`/`fflags`-typed; these
// cannot be reified into normal registers. TODO(#3249)
@ -1387,7 +1387,7 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
.and_then(|inst| self.get_constant(inst))
{
let regs = self.alloc_tmp(ty);
log::trace!(" -> regs {:?}", regs);
trace!(" -> regs {:?}", regs);
assert!(regs.is_valid());
let insts = I::gen_constant(regs, c.into(), ty, |ty| {
@ -1400,7 +1400,7 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
}
let mut regs = self.value_regs[val];
log::trace!(" -> regs {:?}", regs);
trace!(" -> regs {:?}", regs);
assert!(regs.is_valid());
self.value_lowered_uses[val] += 1;
@ -1431,7 +1431,7 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
}
fn emit(&mut self, mach_inst: I) {
log::trace!("emit: {:?}", mach_inst);
trace!("emit: {:?}", mach_inst);
self.ir_insts.push(mach_inst);
}
@ -1492,7 +1492,7 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
}
fn set_vreg_alias(&mut self, from: Reg, to: Reg) {
log::trace!("set vreg alias: from {:?} to {:?}", from, to);
trace!("set vreg alias: from {:?} to {:?}", from, to);
self.vcode.set_vreg_alias(from, to);
}
}

7
cranelift/codegen/src/machinst/vcode.rs

@ -24,6 +24,7 @@ use crate::ir::{
};
use crate::machinst::*;
use crate::timing;
use crate::trace;
use crate::ValueLocRange;
use regalloc2::{
Edit, Function as RegallocFunction, InstOrEdit, InstRange, Operand, OperandKind, PReg, PRegSet,
@ -587,7 +588,7 @@ impl<I: VCodeInst> VCodeBuilder<I> {
// Translate blockparam args via the vreg aliases table as well.
for arg in &mut self.vcode.branch_block_args {
let new_arg = Self::resolve_vreg_alias_impl(&self.vcode.vreg_aliases, *arg);
log::trace!("operandcollector: block arg {:?} -> {:?}", arg, new_arg);
trace!("operandcollector: block arg {:?} -> {:?}", arg, new_arg);
*arg = new_arg;
}
}
@ -806,7 +807,7 @@ impl<I: VCodeInst> VCode<I> {
}
for block in final_order {
log::trace!("emitting block {:?}", block);
trace!("emitting block {:?}", block);
let new_offset = I::align_basic_block(buffer.cur_offset());
while new_offset > buffer.cur_offset() {
// Pad with NOPs up to the aligned block offset.
@ -829,7 +830,7 @@ impl<I: VCodeInst> VCode<I> {
// Is this the first block? Emit the prologue directly if so.
if block == self.entry {
log::trace!(" -> entry block");
trace!(" -> entry block");
buffer.start_srcloc(SourceLoc::default());
state.pre_sourceloc(SourceLoc::default());
for inst in &prologue_insts {

6
cranelift/codegen/src/unreachable_code.rs

@ -3,8 +3,8 @@
use crate::cursor::{Cursor, FuncCursor};
use crate::dominator_tree::DominatorTree;
use crate::flowgraph::ControlFlowGraph;
use crate::ir;
use crate::timing;
use crate::{ir, trace};
/// Eliminate unreachable code.
///
@ -24,14 +24,14 @@ pub fn eliminate_unreachable_code(
continue;
}
log::trace!("Eliminating unreachable {}", block);
trace!("Eliminating unreachable {}", block);
// Move the cursor out of the way and make sure the next loop iteration goes to the right
// block.
pos.prev_block();
// Remove all instructions from `block`.
while let Some(inst) = pos.func.layout.first_inst(block) {
log::trace!(" - {}", pos.func.dfg.display_inst(inst));
trace!(" - {}", pos.func.dfg.display_inst(inst));
pos.func.layout.remove_inst(inst);
}

Loading…
Cancel
Save