Browse Source

Various cleanups to the ABI handling code (part 1) (#8903)

* Inline abi_arg_slot_regs into gen_retval

This simplifies the code a bit.

* Remove abi argument of gen_call_common

It isn't actually necessary.

* Define gen_call_common without macro

Rust-analyzer disables most IDE functionality inside macros. Defining
gen_call_common without a macro makes it easier to modify.

* Use mem::take instead of mem::replace
pull/8875/head
bjorn3 4 months ago
committed by GitHub
parent
commit
50d82f22e5
No known key found for this signature in database GPG Key ID: B5690EEEBB952194
  1. 5
      cranelift/codegen/src/isa/aarch64/lower/isle.rs
  2. 5
      cranelift/codegen/src/isa/riscv64/lower/isle.rs
  3. 14
      cranelift/codegen/src/isa/x64/lower.rs
  4. 39
      cranelift/codegen/src/isa/x64/lower/isle.rs
  5. 47
      cranelift/codegen/src/machinst/abi.rs
  6. 140
      cranelift/codegen/src/machinst/isle.rs

5
cranelift/codegen/src/isa/aarch64/lower/isle.rs

@@ -18,7 +18,6 @@ use crate::isa::aarch64::inst::{FPULeftShiftImm, FPURightShiftImm, ReturnCallInf
use crate::isa::aarch64::AArch64Backend;
use crate::isle_common_prelude_methods;
use crate::machinst::isle::*;
use crate::machinst::valueregs;
use crate::{
binemit::CodeOffset,
ir::{
@@ -70,10 +69,6 @@ pub struct ExtendedValue {
extend: ExtendOp,
}
impl IsleContext<'_, '_, MInst, AArch64Backend> {
isle_prelude_method_helpers!(AArch64CallSite);
}
impl Context for IsleContext<'_, '_, MInst, AArch64Backend> {
isle_lower_prelude_methods!();
isle_prelude_caller_methods!(

5
cranelift/codegen/src/isa/riscv64/lower/isle.rs

@@ -3,7 +3,7 @@
// Pull in the ISLE generated code.
#[allow(unused)]
pub mod generated_code;
use generated_code::{Context, MInst};
use generated_code::MInst;
// Types that the generated ISLE code uses via `use super::*`.
use self::generated_code::{VecAluOpRR, VecLmul};
@@ -34,7 +34,6 @@ type BoxReturnCallInfo = Box<ReturnCallInfo>;
type BoxExternalName = Box<ExternalName>;
type VecMachLabel = Vec<MachLabel>;
type VecArgPair = Vec<ArgPair>;
use crate::machinst::valueregs;
pub(crate) struct RV64IsleContext<'a, 'b, I, B>
where
@@ -49,8 +48,6 @@ where
}
impl<'a, 'b> RV64IsleContext<'a, 'b, MInst, Riscv64Backend> {
isle_prelude_method_helpers!(Riscv64ABICallSite);
fn new(lower_ctx: &'a mut Lower<'b, MInst>, backend: &'a Riscv64Backend) -> Self {
Self {
lower_ctx,

14
cranelift/codegen/src/isa/x64/lower.rs

@@ -14,7 +14,7 @@ use crate::machinst::lower::*;
use crate::machinst::*;
use crate::result::CodegenResult;
use crate::settings::Flags;
use smallvec::smallvec;
use smallvec::{smallvec, SmallVec};
use target_lexicon::Triple;
//=============================================================================
@@ -152,8 +152,7 @@ fn emit_vm_call(
triple: &Triple,
libcall: LibCall,
inputs: &[Reg],
outputs: &[Writable<Reg>],
) -> CodegenResult<()> {
) -> CodegenResult<SmallVec<[Reg; 1]>> {
let extname = ExternalName::LibCall(libcall);
let dist = if flags.use_colocated_libcalls() {
@@ -182,8 +181,11 @@
}
let mut retval_insts: SmallInstVec<_> = smallvec![];
for (i, output) in outputs.iter().enumerate() {
retval_insts.extend(abi.gen_retval(ctx, i, ValueRegs::one(*output)).into_iter());
let mut outputs: SmallVec<[_; 1]> = smallvec![];
for i in 0..ctx.sigs().num_rets(ctx.sigs().abi_sig_for_signature(&sig)) {
let (retval_inst, retval_regs) = abi.gen_retval(ctx, i);
retval_insts.extend(retval_inst.into_iter());
outputs.push(retval_regs.only_reg().unwrap());
}
abi.emit_call(ctx);
@@ -192,7 +194,7 @@
ctx.emit(inst);
}
Ok(())
Ok(outputs)
}
/// Returns whether the given input is a shift by a constant value less or equal than 3.

39
cranelift/codegen/src/isa/x64/lower/isle.rs

@@ -22,8 +22,8 @@ use crate::{
inst::{args::*, regs, CallInfo, ReturnCallInfo},
},
machinst::{
isle::*, valueregs, ArgPair, InsnInput, InstOutput, MachAtomicRmwOp, MachInst,
VCodeConstant, VCodeConstantData,
isle::*, ArgPair, InsnInput, InstOutput, MachAtomicRmwOp, MachInst, VCodeConstant,
VCodeConstantData,
},
};
use alloc::vec::Vec;
@@ -689,57 +689,48 @@ impl Context for IsleContext<'_, '_, MInst, X64Backend> {
}
fn libcall_1(&mut self, libcall: &LibCall, a: Reg) -> Reg {
let call_conv = self.lower_ctx.abi().call_conv(self.lower_ctx.sigs());
let ret_ty = libcall.signature(call_conv, I64).returns[0].value_type;
let output_reg = self.lower_ctx.alloc_tmp(ret_ty).only_reg().unwrap();
emit_vm_call(
let outputs = emit_vm_call(
self.lower_ctx,
&self.backend.flags,
&self.backend.triple,
libcall.clone(),
&[a],
&[output_reg],
)
.expect("Failed to emit LibCall");
output_reg.to_reg()
debug_assert_eq!(outputs.len(), 1);
outputs[0]
}
fn libcall_2(&mut self, libcall: &LibCall, a: Reg, b: Reg) -> Reg {
let call_conv = self.lower_ctx.abi().call_conv(self.lower_ctx.sigs());
let ret_ty = libcall.signature(call_conv, I64).returns[0].value_type;
let output_reg = self.lower_ctx.alloc_tmp(ret_ty).only_reg().unwrap();
emit_vm_call(
let outputs = emit_vm_call(
self.lower_ctx,
&self.backend.flags,
&self.backend.triple,
libcall.clone(),
&[a, b],
&[output_reg],
)
.expect("Failed to emit LibCall");
output_reg.to_reg()
debug_assert_eq!(outputs.len(), 1);
outputs[0]
}
fn libcall_3(&mut self, libcall: &LibCall, a: Reg, b: Reg, c: Reg) -> Reg {
let call_conv = self.lower_ctx.abi().call_conv(self.lower_ctx.sigs());
let ret_ty = libcall.signature(call_conv, I64).returns[0].value_type;
let output_reg = self.lower_ctx.alloc_tmp(ret_ty).only_reg().unwrap();
emit_vm_call(
let outputs = emit_vm_call(
self.lower_ctx,
&self.backend.flags,
&self.backend.triple,
libcall.clone(),
&[a, b, c],
&[output_reg],
)
.expect("Failed to emit LibCall");
output_reg.to_reg()
debug_assert_eq!(outputs.len(), 1);
outputs[0]
}
#[inline]
@@ -1005,8 +996,6 @@ impl Context for IsleContext<'_, '_, MInst, X64Backend> {
}
impl IsleContext<'_, '_, MInst, X64Backend> {
isle_prelude_method_helpers!(X64CallSite);
fn load_xmm_unaligned(&mut self, addr: SyntheticAmode) -> Xmm {
let tmp = self.lower_ctx.alloc_tmp(types::F32X4).only_reg().unwrap();
self.lower_ctx.emit(MInst::XmmUnaryRmRUnaligned {

47
cranelift/codegen/src/machinst/abi.rs

@@ -2094,6 +2094,11 @@ impl<M: ABIMachineSpec> CallSite<M> {
sigs.num_args(self.sig)
}
/// Get the number of return values expected.
pub fn num_rets(&self, sigs: &SigSet) -> usize {
sigs.num_rets(self.sig)
}
/// Emit a copy of a large argument into its associated stack buffer, if
/// any. We must be careful to perform all these copies (as necessary)
/// before setting up the argument registers, since we may have to invoke
@@ -2297,25 +2302,28 @@ impl<M: ABIMachineSpec> CallSite<M> {
/// Define a return value after the call returns.
pub fn gen_retval(
&mut self,
ctx: &Lower<M::I>,
ctx: &mut Lower<M::I>,
idx: usize,
into_regs: ValueRegs<Writable<Reg>>,
) -> SmallInstVec<M::I> {
) -> (SmallInstVec<M::I>, ValueRegs<Reg>) {
let mut insts = smallvec![];
match &ctx.sigs().rets(self.sig)[idx] {
&ABIArg::Slots { ref slots, .. } => {
assert_eq!(into_regs.len(), slots.len());
for (slot, into_reg) in slots.iter().zip(into_regs.regs().iter()) {
let mut into_regs: SmallVec<[Reg; 2]> = smallvec![];
let ret = ctx.sigs().rets(self.sig)[idx].clone();
match ret {
ABIArg::Slots { ref slots, .. } => {
for slot in slots {
match slot {
// Extension mode doesn't matter because we're copying out, not in,
// and we ignore high bits in our own registers by convention.
&ABIArgSlot::Reg { reg, .. } => {
&ABIArgSlot::Reg { reg, ty, .. } => {
let into_reg = ctx.alloc_tmp(ty).only_reg().unwrap();
self.defs.push(CallRetPair {
vreg: *into_reg,
vreg: into_reg,
preg: reg.into(),
});
into_regs.push(into_reg.to_reg());
}
&ABIArgSlot::Stack { offset, ty, .. } => {
let into_reg = ctx.alloc_tmp(ty).only_reg().unwrap();
let sig_data = &ctx.sigs()[self.sig];
// The outgoing argument area must always be restored after a call,
// ensuring that the return values will be in a consistent place after
@@ -2323,21 +2331,28 @@ impl<M: ABIMachineSpec> CallSite<M> {
let ret_area_base = sig_data.sized_stack_arg_space();
insts.push(M::gen_load_stack(
StackAMode::OutgoingArg(offset + ret_area_base),
*into_reg,
into_reg,
ty,
));
into_regs.push(into_reg.to_reg());
}
}
}
}
&ABIArg::StructArg { .. } => {
ABIArg::StructArg { .. } => {
panic!("StructArg not supported in return position");
}
&ABIArg::ImplicitPtrArg { .. } => {
ABIArg::ImplicitPtrArg { .. } => {
panic!("ImplicitPtrArg not supported in return position");
}
}
insts
let value_regs = match *into_regs {
[a] => ValueRegs::one(a),
[a, b] => ValueRegs::two(a, b),
_ => panic!("Expected to see one or two slots only from {:?}", ret),
};
(insts, value_regs)
}
/// Emit the call itself.
@@ -2365,10 +2380,8 @@ impl<M: ABIMachineSpec> CallSite<M> {
self.gen_arg(ctx, i.into(), ValueRegs::one(rd.to_reg()));
}
let (uses, defs) = (
mem::replace(&mut self.uses, Default::default()),
mem::replace(&mut self.defs, Default::default()),
);
let uses = mem::take(&mut self.uses);
let defs = mem::take(&mut self.defs);
let sig = &ctx.sigs()[self.sig];
let callee_pop_size = if sig.call_conv() == isa::CallConv::Tail {

140
cranelift/codegen/src/machinst/isle.rs

@@ -12,8 +12,8 @@ pub use crate::ir::{
};
pub use crate::isa::{unwind::UnwindInst, TargetIsa};
pub use crate::machinst::{
ABIArg, ABIArgSlot, InputSourceInst, Lower, LowerBackend, RealReg, Reg, RelocDistance, Sig,
VCodeInst, Writable,
ABIArg, ABIArgSlot, ABIMachineSpec, CallSite, InputSourceInst, Lower, LowerBackend, RealReg,
Reg, RelocDistance, Sig, VCodeInst, Writable,
};
pub use crate::settings::TlsModel;
@@ -758,7 +758,6 @@ macro_rules! isle_prelude_caller_methods {
let caller_conv = self.lower_ctx.abi().call_conv(self.lower_ctx.sigs());
let sig = &self.lower_ctx.dfg().signatures[sig_ref];
let num_rets = sig.returns.len();
let abi = self.lower_ctx.sigs().abi_sig_for_sig_ref(sig_ref);
let caller = <$abicaller>::from_func(
self.lower_ctx.sigs(),
sig_ref,
@@ -774,7 +773,7 @@ macro_rules! isle_prelude_caller_methods {
sig.params.len()
);
self.gen_call_common(abi, num_rets, caller, args)
crate::machinst::isle::gen_call_common(&mut self.lower_ctx, num_rets, caller, args)
}
fn gen_call_indirect(
@@ -787,7 +786,6 @@ macro_rules! isle_prelude_caller_methods {
let ptr = self.put_in_reg(val);
let sig = &self.lower_ctx.dfg().signatures[sig_ref];
let num_rets = sig.returns.len();
let abi = self.lower_ctx.sigs().abi_sig_for_sig_ref(sig_ref);
let caller = <$abicaller>::from_ptr(
self.lower_ctx.sigs(),
sig_ref,
@@ -802,98 +800,62 @@ macro_rules! isle_prelude_caller_methods {
sig.params.len()
);
self.gen_call_common(abi, num_rets, caller, args)
crate::machinst::isle::gen_call_common(&mut self.lower_ctx, num_rets, caller, args)
}
};
}
/// Helpers for the above ISLE prelude implementations. Meant to go
/// inside the `impl` for the context type, not the trait impl.
#[macro_export]
#[doc(hidden)]
macro_rules! isle_prelude_method_helpers {
($abicaller:ty) => {
fn gen_call_common_args(&mut self, call_site: &mut $abicaller, (inputs, off): ValueSlice) {
let num_args = call_site.num_args(self.lower_ctx.sigs());
assert_eq!(
inputs.len(&self.lower_ctx.dfg().value_lists) - off,
num_args
);
let mut arg_regs = vec![];
for i in 0..num_args {
let input = inputs
.get(off + i, &self.lower_ctx.dfg().value_lists)
.unwrap();
arg_regs.push(self.put_in_regs(input));
}
for (i, arg_regs) in arg_regs.iter().enumerate() {
call_site.emit_copy_regs_to_buffer(self.lower_ctx, i, *arg_regs);
}
for (i, arg_regs) in arg_regs.iter().enumerate() {
call_site.gen_arg(self.lower_ctx, i, *arg_regs);
}
}
fn gen_call_common(
&mut self,
abi: Sig,
num_rets: usize,
mut caller: $abicaller,
args: ValueSlice,
) -> InstOutput {
self.gen_call_common_args(&mut caller, args);
// Handle retvals prior to emitting call, so the
// constraints are on the call instruction; but buffer the
// instructions till after the call.
let mut outputs = InstOutput::new();
let mut retval_insts = crate::machinst::abi::SmallInstVec::new();
// We take the *last* `num_rets` returns of the sig:
// this skips a StructReturn, if any, that is present.
let sigdata_num_rets = self.lower_ctx.sigs().num_rets(abi);
debug_assert!(num_rets <= sigdata_num_rets);
for i in (sigdata_num_rets - num_rets)..sigdata_num_rets {
// Borrow `sigdata` again so we don't hold a `self`
// borrow across the `&mut self` arg to
// `abi_arg_slot_regs()` below.
let ret = self.lower_ctx.sigs().get_ret(abi, i);
let retval_regs = self.abi_arg_slot_regs(&ret).unwrap();
retval_insts.extend(
caller
.gen_retval(self.lower_ctx, i, retval_regs.clone())
.into_iter(),
);
outputs.push(valueregs::non_writable_value_regs(retval_regs));
}
fn gen_call_common_args<M: ABIMachineSpec>(
ctx: &mut Lower<'_, M::I>,
call_site: &mut CallSite<M>,
(inputs, off): ValueSlice,
) {
let num_args = call_site.num_args(ctx.sigs());
assert_eq!(inputs.len(&ctx.dfg().value_lists) - off, num_args);
let mut arg_regs = vec![];
for i in 0..num_args {
let input = inputs.get(off + i, &ctx.dfg().value_lists).unwrap();
arg_regs.push(ctx.put_value_in_regs(input));
}
for (i, arg_regs) in arg_regs.iter().enumerate() {
call_site.emit_copy_regs_to_buffer(ctx, i, *arg_regs);
}
for (i, arg_regs) in arg_regs.iter().enumerate() {
call_site.gen_arg(ctx, i, *arg_regs);
}
}
caller.emit_call(self.lower_ctx);
pub fn gen_call_common<M: ABIMachineSpec>(
ctx: &mut Lower<'_, M::I>,
num_rets: usize,
mut caller: CallSite<M>,
args: ValueSlice,
) -> InstOutput {
gen_call_common_args(ctx, &mut caller, args);
// Handle retvals prior to emitting call, so the
// constraints are on the call instruction; but buffer the
// instructions till after the call.
let mut outputs = InstOutput::new();
let mut retval_insts = crate::machinst::abi::SmallInstVec::new();
// We take the *last* `num_rets` returns of the sig:
// this skips a StructReturn, if any, that is present.
let sigdata_num_rets = caller.num_rets(ctx.sigs());
debug_assert!(num_rets <= sigdata_num_rets);
for i in (sigdata_num_rets - num_rets)..sigdata_num_rets {
let (retval_inst, retval_regs) = caller.gen_retval(ctx, i);
retval_insts.extend(retval_inst.into_iter());
outputs.push(retval_regs);
}
for inst in retval_insts {
self.lower_ctx.emit(inst);
}
caller.emit_call(ctx);
outputs
}
for inst in retval_insts {
ctx.emit(inst);
}
fn abi_arg_slot_regs(&mut self, arg: &ABIArg) -> Option<WritableValueRegs> {
match arg {
&ABIArg::Slots { ref slots, .. } => match slots.len() {
1 => {
let a = self.temp_writable_reg(slots[0].get_type());
Some(WritableValueRegs::one(a))
}
2 => {
let a = self.temp_writable_reg(slots[0].get_type());
let b = self.temp_writable_reg(slots[1].get_type());
Some(WritableValueRegs::two(a, b))
}
_ => panic!("Expected to see one or two slots only from {:?}", arg),
},
_ => None,
}
}
};
outputs
}
/// This structure is used to implement the ISLE-generated `Context` trait and

Loading…
Cancel
Save