
winch: Improve scratch register handling (#8971)

* winch: Improve scratch register handling

This commit doesn't introduce any new behavior. It's mostly a follow-up
to https://github.com/bytecodealliance/wasmtime/pull/7977.

This commit reduces the repetitive pattern used to obtain the scratch
register by introducing a `scratch!` macro, similar to the existing
`vmctx!` macro.

This commit also improves the macro definitions by using fully qualified
paths (`$crate::abi::ABI`), so call sites no longer need to import the
`ABI` trait themselves (see the sketch below).

* Fix unused imports
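
Why the fully qualified paths matter: a `macro_rules!` body is expanded at the call site, so a bare `ABI` in the expansion only resolves if the calling module happens to import that trait. Spelling it `$crate::abi::ABI` makes the expansion self-contained, which is what lets the follow-up drop the now unused `ABI` imports. Below is a minimal, self-contained sketch of the pattern, assuming invented stand-ins (`Reg`, `MacroAssembler`, `emit_something`, the `X64*` types); it is not Winch's real code.

// Illustrative stand-ins for the Winch types; only the macro shape mirrors the diff.
mod abi {
    #[derive(Debug, Clone, Copy)]
    pub struct Reg(pub u8);

    pub trait ABI {
        fn scratch_reg() -> Reg;
    }

    // Naming the trait as `$crate::abi::ABI` means the module that invokes
    // `scratch!` does not need `use crate::abi::ABI` in scope.
    macro_rules! scratch {
        ($m:ident) => {
            <$m::ABI as $crate::abi::ABI>::scratch_reg()
        };
    }
    pub(crate) use scratch;
}

mod codegen {
    // Only the macro and `Reg` are imported; the `ABI` trait itself is not.
    use crate::abi::{scratch, Reg};

    pub trait MacroAssembler {
        type ABI: crate::abi::ABI;
    }

    pub fn emit_something<M: MacroAssembler>() -> Reg {
        // Expands to `<M::ABI as crate::abi::ABI>::scratch_reg()`.
        scratch!(M)
    }
}

struct X64Abi;
impl abi::ABI for X64Abi {
    fn scratch_reg() -> abi::Reg {
        abi::Reg(11) // pretend r11 is the designated scratch register
    }
}

struct X64Masm;
impl codegen::MacroAssembler for X64Masm {
    type ABI = X64Abi;
}

fn main() {
    println!("{:?}", codegen::emit_something::<X64Masm>());
}

With the macro re-exported via `pub(crate) use scratch;`, call sites import only the macro, which matches the shape of the import changes in the diffs below.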
Saúl Cabrera, 4 months ago (committed by GitHub)
commit c879eafcbe
6 changed files:

  1. winch/codegen/src/abi/mod.rs (14 lines changed)
  2. winch/codegen/src/codegen/bounds.rs (6 lines changed)
  3. winch/codegen/src/codegen/call.rs (6 lines changed)
  4. winch/codegen/src/codegen/context.rs (8 lines changed)
  5. winch/codegen/src/codegen/mod.rs (12 lines changed)
  6. winch/codegen/src/masm.rs (6 lines changed)

winch/codegen/src/abi/mod.rs (14 lines changed)

@@ -67,10 +67,22 @@ pub(super) enum ParamsOrReturns {
 /// Macro to get the pinned register holding the [VMContext].
 macro_rules! vmctx {
     ($m:ident) => {
-        <$m::ABI as ABI>::vmctx_reg()
+        <$m::ABI as $crate::abi::ABI>::vmctx_reg()
     };
 }
 
+/// Macro to get the designated general purpose scratch register or the
+/// designated scratch register for the given type.
+macro_rules! scratch {
+    ($m:ident) => {
+        <$m::ABI as $crate::abi::ABI>::scratch_reg()
+    };
+    ($m:ident, $wasm_type:expr) => {
+        <$m::ABI as $crate::abi::ABI>::scratch_for($wasm_type)
+    };
+}
+
+pub(crate) use scratch;
 pub(crate) use vmctx;
 
 /// Constructs an [ABISig] using Winch's ABI.
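
The two arms of `scratch!` map onto `ABI::scratch_reg` and `ABI::scratch_for`. As a rough sketch of how an ABI might back them, the toy implementation below returns a floating point scratch register for float types and the general purpose one otherwise; the value-type enum, register names, and the `float_scratch_reg` split are illustrative assumptions, not Winch's actual definitions.

// Hedged sketch of an ABI backing the two `scratch!` arms; everything here
// is a toy stand-in chosen for the example.
#[derive(Debug, Clone, Copy)]
enum WasmValType {
    I32,
    I64,
    F32,
    F64,
}

#[derive(Debug, Clone, Copy)]
struct Reg(&'static str);

trait ABI {
    /// Designated general purpose scratch register (`scratch!(M)`).
    fn scratch_reg() -> Reg;
    /// Designated floating point scratch register.
    fn float_scratch_reg() -> Reg;
    /// Scratch register suited to the given type (`scratch!(M, &ty)`).
    fn scratch_for(ty: &WasmValType) -> Reg {
        match ty {
            WasmValType::F32 | WasmValType::F64 => Self::float_scratch_reg(),
            _ => Self::scratch_reg(),
        }
    }
}

struct X64;
impl ABI for X64 {
    fn scratch_reg() -> Reg {
        Reg("r11") // illustrative register choice
    }
    fn float_scratch_reg() -> Reg {
        Reg("xmm15") // illustrative register choice
    }
}

fn main() {
    // `scratch!(X64)` would resolve to the first call, `scratch!(X64, &ty)` to the second.
    println!("general purpose scratch: {:?}", <X64 as ABI>::scratch_reg());
    for ty in [WasmValType::I32, WasmValType::I64, WasmValType::F32, WasmValType::F64] {
        println!("scratch for {:?}: {:?}", ty, <X64 as ABI>::scratch_for(&ty));
    }
}

This split is also why `call.rs` below deliberately uses the single-argument form for the return pointer: a return area address is always held in an integer register, regardless of the value type being returned.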

winch/codegen/src/codegen/bounds.rs (6 lines changed)

@@ -3,7 +3,7 @@
 //! recommended when working on this area of Winch.
 use super::env::{HeapData, HeapStyle};
 use crate::{
-    abi::{vmctx, ABI},
+    abi::{scratch, vmctx},
     codegen::CodeGenContext,
     isa::reg::Reg,
     masm::{IntCmpKind, MacroAssembler, OperandSize, RegImm, TrapCode},
@@ -96,7 +96,7 @@ where
             masm.mov(RegImm::i64(max_size as i64), dst, ptr_size)
         }
         (_, HeapStyle::Dynamic) => {
-            let scratch = <M::ABI as ABI>::scratch_reg();
+            let scratch = scratch!(M);
             let base = if let Some(offset) = heap.import_from {
                 let addr = masm.address_at_vmctx(offset);
                 masm.load_ptr(addr, scratch);
@@ -199,7 +199,7 @@ pub(crate) fn load_heap_addr_unchecked<M>(
     let base = if let Some(offset) = heap.import_from {
         // If the WebAssembly memory is imported, load the address into
         // the scratch register.
-        let scratch = <M::ABI as ABI>::scratch_reg();
+        let scratch = scratch!(M);
         masm.load_ptr(masm.address_at_vmctx(offset), scratch);
         scratch
     } else {

winch/codegen/src/codegen/call.rs (6 lines changed)

@@ -57,7 +57,7 @@
 //! └──────────────────────────────────────────────────┘ ------> Stack pointer when emitting the call
 use crate::{
-    abi::{vmctx, ABIOperand, ABISig, RetArea, ABI},
+    abi::{scratch, vmctx, ABIOperand, ABISig, RetArea},
     codegen::{BuiltinFunction, BuiltinType, Callee, CodeGenContext},
     masm::{
         CalleeKind, ContextArgs, MacroAssembler, MemMoveDirection, OperandSize, SPOffset,
@@ -299,7 +299,7 @@ impl FnCall {
             &ABIOperand::Stack { ty, offset, .. } => {
                 let addr = masm.address_at_sp(SPOffset::from_u32(offset));
                 let size: OperandSize = ty.into();
-                let scratch = <M::ABI as ABI>::scratch_for(&ty);
+                let scratch = scratch!(M, &ty);
                 context.move_val_to_reg(val, scratch, masm);
                 masm.store(scratch.into(), addr, size);
             }
@@ -319,7 +319,7 @@ impl FnCall {
                 let slot = masm.address_at_sp(SPOffset::from_u32(offset));
                 // Don't rely on `ABI::scratch_for` as we always use
                 // an int register as the return pointer.
-                let scratch = <M::ABI as ABI>::scratch_reg();
+                let scratch = scratch!(M);
                 masm.load_addr(addr, scratch, ty.into());
                 masm.store(scratch.into(), slot, ty.into());
             }

winch/codegen/src/codegen/context.rs (8 lines changed)

@@ -2,7 +2,7 @@ use wasmtime_environ::{VMOffsets, WasmHeapType, WasmValType};
 use super::ControlStackFrame;
 use crate::{
-    abi::{vmctx, ABIOperand, ABIResults, RetArea, ABI},
+    abi::{scratch, vmctx, ABIOperand, ABIResults, RetArea},
     frame::Frame,
     isa::reg::RegClass,
     masm::{MacroAssembler, OperandSize, RegImm, SPOffset, ShiftKind, StackSlot},
@@ -185,13 +185,13 @@ impl<'a> CodeGenContext<'a> {
             Val::F64(v) => masm.store(RegImm::f64(v.bits()), addr, size),
             Val::Local(local) => {
                 let slot = self.frame.get_wasm_local(local.index);
-                let scratch = <M::ABI as ABI>::scratch_reg();
+                let scratch = scratch!(M);
                 let local_addr = masm.local_address(&slot);
                 masm.load(local_addr, scratch, size);
                 masm.store(scratch.into(), addr, size);
             }
             Val::Memory(_) => {
-                let scratch = <M::ABI as ABI>::scratch_reg();
+                let scratch = scratch!(M);
                 masm.pop(scratch, size);
                 masm.store(scratch.into(), addr, size);
             }
@@ -576,7 +576,7 @@ impl<'a> CodeGenContext<'a> {
             Val::Local(local) => {
                 let slot = frame.get_wasm_local(local.index);
                 let addr = masm.local_address(&slot);
-                let scratch = <M::ABI as ABI>::scratch_for(&slot.ty);
+                let scratch = scratch!(M, &slot.ty);
                 masm.load(addr, scratch, slot.ty.into());
                 let stack_slot = masm.push(scratch, slot.ty.into());
                 *v = Val::mem(slot.ty, stack_slot);

winch/codegen/src/codegen/mod.rs (12 lines changed)

@@ -1,5 +1,5 @@
 use crate::{
-    abi::{vmctx, ABIOperand, ABISig, RetArea, ABI},
+    abi::{scratch, vmctx, ABIOperand, ABISig, RetArea},
     codegen::BlockSig,
     isa::reg::Reg,
     masm::{
@@ -329,7 +329,7 @@ where
             .checked_mul(sig_index_bytes.into())
             .unwrap();
         let signatures_base_offset = self.env.vmoffsets.ptr.vmctx_type_ids_array();
-        let scratch = <M::ABI as ABI>::scratch_reg();
+        let scratch = scratch!(M);
         let funcref_sig_offset = self.env.vmoffsets.ptr.vm_func_ref_type_index();
 
         // Load the signatures address into the scratch register.
@@ -443,7 +443,7 @@ where
         let addr = if data.imported {
             let global_base = self.masm.address_at_reg(vmctx!(M), data.offset);
-            let scratch = <M::ABI as ABI>::scratch_reg();
+            let scratch = scratch!(M);
             self.masm.load_ptr(global_base, scratch);
             self.masm.address_at_reg(scratch, 0)
         } else {
@@ -754,7 +754,7 @@ where
         base: Reg,
         table_data: &TableData,
     ) -> M::Address {
-        let scratch = <M::ABI as ABI>::scratch_reg();
+        let scratch = scratch!(M);
         let bound = self.context.any_gpr(self.masm);
         let tmp = self.context.any_gpr(self.masm);
         let ptr_size: OperandSize = self.env.ptr_type().into();
@@ -811,7 +811,7 @@ where
     /// Retrieves the size of the table, pushing the result to the value stack.
     pub fn emit_compute_table_size(&mut self, table_data: &TableData) {
-        let scratch = <M::ABI as ABI>::scratch_reg();
+        let scratch = scratch!(M);
         let size = self.context.any_gpr(self.masm);
         let ptr_size: OperandSize = self.env.ptr_type().into();
@@ -834,7 +834,7 @@ where
     /// Retrieves the size of the memory, pushing the result to the value stack.
    pub fn emit_compute_memory_size(&mut self, heap_data: &HeapData) {
         let size_reg = self.context.any_gpr(self.masm);
-        let scratch = <M::ABI as ABI>::scratch_reg();
+        let scratch = scratch!(M);
 
         let base = if let Some(offset) = heap_data.import_from {
             self.masm

winch/codegen/src/masm.rs (6 lines changed)

@@ -1,4 +1,4 @@
-use crate::abi::{self, align_to, LocalSlot};
+use crate::abi::{self, align_to, scratch, LocalSlot};
 use crate::codegen::{CodeGenContext, FuncEnv};
 use crate::isa::reg::Reg;
 use cranelift_codegen::{
@@ -592,7 +592,7 @@ pub(crate) trait MacroAssembler {
         debug_assert!(bytes % 4 == 0);
         let mut remaining = bytes;
         let word_bytes = <Self::ABI as abi::ABI>::word_bytes();
-        let scratch = <Self::ABI as abi::ABI>::scratch_reg();
+        let scratch = scratch!(Self);
 
         let mut dst_offs = dst.as_u32() - bytes;
         let mut src_offs = src.as_u32() - bytes;
@@ -869,7 +869,7 @@ pub(crate) trait MacroAssembler {
             // Add an upper bound to this generation;
            // given a considerably large amount of slots
             // this will be inefficient.
-            let zero = <Self::ABI as abi::ABI>::scratch_reg();
+            let zero = scratch!(Self);
             self.zero(zero);
             let zero = RegImm::reg(zero);
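
To make the trade-off in that last comment concrete: the zeroing path materializes zero in the scratch register once and then emits one store per slot, so the emitted code grows linearly with the number of slots, which is what the "add an upper bound" note is about. The sketch below models that shape together with the kind of bounded fallback the comment hints at; `zero_slots`, the pseudo-instruction strings, and the `UNROLL_LIMIT` cutoff are hypothetical and not Winch's current behavior.

// Hedged model of the slot-zeroing strategy; the strings stand in for
// emitted machine instructions, not a real Winch API.
fn zero_slots(slot_offsets: &[u32]) -> Vec<String> {
    // Hypothetical cutoff: past this many slots, straight-line stores get
    // large and a loop (or memset-style builtin) would be preferable.
    const UNROLL_LIMIT: usize = 16;

    let mut code = Vec::new();
    code.push("zero scratch".to_string()); // materialize 0 once

    if slot_offsets.len() <= UNROLL_LIMIT {
        // Current shape: one store per slot, no loop.
        for off in slot_offsets {
            code.push(format!("store scratch -> [sp + {off}]"));
        }
    } else {
        // The "upper bound" idea from the comment: fall back to a loop.
        code.push(format!("loop over {} slots storing scratch", slot_offsets.len()));
    }
    code
}

fn main() {
    for line in zero_slots(&[0, 8, 16, 24]) {
        println!("{line}");
    }
}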
