
[lightbeam] Fix clippy warnings + update dependencies (#661)

* [lightbeam] fix compiler warnings

* [lightbeam] fix sign mask clippy warning

* [lightbeam] fix clippy warning: match -> if let

* [lightbeam] fix some hex-literal clippy warnings

* [lightbeam] fix some more simple clippy warnings

* [lightbeam] convert if -> match (clippy suggestion)

* [lightbeam] fix some more clippy warnings

* [lightbeam] add # Safety section to doc comment of execute_func_unchecked

* [lightbeam] rename into_temp_reg -> put_into_temp_register

* [lightbeam] rename to_temp_reg -> clone_to_temp_register

* [lightbeam] rename into_reg -> put_into_register

* [lightbeam] rename to_reg -> clone_to_register

* [lightbeam] rename into_temp_loc -> put_into_temp_location

* [lightbeam] apply rustfmt

* [lightbeam] update dynasm 0.5.1 -> 0.5.2

* [lightbeam] update wasmparser 0.39.1 -> 0.44.0

* [lightbeam] update other dependencies
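
The into_* / to_* renames above presumably follow the Rust method-naming convention that clippy enforces with its wrong_self_convention lint: into_* implies consuming self, and these helpers only borrow it. A minimal sketch of the distinction, using hypothetical names rather than lightbeam's actual API:

```rust
// Hypothetical example of the naming convention behind the renames; not lightbeam code.
struct Counter(u32);

impl Counter {
    // Consumes the value, so the `into_` prefix is appropriate.
    fn into_inner(self) -> u32 {
        self.0
    }

    // Only borrows mutably, so a verb-phrase name avoids clippy's complaint.
    fn put_into_range(&mut self, max: u32) -> u32 {
        self.0 = self.0.min(max);
        self.0
    }
}

fn main() {
    let mut c = Counter(10);
    assert_eq!(c.put_into_range(4), 4);
    assert_eq!(c.into_inner(), 4);
}
```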
Branch: pull/693/head
Authored by Hero Bird 5 years ago, committed by Sergei Pepyakin
Commit 3716a863be
  1. crates/lightbeam/Cargo.toml (12 changes)
  2. crates/lightbeam/src/backend.rs (289 changes)
  3. crates/lightbeam/src/function_body.rs (14 changes)
  4. crates/lightbeam/src/microwasm.rs (120 changes)
  5. crates/lightbeam/src/module.rs (8 changes)

crates/lightbeam/Cargo.toml (12 changes)

@ -12,13 +12,13 @@ edition = "2018"
[dependencies]
smallvec = "1.0.0"
dynasm = "0.5.1"
dynasmrt = "0.5.1"
wasmparser = "0.39.1"
memoffset = "0.5.1"
itertools = "0.8"
dynasm = "0.5.2"
dynasmrt = "0.5.2"
wasmparser = "0.44.0"
memoffset = "0.5.3"
itertools = "0.8.2"
capstone = "0.6.0"
thiserror = "1.0.4"
thiserror = "1.0.9"
cranelift-codegen = "0.50.0"
multi_mut = "0.1"
either = "1.5"

crates/lightbeam/src/backend.rs (289 changes)

@ -12,6 +12,7 @@ use either::Either;
use more_asserts::assert_le;
use std::{
any::{Any, TypeId},
cmp::Ordering,
collections::HashMap,
convert::{TryFrom, TryInto},
fmt::Display,
@ -231,9 +232,9 @@ pub mod registers {
pub const NUM_GPRS: u8 = 16;
}
const SIGN_MASK_F64: u64 = 0b1000000000000000000000000000000000000000000000000000000000000000;
const SIGN_MASK_F64: u64 = 0x8000_0000_0000_0000;
const REST_MASK_F64: u64 = !SIGN_MASK_F64;
const SIGN_MASK_F32: u32 = 0b10000000000000000000000000000000;
const SIGN_MASK_F32: u32 = 0x8000_0000;
const REST_MASK_F32: u32 = !SIGN_MASK_F32;
impl GPRs {
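
The mask rewrite above targets clippy's literal-readability lints: the long binary literals become grouped hex literals with the same bit patterns. A quick standalone check of the equivalence (not part of the diff):

```rust
const SIGN_MASK_F64: u64 = 0x8000_0000_0000_0000;
const SIGN_MASK_F32: u32 = 0x8000_0000;

fn main() {
    // Same values as the old binary literals: only the top (sign) bit is set.
    assert_eq!(SIGN_MASK_F64, 1u64 << 63);
    assert_eq!(SIGN_MASK_F32, 1u32 << 31);
    // The REST_* masks are simply the complements.
    assert_eq!(!SIGN_MASK_F64, u64::max_value() >> 1);
    assert_eq!(!SIGN_MASK_F32, u32::max_value() >> 1);
}
```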
@ -871,7 +872,7 @@ macro_rules! int_div {
false
}
ValueLocation::Reg(_) => {
let reg = self.into_reg(GPRType::Rq, &mut divisor).unwrap();
let reg = self.put_into_register(GPRType::Rq, &mut divisor).unwrap();
dynasm!(self.asm
; cmp $reg_ty(reg.rq().unwrap()), -1
);
@ -965,7 +966,7 @@ macro_rules! unop {
ValueLocation::Reg(temp)
}
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let reg = self.into_reg(GPRType::Rq, &mut val).unwrap();
let reg = self.put_into_register(GPRType::Rq, &mut val).unwrap();
let temp = self.take_reg(Type::for_::<$typ>()).unwrap();
dynasm!(self.asm
; $instr $reg_ty(temp.rq().unwrap()), $reg_ty(reg.rq().unwrap())
@ -1011,7 +1012,7 @@ macro_rules! conversion {
ValueLocation::Reg(temp)
}
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let reg = self.into_reg(Type::for_::<$in_typ>(), &mut val).unwrap();
let reg = self.put_into_register(Type::for_::<$in_typ>(), &mut val).unwrap();
let temp = self.take_reg(Type::for_::<$out_typ>()).unwrap();
dynasm!(self.asm
@ -1039,7 +1040,7 @@ macro_rules! shift {
if let Some(imm) = count.immediate() {
if let Some(imm) = imm.as_int() {
if let Ok(imm) = i8::try_from(imm) {
let reg = self.into_temp_reg($ty, &mut val).unwrap();
let reg = self.put_into_temp_register($ty, &mut val).unwrap();
dynasm!(self.asm
; $instr $reg_ty(reg.rq().unwrap()), imm
@ -1075,7 +1076,7 @@ macro_rules! shift {
match other {
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let gpr = self.into_reg(I32, &mut count).unwrap();
let gpr = self.put_into_register(I32, &mut count).unwrap();
dynasm!(self.asm
; mov cl, Rb(gpr.rq().unwrap())
);
@ -1101,7 +1102,7 @@ macro_rules! shift {
self.block_state.regs.mark_used(RCX);
count = ValueLocation::Reg(RCX);
let reg = self.into_temp_reg($ty, &mut val).unwrap();
let reg = self.put_into_temp_register($ty, &mut val).unwrap();
dynasm!(self.asm
; $instr $reg_ty(reg.rq().unwrap()), cl
@ -1138,7 +1139,7 @@ macro_rules! cmp_i32 {
ValueLocation::Cond($reverse_flags)
}
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let rreg = self.into_reg(I32, &mut right).unwrap();
let rreg = self.put_into_register(I32, &mut right).unwrap();
dynasm!(self.asm
; cmp Rd(rreg.rq().unwrap()), i
);
@ -1155,7 +1156,7 @@ macro_rules! cmp_i32 {
}
}
} else {
let lreg = self.into_reg(I32, &mut left).unwrap();
let lreg = self.put_into_register(I32, &mut left).unwrap();
match right {
ValueLocation::Stack(offset) => {
@ -1165,7 +1166,7 @@ macro_rules! cmp_i32 {
);
}
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let rreg = self.into_reg(I32, &mut right).unwrap();
let rreg = self.put_into_register(I32, &mut right).unwrap();
dynasm!(self.asm
; cmp Rd(lreg.rq().unwrap()), Rd(rreg.rq().unwrap())
);
@ -1203,7 +1204,7 @@ macro_rules! cmp_i64 {
; cmp QWORD [rsp + offset], i
);
} else {
let lreg = self.into_reg(I32, &mut left).unwrap();
let lreg = self.put_into_register(I32, &mut left).unwrap();
dynasm!(self.asm
; cmp QWORD [rsp + offset], Rq(lreg.rq().unwrap())
);
@ -1211,13 +1212,13 @@ macro_rules! cmp_i64 {
ValueLocation::Cond($reverse_flags)
}
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let rreg = self.into_reg(I32, &mut right).unwrap();
let rreg = self.put_into_register(I32, &mut right).unwrap();
if let Some(i) = i.try_into().ok() {
dynasm!(self.asm
; cmp Rq(rreg.rq().unwrap()), i
);
} else {
let lreg = self.into_reg(I32, &mut left).unwrap();
let lreg = self.put_into_register(I32, &mut left).unwrap();
dynasm!(self.asm
; cmp Rq(rreg.rq().unwrap()), Rq(lreg.rq().unwrap())
);
@ -1235,7 +1236,7 @@ macro_rules! cmp_i64 {
}
}
} else {
let lreg = self.into_reg(I64, &mut left).unwrap();
let lreg = self.put_into_register(I64, &mut left).unwrap();
match right {
ValueLocation::Stack(offset) => {
@ -1245,7 +1246,7 @@ macro_rules! cmp_i64 {
);
}
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let rreg = self.into_reg(I32, &mut right).unwrap();
let rreg = self.put_into_register(I32, &mut right).unwrap();
dynasm!(self.asm
; cmp Rq(lreg.rq().unwrap()), Rq(rreg.rq().unwrap())
);
@ -1257,7 +1258,7 @@ macro_rules! cmp_i64 {
; cmp Rq(lreg.rq().unwrap()), i
);
} else {
let rreg = self.into_reg(I32, &mut right).unwrap();
let rreg = self.put_into_register(I32, &mut right).unwrap();
dynasm!(self.asm
; cmp Rq(lreg.rq().unwrap()), Rq(rreg.rq().unwrap())
);
@ -1313,8 +1314,8 @@ macro_rules! eq_float {
_ => (right, left)
};
let lreg = self.into_temp_reg(GPRType::Rx, &mut left).unwrap();
let rreg = self.into_reg(GPRType::Rx, &mut right).unwrap();
let lreg = self.put_into_temp_register(GPRType::Rx, &mut left).unwrap();
let rreg = self.put_into_register(GPRType::Rx, &mut right).unwrap();
let out = self.take_reg(I32).unwrap();
dynasm!(self.asm
@ -1359,8 +1360,8 @@ macro_rules! minmax_float {
_ => (right, left)
};
let lreg = self.into_temp_reg(GPRType::Rx, &mut left).unwrap();
let rreg = self.into_reg(GPRType::Rx, &mut right).unwrap();
let lreg = self.put_into_temp_register(GPRType::Rx, &mut left).unwrap();
let rreg = self.put_into_register(GPRType::Rx, &mut right).unwrap();
dynasm!(self.asm
; $cmpinstr Rx(lreg.rx().unwrap()), Rx(rreg.rx().unwrap())
@ -1407,7 +1408,7 @@ macro_rules! cmp_float {
ValueLocation::Immediate(0i32.into())
}
} else {
let lreg = this.into_reg(GPRType::Rx, left).unwrap();
let lreg = this.put_into_register(GPRType::Rx, left).unwrap();
let result = this.take_reg(I32).unwrap();
match right {
@ -1421,7 +1422,7 @@ macro_rules! cmp_float {
);
}
right => {
let rreg = this.into_reg(GPRType::Rx, right).unwrap();
let rreg = this.put_into_register(GPRType::Rx, right).unwrap();
dynasm!(this.asm
; xor Rq(result.rq().unwrap()), Rq(result.rq().unwrap())
@ -1656,12 +1657,12 @@ macro_rules! binop {
}
let (mut left, mut right) = $map_op(left, right);
let lreg = self.into_temp_reg($ty, &mut left).unwrap();
let lreg = self.put_into_temp_register($ty, &mut left).unwrap();
match right {
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
// This handles the case where we (for example) have a float in an `Rq` reg
let right_reg = self.into_reg($ty, &mut right).unwrap();
let right_reg = self.put_into_register($ty, &mut right).unwrap();
dynasm!(self.asm
; $instr $reg_ty(lreg.$reg_fn().unwrap()), $reg_ty(right_reg.$reg_fn().unwrap())
);
@ -1735,7 +1736,7 @@ macro_rules! load {
}
Err(gpr) => {
if offset == 0 {
ctx.to_reg(I32, ValueLocation::Reg(gpr)).unwrap()
ctx.clone_to_register(I32, ValueLocation::Reg(gpr)).unwrap()
} else if offset > 0 {
let addr_reg = ctx.take_reg(I64).unwrap();
dynasm!(ctx.asm
@ -1790,7 +1791,7 @@ macro_rules! load {
load_to_reg(self, temp, (offset as _, Ok(i.as_i32().unwrap())));
}
mut base => {
let gpr = self.into_reg(I32, &mut base).unwrap();
let gpr = self.put_into_register(I32, &mut base).unwrap();
load_to_reg(self, temp, (offset as _, Err(gpr)));
self.free_value(base);
}
@ -1907,7 +1908,7 @@ macro_rules! store {
}
Err(gpr) => {
if offset == 0 {
ctx.to_reg(I32, ValueLocation::Reg(gpr)).unwrap()
ctx.clone_to_register(I32, ValueLocation::Reg(gpr)).unwrap()
} else if offset > 0 {
let addr_reg = ctx.take_reg(I64).unwrap();
dynasm!(ctx.asm
@ -1961,14 +1962,14 @@ macro_rules! store {
// `store_from_reg` frees `src`
// TODO: Would it be better to free it outside `store_from_reg`?
let src_reg = self.into_reg(None, &mut src).unwrap();
let src_reg = self.put_into_register(None, &mut src).unwrap();
match base {
ValueLocation::Immediate(i) => {
store_from_reg(self, src_reg, (offset as i32, Ok(i.as_i32().unwrap())));
}
mut base => {
let gpr = self.into_reg(I32, &mut base).unwrap();
let gpr = self.put_into_register(I32, &mut base).unwrap();
store_from_reg(self, src_reg, (offset as i32, Err(gpr)));
self.free_value(base);
}
@ -1980,7 +1981,7 @@ macro_rules! store {
$name,
$int_reg_ty,
|ctx: &mut Context<_>, mem_ptr_reg: GPR, runtime_offset: Result<i32, GPR>, offset: i32, src| {
let src_reg = ctx.into_temp_reg(GPRType::Rq, &mut ValueLocation::Reg(src)).unwrap();
let src_reg = ctx.put_into_temp_register(GPRType::Rq, &mut ValueLocation::Reg(src)).unwrap();
match runtime_offset {
Ok(imm) => {
@ -2184,7 +2185,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
return;
}
let reg = self.into_reg(I32, &mut val).unwrap();
let reg = self.put_into_register(I32, &mut val).unwrap();
let out = self.take_reg(I32).unwrap();
dynasm!(self.asm
@ -2213,7 +2214,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
return;
}
let reg = self.into_reg(I64, &mut val).unwrap();
let reg = self.put_into_register(I64, &mut val).unwrap();
let out = self.take_reg(I64).unwrap();
dynasm!(self.asm
@ -2279,7 +2280,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
let cond = match val {
ValueLocation::Cond(cc) => !cc,
_ => {
let predicate = self.into_reg(I32, &mut val).unwrap();
let predicate = self.put_into_register(I32, &mut val).unwrap();
dynasm!(self.asm
; test Rd(predicate.rq().unwrap()), Rd(predicate.rq().unwrap())
);
@ -2312,7 +2313,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
let cond = match val {
ValueLocation::Cond(cc) => cc,
_ => {
let predicate = self.into_reg(I32, &mut val).unwrap();
let predicate = self.put_into_register(I32, &mut val).unwrap();
dynasm!(self.asm
; test Rd(predicate.rq().unwrap()), Rd(predicate.rq().unwrap())
);
@ -2371,7 +2372,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
if count > 0 {
let (selector_reg, pop_selector) = self
.into_temp_reg(GPRType::Rq, &mut selector)
.put_into_temp_register(GPRType::Rq, &mut selector)
.map(|r| (r, false))
.unwrap_or_else(|| {
self.push_physical(ValueLocation::Reg(RAX));
@ -2450,41 +2451,39 @@ impl<'this, M: ModuleContext> Context<'this, M> {
fn set_stack_depth(&mut self, depth: StackDepth) {
if self.block_state.depth.0 != depth.0 {
let diff = depth.0 as i32 - self.block_state.depth.0 as i32;
let emit_lea = if diff.abs() == 1 {
if self.block_state.depth.0 < depth.0 {
for _ in 0..diff {
dynasm!(self.asm
; push rax
);
}
false
} else if self.block_state.depth.0 > depth.0 {
if let Some(trash) = self.take_reg(I64) {
for _ in 0..self.block_state.depth.0 - depth.0 {
let emit_lea = if diff.abs() != 1 {
true
} else {
match self.block_state.depth.0.cmp(&depth.0) {
Ordering::Less => {
for _ in 0..diff {
dynasm!(self.asm
; pop Rq(trash.rq().unwrap())
; push rax
);
}
self.block_state.regs.release(trash);
false
} else {
true
}
} else {
false
Ordering::Greater => {
if let Some(trash) = self.take_reg(I64) {
for _ in 0..self.block_state.depth.0 - depth.0 {
dynasm!(self.asm
; pop Rq(trash.rq().unwrap())
);
}
self.block_state.regs.release(trash);
false
} else {
true
}
}
Ordering::Equal => false,
}
} else {
true
};
if emit_lea {
dynasm!(self.asm
; lea rsp, [rsp + (self.block_state.depth.0 as i32 - depth.0 as i32) * WORD_SIZE as i32]
);
}
self.block_state.depth = depth;
}
}
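
The set_stack_depth rewrite above is clippy's comparison_chain suggestion: an if / else if over the same pair of operands becomes one exhaustive match on Ordering, which is why cmp::Ordering is now imported. A minimal sketch of the pattern, not lightbeam code:

```rust
use std::cmp::Ordering;

// One exhaustive match replaces `if current < target { .. } else if current > target { .. }`.
fn describe_depth_change(current: u32, target: u32) -> &'static str {
    match current.cmp(&target) {
        Ordering::Less => "push padding words",
        Ordering::Greater => "pop padding words",
        Ordering::Equal => "stack depth already correct",
    }
}

fn main() {
    assert_eq!(describe_depth_change(2, 5), "push padding words");
    assert_eq!(describe_depth_change(5, 2), "pop padding words");
    assert_eq!(describe_depth_change(3, 3), "stack depth already correct");
}
```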
@ -2528,7 +2527,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
// TODO: We can use stack slots for values already on the stack but we
// don't refcount stack slots right now
out_args.push(self.into_temp_loc(None, &mut val));
out_args.push(self.put_into_temp_location(None, &mut val));
}
out_args.reverse();
@ -2551,7 +2550,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
let mut val = self.pop();
// TODO: We can use stack slots for values already on the stack but we
// don't refcount stack slots right now
let loc = self.into_temp_loc(None, &mut val);
let loc = self.put_into_temp_location(None, &mut val);
out.push(loc);
}
@ -2628,7 +2627,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
(Some(reg), 0)
});
let val_reg = self.into_reg(GPRType::Rq, &mut val).unwrap();
let val_reg = self.put_into_register(GPRType::Rq, &mut val).unwrap();
let vmctx = GPR::Rq(VMCTX);
// We always use `Rq` (even for floats) since the globals are not necessarily aligned to 128 bits
@ -2938,7 +2937,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
let out_offset = -(self.block_state.depth.0 as i32 + 1);
match value {
ValueLocation::Reg(_) | ValueLocation::Immediate(_) | ValueLocation::Cond(_) => {
if let Some(gpr) = self.into_reg(GPRType::Rq, &mut value) {
if let Some(gpr) = self.put_into_register(GPRType::Rq, &mut value) {
dynasm!(self.asm
; push Rq(gpr.rq().unwrap())
);
@ -2968,7 +2967,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
fn push(&mut self, value: ValueLocation) {
if let Some(mut top) = self.block_state.stack.pop() {
if let ValueLocation::Cond(_) = top {
self.into_reg(I32, &mut top).unwrap();
self.put_into_register(I32, &mut top).unwrap();
}
self.block_state.stack.push(top);
@ -3005,25 +3004,29 @@ impl<'this, M: ModuleContext> Context<'this, M> {
}
fn free_value(&mut self, val: ValueLocation) {
match val {
ValueLocation::Reg(r) => {
self.block_state.regs.release(r);
}
// TODO: Refcounted stack slots
_ => {}
if let ValueLocation::Reg(r) = val {
self.block_state.regs.release(r);
}
}
/// Puts this value into a register so that it can be efficiently read
fn into_reg(&mut self, ty: impl Into<Option<GPRType>>, val: &mut ValueLocation) -> Option<GPR> {
let out = self.to_reg(ty, *val)?;
fn put_into_register(
&mut self,
ty: impl Into<Option<GPRType>>,
val: &mut ValueLocation,
) -> Option<GPR> {
let out = self.clone_to_register(ty, *val)?;
self.free_value(*val);
*val = ValueLocation::Reg(out);
Some(out)
}
/// Clones this value into a register so that it can be efficiently read
fn to_reg(&mut self, ty: impl Into<Option<GPRType>>, val: ValueLocation) -> Option<GPR> {
fn clone_to_register(
&mut self,
ty: impl Into<Option<GPRType>>,
val: ValueLocation,
) -> Option<GPR> {
let ty = ty.into();
match val {
ValueLocation::Reg(r) if ty.map(|t| t == r.type_()).unwrap_or(true) => {
@ -3042,21 +3045,25 @@ impl<'this, M: ModuleContext> Context<'this, M> {
/// Puts this value into a temporary register so that operations
/// on that register don't write to a local.
fn into_temp_reg(
fn put_into_temp_register(
&mut self,
ty: impl Into<Option<GPRType>>,
val: &mut ValueLocation,
) -> Option<GPR> {
let out = self.to_temp_reg(ty, *val)?;
let out = self.clone_to_temp_register(ty, *val)?;
self.free_value(*val);
*val = ValueLocation::Reg(out);
Some(out)
}
fn into_temp_loc(&mut self, ty: impl Into<Option<GPRType>>, val: &mut ValueLocation) -> CCLoc {
fn put_into_temp_location(
&mut self,
ty: impl Into<Option<GPRType>>,
val: &mut ValueLocation,
) -> CCLoc {
match val {
_ => {
if let Some(gpr) = self.into_temp_reg(ty, val) {
if let Some(gpr) = self.put_into_temp_register(ty, val) {
CCLoc::Reg(gpr)
} else {
let out = CCLoc::Stack(self.push_physical(*val).stack().unwrap());
@ -3069,7 +3076,11 @@ impl<'this, M: ModuleContext> Context<'this, M> {
/// Clones this value into a temporary register so that operations
/// on that register don't write to a local.
fn to_temp_reg(&mut self, ty: impl Into<Option<GPRType>>, val: ValueLocation) -> Option<GPR> {
fn clone_to_temp_register(
&mut self,
ty: impl Into<Option<GPRType>>,
val: ValueLocation,
) -> Option<GPR> {
// If we have `None` as the type then it always matches (`.unwrap_or(true)`)
match val {
ValueLocation::Reg(r) => {
@ -3087,7 +3098,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
Some(scratch)
}
}
val => self.to_reg(ty, val),
val => self.clone_to_register(ty, val),
}
}
@ -3099,7 +3110,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
Ieee32::from_bits((-f32::from_bits(i.to_bits())).to_bits()).into(),
)
} else {
let reg = self.into_temp_reg(GPRType::Rx, &mut val).unwrap();
let reg = self.put_into_temp_register(GPRType::Rx, &mut val).unwrap();
let const_label = self.aligned_label(16, LabelValue::I32(SIGN_MASK_F32 as i32));
dynasm!(self.asm
@ -3120,7 +3131,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
Ieee64::from_bits((-f64::from_bits(i.to_bits())).to_bits()).into(),
)
} else {
let reg = self.into_temp_reg(GPRType::Rx, &mut val).unwrap();
let reg = self.put_into_temp_register(GPRType::Rx, &mut val).unwrap();
let const_label = self.aligned_label(16, LabelValue::I64(SIGN_MASK_F64 as i64));
dynasm!(self.asm
@ -3141,7 +3152,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
Ieee32::from_bits(f32::from_bits(i.to_bits()).abs().to_bits()).into(),
)
} else {
let reg = self.into_temp_reg(GPRType::Rx, &mut val).unwrap();
let reg = self.put_into_temp_register(GPRType::Rx, &mut val).unwrap();
let const_label = self.aligned_label(16, LabelValue::I32(REST_MASK_F32 as i32));
dynasm!(self.asm
@ -3162,7 +3173,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
Ieee64::from_bits(f64::from_bits(i.to_bits()).abs().to_bits()).into(),
)
} else {
let reg = self.into_temp_reg(GPRType::Rx, &mut val).unwrap();
let reg = self.put_into_temp_register(GPRType::Rx, &mut val).unwrap();
let const_label = self.aligned_label(16, LabelValue::I64(REST_MASK_F64 as i64));
dynasm!(self.asm
@ -3183,7 +3194,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
Ieee32::from_bits(f32::from_bits(i.to_bits()).sqrt().to_bits()).into(),
)
} else {
let reg = self.into_temp_reg(GPRType::Rx, &mut val).unwrap();
let reg = self.put_into_temp_register(GPRType::Rx, &mut val).unwrap();
dynasm!(self.asm
; sqrtss Rx(reg.rx().unwrap()), Rx(reg.rx().unwrap())
@ -3203,7 +3214,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
Ieee64::from_bits(f64::from_bits(i.to_bits()).sqrt().to_bits()).into(),
)
} else {
let reg = self.into_temp_reg(GPRType::Rx, &mut val).unwrap();
let reg = self.put_into_temp_register(GPRType::Rx, &mut val).unwrap();
dynasm!(self.asm
; sqrtsd Rx(reg.rx().unwrap()), Rx(reg.rx().unwrap())
@ -3227,8 +3238,8 @@ impl<'this, M: ModuleContext> Context<'this, M> {
.into(),
)
} else {
let lreg = self.into_temp_reg(GPRType::Rx, &mut left).unwrap();
let rreg = self.into_reg(GPRType::Rx, &mut right).unwrap();
let lreg = self.put_into_temp_register(GPRType::Rx, &mut left).unwrap();
let rreg = self.put_into_register(GPRType::Rx, &mut right).unwrap();
let sign_mask = self.aligned_label(16, LabelValue::I32(SIGN_MASK_F32 as i32));
let rest_mask = self.aligned_label(16, LabelValue::I32(REST_MASK_F32 as i32));
@ -3258,8 +3269,8 @@ impl<'this, M: ModuleContext> Context<'this, M> {
.into(),
)
} else {
let lreg = self.into_temp_reg(GPRType::Rx, &mut left).unwrap();
let rreg = self.into_reg(GPRType::Rx, &mut right).unwrap();
let lreg = self.put_into_temp_register(GPRType::Rx, &mut left).unwrap();
let rreg = self.put_into_register(GPRType::Rx, &mut right).unwrap();
let sign_mask = self.aligned_label(16, LabelValue::I64(SIGN_MASK_F64 as i64));
let rest_mask = self.aligned_label(16, LabelValue::I64(REST_MASK_F64 as i64));
@ -3308,7 +3319,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
}
}
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let reg = self.into_reg(GPRType::Rq, &mut val).unwrap();
let reg = self.put_into_register(GPRType::Rq, &mut val).unwrap();
let temp = self.take_reg(I32).unwrap();
if is_x86_feature_detected!("lzcnt") {
@ -3364,7 +3375,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
}
}
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let reg = self.into_reg(GPRType::Rq, &mut val).unwrap();
let reg = self.put_into_register(GPRType::Rq, &mut val).unwrap();
let temp = self.take_reg(I64).unwrap();
if is_x86_feature_detected!("lzcnt") {
@ -3418,7 +3429,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
}
}
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let reg = self.into_reg(GPRType::Rq, &mut val).unwrap();
let reg = self.put_into_register(GPRType::Rq, &mut val).unwrap();
let temp = self.take_reg(I32).unwrap();
if is_x86_feature_detected!("lzcnt") {
@ -3470,7 +3481,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
}
}
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let reg = self.into_reg(GPRType::Rq, &mut val).unwrap();
let reg = self.put_into_register(GPRType::Rq, &mut val).unwrap();
let temp = self.take_reg(I64).unwrap();
dynasm!(self.asm
@ -3598,11 +3609,12 @@ impl<'this, M: ModuleContext> Context<'this, M> {
(f32::from_bits(imm.as_f32().unwrap().to_bits()) as i32).into(),
),
_ => {
let reg = self.into_reg(F32, &mut val).unwrap();
let reg = self.put_into_register(F32, &mut val).unwrap();
let temp = self.take_reg(I32).unwrap();
let sign_mask = self.aligned_label(4, LabelValue::I32(SIGN_MASK_F32 as i32));
let float_cmp_mask = self.aligned_label(16, LabelValue::I32(0xcf000000u32 as i32));
let float_cmp_mask =
self.aligned_label(16, LabelValue::I32(0xCF00_0000_u32 as i32));
let zero = self.aligned_label(16, LabelValue::I32(0));
let trap_label = self.trap_label();
@ -3636,11 +3648,12 @@ impl<'this, M: ModuleContext> Context<'this, M> {
(f32::from_bits(imm.as_f32().unwrap().to_bits()) as i32).into(),
),
_ => {
let reg = self.into_temp_reg(F32, &mut val).unwrap();
let reg = self.put_into_temp_register(F32, &mut val).unwrap();
let temp = self.take_reg(I32).unwrap();
let sign_mask = self.aligned_label(4, LabelValue::I32(SIGN_MASK_F32 as i32));
let float_cmp_mask = self.aligned_label(16, LabelValue::I32(0x4f000000u32 as i32));
let float_cmp_mask =
self.aligned_label(16, LabelValue::I32(0x4F00_0000_u32 as i32));
let trap_label = self.trap_label();
dynasm!(self.asm
@ -3677,12 +3690,12 @@ impl<'this, M: ModuleContext> Context<'this, M> {
(f64::from_bits(imm.as_f64().unwrap().to_bits()) as i32).into(),
),
_ => {
let reg = self.into_reg(F32, &mut val).unwrap();
let reg = self.put_into_register(F32, &mut val).unwrap();
let temp = self.take_reg(I32).unwrap();
let sign_mask = self.aligned_label(4, LabelValue::I32(SIGN_MASK_F32 as i32));
let float_cmp_mask =
self.aligned_label(16, LabelValue::I64(0xc1e0000000200000u64 as i64));
self.aligned_label(16, LabelValue::I64(0xC1E0_0000_0020_0000_u64 as i64));
let zero = self.aligned_label(16, LabelValue::I64(0));
let trap_label = self.trap_label();
@ -3716,12 +3729,12 @@ impl<'this, M: ModuleContext> Context<'this, M> {
(f64::from_bits(imm.as_f64().unwrap().to_bits()) as u32).into(),
),
_ => {
let reg = self.into_temp_reg(F32, &mut val).unwrap();
let reg = self.put_into_temp_register(F32, &mut val).unwrap();
let temp = self.take_reg(I32).unwrap();
let sign_mask = self.aligned_label(4, LabelValue::I32(SIGN_MASK_F32 as i32));
let float_cmp_mask =
self.aligned_label(16, LabelValue::I64(0x41e0000000000000u64 as i64));
self.aligned_label(16, LabelValue::I64(0x41E0_0000_0000_0000_u64 as i64));
let trap_label = self.trap_label();
dynasm!(self.asm
@ -3807,11 +3820,12 @@ impl<'this, M: ModuleContext> Context<'this, M> {
(f32::from_bits(imm.as_f32().unwrap().to_bits()) as i64).into(),
),
_ => {
let reg = self.into_temp_reg(F32, &mut val).unwrap();
let reg = self.put_into_temp_register(F32, &mut val).unwrap();
let temp = self.take_reg(I32).unwrap();
let sign_mask = self.aligned_label(16, LabelValue::I64(SIGN_MASK_F64 as i64));
let float_cmp_mask = self.aligned_label(16, LabelValue::I32(0xdf000000u32 as i32));
let float_cmp_mask =
self.aligned_label(16, LabelValue::I32(0xDF00_0000_u32 as i32));
let zero = self.aligned_label(16, LabelValue::I64(0));
let trap_label = self.trap_label();
@ -3845,12 +3859,12 @@ impl<'this, M: ModuleContext> Context<'this, M> {
(f64::from_bits(imm.as_f64().unwrap().to_bits()) as i64).into(),
),
_ => {
let reg = self.into_reg(F32, &mut val).unwrap();
let reg = self.put_into_register(F32, &mut val).unwrap();
let temp = self.take_reg(I32).unwrap();
let sign_mask = self.aligned_label(8, LabelValue::I64(SIGN_MASK_F64 as i64));
let float_cmp_mask =
self.aligned_label(16, LabelValue::I64(0xc3e0000000000000u64 as i64));
self.aligned_label(16, LabelValue::I64(0xC3E0_0000_0000_0000_u64 as i64));
let zero = self.aligned_label(16, LabelValue::I64(0));
let trap_label = self.trap_label();
@ -3884,11 +3898,11 @@ impl<'this, M: ModuleContext> Context<'this, M> {
(f32::from_bits(imm.as_f32().unwrap().to_bits()) as u64).into(),
),
_ => {
let reg = self.into_reg(F32, &mut val).unwrap();
let reg = self.put_into_register(F32, &mut val).unwrap();
let temp = self.take_reg(I64).unwrap();
let sign_mask = self.aligned_label(16, LabelValue::I64(SIGN_MASK_F64 as i64));
let u64_trunc_f32_const = self.aligned_label(16, LabelValue::I32(0x5F000000));
let u64_trunc_f32_const = self.aligned_label(16, LabelValue::I32(0x5F00_0000_i32));
let trap_label = self.trap_label();
dynasm!(self.asm
@ -3925,12 +3939,12 @@ impl<'this, M: ModuleContext> Context<'this, M> {
(f64::from_bits(imm.as_f64().unwrap().to_bits()) as u64).into(),
),
_ => {
let reg = self.into_reg(F64, &mut val).unwrap();
let reg = self.put_into_register(F64, &mut val).unwrap();
let temp = self.take_reg(I64).unwrap();
let sign_mask = self.aligned_label(16, LabelValue::I64(SIGN_MASK_F64 as i64));
let u64_trunc_f64_const =
self.aligned_label(16, LabelValue::I64(0x43e0000000000000));
self.aligned_label(16, LabelValue::I64(0x43E0_0000_0000_0000_i64));
let trap_label = self.trap_label();
dynasm!(self.asm
@ -3967,7 +3981,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
Ieee32::from_bits((imm.as_i32().unwrap() as u32 as f32).to_bits()).into(),
),
_ => {
let reg = self.into_reg(I32, &mut val).unwrap();
let reg = self.put_into_register(I32, &mut val).unwrap();
let temp = self.take_reg(F32).unwrap();
@ -3993,7 +4007,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
Ieee64::from_bits((imm.as_i32().unwrap() as u32 as f64).to_bits()).into(),
),
_ => {
let reg = self.into_reg(I32, &mut val).unwrap();
let reg = self.put_into_register(I32, &mut val).unwrap();
let temp = self.take_reg(F64).unwrap();
dynasm!(self.asm
@ -4018,7 +4032,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
Ieee32::from_bits((imm.as_i64().unwrap() as u64 as f32).to_bits()).into(),
),
_ => {
let reg = self.into_reg(I64, &mut val).unwrap();
let reg = self.put_into_register(I64, &mut val).unwrap();
let out = self.take_reg(F32).unwrap();
let temp = self.take_reg(I64).unwrap();
@ -4056,7 +4070,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
Ieee64::from_bits((imm.as_i64().unwrap() as u64 as f64).to_bits()).into(),
),
_ => {
let reg = self.into_reg(I64, &mut val).unwrap();
let reg = self.put_into_register(I64, &mut val).unwrap();
let out = self.take_reg(F32).unwrap();
let temp = self.take_reg(I64).unwrap();
@ -4502,7 +4516,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
);
}
ValueLocation::Immediate(_) | ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let r = this.into_reg(I32, divisor).unwrap();
let r = this.put_into_register(I32, divisor).unwrap();
dynasm!(this.asm
; xor edx, edx
; div Rd(r.rq().unwrap())
@ -4529,7 +4543,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
);
}
ValueLocation::Immediate(_) | ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let r = this.into_reg(I32, divisor).unwrap();
let r = this.put_into_register(I32, divisor).unwrap();
dynasm!(this.asm
; cdq
; idiv Rd(r.rq().unwrap())
@ -4556,7 +4570,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
);
}
ValueLocation::Immediate(_) | ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let r = this.into_reg(I64, divisor).unwrap();
let r = this.put_into_register(I64, divisor).unwrap();
dynasm!(this.asm
; xor rdx, rdx
; div Rq(r.rq().unwrap())
@ -4583,7 +4597,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
);
}
ValueLocation::Immediate(_) | ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let r = this.into_reg(I64, divisor).unwrap();
let r = this.put_into_register(I64, divisor).unwrap();
dynasm!(this.asm
; cqo
; idiv Rq(r.rq().unwrap())
@ -4620,8 +4634,8 @@ impl<'this, M: ModuleContext> Context<'this, M> {
let out = match right {
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let rreg = self.into_reg(I32, &mut right).unwrap();
let lreg = self.into_temp_reg(I32, &mut left).unwrap();
let rreg = self.put_into_register(I32, &mut right).unwrap();
let lreg = self.put_into_temp_register(I32, &mut left).unwrap();
dynasm!(self.asm
; imul Rd(lreg.rq().unwrap()), Rd(rreg.rq().unwrap())
);
@ -4630,14 +4644,14 @@ impl<'this, M: ModuleContext> Context<'this, M> {
ValueLocation::Stack(offset) => {
let offset = self.adjusted_offset(offset);
let lreg = self.into_temp_reg(I32, &mut left).unwrap();
let lreg = self.put_into_temp_register(I32, &mut left).unwrap();
dynasm!(self.asm
; imul Rd(lreg.rq().unwrap()), [rsp + offset]
);
left
}
ValueLocation::Immediate(i) => {
let lreg = self.into_reg(I32, &mut left).unwrap();
let lreg = self.put_into_register(I32, &mut left).unwrap();
let new_reg = self.take_reg(I32).unwrap();
dynasm!(self.asm
; imul Rd(new_reg.rq().unwrap()), Rd(lreg.rq().unwrap()), i.as_i32().unwrap()
@ -4679,8 +4693,8 @@ impl<'this, M: ModuleContext> Context<'this, M> {
let out = match right {
ValueLocation::Reg(_) | ValueLocation::Cond(_) => {
let rreg = self.into_reg(I64, &mut right).unwrap();
let lreg = self.into_temp_reg(I64, &mut left).unwrap();
let rreg = self.put_into_register(I64, &mut right).unwrap();
let lreg = self.put_into_temp_register(I64, &mut left).unwrap();
dynasm!(self.asm
; imul Rq(lreg.rq().unwrap()), Rq(rreg.rq().unwrap())
);
@ -4689,7 +4703,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
ValueLocation::Stack(offset) => {
let offset = self.adjusted_offset(offset);
let lreg = self.into_temp_reg(I64, &mut left).unwrap();
let lreg = self.put_into_temp_register(I64, &mut left).unwrap();
dynasm!(self.asm
; imul Rq(lreg.rq().unwrap()), [rsp + offset]
);
@ -4697,9 +4711,9 @@ impl<'this, M: ModuleContext> Context<'this, M> {
}
ValueLocation::Immediate(i) => {
let i = i.as_i64().unwrap();
if let Some(i) = i.try_into().ok() {
if let Ok(i) = i.try_into() {
let new_reg = self.take_reg(I64).unwrap();
let lreg = self.into_reg(I64, &mut left).unwrap();
let lreg = self.put_into_register(I64, &mut left).unwrap();
dynasm!(self.asm
; imul Rq(new_reg.rq().unwrap()), Rq(lreg.rq().unwrap()), i
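
The try_into change above drops a redundant Result-to-Option conversion; clippy flags matching Some on the result of .ok(). A standalone sketch of the same cleanup:

```rust
use std::convert::TryInto;

fn as_imm32(i: i64) -> Option<i32> {
    // Before: `if let Some(small) = i.try_into().ok()`; the `.ok()` was redundant.
    if let Ok(small) = i.try_into() {
        Some(small)
    } else {
        None
    }
}

fn main() {
    assert_eq!(as_imm32(42), Some(42));
    assert_eq!(as_imm32(1_i64 << 40), None);
}
```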
@ -4709,8 +4723,8 @@ impl<'this, M: ModuleContext> Context<'this, M> {
ValueLocation::Reg(new_reg)
} else {
let rreg = self.into_reg(I64, &mut right).unwrap();
let lreg = self.into_temp_reg(I64, &mut left).unwrap();
let rreg = self.put_into_register(I64, &mut right).unwrap();
let lreg = self.put_into_temp_register(I64, &mut left).unwrap();
dynasm!(self.asm
; imul Rq(lreg.rq().unwrap()), Rq(rreg.rq().unwrap())
);
@ -4856,7 +4870,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
let cond_code = match cond {
ValueLocation::Cond(cc) => cc,
_ => {
let cond_reg = self.into_reg(I32, &mut cond).unwrap();
let cond_reg = self.put_into_register(I32, &mut cond).unwrap();
dynasm!(self.asm
; test Rd(cond_reg.rq().unwrap()), Rd(cond_reg.rq().unwrap())
);
@ -4869,13 +4883,13 @@ impl<'this, M: ModuleContext> Context<'this, M> {
let else_ = if let ValueLocation::Stack(offset) = else_ {
CCLoc::Stack(offset)
} else {
CCLoc::Reg(self.into_reg(I32, &mut else_).unwrap())
CCLoc::Reg(self.put_into_register(I32, &mut else_).unwrap())
};
let then = if let ValueLocation::Stack(offset) = then {
CCLoc::Stack(offset)
} else {
CCLoc::Reg(self.into_reg(I32, &mut then).unwrap())
CCLoc::Reg(self.put_into_register(I32, &mut then).unwrap())
};
let out_gpr = match (then, else_) {
@ -4909,14 +4923,9 @@ impl<'this, M: ModuleContext> Context<'this, M> {
pub fn pick(&mut self, depth: u32) {
let idx = self.block_state.stack.len() - 1 - depth as usize;
let v = self.block_state.stack[idx];
match v {
ValueLocation::Reg(r) => {
self.block_state.regs.mark_used(r);
}
_ => {}
if let ValueLocation::Reg(r) = v {
self.block_state.regs.mark_used(r);
}
self.block_state.stack.push(v);
}
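
The pick change above (like the free_value change earlier) applies clippy's single_match suggestion: a match with one meaningful arm and an empty catch-all reads better as if let. A minimal sketch:

```rust
fn mark_used(slot: Option<u8>, used: &mut Vec<u8>) {
    // Before: `match slot { Some(r) => used.push(r), _ => {} }`
    if let Some(r) = slot {
        used.push(r);
    }
}

fn main() {
    let mut used = Vec::new();
    mark_used(Some(3), &mut used);
    mark_used(None, &mut used);
    assert_eq!(used, vec![3]);
}
```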
@ -4959,7 +4968,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
);
let temp = self.take_reg(I64).unwrap();
dynasm!(self.asm
; mov Rq(temp.rq().unwrap()), QWORD 0xdeadbeefdeadbeefu64 as i64
; mov Rq(temp.rq().unwrap()), QWORD 0xDEAD_BEEF_DEAD_BEEF_u64 as i64
; call Rq(temp.rq().unwrap())
);
self.block_state.regs.release(temp);
@ -5180,7 +5189,7 @@ impl<'this, M: ModuleContext> Context<'this, M> {
}
let mut callee = self.pop();
let callee_reg = self.into_temp_reg(I32, &mut callee).unwrap();
let callee_reg = self.put_into_temp_register(I32, &mut callee).unwrap();
for &loc in &locs {
if let CCLoc::Reg(r) = loc {

crates/lightbeam/src/function_body.rs (14 changes)

@ -1,13 +1,15 @@
use crate::backend::{
ret_locs, BlockCallingConvention, CodeGenSession, Context, Label, Registers, ValueLocation,
VirtualCallingConvention,
ret_locs, BlockCallingConvention, CodeGenSession, Context, Label, VirtualCallingConvention,
};
#[cfg(debug_assertions)]
use crate::backend::{Registers, ValueLocation};
use crate::error::Error;
use crate::microwasm::*;
use crate::module::{ModuleContext, SigType, Signature};
use cranelift_codegen::binemit;
use dynasmrt::DynasmApi;
use either::{Either, Left, Right};
#[cfg(debug_assertions)]
use more_asserts::assert_ge;
use multi_mut::HashMapMultiMut;
use std::{collections::HashMap, hash::Hash};
@ -779,8 +781,8 @@ where
Operator::Load { ty: F32, memarg } => ctx.f32_load(memarg.offset),
Operator::Load { ty: I64, memarg } => ctx.i64_load(memarg.offset),
Operator::Load { ty: F64, memarg } => ctx.f64_load(memarg.offset),
Operator::Store8 { ty: _, memarg } => ctx.store8(memarg.offset),
Operator::Store16 { ty: _, memarg } => ctx.store16(memarg.offset),
Operator::Store8 { memarg, .. } => ctx.store8(memarg.offset),
Operator::Store16 { memarg, .. } => ctx.store16(memarg.offset),
Operator::Store32 { memarg }
| Operator::Store { ty: I32, memarg }
| Operator::Store { ty: F32, memarg } => ctx.store32(memarg.offset),
@ -792,10 +794,10 @@ where
Operator::Select => {
ctx.select();
}
Operator::MemorySize { reserved: _ } => {
Operator::MemorySize { .. } => {
ctx.memory_size();
}
Operator::MemoryGrow { reserved: _ } => {
Operator::MemoryGrow { .. } => {
ctx.memory_grow();
}
Operator::Call { function_index } => {

crates/lightbeam/src/microwasm.rs (120 changes)

@ -283,18 +283,14 @@ impl SignlessType {
Type::I64 => Ok(I64),
Type::F32 => Ok(F32),
Type::F64 => Ok(F64),
Type::EmptyBlockType => {
return Err(BinaryReaderError {
message: "SignlessType with EmptyBlockType",
offset: -1isize as usize,
})
}
_ => {
return Err(BinaryReaderError {
message: "SignlessType unimplemented",
offset: -1isize as usize,
})
}
Type::EmptyBlockType => Err(BinaryReaderError {
message: "SignlessType with EmptyBlockType",
offset: -1isize as usize,
}),
_ => Err(BinaryReaderError {
message: "SignlessType unimplemented",
offset: -1isize as usize,
}),
}
}
}
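
The from_wasm cleanup above removes early returns from arms of a match that is already the function's tail expression, which is likely clippy's needless_return lint. A small sketch of the style:

```rust
// Each arm is a value of the tail `match` expression instead of an early `return`.
fn block_arity(ty: &str) -> Result<usize, String> {
    match ty {
        "empty" => Ok(0),
        "i32" | "i64" | "f32" | "f64" => Ok(1),
        other => Err(format!("unsupported block type: {}", other)),
    }
}

fn main() {
    assert_eq!(block_arity("i32"), Ok(1));
    assert!(block_arity("v128").is_err());
}
```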
@ -304,12 +300,10 @@ fn create_returns_from_wasm_type(
) -> Result<Vec<SignlessType>, BinaryReaderError> {
match ty {
wasmparser::TypeOrFuncType::Type(ty) => Ok(Vec::from_iter(Type::from_wasm(ty))),
wasmparser::TypeOrFuncType::FuncType(_) => {
return Err(BinaryReaderError {
message: "Unsupported func type",
offset: -1isize as usize,
})
}
wasmparser::TypeOrFuncType::FuncType(_) => Err(BinaryReaderError {
message: "Unsupported func type",
offset: -1isize as usize,
}),
}
}
@ -1184,26 +1178,26 @@ where
// `Select` pops 3 elements and pushes 1
WasmOperator::Select => sig!((T, T, I32) -> (T)),
WasmOperator::GetLocal { local_index } => {
WasmOperator::LocalGet { local_index } => {
let ty = self.stack[*local_index as usize];
sig!(() -> (ty))
}
WasmOperator::SetLocal { local_index } => {
WasmOperator::LocalSet { local_index } => {
let ty = self.stack[*local_index as usize];
sig!((ty) -> ())
}
WasmOperator::TeeLocal { local_index } => {
WasmOperator::LocalTee { local_index } => {
let ty = self.stack[*local_index as usize];
sig!((ty) -> (ty))
}
WasmOperator::GetGlobal { global_index } => {
WasmOperator::GlobalGet { global_index } => {
sig!(() -> (self.module.global_type(*global_index).to_microwasm_type()))
}
WasmOperator::SetGlobal { global_index } => {
WasmOperator::GlobalSet { global_index } => {
sig!((self.module.global_type(*global_index).to_microwasm_type()) -> ())
}
@ -1366,16 +1360,16 @@ where
| WasmOperator::F64Copysign => sig!((F64, F64) -> (F64)),
WasmOperator::I32WrapI64 => sig!((I64) -> (I32)),
WasmOperator::I32TruncSF32 | WasmOperator::I32TruncUF32 => sig!((F32) -> (I32)),
WasmOperator::I32TruncSF64 | WasmOperator::I32TruncUF64 => sig!((F64) -> (I32)),
WasmOperator::I64ExtendSI32 | WasmOperator::I64ExtendUI32 => sig!((I32) -> (I64)),
WasmOperator::I64TruncSF32 | WasmOperator::I64TruncUF32 => sig!((F32) -> (I64)),
WasmOperator::I64TruncSF64 | WasmOperator::I64TruncUF64 => sig!((F64) -> (I64)),
WasmOperator::F32ConvertSI32 | WasmOperator::F32ConvertUI32 => sig!((I32) -> (F32)),
WasmOperator::F32ConvertSI64 | WasmOperator::F32ConvertUI64 => sig!((I64) -> (F32)),
WasmOperator::I32TruncF32S | WasmOperator::I32TruncF32U => sig!((F32) -> (I32)),
WasmOperator::I32TruncF64S | WasmOperator::I32TruncF64U => sig!((F64) -> (I32)),
WasmOperator::I64ExtendI32S | WasmOperator::I64ExtendI32U => sig!((I32) -> (I64)),
WasmOperator::I64TruncF32S | WasmOperator::I64TruncF32U => sig!((F32) -> (I64)),
WasmOperator::I64TruncF64S | WasmOperator::I64TruncF64U => sig!((F64) -> (I64)),
WasmOperator::F32ConvertI32S | WasmOperator::F32ConvertI32U => sig!((I32) -> (F32)),
WasmOperator::F32ConvertI64S | WasmOperator::F32ConvertI64U => sig!((I64) -> (F32)),
WasmOperator::F32DemoteF64 => sig!((F64) -> (F32)),
WasmOperator::F64ConvertSI32 | WasmOperator::F64ConvertUI32 => sig!((I32) -> (F64)),
WasmOperator::F64ConvertSI64 | WasmOperator::F64ConvertUI64 => sig!((I64) -> (F64)),
WasmOperator::F64ConvertI32S | WasmOperator::F64ConvertI32U => sig!((I32) -> (F64)),
WasmOperator::F64ConvertI64S | WasmOperator::F64ConvertI64U => sig!((I64) -> (F64)),
WasmOperator::F64PromoteF32 => sig!((F32) -> (F64)),
WasmOperator::I32ReinterpretF32 => sig!((F32) -> (I32)),
WasmOperator::I64ReinterpretF64 => sig!((F64) -> (I64)),
@ -1893,7 +1887,7 @@ where
WasmOperator::Drop => smallvec![Operator::Drop(0..=0)],
WasmOperator::Select => smallvec![Operator::Select],
WasmOperator::GetLocal { local_index } => {
WasmOperator::LocalGet { local_index } => {
// `- 1` because we apply the stack difference _before_ this point
let depth = self.local_depth(local_index).checked_sub(1)?;
let depth = match depth.try_into() {
@ -1907,7 +1901,7 @@ where
};
smallvec![Operator::Pick(depth)]
}
WasmOperator::SetLocal { local_index } => {
WasmOperator::LocalSet { local_index } => {
// `+ 1` because we apply the stack difference _before_ this point
let depth = self.local_depth(local_index).checked_add(1)?;
let depth = match depth.try_into() {
@ -1921,7 +1915,7 @@ where
};
smallvec![Operator::Swap(depth), Operator::Drop(0..=0)]
}
WasmOperator::TeeLocal { local_index } => {
WasmOperator::LocalTee { local_index } => {
// `+ 1` because we `pick` before `swap`
let depth = self.local_depth(local_index).checked_add(1)?;
let depth = match depth.try_into() {
@ -1939,10 +1933,10 @@ where
Operator::Drop(0..=0),
]
}
WasmOperator::GetGlobal { global_index } => {
WasmOperator::GlobalGet { global_index } => {
smallvec![Operator::GetGlobal(global_index)]
}
WasmOperator::SetGlobal { global_index } => {
WasmOperator::GlobalSet { global_index } => {
smallvec![Operator::SetGlobal(global_index)]
}
@ -2165,73 +2159,73 @@ where
WasmOperator::F64Max => smallvec![Operator::Max(Size::_64)],
WasmOperator::F64Copysign => smallvec![Operator::Copysign(Size::_64)],
WasmOperator::I32WrapI64 => smallvec![Operator::I32WrapFromI64],
WasmOperator::I32TruncSF32 => smallvec![Operator::ITruncFromF {
WasmOperator::I32TruncF32S => smallvec![Operator::ITruncFromF {
input_ty: Size::_32,
output_ty: sint::I32
}],
WasmOperator::I32TruncUF32 => smallvec![Operator::ITruncFromF {
WasmOperator::I32TruncF32U => smallvec![Operator::ITruncFromF {
input_ty: Size::_32,
output_ty: sint::U32
}],
WasmOperator::I32TruncSF64 => smallvec![Operator::ITruncFromF {
WasmOperator::I32TruncF64S => smallvec![Operator::ITruncFromF {
input_ty: Size::_64,
output_ty: sint::I32
}],
WasmOperator::I32TruncUF64 => smallvec![Operator::ITruncFromF {
WasmOperator::I32TruncF64U => smallvec![Operator::ITruncFromF {
input_ty: Size::_64,
output_ty: sint::U32
}],
WasmOperator::I64ExtendSI32 => smallvec![Operator::Extend {
WasmOperator::I64ExtendI32S => smallvec![Operator::Extend {
sign: Signedness::Signed
}],
WasmOperator::I64ExtendUI32 => smallvec![Operator::Extend {
WasmOperator::I64ExtendI32U => smallvec![Operator::Extend {
sign: Signedness::Unsigned
}],
WasmOperator::I64TruncSF32 => smallvec![Operator::ITruncFromF {
WasmOperator::I64TruncF32S => smallvec![Operator::ITruncFromF {
input_ty: Size::_32,
output_ty: sint::I64,
}],
WasmOperator::I64TruncUF32 => smallvec![Operator::ITruncFromF {
WasmOperator::I64TruncF32U => smallvec![Operator::ITruncFromF {
input_ty: Size::_32,
output_ty: sint::U64,
}],
WasmOperator::I64TruncSF64 => smallvec![Operator::ITruncFromF {
WasmOperator::I64TruncF64S => smallvec![Operator::ITruncFromF {
input_ty: Size::_64,
output_ty: sint::I64,
}],
WasmOperator::I64TruncUF64 => smallvec![Operator::ITruncFromF {
WasmOperator::I64TruncF64U => smallvec![Operator::ITruncFromF {
input_ty: Size::_64,
output_ty: sint::U64,
}],
WasmOperator::F32ConvertSI32 => smallvec![Operator::FConvertFromI {
WasmOperator::F32ConvertI32S => smallvec![Operator::FConvertFromI {
input_ty: sint::I32,
output_ty: Size::_32
}],
WasmOperator::F32ConvertUI32 => smallvec![Operator::FConvertFromI {
WasmOperator::F32ConvertI32U => smallvec![Operator::FConvertFromI {
input_ty: sint::U32,
output_ty: Size::_32
}],
WasmOperator::F32ConvertSI64 => smallvec![Operator::FConvertFromI {
WasmOperator::F32ConvertI64S => smallvec![Operator::FConvertFromI {
input_ty: sint::I64,
output_ty: Size::_32
}],
WasmOperator::F32ConvertUI64 => smallvec![Operator::FConvertFromI {
WasmOperator::F32ConvertI64U => smallvec![Operator::FConvertFromI {
input_ty: sint::U64,
output_ty: Size::_32
}],
WasmOperator::F64ConvertSI32 => smallvec![Operator::FConvertFromI {
WasmOperator::F64ConvertI32S => smallvec![Operator::FConvertFromI {
input_ty: sint::I32,
output_ty: Size::_64
}],
WasmOperator::F64ConvertUI32 => smallvec![Operator::FConvertFromI {
WasmOperator::F64ConvertI32U => smallvec![Operator::FConvertFromI {
input_ty: sint::U32,
output_ty: Size::_64
}],
WasmOperator::F64ConvertSI64 => smallvec![Operator::FConvertFromI {
WasmOperator::F64ConvertI64S => smallvec![Operator::FConvertFromI {
input_ty: sint::I64,
output_ty: Size::_64
}],
WasmOperator::F64ConvertUI64 => smallvec![Operator::FConvertFromI {
WasmOperator::F64ConvertI64U => smallvec![Operator::FConvertFromI {
input_ty: sint::U64,
output_ty: Size::_64
}],
@ -2274,49 +2268,49 @@ where
// 0xFC operators
// Non-trapping Float-to-int Conversions
WasmOperator::I32TruncSSatF32 => {
WasmOperator::I32TruncSatF32S => {
return Some(Err(BinaryReaderError {
message: "I32TruncSSatF32 unimplemented",
offset: -1isize as usize,
}))
}
WasmOperator::I32TruncUSatF32 => {
WasmOperator::I32TruncSatF32U => {
return Some(Err(BinaryReaderError {
message: "I32TruncUSatF32 unimplemented",
offset: -1isize as usize,
}))
}
WasmOperator::I32TruncSSatF64 => {
WasmOperator::I32TruncSatF64S => {
return Some(Err(BinaryReaderError {
message: "I32TruncSSatF64 unimplemented",
offset: -1isize as usize,
}))
}
WasmOperator::I32TruncUSatF64 => {
WasmOperator::I32TruncSatF64U => {
return Some(Err(BinaryReaderError {
message: "I32TruncUSatF64 unimplemented",
offset: -1isize as usize,
}))
}
WasmOperator::I64TruncSSatF32 => {
WasmOperator::I64TruncSatF32S => {
return Some(Err(BinaryReaderError {
message: "I64TruncSSatF32 unimplemented",
offset: -1isize as usize,
}))
}
WasmOperator::I64TruncUSatF32 => {
WasmOperator::I64TruncSatF32U => {
return Some(Err(BinaryReaderError {
message: "I64TruncUSatF32 unimplemented",
offset: -1isize as usize,
}))
}
WasmOperator::I64TruncSSatF64 => {
WasmOperator::I64TruncSatF64S => {
return Some(Err(BinaryReaderError {
message: "I64TruncSSatF64 unimplemented",
offset: -1isize as usize,
}))
}
WasmOperator::I64TruncUSatF64 => {
WasmOperator::I64TruncSatF64U => {
return Some(Err(BinaryReaderError {
message: "I64TruncUSatF64 unimplemented",
offset: -1isize as usize,

crates/lightbeam/src/module.rs (8 changes)

@ -148,8 +148,12 @@ pub struct ExecutableModule {
}
impl ExecutableModule {
/// Executes the function _without checking types_. This can cause undefined
/// memory to be accessed.
/// Executes the function identified by `func_idx`.
///
/// # Safety
///
/// Executes the function _without_ checking the argument types.
/// This can cause undefined memory to be accessed.
pub unsafe fn execute_func_unchecked<Args: FunctionArgs<T>, T>(
&self,
func_idx: u32,
