
wiggle: Refactor with fewer raw pointers (#5268)

This commit refactors the internals of `wiggle` to have fewer raw pointers and to more liberally use `&[UnsafeCell<_>]`. The purpose of this refactoring is to thread lifetime information through the crate more strictly, to avoid getting it wrong. Additionally, storing `UnsafeCell<T>` at rest pushes the unsafety of access down to the leaves of reads and writes, where Rust's safety guarantees are upheld. Finally, this provides what I believe is a safer internal representation of `WasmtimeGuestMemory`, since the previous representation unsoundly held onto a `&mut [u8]` while other `&mut T` pointers into it were handed out.

Additionally, the generated `GuestTypeTransparent` impls in the `wiggle` macro were removed because they are not safe for shared memories as-is and aren't otherwise needed for WASI today. To accommodate this, the trait has been updated to indicate that all bit patterns must be valid, in addition to the type having the same representation on the host as in the guest.
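To make the core idea concrete, here is a minimal standalone sketch of the pattern (illustrative only; `Memory`, `new`, and `read` are hypothetical names, not wiggle's API). Memory is held at rest as `&[UnsafeCell<u8>]`, which keeps the borrow and its lifetime alive without granting any access, and the `unsafe` dereference happens only in leaf accessors:

```rust
use std::cell::UnsafeCell;

/// Guest memory at rest: the borrow and lifetime are retained, but no safe
/// read/write access is granted by the type itself.
struct Memory<'a> {
    bytes: &'a [UnsafeCell<u8>],
}

impl<'a> Memory<'a> {
    /// "Forgets" the exclusivity of a `&mut [u8]` while keeping the borrow,
    /// so the caller cannot touch the slice while `Memory` is alive.
    fn new(slice: &'a mut [u8]) -> Memory<'a> {
        let len = slice.len();
        let ptr = slice.as_mut_ptr().cast::<UnsafeCell<u8>>();
        // SAFETY: `UnsafeCell<u8>` has the same layout as `u8`, and the
        // resulting slice only promises addressable memory, not safe access.
        Memory {
            bytes: unsafe { std::slice::from_raw_parts(ptr, len) },
        }
    }

    /// Leaf accessor: the only place a dereference happens, so the safety
    /// argument (no conflicting concurrent access) is made exactly here.
    fn read(&self, offset: usize) -> Option<u8> {
        let cell = self.bytes.get(offset)?;
        // SAFETY: in-bounds by the `get` above; wiggle additionally tracks
        // borrows at runtime before performing accesses like this one.
        Some(unsafe { *cell.get() })
    }
}

fn main() {
    let mut linear = [1u8, 2, 3, 4];
    let mem = Memory::new(&mut linear);
    assert_eq!(mem.read(2), Some(3));
}
```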
Alex Crichton authored 2 years ago · committed by GitHub · commit 6dcdabf37e
Files changed:

1. crates/wiggle/generate/src/types/flags.rs (10)
2. crates/wiggle/generate/src/types/handle.rs (8)
3. crates/wiggle/generate/src/types/record.rs (28)
4. crates/wiggle/generate/src/wasmtime.rs (2)
5. crates/wiggle/src/guest_type.rs (141)
6. crates/wiggle/src/lib.rs (221)
7. crates/wiggle/src/wasmtime.rs (34)
8. crates/wiggle/test-helpers/src/lib.rs (13)

crates/wiggle/generate/src/types/flags.rs (10)

@@ -87,15 +87,5 @@ pub(super) fn define_flags(
#repr::write(&location.cast(), val)
}
}
unsafe impl<'a> #rt::GuestTypeTransparent<'a> for #ident {
#[inline]
fn validate(location: *mut #ident) -> Result<(), #rt::GuestError> {
use std::convert::TryFrom;
// Validate value in memory using #ident::try_from(reprval)
let reprval = unsafe { (location as *mut #repr).read() };
let _val = #ident::try_from(reprval)?;
Ok(())
}
}
}
}
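For context on why these `validate` implementations had to go (my illustration below, not part of the commit): `validate` did a plain read, and the caller later read the value again for use, so on a shared memory another thread can rewrite the bytes in between, making check-then-read validation a time-of-check/time-of-use race:

```rust
use std::sync::atomic::{AtomicU8, Ordering};

/// Hypothetical check-then-read over concurrently mutable memory: the value
/// returned for use can differ from the value that was validated, so a range
/// check like the removed `try_from`-based `validate` cannot be trusted on
/// shared memories.
fn racy_validate_then_read(cell: &AtomicU8) -> Option<u8> {
    // Step 1: validate the current bytes.
    if cell.load(Ordering::Relaxed) > 3 {
        return None; // out of range for an imaginary 2-bit flags type
    }
    // Step 2: read again for use. A concurrent store may have landed in
    // between, so this value may no longer satisfy the check above.
    Some(cell.load(Ordering::Relaxed))
}
```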

crates/wiggle/generate/src/types/handle.rs (8)

@@ -70,14 +70,6 @@ pub(super) fn define_handle(
u32::write(&location.cast(), val.0)
}
}
unsafe impl<'a> #rt::GuestTypeTransparent<'a> for #ident {
#[inline]
fn validate(_location: *mut #ident) -> Result<(), #rt::GuestError> {
// All bit patterns accepted
Ok(())
}
}
}
}

crates/wiggle/generate/src/types/record.rs (28)

@@ -85,32 +85,6 @@ pub(super) fn define_struct(
(quote!(), quote!(, PartialEq))
};
let transparent = if s.is_transparent() {
let member_validate = s.member_layout().into_iter().map(|ml| {
let offset = ml.offset;
let typename = names.type_ref(&ml.member.tref, anon_lifetime());
quote! {
// SAFETY: caller has validated bounds and alignment of `location`.
// member_layout gives correctly-aligned pointers inside that area.
#typename::validate(
unsafe { (location as *mut u8).add(#offset) as *mut _ }
)?;
}
});
quote! {
unsafe impl<'a> #rt::GuestTypeTransparent<'a> for #ident {
#[inline]
fn validate(location: *mut #ident) -> Result<(), #rt::GuestError> {
#(#member_validate)*
Ok(())
}
}
}
} else {
quote!()
};
quote! {
#[derive(Clone, Debug #extra_derive)]
pub struct #ident #struct_lifetime {
@@ -136,8 +110,6 @@ pub(super) fn define_struct(
Ok(())
}
}
#transparent
}
}

crates/wiggle/generate/src/wasmtime.rs (2)

@@ -118,7 +118,7 @@ fn generate_func(
};
let (mem, ctx) = mem.data_and_store_mut(&mut caller);
let ctx = get_cx(ctx);
let mem = #rt::wasmtime::WasmtimeGuestMemory::new(mem, false);
let mem = #rt::wasmtime::WasmtimeGuestMemory::new(mem);
Ok(<#ret_ty>::from(#abi_func(ctx, &mem #(, #arg_names)*) #await_ ?))
};

crates/wiggle/src/guest_type.rs (141)

@@ -1,4 +1,4 @@
use crate::{region::Region, GuestError, GuestPtr};
use crate::{GuestError, GuestPtr};
use std::mem;
use std::sync::atomic::{
AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicU16, AtomicU32, AtomicU64, AtomicU8, Ordering,
@@ -51,17 +51,12 @@ pub trait GuestType<'a>: Sized {
/// as in Rust. These types can be used with the `GuestPtr::as_slice` method to
/// view as a slice.
///
/// Unsafe trait because a correct GuestTypeTransparent implementation ensures that the
/// GuestPtr::as_slice methods are safe. This trait should only ever be implemented
/// by wiggle_generate-produced code.
pub unsafe trait GuestTypeTransparent<'a>: GuestType<'a> {
/// Checks that the memory at `ptr` is a valid representation of `Self`.
///
/// Assumes that memory safety checks have already been performed: `ptr`
/// has been checked to be aligned correctly and reside in memory using
/// `GuestMemory::validate_size_align`
fn validate(ptr: *mut Self) -> Result<(), GuestError>;
}
/// Unsafe trait because a correct `GuestTypeTransparent` implementation ensures
/// that the `GuestPtr::as_slice` methods are safe, notably that the
/// representation on the host matches the guest and all bit patterns are
/// valid. This trait should only ever be implemented by
/// wiggle_generate-produced code.
pub unsafe trait GuestTypeTransparent<'a>: GuestType<'a> {}
macro_rules! integer_primitives {
($([$ty:ident, $ty_atomic:ident],)*) => ($(
@@ -71,71 +66,55 @@ macro_rules! integer_primitives {
#[inline]
fn read(ptr: &GuestPtr<'a, Self>) -> Result<Self, GuestError> {
// Any bit pattern for any primitive implemented with this
// macro is safe, so our `validate_size_align` method will
// guarantee that if we are given a pointer it's valid for the
// size of our type as well as properly aligned. Consequently we
// should be able to safely read the pointer just after we
// validated it, returning it along here.
// Use `validate_size_align` to validate the offset and alignment
// internally. The `host_ptr` type will be `&UnsafeCell<Self>`,
// indicating that the memory is validly addressable, but further
// safety checks are required before accessing it.
let offset = ptr.offset();
let size = Self::guest_size();
let host_ptr = ptr.mem().validate_size_align(
offset,
Self::guest_align(),
size,
)?;
let region = Region {
start: offset,
len: size,
};
let (host_ptr, region) = super::validate_size_align::<Self>(ptr.mem(), offset, 1)?;
let host_ptr = &host_ptr[0];
// If this memory is mutably borrowed then it cannot be read
// here, so skip this operation.
//
// Note that shared memories don't allow borrows and other
// shared borrows are ok to overlap with this.
if ptr.mem().is_mut_borrowed(region) {
return Err(GuestError::PtrBorrowed(region));
}
// If the accessed memory is shared, we need to load the bytes
// with the correct memory consistency. We could check if the
// memory is shared each time, but we expect little performance
// difference between an additional branch and a relaxed memory
// access and thus always do the relaxed access here.
let atomic_value_ref: &$ty_atomic =
unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
Ok($ty::from_le(atomic_value_ref.load(Ordering::Relaxed)))
unsafe { &*(host_ptr.get().cast::<$ty_atomic>()) };
let val = atomic_value_ref.load(Ordering::Relaxed);
// And as a final operation convert from the little-endian wasm
// value to a native-endian value for the host.
Ok($ty::from_le(val))
}
#[inline]
fn write(ptr: &GuestPtr<'_, Self>, val: Self) -> Result<(), GuestError> {
// See `read` above for various checks here.
let val = val.to_le();
let offset = ptr.offset();
let size = Self::guest_size();
let host_ptr = ptr.mem().validate_size_align(
offset,
Self::guest_align(),
size,
)?;
let region = Region {
start: offset,
len: size,
};
let (host_ptr, region) = super::validate_size_align::<Self>(ptr.mem(), offset, 1)?;
let host_ptr = &host_ptr[0];
if ptr.mem().is_shared_borrowed(region) || ptr.mem().is_mut_borrowed(region) {
return Err(GuestError::PtrBorrowed(region));
}
// If the accessed memory is shared, we need to load the bytes
// with the correct memory consistency. We could check if the
// memory is shared each time, but we expect little performance
// difference between an additional branch and a relaxed memory
// access and thus always do the relaxed access here.
let atomic_value_ref: &$ty_atomic =
unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
atomic_value_ref.store(val.to_le(), Ordering::Relaxed);
unsafe { &*(host_ptr.get().cast::<$ty_atomic>()) };
atomic_value_ref.store(val, Ordering::Relaxed);
Ok(())
}
}
unsafe impl<'a> GuestTypeTransparent<'a> for $ty {
#[inline]
fn validate(_ptr: *mut $ty) -> Result<(), GuestError> {
// All bit patterns are safe, nothing to do here
Ok(())
}
}
unsafe impl<'a> GuestTypeTransparent<'a> for $ty {}
)*)
}
@@ -148,73 +127,45 @@ macro_rules! float_primitives {
#[inline]
fn read(ptr: &GuestPtr<'a, Self>) -> Result<Self, GuestError> {
// Any bit pattern for any primitive implemented with this
// macro is safe, so our `validate_size_align` method will
// guarantee that if we are given a pointer it's valid for the
// size of our type as well as properly aligned. Consequently we
// should be able to safely read the pointer just after we
// validated it, returning it along here.
// For more commentary see `read` for integers
let offset = ptr.offset();
let size = Self::guest_size();
let host_ptr = ptr.mem().validate_size_align(
let (host_ptr, region) = super::validate_size_align::<$ty_unsigned>(
ptr.mem(),
offset,
Self::guest_align(),
size,
1,
)?;
let region = Region {
start: offset,
len: size,
};
let host_ptr = &host_ptr[0];
if ptr.mem().is_mut_borrowed(region) {
return Err(GuestError::PtrBorrowed(region));
}
// If the accessed memory is shared, we need to load the bytes
// with the correct memory consistency. We could check if the
// memory is shared each time, but we expect little performance
// difference between an additional branch and a relaxed memory
// access and thus always do the relaxed access here.
let atomic_value_ref: &$ty_atomic =
unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
unsafe { &*(host_ptr.get().cast::<$ty_atomic>()) };
let value = $ty_unsigned::from_le(atomic_value_ref.load(Ordering::Relaxed));
Ok($ty::from_bits(value))
}
#[inline]
fn write(ptr: &GuestPtr<'_, Self>, val: Self) -> Result<(), GuestError> {
// For more commentary see `read`/`write` for integers.
let offset = ptr.offset();
let size = Self::guest_size();
let host_ptr = ptr.mem().validate_size_align(
let (host_ptr, region) = super::validate_size_align::<$ty_unsigned>(
ptr.mem(),
offset,
Self::guest_align(),
size,
1,
)?;
let region = Region {
start: offset,
len: size,
};
let host_ptr = &host_ptr[0];
if ptr.mem().is_shared_borrowed(region) || ptr.mem().is_mut_borrowed(region) {
return Err(GuestError::PtrBorrowed(region));
}
// If the accessed memory is shared, we need to load the bytes
// with the correct memory consistency. We could check if the
// memory is shared each time, but we expect little performance
// difference between an additional branch and a relaxed memory
// access and thus always do the relaxed access here.
let atomic_value_ref: &$ty_atomic =
unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
unsafe { &*(host_ptr.get().cast::<$ty_atomic>()) };
let le_value = $ty_unsigned::to_le(val.to_bits());
atomic_value_ref.store(le_value, Ordering::Relaxed);
Ok(())
}
}
unsafe impl<'a> GuestTypeTransparent<'a> for $ty {
#[inline]
fn validate(_ptr: *mut $ty) -> Result<(), GuestError> {
// All bit patterns are safe, nothing to do here
Ok(())
}
}
unsafe impl<'a> GuestTypeTransparent<'a> for $ty {}
)*)
}
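Distilled from the macro bodies above (a sketch of the shape, not the exact expansion; `read_u32` is a hypothetical name): validation hands back `&UnsafeCell<T>`, and the actual access goes through an atomic reference with relaxed ordering, so the same path is sound for shared and unshared memories alike:

```rust
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicU32, Ordering};

/// Reads a little-endian `u32` out of already-validated guest memory.
fn read_u32(cell: &UnsafeCell<u32>) -> u32 {
    // SAFETY: `AtomicU32` has the same size and alignment as `u32`, and the
    // `UnsafeCell` guarantees valid, addressable memory; a relaxed atomic
    // load is sound even under concurrent modification.
    let atomic: &AtomicU32 = unsafe { &*cell.get().cast::<AtomicU32>() };
    // Convert from the little-endian wasm representation to host endianness.
    u32::from_le(atomic.load(Ordering::Relaxed))
}

fn main() {
    let cell = UnsafeCell::new(0x1234_5678u32.to_le());
    assert_eq!(read_u32(&cell), 0x1234_5678);
}
```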

crates/wiggle/src/lib.rs (221)

@@ -1,5 +1,7 @@
use anyhow::{bail, Result};
use std::cell::UnsafeCell;
use std::fmt;
use std::mem;
use std::slice;
use std::str;
use std::sync::Arc;
@@ -97,56 +99,7 @@ pub unsafe trait GuestMemory: Send + Sync {
/// Note that there are safety guarantees about this method that
/// implementations must uphold, and for more details see the
/// [`GuestMemory`] documentation.
fn base(&self) -> (*mut u8, u32);
/// Validates a guest-relative pointer given various attributes, and returns
/// the corresponding host pointer.
///
/// * `offset` - this is the guest-relative pointer, an offset from the
/// base.
/// * `align` - this is the desired alignment of the guest pointer, and if
/// successful the host pointer will be guaranteed to have this alignment.
/// * `len` - this is the number of bytes, after `offset`, that the returned
/// pointer must be valid for.
///
/// This function will guarantee that the returned pointer is in-bounds of
/// `base`, *at this time*, for `len` bytes and has alignment `align`. If
/// any guarantees are not upheld then an error will be returned.
///
/// Note that the returned pointer is an unsafe pointer. This is not safe to
/// use in general because guest memory can be relocated. Additionally the
/// guest may be modifying/reading memory as well. Consult the
/// [`GuestMemory`] documentation for safety information about using this
/// returned pointer.
fn validate_size_align(
&self,
offset: u32,
align: usize,
len: u32,
) -> Result<*mut u8, GuestError> {
let (base_ptr, base_len) = self.base();
let region = Region { start: offset, len };
// Figure out our pointer to the start of memory
let start = match (base_ptr as usize).checked_add(offset as usize) {
Some(ptr) => ptr,
None => return Err(GuestError::PtrOverflow),
};
// and use that to figure out the end pointer
let end = match start.checked_add(len as usize) {
Some(ptr) => ptr,
None => return Err(GuestError::PtrOverflow),
};
// and then verify that our end doesn't reach past the end of our memory
if end > (base_ptr as usize) + (base_len as usize) {
return Err(GuestError::PtrOutOfBounds(region));
}
// and finally verify that the alignment is correct
if start % align != 0 {
return Err(GuestError::PtrNotAligned(region, align as u32));
}
Ok(start as *mut u8)
}
fn base(&self) -> &[UnsafeCell<u8>];
/// Convenience method for creating a `GuestPtr` at a particular offset.
///
@@ -200,6 +153,54 @@ pub unsafe trait GuestMemory: Send + Sync {
}
}
/// Validates a guest-relative pointer given various attributes, and returns
/// the corresponding host pointer.
///
/// * `mem` - this is the guest memory being accessed.
/// * `offset` - this is the guest-relative pointer, an offset from the
/// base.
/// * `len` - this is the length, in units of `T`, to return
/// in the resulting slice.
///
/// If the parameters are valid then this function will return a slice into
/// `mem` for units of `T`, assuming everything is in-bounds and properly
/// aligned. Additionally the byte-based `Region` is returned, used for borrows
/// later on.
fn validate_size_align<'a, T: GuestTypeTransparent<'a>>(
mem: &'a dyn GuestMemory,
offset: u32,
len: u32,
) -> Result<(&[UnsafeCell<T>], Region), GuestError> {
let base = mem.base();
let byte_len = len
.checked_mul(T::guest_size())
.ok_or(GuestError::PtrOverflow)?;
let region = Region {
start: offset,
len: byte_len,
};
let offset = usize::try_from(offset)?;
let byte_len = usize::try_from(byte_len)?;
// Slice the input region to the byte range that we're interested in.
let bytes = base
.get(offset..)
.and_then(|s| s.get(..byte_len))
.ok_or(GuestError::PtrOutOfBounds(region))?;
// ... and then align it to `T`, failing if either the head or tail slices
// are nonzero in length. This `unsafe` here is from the standard library
// and should be ok since the input slice is `UnsafeCell<u8>` and the output
// slice is `UnsafeCell<T>`, meaning the only guarantee of the output is
// that it's valid addressable memory, still unsafe to actually access.
assert!(mem::align_of::<T>() <= T::guest_align());
let (start, mid, end) = unsafe { bytes.align_to() };
if start.len() > 0 || end.len() > 0 {
return Err(GuestError::PtrNotAligned(region, T::guest_align() as u32));
}
Ok((mid, region))
}
/// A handle to a borrow on linear memory. It is produced by `{mut, shared}_borrow` and
/// consumed by `{mut, shared}_unborrow`. Only the `GuestMemory` impl should ever construct
/// a `BorrowHandle` or inspect its contents.
@@ -208,7 +209,7 @@ pub struct BorrowHandle(pub usize);
// Forwarding trait implementations to the original type
unsafe impl<'a, T: ?Sized + GuestMemory> GuestMemory for &'a T {
fn base(&self) -> (*mut u8, u32) {
fn base(&self) -> &[UnsafeCell<u8>] {
T::base(self)
}
fn has_outstanding_borrows(&self) -> bool {
@@ -235,7 +236,7 @@ unsafe impl<'a, T: ?Sized + GuestMemory> GuestMemory for &'a T {
}
unsafe impl<'a, T: ?Sized + GuestMemory> GuestMemory for &'a mut T {
fn base(&self) -> (*mut u8, u32) {
fn base(&self) -> &[UnsafeCell<u8>] {
T::base(self)
}
fn has_outstanding_borrows(&self) -> bool {
@@ -262,7 +263,7 @@ unsafe impl<'a, T: ?Sized + GuestMemory> GuestMemory for &'a mut T {
}
unsafe impl<T: ?Sized + GuestMemory> GuestMemory for Box<T> {
fn base(&self) -> (*mut u8, u32) {
fn base(&self) -> &[UnsafeCell<u8>] {
T::base(self)
}
fn has_outstanding_borrows(&self) -> bool {
@@ -289,7 +290,7 @@ unsafe impl<T: ?Sized + GuestMemory> GuestMemory for Box<T> {
}
unsafe impl<T: ?Sized + GuestMemory> GuestMemory for Arc<T> {
fn base(&self) -> (*mut u8, u32) {
fn base(&self) -> &[UnsafeCell<u8>] {
T::base(self)
}
fn has_outstanding_borrows(&self) -> bool {
@@ -571,28 +572,10 @@ impl<'a, T> GuestPtr<'a, [T]> {
where
T: GuestTypeTransparent<'a>,
{
// Validate the bounds of the region in the original memory.
let len = self.checked_byte_len()?;
let ptr =
self.mem
.validate_size_align(self.pointer.0, T::guest_align(), len)? as *mut T;
let region = Region {
start: self.pointer.0,
len,
};
// Validate all elements in slice. `T::validate` is expected to be a
// noop for `GuestTypeTransparent` so this check may not be entirely
// necessary (TODO).
//
// SAFETY: `ptr` has been validated by `self.mem.validate_size_align`.
for offs in 0..self.pointer.1 {
T::validate(unsafe { ptr.add(offs as usize) })?;
}
let (ptr, region) = validate_size_align(self.mem, self.pointer.0, self.pointer.1)?;
Ok(UnsafeGuestSlice {
ptr,
len: self.pointer.1 as usize,
region,
mem: self.mem,
})
@@ -607,8 +590,8 @@ impl<'a, T> GuestPtr<'a, [T]> {
T: GuestTypeTransparent<'a> + Copy + 'a,
{
let guest_slice = self.as_unsafe_slice_mut()?;
let mut vec = Vec::with_capacity(guest_slice.len);
for offs in 0..guest_slice.len {
let mut vec = Vec::with_capacity(guest_slice.ptr.len());
for offs in 0..guest_slice.ptr.len() {
let elem = self.get(offs as u32).expect("already validated the size");
vec.push(elem.read()?);
}
@@ -635,18 +618,31 @@ impl<'a, T> GuestPtr<'a, [T]> {
// bounds checks ...
let guest_slice = self.as_unsafe_slice_mut()?;
// ... length check ...
if guest_slice.len != slice.len() {
if guest_slice.ptr.len() != slice.len() {
return Err(GuestError::SliceLengthsDiffer);
}
if slice.len() == 0 {
return Ok(());
}
// ... and copy the bytes.
match guest_slice.mut_borrow() {
UnsafeBorrowResult::Ok(mut dst) => dst.copy_from_slice(slice),
UnsafeBorrowResult::Shared(guest_slice) => {
// SAFETY: in the shared memory case, we copy and accept that
// the guest data may be concurrently modified. TODO: audit that
// this use of `std::ptr::copy` is safe with shared memory
// (https://github.com/bytecodealliance/wasmtime/issues/4203)
unsafe { std::ptr::copy(slice.as_ptr(), guest_slice.ptr, guest_slice.len) };
//
// Also note that the validity of `guest_slice` has already been
// determined by the `as_unsafe_slice_mut` call above.
unsafe {
std::ptr::copy(
slice.as_ptr(),
guest_slice.ptr[0].get(),
guest_slice.ptr.len(),
)
};
}
UnsafeBorrowResult::Err(e) => return Err(e),
}
@@ -693,17 +689,6 @@ impl<'a, T> GuestPtr<'a, [T]> {
None
}
}
/// Return the number of bytes necessary to represent the pointed-to value.
fn checked_byte_len(&self) -> Result<u32, GuestError>
where
T: GuestTypeTransparent<'a>,
{
match self.pointer.1.checked_mul(T::guest_size()) {
Some(l) => Ok(l),
None => Err(GuestError::PtrOverflow),
}
}
}
impl<'a> GuestPtr<'a, str> {
@@ -791,15 +776,27 @@ impl<T: ?Sized + Pointee> fmt::Debug for GuestPtr<'_, T> {
///
/// Usable as a `&'a [T]` via [`std::ops::Deref`].
pub struct GuestSlice<'a, T> {
ptr: &'a [T],
ptr: &'a [UnsafeCell<T>],
mem: &'a dyn GuestMemory,
borrow: BorrowHandle,
}
// This is logically a wrapper around `&[T]` and must mirror its `Send`/`Sync`
// impls manually due to the interior usage of `&[UnsafeCell<T>]`.
unsafe impl<T: Send> Send for GuestSlice<'_, T> {}
unsafe impl<T: Sync> Sync for GuestSlice<'_, T> {}
impl<'a, T> std::ops::Deref for GuestSlice<'a, T> {
type Target = [T];
fn deref(&self) -> &Self::Target {
self.ptr
// SAFETY: The presence of `GuestSlice` indicates that this is an
// unshared memory, meaning concurrent accesses will not happen.
// Furthermore the validity of the slice has already been established
// and a runtime borrow has been recorded to prevent conflicting views.
// This all adds up to the ability to return a safe slice from this
// method whose lifetime is connected to `self`.
unsafe { slice::from_raw_parts(self.ptr.as_ptr().cast(), self.ptr.len()) }
}
}
@@ -814,21 +811,27 @@ impl<'a, T> Drop for GuestSlice<'a, T> {
/// Usable as a `&'a [T]` via [`std::ops::Deref`] and as a `&'a mut [T]` via
/// [`std::ops::DerefMut`].
pub struct GuestSliceMut<'a, T> {
ptr: &'a mut [T],
ptr: &'a [UnsafeCell<T>],
mem: &'a dyn GuestMemory,
borrow: BorrowHandle,
}
// See docs in these impls for `GuestSlice` above.
unsafe impl<T: Send> Send for GuestSliceMut<'_, T> {}
unsafe impl<T: Sync> Sync for GuestSliceMut<'_, T> {}
impl<'a, T> std::ops::Deref for GuestSliceMut<'a, T> {
type Target = [T];
fn deref(&self) -> &Self::Target {
self.ptr
// SAFETY: See docs in `Deref for GuestSlice`
unsafe { slice::from_raw_parts(self.ptr.as_ptr().cast(), self.ptr.len()) }
}
}
impl<'a, T> std::ops::DerefMut for GuestSliceMut<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.ptr
// SAFETY: See docs in `Deref for GuestSlice`
unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr() as *mut T, self.ptr.len()) }
}
}
@@ -858,9 +861,7 @@ impl<'a, T> Drop for GuestSliceMut<'a, T> {
/// [`UnsafeGuestSlice::shared_borrow`], [`UnsafeGuestSlice::mut_borrow`]).
pub struct UnsafeGuestSlice<'a, T> {
/// A raw pointer to the bytes in memory.
ptr: *mut T,
/// The (validated) number of items in the slice.
len: usize,
ptr: &'a [UnsafeCell<T>],
/// The (validated) address bounds of the slice in memory.
region: Region,
/// The original memory.
@@ -880,14 +881,11 @@ impl<'a, T> UnsafeGuestSlice<'a, T> {
UnsafeBorrowResult::Shared(self)
} else {
match self.mem.shared_borrow(self.region) {
Ok(borrow) => {
let ptr = unsafe { slice::from_raw_parts(self.ptr, self.len) };
UnsafeBorrowResult::Ok(GuestSlice {
ptr,
mem: self.mem,
borrow,
})
}
Ok(borrow) => UnsafeBorrowResult::Ok(GuestSlice {
ptr: self.ptr,
mem: self.mem,
borrow,
}),
Err(e) => UnsafeBorrowResult::Err(e),
}
}
@@ -906,14 +904,11 @@ impl<'a, T> UnsafeGuestSlice<'a, T> {
UnsafeBorrowResult::Shared(self)
} else {
match self.mem.mut_borrow(self.region) {
Ok(borrow) => {
let ptr = unsafe { slice::from_raw_parts_mut(self.ptr, self.len) };
UnsafeBorrowResult::Ok(GuestSliceMut {
ptr,
mem: self.mem,
borrow,
})
}
Ok(borrow) => UnsafeBorrowResult::Ok(GuestSliceMut {
ptr: self.ptr,
mem: self.mem,
borrow,
}),
Err(e) => UnsafeBorrowResult::Err(e),
}
}
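The pivotal step in the new `validate_size_align` above is `<[_]>::align_to`, which re-types a sub-slice of `UnsafeCell<u8>` as `UnsafeCell<T>` and reports misalignment as non-empty head/tail slices. A standalone sketch of just that step (`align_check` is a hypothetical name, with `u32` standing in for a `GuestTypeTransparent` type):

```rust
use std::cell::UnsafeCell;

/// Re-types a byte slice as a `u32` slice, failing if the region is not
/// aligned (non-empty head) or not a whole number of elements (non-empty
/// tail). Nothing is dereferenced: the output still only promises
/// addressable memory, exactly like the input.
fn align_check(bytes: &[UnsafeCell<u8>]) -> Option<&[UnsafeCell<u32>]> {
    // SAFETY: both element types are `UnsafeCell`-wrapped plain bytes, so
    // reinterpreting the memory grants no new access rights.
    let (head, mid, tail) = unsafe { bytes.align_to::<UnsafeCell<u32>>() };
    if head.is_empty() && tail.is_empty() {
        Some(mid)
    } else {
        None
    }
}
```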

crates/wiggle/src/wasmtime.rs (34)

@@ -1,18 +1,32 @@
use crate::borrow::BorrowChecker;
use crate::{BorrowHandle, GuestError, GuestMemory, Region};
use std::cell::UnsafeCell;
/// Lightweight `wasmtime::Memory` wrapper so we can implement the
/// `wiggle::GuestMemory` trait on it.
pub struct WasmtimeGuestMemory<'a> {
mem: &'a mut [u8],
mem: &'a [UnsafeCell<u8>],
bc: BorrowChecker,
shared: bool,
}
// These need to be reapplied due to the usage of `UnsafeCell` internally.
unsafe impl Send for WasmtimeGuestMemory<'_> {}
unsafe impl Sync for WasmtimeGuestMemory<'_> {}
impl<'a> WasmtimeGuestMemory<'a> {
pub fn new(mem: &'a mut [u8], shared: bool) -> Self {
pub fn new(mem: &'a mut [u8]) -> Self {
Self {
mem,
// SAFETY: here the `&mut [u8]` is cast to `&[UnsafeCell<u8>]`,
// which in effect loses the `&mut` access while retaining the
// borrow. This is done to reflect how the memory is not safe to
// access while multiple borrows are handed out internally, checked
// with `bc` below.
//
// Additionally this allows unshared memories to have the same
// internal representation as shared memories.
mem: unsafe { std::slice::from_raw_parts(mem.as_ptr().cast(), mem.len()) },
// Wiggle does not expose any methods for functions to re-enter
// the WebAssembly instance, or expose the memory via non-wiggle
// mechanisms. However, the user-defined code may end up
@@ -23,14 +37,22 @@ impl<'a> WasmtimeGuestMemory<'a> {
// integrated fully with wasmtime:
// https://github.com/bytecodealliance/wasmtime/issues/1917
bc: BorrowChecker::new(),
shared,
shared: false,
}
}
pub fn shared(mem: &'a [UnsafeCell<u8>]) -> Self {
Self {
mem,
bc: BorrowChecker::new(),
shared: true,
}
}
}
unsafe impl GuestMemory for WasmtimeGuestMemory<'_> {
fn base(&self) -> (*mut u8, u32) {
(self.mem.as_ptr() as *mut u8, self.mem.len() as u32)
fn base(&self) -> &[UnsafeCell<u8>] {
self.mem
}
fn has_outstanding_borrows(&self) -> bool {
self.bc.has_outstanding_borrows()
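A hypothetical caller-side sketch of the resulting API (the `wrap_*` helpers are invented for illustration; the constructor names and signatures come from the diff above):

```rust
use std::cell::UnsafeCell;
use wiggle::wasmtime::WasmtimeGuestMemory;

/// Unshared memory: exclusive access to the bytes is handed over and
/// overlapping views are rejected at runtime by the borrow checker.
fn wrap_unshared(mem: &mut [u8]) -> WasmtimeGuestMemory<'_> {
    WasmtimeGuestMemory::new(mem)
}

/// Shared memory: the bytes may be concurrently modified by wasm threads,
/// so wiggle will decline to hand out safe `&[T]`/`&mut [T]` views into it.
fn wrap_shared(mem: &[UnsafeCell<u8>]) -> WasmtimeGuestMemory<'_> {
    WasmtimeGuestMemory::shared(mem)
}
```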

crates/wiggle/test-helpers/src/lib.rs (13)

@@ -117,11 +117,9 @@ impl HostMemory {
}
unsafe impl GuestMemory for HostMemory {
fn base(&self) -> (*mut u8, u32) {
unsafe {
let ptr = self.buffer.cell.get();
((*ptr).as_mut_ptr(), (*ptr).len() as u32)
}
fn base(&self) -> &[UnsafeCell<u8>] {
let ptr = self.buffer.cell.get();
unsafe { std::slice::from_raw_parts(ptr.cast(), (*ptr).len()) }
}
fn has_outstanding_borrows(&self) -> bool {
self.bc.has_outstanding_borrows()
@@ -214,12 +212,13 @@ impl MemArea {
#[cfg(test)]
mod test {
use super::*;
#[test]
fn hostmemory_is_aligned() {
let h = HostMemory::new();
assert_eq!(h.base().0 as usize % 4096, 0);
assert_eq!(h.base().as_ptr() as usize % 4096, 0);
let h = Box::new(h);
assert_eq!(h.base().0 as usize % 4096, 0);
assert_eq!(h.base().as_ptr() as usize % 4096, 0);
}
#[test]
