
Add a fuzzer for async wasm (#8440)

* Add a fuzzer for async wasm

This commit revives a very old branch of mine to add a fuzzer for
Wasmtime in async mode. This work was originally blocked on
llvm/llvm-project#53891, and while that's still an open issue, this
commit now contains a workaround for it. Support for async fuzzing
required a good deal of refactoring and changes; the highlights are:

* The main part is that the new intrinsics
  `__sanitizer_{start,finish}_switch_fiber` are now invoked around the
  stack-switching routines of fibers (see the first sketch after this
  list). This only works on Unix and is compiled in only when ASAN is
  enabled (otherwise everything is a no-op). This required refactoring
  things to get them arranged just right for ASAN, since it appears that
  these functions not only need to be called but more-or-less need to be
  adjacent to each other in the code. My guess is that while we're
  switching ASAN is in a "weird state" and it's not ready to run
  arbitrary code.

* Stacks are a problem. The LLVM issue above outlines how stacks cannot
  be deallocated at this time: if the deallocated virtual memory is later
  used for the heap, then ASAN will report a false positive about stack
  overflow. To handle this, stacks are treated specially in ASAN mode by
  using an allocation path that never deallocates them (see the second
  sketch after this list). This logic additionally applies to the pooling
  allocator, which uses a different stack allocation strategy with ASAN.
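
For readers skimming the message before the diff, the shape of the
handshake is roughly as follows. This is a condensed sketch of the real
`fiber_switch` in `crates/fiber/src/unix.rs` below, reusing its names
(the `switch_with_asan` wrapper itself is hypothetical), not a standalone
implementation:

    unsafe fn switch_with_asan(top_of_stack: *mut u8, prev: &mut PreviousStack) {
        let mut fake_stack = std::ptr::null_mut();
        // Tell ASAN we're about to leave the current stack ...
        __sanitizer_start_switch_fiber(Some(&mut fake_stack), prev.bottom, prev.size);
        // ... perform the raw assembly-level stack switch ...
        wasmtime_fiber_switch(top_of_stack);
        // ... and immediately tell ASAN which stack we came back on.
        __sanitizer_finish_switch_fiber(fake_stack, &mut prev.bottom, &mut prev.size);
    }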

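And a condensed sketch of the never-deallocate workaround for stacks (the
real version is the `FIBER_STACKS` static in `crates/fiber/src/unix.rs`
below; imports, locking details, and error handling are simplified):

    static FIBER_STACKS: Mutex<Vec<MmapFiberStack>> = Mutex::new(Vec::new());

    fn new_fiber_stack(size: usize) -> std::io::Result<MmapFiberStack> {
        let mut stacks = FIBER_STACKS.lock().unwrap();
        match stacks.iter().position(|s| size <= s.mapping_len) {
            // Reuse a cached stack if a large-enough one exists ...
            Some(i) => Ok(stacks.remove(i)),
            // ... otherwise map a brand new one. Stacks are pushed back
            // onto `FIBER_STACKS` when dropped instead of being munmap'd.
            None => MmapFiberStack::new(size),
        }
    }
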
With all of the above in place, a new fuzzer is added. This fuzzer
generates an arbitrary module, selects an arbitrary means of async
yielding (e.g. epochs or fuel), and then tries to execute the exports of
the module with various values. In general the fuzzer is looking for
crashes/panics rather than correct answers, as there's no oracle here.
This is also intended to stress the code used to switch on and off
stacks.
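
(For what this looks like in practice, see the `call_async` oracle added
to `crates/fuzzing/src/oracles.rs` below: each instantiation and export
call is driven with a bounded number of polls plus a wall-clock timeout,
so nothing relies on the generated wasm terminating on its own.)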

* Fix non-async build

* Remove unused import

* Review comments

* Fix compile on MIRI

* Fix Windows build
Alex Crichton, committed by GitHub
parent commit b4ecea38bc
17 changed files (changed line counts in parentheses):
  1. Cargo.lock (1)
  2. crates/fiber/build.rs (12)
  3. crates/fiber/src/lib.rs (10)
  4. crates/fiber/src/unix.rs (419)
  5. crates/fiber/src/windows.rs (4)
  6. crates/fuzzing/Cargo.toml (1)
  7. crates/fuzzing/src/generators.rs (2)
  8. crates/fuzzing/src/generators/async_config.rs (44)
  9. crates/fuzzing/src/generators/config.rs (61)
  10. crates/fuzzing/src/oracles.rs (281)
  11. crates/runtime/build.rs (8)
  12. crates/runtime/src/instance/allocator/pooling.rs (82)
  13. crates/runtime/src/instance/allocator/pooling/generic_stack_pool.rs (66)
  14. crates/runtime/src/instance/allocator/pooling/unix_stack_pool.rs (2)
  15. crates/wasmtime/src/runtime/store.rs (12)
  16. fuzz/Cargo.toml (6)
  17. fuzz/fuzz_targets/call_async.rs (39)

Cargo.lock (1)

@@ -3754,6 +3754,7 @@ dependencies = [
"component-fuzz-util",
"component-test-util",
"env_logger",
"futures",
"log",
"rand",
"rayon",

crates/fiber/build.rs (12)

@@ -5,6 +5,18 @@ fn main() {
let mut build = cc::Build::new();
let arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap();
let os = env::var("CARGO_CFG_TARGET_OS").unwrap();
// NB: Technically `cfg(sanitize = "address")` is not stable and requires a
// `#![feature]`, but we sort of abuse the fact that cfgs are "leaked"
// through into Cargo ungated via `--print cfg`. Translate that to
// `cfg(asan)` for us to write down in the code.
match env::var("CARGO_CFG_SANITIZE") {
Ok(s) if s == "address" => {
println!("cargo:rustc-cfg=asan");
}
_ => {}
}
if os == "windows" {
println!("cargo:rerun-if-changed=src/windows.c");
build.file("src/windows.c");

crates/fiber/src/lib.rs (10)

@@ -109,7 +109,7 @@ impl<'a, Resume, Yield, Return> Fiber<'a, Resume, Yield, Return> {
/// `Fiber::suspend`.
pub fn new(
stack: FiberStack,
func: impl FnOnce(Resume, &Suspend<Resume, Yield, Return>) -> Return + 'a,
func: impl FnOnce(Resume, &mut Suspend<Resume, Yield, Return>) -> Return + 'a,
) -> io::Result<Self> {
let inner = imp::Fiber::new(&stack.0, func)?;
@@ -172,7 +172,7 @@ impl<Resume, Yield, Return> Suspend<Resume, Yield, Return> {
/// # Panics
///
/// Panics if the current thread is not executing a fiber from this library.
pub fn suspend(&self, value: Yield) -> Resume {
pub fn suspend(&mut self, value: Yield) -> Resume {
self.inner
.switch::<Resume, Yield, Return>(RunResult::Yield(value))
}
@@ -180,13 +180,13 @@ impl<Resume, Yield, Return> Suspend<Resume, Yield, Return> {
fn execute(
inner: imp::Suspend,
initial: Resume,
func: impl FnOnce(Resume, &Suspend<Resume, Yield, Return>) -> Return,
func: impl FnOnce(Resume, &mut Suspend<Resume, Yield, Return>) -> Return,
) {
let suspend = Suspend {
let mut suspend = Suspend {
inner,
_phantom: PhantomData,
};
let result = panic::catch_unwind(AssertUnwindSafe(|| (func)(initial, &suspend)));
let result = panic::catch_unwind(AssertUnwindSafe(|| (func)(initial, &mut suspend)));
suspend.inner.switch::<Resume, Yield, Return>(match result {
Ok(result) => RunResult::Returned(result),
Err(panic) => RunResult::Panicked(panic),

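As a usage-level illustration of the signature change above: the closure
given to `Fiber::new` now receives `&mut Suspend` rather than `&Suspend`.
A minimal sketch, assuming (as in this crate's unit tests) that
`Fiber::resume` returns `Err(yielded)` while the fiber is suspended and
`Ok(value)` once the closure returns; `demo` is a hypothetical helper:

    use wasmtime_fiber::{Fiber, FiberStack};

    fn demo() -> std::io::Result<()> {
        let stack = FiberStack::new(64 * 1024)?;
        let fiber = Fiber::new(stack, |init: u32, suspend| {
            // Yield once back to the resumer, then finish.
            let resumed = suspend.suspend(init + 1);
            resumed + 1
        })?;
        assert_eq!(fiber.resume(1), Err(2)); // suspended: the fiber yielded 2
        assert_eq!(fiber.resume(5), Ok(6));  // finished: the closure returned 6
        Ok(())
    }
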
crates/fiber/src/unix.rs (419)

@@ -29,28 +29,100 @@
//! `suspend`, which has 0xB000 so it can find this, will read that and write
//! its own resumption information into this slot as well.
#![allow(unused_macros)]
use crate::{RunResult, RuntimeFiberStack};
use std::cell::Cell;
use std::io;
use std::ops::Range;
use std::ptr;
pub enum FiberStack {
Default {
// The top of the stack; for stacks allocated by the fiber implementation itself,
// the base address of the allocation will be `top.sub(len.unwrap())`
top: *mut u8,
// The length of the stack
len: usize,
mmap: bool,
},
Custom(Box<dyn RuntimeFiberStack>),
pub struct FiberStack {
base: *mut u8,
len: usize,
/// Stored here to ensure that when this `FiberStack` is dropped the
/// backing storage, if any, is additionally dropped.
_storage: FiberStackStorage,
}
enum FiberStackStorage {
Mmap(#[allow(dead_code)] MmapFiberStack),
Unmanaged,
Custom(#[allow(dead_code)] Box<dyn RuntimeFiberStack>),
}
impl FiberStack {
pub fn new(size: usize) -> io::Result<Self> {
// See comments in `mod asan` below for why asan has a different stack
// allocation strategy.
if cfg!(asan) {
return Self::from_custom(asan::new_fiber_stack(size)?);
}
let page_size = rustix::param::page_size();
let stack = MmapFiberStack::new(size)?;
// An `MmapFiberStack` allocates a guard page at the bottom of the
// region so the base and length of our stack are both offset by a
// single page.
Ok(FiberStack {
base: stack.mapping_base.wrapping_byte_add(page_size),
len: stack.mapping_len - page_size,
_storage: FiberStackStorage::Mmap(stack),
})
}
pub unsafe fn from_raw_parts(base: *mut u8, len: usize) -> io::Result<Self> {
// See comments in `mod asan` below for why asan has a different stack
// allocation strategy.
if cfg!(asan) {
return Self::from_custom(asan::new_fiber_stack(len)?);
}
Ok(FiberStack {
base,
len,
_storage: FiberStackStorage::Unmanaged,
})
}
pub fn from_custom(custom: Box<dyn RuntimeFiberStack>) -> io::Result<Self> {
let range = custom.range();
let page_size = rustix::param::page_size();
let start_ptr = range.start as *mut u8;
assert!(
start_ptr.align_offset(page_size) == 0,
"expected fiber stack base ({start_ptr:?}) to be page aligned ({page_size:#x})",
);
let end_ptr = range.end as *const u8;
assert!(
end_ptr.align_offset(page_size) == 0,
"expected fiber stack end ({end_ptr:?}) to be page aligned ({page_size:#x})",
);
Ok(FiberStack {
base: start_ptr,
len: range.len(),
_storage: FiberStackStorage::Custom(custom),
})
}
pub fn top(&self) -> Option<*mut u8> {
Some(self.base.wrapping_byte_add(self.len))
}
pub fn range(&self) -> Option<Range<usize>> {
let base = self.base as usize;
Some(base..base + self.len)
}
}
struct MmapFiberStack {
mapping_base: *mut u8,
mapping_len: usize,
}
unsafe impl Send for MmapFiberStack {}
unsafe impl Sync for MmapFiberStack {}
impl MmapFiberStack {
fn new(size: usize) -> io::Result<Self> {
// Round up our stack size request to the nearest multiple of the
// page size.
let page_size = rustix::param::page_size();
@@ -71,100 +143,34 @@ impl FiberStack {
)?;
rustix::mm::mprotect(
mmap.byte_add(page_size).cast(),
mmap.byte_add(page_size),
size,
rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE,
)?;
Ok(Self::Default {
top: mmap.byte_add(mmap_len).cast(),
len: mmap_len,
mmap: true,
Ok(MmapFiberStack {
mapping_base: mmap.cast(),
mapping_len: mmap_len,
})
}
}
pub unsafe fn from_raw_parts(base: *mut u8, len: usize) -> io::Result<Self> {
Ok(Self::Default {
top: base.add(len),
len,
mmap: false,
})
}
pub fn from_custom(custom: Box<dyn RuntimeFiberStack>) -> io::Result<Self> {
Ok(Self::Custom(custom))
}
pub fn top(&self) -> Option<*mut u8> {
Some(match self {
FiberStack::Default {
top,
len: _,
mmap: _,
} => *top,
FiberStack::Custom(r) => {
let top = r.top();
let page_size = rustix::param::page_size();
assert!(
top.align_offset(page_size) == 0,
"expected fiber stack top ({}) to be page aligned ({})",
top as usize,
page_size
);
top
}
})
}
pub fn range(&self) -> Option<Range<usize>> {
Some(match self {
FiberStack::Default { top, len, mmap: _ } => {
let base = unsafe { top.sub(*len) as usize };
base..base + len
}
FiberStack::Custom(s) => {
let range = s.range();
let page_size = rustix::param::page_size();
let start_ptr = range.start as *const u8;
assert!(
start_ptr.align_offset(page_size) == 0,
"expected fiber stack end ({}) to be page aligned ({})",
range.start,
page_size
);
let end_ptr = range.end as *const u8;
assert!(
end_ptr.align_offset(page_size) == 0,
"expected fiber stack start ({}) to be page aligned ({})",
range.end,
page_size
);
range
}
})
}
}
impl Drop for FiberStack {
impl Drop for MmapFiberStack {
fn drop(&mut self) {
unsafe {
if let FiberStack::Default {
top,
len,
mmap: true,
} = self
{
let ret = rustix::mm::munmap(top.sub(*len) as _, *len);
debug_assert!(ret.is_ok());
}
let ret = rustix::mm::munmap(self.mapping_base.cast(), self.mapping_len);
debug_assert!(ret.is_ok());
}
}
}
pub struct Fiber;
pub struct Suspend(*mut u8);
pub struct Suspend {
top_of_stack: *mut u8,
previous: asan::PreviousStack,
}
extern "C" {
#[wasmtime_versioned_export_macros::versioned_link]
@@ -182,10 +188,17 @@ extern "C" {
extern "C" fn fiber_start<F, A, B, C>(arg0: *mut u8, top_of_stack: *mut u8)
where
F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
F: FnOnce(A, &mut super::Suspend<A, B, C>) -> C,
{
unsafe {
let inner = Suspend(top_of_stack);
// Complete the `start_switch` AddressSanitizer handshake which would
// have been started in `Fiber::resume`.
let previous = asan::fiber_start_complete();
let inner = Suspend {
top_of_stack,
previous,
};
let initial = inner.take_resume::<A, B, C>();
super::Suspend::<A, B, C>::execute(inner, initial, Box::from_raw(arg0.cast::<F>()))
}
@@ -194,7 +207,7 @@ where
impl Fiber {
pub fn new<F, A, B, C>(stack: &FiberStack, func: F) -> io::Result<Self>
where
F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
F: FnOnce(A, &mut super::Suspend<A, B, C>) -> C,
{
unsafe {
let data = Box::into_raw(Box::new(func)).cast();
@@ -213,7 +226,11 @@ impl Fiber {
let addr = stack.top().unwrap().cast::<usize>().offset(-1);
addr.write(result as *const _ as usize);
wasmtime_fiber_switch(stack.top().unwrap());
asan::fiber_switch(
stack.top().unwrap(),
false,
&mut asan::PreviousStack::new(stack),
);
// null this out to help catch use-after-free
addr.write(0);
@@ -222,11 +239,17 @@
}
impl Suspend {
pub(crate) fn switch<A, B, C>(&self, result: RunResult<A, B, C>) -> A {
pub(crate) fn switch<A, B, C>(&mut self, result: RunResult<A, B, C>) -> A {
unsafe {
let is_finishing = match &result {
RunResult::Returned(_) | RunResult::Panicked(_) => true,
RunResult::Executing | RunResult::Resuming(_) | RunResult::Yield(_) => false,
};
// Calculate 0xAff8 and then write to it
(*self.result_location::<A, B, C>()).set(result);
wasmtime_fiber_switch(self.0);
asan::fiber_switch(self.top_of_stack, is_finishing, &mut self.previous);
self.take_resume::<A, B, C>()
}
}
@@ -239,7 +262,7 @@ impl Suspend {
}
unsafe fn result_location<A, B, C>(&self) -> *const Cell<RunResult<A, B, C>> {
let ret = self.0.cast::<*const u8>().offset(-1).read();
let ret = self.top_of_stack.cast::<*const u8>().offset(-1).read();
assert!(!ret.is_null());
ret.cast()
}
@@ -259,7 +282,209 @@ cfg_if::cfg_if! {
// assembler file built with the `build.rs`.
} else if #[cfg(target_arch = "riscv64")] {
mod riscv64;
}else {
} else {
compile_error!("fibers are not supported on this CPU architecture");
}
}
/// AddressSanitizer support for the stack manipulations we do in this
/// fiber implementation.
///
/// When ASAN is enabled this module uses special intrinsics provided by
/// the sanitizer runtime called `__sanitizer_{start,finish}_switch_fiber`.
/// These aren't heavily documented, and the current implementation is
/// inspired by googling the functions and looking at Boost's and Julia's
/// usage of them, as well as the documentation for these functions in
/// their own header file in the LLVM source tree. The general idea is that
/// they're called around every stack switch, with some other fiddly bits
/// as well.
#[cfg(asan)]
mod asan {
use super::{FiberStack, MmapFiberStack, RuntimeFiberStack};
use rustix::param::page_size;
use std::mem::ManuallyDrop;
use std::ops::Range;
use std::sync::Mutex;
/// State for the "previous stack" maintained by asan itself and fed in for
/// custom stacks.
pub struct PreviousStack {
bottom: *const u8,
size: usize,
}
impl PreviousStack {
pub fn new(stack: &FiberStack) -> PreviousStack {
let range = stack.range().unwrap();
PreviousStack {
bottom: range.start as *const u8,
// Discount the two pointers we store at the top of the stack
// by subtracting their size.
size: range.len() - 2 * std::mem::size_of::<*const u8>(),
}
}
}
impl Default for PreviousStack {
fn default() -> PreviousStack {
PreviousStack {
bottom: std::ptr::null(),
size: 0,
}
}
}
/// Switches the current stack to `top_of_stack`.
///
/// * `top_of_stack` - for going to fibers this is calculated, and for
///   restoring back to the original stack this was saved during the
///   initial transition.
/// * `is_finishing` - whether or not we're switching off a fiber for the
///   final time; this customizes how the asan intrinsics are invoked.
/// * `prev` - initially the stack we're switching to; it additionally
///   receives the stack to return to upon resumption.
pub unsafe fn fiber_switch(
top_of_stack: *mut u8,
is_finishing: bool,
prev: &mut PreviousStack,
) {
let mut private_asan_pointer = std::ptr::null_mut();
// If this fiber is finishing then NULL is passed to asan to let it know
// that it can deallocate the "fake stack" that it's tracking for this
// fiber.
let private_asan_pointer_ref = if is_finishing {
None
} else {
Some(&mut private_asan_pointer)
};
// NB: in fiddling with asan and optimizations and such it appears that
// these functions need to be "very close to each other". If other Rust
// functions are invoked, or if an abstraction is added here, that appears
// to trigger false positives in ASAN. That leads to the design of this
// module as-is, where this function exists to keep these three
// calls very close to one another.
__sanitizer_start_switch_fiber(private_asan_pointer_ref, prev.bottom, prev.size);
super::wasmtime_fiber_switch(top_of_stack);
__sanitizer_finish_switch_fiber(private_asan_pointer, &mut prev.bottom, &mut prev.size);
}
/// Hook for when a fiber first starts, used to configure ASAN.
pub unsafe fn fiber_start_complete() -> PreviousStack {
let mut ret = PreviousStack::default();
__sanitizer_finish_switch_fiber(std::ptr::null_mut(), &mut ret.bottom, &mut ret.size);
ret
}
// These intrinsics are provided by the address sanitizer runtime. Their C
// signatures were translated into Rust-isms here with `Option` and `&mut`.
extern "C" {
fn __sanitizer_start_switch_fiber(
private_asan_pointer_save: Option<&mut *mut u8>,
bottom: *const u8,
size: usize,
);
fn __sanitizer_finish_switch_fiber(
private_asan_pointer: *mut u8,
bottom_old: &mut *const u8,
size_old: &mut usize,
);
}
/// This static is a workaround for llvm/llvm-project#53891, notably this is
/// a global cache of all fiber stacks.
///
/// The problem with ASAN arises when we allocate memory for a stack, use
/// it as a stack, deallocate the stack, and then that memory is later
/// mapped as normal heap memory. This is possible due to `mmap` reusing
/// addresses, and it ends up confusing ASAN. In this situation ASAN will
/// report false positives about stack overflows, saying that writes to
/// freshly-allocated memory, which just happened to historically be a
/// stack, are a stack overflow.
///
/// This static works around the issue by ensuring that, only when asan is
/// enabled, all stacks are cached globally. Stacks are never deallocated
/// and forever retained here. This only works if the number of stacks
/// retained here stays relatively small, preventing continuously-running
/// programs from OOMing. That's hopefully the case, as ASAN is mostly used
/// in OSS-Fuzz and our fuzzers only fuzz one thing at a time per thread,
/// meaning that this should only ever be a relatively small set of stacks.
static FIBER_STACKS: Mutex<Vec<MmapFiberStack>> = Mutex::new(Vec::new());
pub fn new_fiber_stack(size: usize) -> std::io::Result<Box<dyn RuntimeFiberStack>> {
let needed_size = size + page_size();
let mut stacks = FIBER_STACKS.lock().unwrap();
let stack = match stacks.iter().position(|i| needed_size <= i.mapping_len) {
// If an appropriately sized stack was already allocated, then use
// that one.
Some(i) => stacks.remove(i),
// ... otherwise allocate a brand new stack.
None => MmapFiberStack::new(size)?,
};
let stack = AsanFiberStack(ManuallyDrop::new(stack));
Ok(Box::new(stack))
}
/// Custom structure used to prevent the interior mmap-allocated stack from
/// actually getting unmapped.
///
/// On drop this structure will return the interior stack to the global
/// `FIBER_STACKS` list.
struct AsanFiberStack(ManuallyDrop<MmapFiberStack>);
unsafe impl RuntimeFiberStack for AsanFiberStack {
fn top(&self) -> *mut u8 {
self.0.mapping_base.wrapping_byte_add(self.0.mapping_len)
}
fn range(&self) -> Range<usize> {
let base = self.0.mapping_base as usize;
let end = base + self.0.mapping_len;
base + page_size()..end
}
}
impl Drop for AsanFiberStack {
fn drop(&mut self) {
let stack = unsafe { ManuallyDrop::take(&mut self.0) };
FIBER_STACKS.lock().unwrap().push(stack);
}
}
}
// Shim module that's the same as above but only has stubs.
#[cfg(not(asan))]
mod asan_disabled {
use super::{FiberStack, RuntimeFiberStack};
#[derive(Default)]
pub struct PreviousStack;
impl PreviousStack {
#[inline]
pub fn new(_stack: &FiberStack) -> PreviousStack {
PreviousStack
}
}
pub unsafe fn fiber_switch(
top_of_stack: *mut u8,
_is_finishing: bool,
_prev: &mut PreviousStack,
) {
super::wasmtime_fiber_switch(top_of_stack);
}
#[inline]
pub unsafe fn fiber_start_complete() -> PreviousStack {
PreviousStack
}
pub fn new_fiber_stack(_size: usize) -> std::io::Result<Box<dyn RuntimeFiberStack>> {
unimplemented!()
}
}
#[cfg(not(asan))]
use asan_disabled as asan;

crates/fiber/src/windows.rs (4)

@@ -56,7 +56,7 @@ extern "C" {
unsafe extern "system" fn fiber_start<F, A, B, C>(data: *mut c_void)
where
F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
F: FnOnce(A, &mut super::Suspend<A, B, C>) -> C,
{
// Set the stack guarantee to be consistent with what Rust expects for threads
// This value is taken from:
@@ -76,7 +76,7 @@ where
impl Fiber {
pub fn new<F, A, B, C>(stack: &FiberStack, func: F) -> io::Result<Self>
where
F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
F: FnOnce(A, &mut super::Suspend<A, B, C>) -> C,
{
unsafe {
let state = Box::new(StartState {

crates/fuzzing/Cargo.toml (1)

@@ -29,6 +29,7 @@ wasm-smith = { workspace = true }
wasm-mutate = { workspace = true }
wasm-spec-interpreter = { path = "./wasm-spec-interpreter", optional = true }
wasmi = "0.31.1"
futures = { workspace = true }
# We rely on precompiled v8 binaries, but rusty-v8 doesn't have a precompiled
# binary for MinGW which is built on our CI. It does have one for Windows-msvc,

crates/fuzzing/src/generators.rs (2)

@@ -9,6 +9,7 @@
//! `Arbitrary` trait for the wrapped external tool.
pub mod api;
mod async_config;
mod codegen_settings;
pub mod component_types;
mod config;
@@ -22,6 +23,7 @@ pub mod table_ops;
mod value;
mod wast_test;
pub use async_config::AsyncConfig;
pub use codegen_settings::CodegenSettings;
pub use config::CompilerStrategy;
pub use config::{Config, WasmtimeConfig};

crates/fuzzing/src/generators/async_config.rs (44)

@@ -0,0 +1,44 @@
use arbitrary::{Arbitrary, Unstructured};
use std::time::Duration;
/// Configuration for async support within a store.
///
/// Note that the `Arbitrary` implementation for this type always returns
/// `Disabled` because this is something that is statically chosen if the fuzzer
/// has support for async.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub enum AsyncConfig {
/// No async support enabled.
Disabled,
/// Async support is enabled and cooperative yielding is done with fuel.
YieldWithFuel(u64),
/// Async support is enabled and cooperative yielding is done with epochs.
YieldWithEpochs {
/// Duration between epoch ticks.
dur: Duration,
/// Number of ticks between yields.
ticks: u64,
},
}
impl AsyncConfig {
/// Applies this async configuration to the `wasmtime::Config` provided to
/// ensure it's ready to execute with the resulting modules.
pub fn configure(&self, config: &mut wasmtime::Config) {
match self {
AsyncConfig::Disabled => {}
AsyncConfig::YieldWithFuel(_) => {
config.async_support(true).consume_fuel(true);
}
AsyncConfig::YieldWithEpochs { .. } => {
config.async_support(true).epoch_interruption(true);
}
}
}
}
impl<'a> Arbitrary<'a> for AsyncConfig {
fn arbitrary(_: &mut Unstructured<'a>) -> arbitrary::Result<AsyncConfig> {
Ok(AsyncConfig::Disabled)
}
}

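A hypothetical usage sketch for this type; the `wasmtime` calls mirror
the ones made by `configure` above and by `configure_store` in config.rs
below (`demo` itself is made up for illustration):

    use wasmtime::{Config, Engine, Store};

    fn demo(async_config: &AsyncConfig) -> anyhow::Result<()> {
        let mut cfg = Config::new();
        async_config.configure(&mut cfg); // e.g. async_support + consume_fuel
        let engine = Engine::new(&cfg)?;
        let mut store = Store::new(&engine, ());
        if let AsyncConfig::YieldWithFuel(amt) = async_config {
            // Fuel-based yielding also needs fuel and a yield interval.
            store.set_fuel(*amt)?;
            store.fuel_async_yield_interval(Some(*amt))?;
        }
        Ok(())
    }
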
crates/fuzzing/src/generators/config.rs (61)

@@ -1,8 +1,8 @@
//! Generate a configuration for both Wasmtime and the Wasm module to execute.
use super::{
CodegenSettings, InstanceAllocationStrategy, MemoryConfig, ModuleConfig, NormalMemoryConfig,
UnalignedMemoryCreator,
AsyncConfig, CodegenSettings, InstanceAllocationStrategy, MemoryConfig, ModuleConfig,
NormalMemoryConfig, UnalignedMemoryCreator,
};
use crate::oracles::{StoreLimits, Timeout};
use anyhow::Result;
@@ -227,6 +227,8 @@ impl Config {
cfg.cranelift_pcc(pcc);
}
self.wasmtime.async_config.configure(&mut cfg);
// Vary the memory configuration, but only if threads are not enabled.
// When the threads proposal is enabled we might generate shared memory,
// which is less amenable to different memory configurations:
@@ -285,20 +287,26 @@ impl Config {
/// Configures a store based on this configuration.
pub fn configure_store(&self, store: &mut Store<StoreLimits>) {
store.limiter(|s| s as &mut dyn wasmtime::ResourceLimiter);
if self.wasmtime.consume_fuel {
store.set_fuel(u64::MAX).unwrap();
}
if self.wasmtime.epoch_interruption {
// Without fuzzing of async execution, we can't test the
// "update deadline and continue" behavior, but we can at
// least test the codegen paths and checks with the
// trapping behavior, which works synchronously too. We'll
// set the deadline one epoch tick in the future; then
// this works exactly like an interrupt flag. We expect no
// traps/interrupts unless we bump the epoch, which we do
// as one particular Timeout mode (`Timeout::Epoch`).
store.epoch_deadline_trap();
store.set_epoch_deadline(1);
match self.wasmtime.async_config {
AsyncConfig::Disabled => {
if self.wasmtime.consume_fuel {
store.set_fuel(u64::MAX).unwrap();
}
if self.wasmtime.epoch_interruption {
store.epoch_deadline_trap();
store.set_epoch_deadline(1);
}
}
AsyncConfig::YieldWithFuel(amt) => {
assert!(self.wasmtime.consume_fuel);
store.fuel_async_yield_interval(Some(amt)).unwrap();
store.set_fuel(amt).unwrap();
}
AsyncConfig::YieldWithEpochs { ticks, .. } => {
assert!(self.wasmtime.epoch_interruption);
store.set_epoch_deadline(ticks);
store.epoch_deadline_async_yield_and_update(ticks);
}
}
}
@@ -355,6 +363,23 @@ impl Config {
self.module_config.config.exceptions_enabled = false;
self.module_config.config.reference_types_enabled = false;
}
/// Updates this configuration to forcibly enable async support. Only useful
/// in fuzzers which do async calls.
pub fn enable_async(&mut self, u: &mut Unstructured<'_>) -> arbitrary::Result<()> {
if self.wasmtime.consume_fuel || u.arbitrary()? {
self.wasmtime.async_config =
AsyncConfig::YieldWithFuel(u.int_in_range(1000..=100_000)?);
self.wasmtime.consume_fuel = true;
} else {
self.wasmtime.async_config = AsyncConfig::YieldWithEpochs {
dur: Duration::from_millis(u.int_in_range(1..=10)?),
ticks: u.int_in_range(1..=10)?,
};
self.wasmtime.epoch_interruption = true;
}
Ok(())
}
}
impl<'a> Arbitrary<'a> for Config {
@@ -454,6 +479,10 @@ pub struct WasmtimeConfig {
/// Whether or not fuzzing should enable PCC.
pcc: bool,
/// Configuration for whether wasm is invoked in an async fashion and how
/// it's cooperatively time-sliced.
pub async_config: AsyncConfig,
}
impl WasmtimeConfig {

crates/fuzzing/src/oracles.rs (281)

@@ -24,8 +24,11 @@ use crate::generators::{self, DiffValue, DiffValueType};
use crate::single_module_fuzzer::KnownValid;
use arbitrary::Arbitrary;
pub use stacks::check_stacks;
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst};
use std::sync::{Arc, Condvar, Mutex};
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use wasmtime::*;
use wasmtime_wast::WastContext;
@@ -149,7 +152,7 @@ pub fn instantiate(
None => return,
};
let mut timeout_state = SignalOnDrop::default();
let mut timeout_state = HelperThread::default();
match timeout {
Timeout::Fuel(fuel) => store.set_fuel(fuel).unwrap(),
@@ -164,7 +167,7 @@
// infrastructure.
Timeout::Epoch(timeout) => {
let engine = store.engine().clone();
timeout_state.spawn_timeout(timeout, move || engine.increment_epoch());
timeout_state.run_periodically(timeout, move || engine.increment_epoch());
}
Timeout::None => {}
}
@@ -328,7 +331,13 @@ pub fn instantiate_with_dummy(store: &mut Store<StoreLimits>, module: &Module) -
// the two steps together to match on the error below.
let instance =
dummy::dummy_linker(store, module).and_then(|l| l.instantiate(&mut *store, module));
unwrap_instance(store, instance)
}
fn unwrap_instance(
store: &Store<StoreLimits>,
instance: anyhow::Result<Instance>,
) -> Option<Instance> {
let e = match instance {
Ok(i) => return Some(i),
Err(e) => e,
@@ -811,56 +820,54 @@ fn table_ops_eventually_gcs() {
}
#[derive(Default)]
struct SignalOnDrop {
state: Arc<(Mutex<bool>, Condvar)>,
struct HelperThread {
state: Arc<HelperThreadState>,
thread: Option<std::thread::JoinHandle<()>>,
}
impl SignalOnDrop {
fn spawn_timeout(&mut self, dur: Duration, closure: impl FnOnce() + Send + 'static) {
#[derive(Default)]
struct HelperThreadState {
should_exit: Mutex<bool>,
should_exit_cvar: Condvar,
}
impl HelperThread {
fn run_periodically(&mut self, dur: Duration, mut closure: impl FnMut() + Send + 'static) {
let state = self.state.clone();
let start = Instant::now();
self.thread = Some(std::thread::spawn(move || {
// Using our mutex/condvar we wait here for the first of `dur` to
// pass or the `SignalOnDrop` instance to get dropped.
let (lock, cvar) = &*state;
let mut signaled = lock.lock().unwrap();
while !*signaled {
// Adjust our requested `dur` based on how much time has passed.
let dur = match dur.checked_sub(start.elapsed()) {
Some(dur) => dur,
None => break,
};
let (lock, result) = cvar.wait_timeout(signaled, dur).unwrap();
signaled = lock;
// pass or the `HelperThread` instance to get dropped.
let mut should_exit = state.should_exit.lock().unwrap();
while !*should_exit {
let (lock, result) = state
.should_exit_cvar
.wait_timeout(should_exit, dur)
.unwrap();
should_exit = lock;
// If we timed out for sure then there's no need to continue
// since we'll just abort on the next `checked_sub` anyway.
if result.timed_out() {
break;
closure();
}
}
drop(signaled);
closure();
}));
}
}
impl Drop for SignalOnDrop {
impl Drop for HelperThread {
fn drop(&mut self) {
if let Some(thread) = self.thread.take() {
let (lock, cvar) = &*self.state;
// Signal our thread that we've been dropped and wake it up if it's
// blocked.
let mut g = lock.lock().unwrap();
*g = true;
cvar.notify_one();
drop(g);
// ... and then wait for the thread to exit to ensure we clean up
// after ourselves.
thread.join().unwrap();
}
let thread = match self.thread.take() {
Some(thread) => thread,
None => return,
};
// Signal our thread that it should exit and wake it up in case it's
// sleeping.
*self.state.should_exit.lock().unwrap() = true;
self.state.should_exit_cvar.notify_one();
// ... and then wait for the thread to exit to ensure we clean up
// after ourselves.
thread.join().unwrap();
}
}
@@ -951,3 +958,205 @@ pub fn dynamic_component_api_target(input: &mut arbitrary::Unstructured) -> arbi
Ok(())
}
/// Instantiates a wasm module and runs its exports with dummy values, all in
/// an async fashion.
///
/// Attempts to stress yields in host functions to ensure that exiting and
/// resuming a wasm function call works.
pub fn call_async(wasm: &[u8], config: &generators::Config, mut poll_amts: &[u32]) {
let mut store = config.to_store();
let module = match compile_module(store.engine(), wasm, KnownValid::Yes, config) {
Some(module) => module,
None => return,
};
// Configure a helper thread to periodically increment the epoch to
// forcibly enable yields-via-epochs if epochs are in use. Note that this
// is required because the wasm isn't otherwise guaranteed to call any
// imports, which would also increment the epoch.
let mut helper_thread = HelperThread::default();
if let generators::AsyncConfig::YieldWithEpochs { dur, .. } = &config.wasmtime.async_config {
let engine = store.engine().clone();
helper_thread.run_periodically(*dur, move || engine.increment_epoch());
}
// Generate a `Linker` where all function imports are custom-built to yield
// periodically and additionally increment the epoch.
let mut imports = Vec::new();
for import in module.imports() {
let item = match import.ty() {
ExternType::Func(ty) => {
let poll_amt = take_poll_amt(&mut poll_amts);
Func::new_async(&mut store, ty.clone(), move |caller, _, results| {
let ty = ty.clone();
Box::new(async move {
caller.engine().increment_epoch();
log::info!("yielding {} times in import", poll_amt);
YieldN(poll_amt).await;
for (ret_ty, result) in ty.results().zip(results) {
*result = dummy::dummy_value(ret_ty)?;
}
Ok(())
})
})
.into()
}
other_ty => match dummy::dummy_extern(&mut store, other_ty) {
Ok(item) => item,
Err(e) => {
log::warn!("couldn't create import: {}", e);
return;
}
},
};
imports.push(item);
}
// Run the instantiation process, asynchronously, and if everything
// succeeds then pull out the instance.
// log::info!("starting instantiation");
let instance = run(Timeout {
future: Instance::new_async(&mut store, &module, &imports),
polls: take_poll_amt(&mut poll_amts),
end: Instant::now() + Duration::from_millis(2_000),
});
let instance = match instance {
Ok(instantiation_result) => match unwrap_instance(&store, instantiation_result) {
Some(instance) => instance,
None => {
log::info!("instantiation hit a nominal error");
return; // resource exhaustion or limits met
}
},
Err(_) => {
log::info!("instantiation failed to complete");
return; // Timed out or ran out of polls
}
};
// Run each export of the instance in the same manner as instantiation
// above. Dummy values are passed in for argument values here:
//
// TODO: this should probably be more clever about passing in arguments;
// for example they might be used as pointers or something, and always
// using 0 isn't too interesting.
let funcs = instance
.exports(&mut store)
.filter_map(|e| {
let name = e.name().to_string();
let func = e.into_extern().into_func()?;
Some((name, func))
})
.collect::<Vec<_>>();
for (name, func) in funcs {
let ty = func.ty(&store);
let params = ty
.params()
.map(|ty| dummy::dummy_value(ty).unwrap())
.collect::<Vec<_>>();
let mut results = ty
.results()
.map(|ty| dummy::dummy_value(ty).unwrap())
.collect::<Vec<_>>();
log::info!("invoking export {:?}", name);
let future = func.call_async(&mut store, &params, &mut results);
match run(Timeout {
future,
polls: take_poll_amt(&mut poll_amts),
end: Instant::now() + Duration::from_millis(2_000),
}) {
// On success or too many polls, try the next export.
Ok(_) | Err(Exhausted::Polls) => {}
// If time ran out then stop the current test case, as we might have
// already used up a lot of time on this fuzz test case; don't
// keep it going.
Err(Exhausted::Time) => return,
}
}
fn take_poll_amt(polls: &mut &[u32]) -> u32 {
match polls.split_first() {
Some((a, rest)) => {
*polls = rest;
*a
}
None => 0,
}
}
/// Helper future to yield N times before resolving.
struct YieldN(u32);
impl Future for YieldN {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
if self.0 == 0 {
Poll::Ready(())
} else {
self.0 -= 1;
cx.waker().wake_by_ref();
Poll::Pending
}
}
}
/// Helper future that applies a timeout to `future`, resolving with an
/// error either once the current time passes `end` or after `polls`
/// polls have happened.
///
/// Note that this helps to time out infinite loops in wasm, for example.
struct Timeout<F> {
future: F,
/// If the future isn't ready by this time then the `Timeout<F>` future
/// will resolve with `Err(Exhausted::Time)`.
end: Instant,
/// If the future doesn't resolve itself in this many calls to `poll`
/// then the `Timeout<F>` future will resolve with `Err(Exhausted::Polls)`.
polls: u32,
}
enum Exhausted {
Time,
Polls,
}
impl<F: Future> Future for Timeout<F> {
type Output = Result<F::Output, Exhausted>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let (end, polls, future) = unsafe {
let me = self.get_unchecked_mut();
(me.end, &mut me.polls, Pin::new_unchecked(&mut me.future))
};
match future.poll(cx) {
Poll::Ready(val) => Poll::Ready(Ok(val)),
Poll::Pending => {
if Instant::now() >= end {
log::warn!("future operation timed out");
return Poll::Ready(Err(Exhausted::Time));
}
if *polls == 0 {
log::warn!("future operation ran out of polls");
return Poll::Ready(Err(Exhausted::Polls));
}
*polls -= 1;
Poll::Pending
}
}
}
}
fn run<F: Future>(future: F) -> F::Output {
let mut f = Box::pin(future);
let mut cx = Context::from_waker(futures::task::noop_waker_ref());
loop {
match f.as_mut().poll(&mut cx) {
Poll::Ready(val) => break val,
Poll::Pending => {}
}
}
}
}

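A quick note on the `run` helper above: it's a minimal block-on loop that
busy-polls with a noop waker, which suffices here because the fuzzed
futures are bounded by `YieldN`, poll budgets, and wall-clock timeouts
rather than being driven by real wakeups. A hypothetical use:

    let sum = run(async { 40 + 2 });
    assert_eq!(sum, 42);
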
crates/runtime/build.rs (8)

@@ -4,6 +4,14 @@ use wasmtime_versioned_export_macros::versioned_suffix;
fn main() {
println!("cargo:rerun-if-changed=build.rs");
// NB: duplicating a workaround in the wasmtime-fiber build script.
match env::var("CARGO_CFG_SANITIZE") {
Ok(s) if s == "address" => {
println!("cargo:rustc-cfg=asan");
}
_ => {}
}
// If this platform is neither unix nor windows then there's no default need
// for a C helper library since `helpers.c` is tailored for just these
// platforms currently.

crates/runtime/src/instance/allocator/pooling.rs (82)

@@ -25,8 +25,19 @@ mod table_pool;
#[cfg(feature = "gc")]
mod gc_heap_pool;
#[cfg(all(feature = "async"))]
mod generic_stack_pool;
#[cfg(all(feature = "async", unix, not(miri)))]
mod stack_pool;
mod unix_stack_pool;
#[cfg(all(feature = "async"))]
cfg_if::cfg_if! {
if #[cfg(all(unix, not(miri), not(asan)))] {
use unix_stack_pool as stack_pool;
} else {
use generic_stack_pool as stack_pool;
}
}
use super::{
InstanceAllocationRequest, InstanceAllocatorImpl, MemoryAllocationIndex, TableAllocationIndex,
@@ -55,7 +66,7 @@ use crate::{GcHeap, GcRuntime};
#[cfg(feature = "gc")]
use gc_heap_pool::GcHeapPool;
#[cfg(all(feature = "async", unix, not(miri)))]
#[cfg(feature = "async")]
use stack_pool::StackPool;
#[cfg(feature = "component-model")]
@@ -232,13 +243,8 @@ pub struct PoolingInstanceAllocator {
#[cfg(feature = "gc")]
gc_heaps: GcHeapPool,
#[cfg(all(feature = "async", unix, not(miri)))]
#[cfg(feature = "async")]
stacks: StackPool,
#[cfg(all(feature = "async", windows))]
stack_size: usize,
#[cfg(all(feature = "async", windows))]
live_stacks: AtomicU64,
}
impl Drop for PoolingInstanceAllocator {
@@ -252,10 +258,8 @@ impl Drop for PoolingInstanceAllocator {
#[cfg(feature = "gc")]
debug_assert!(self.gc_heaps.is_empty());
#[cfg(all(feature = "async", unix, not(miri)))]
#[cfg(feature = "async")]
debug_assert!(self.stacks.is_empty());
#[cfg(all(feature = "async", windows))]
debug_assert_eq!(self.live_stacks.load(Ordering::Acquire), 0);
}
}
@@ -270,12 +274,8 @@ impl PoolingInstanceAllocator {
tables: TablePool::new(config)?,
#[cfg(feature = "gc")]
gc_heaps: GcHeapPool::new(config)?,
#[cfg(all(feature = "async", unix, not(miri)))]
#[cfg(feature = "async")]
stacks: StackPool::new(config)?,
#[cfg(all(feature = "async", windows))]
stack_size: config.stack_size,
#[cfg(all(feature = "async", windows))]
live_stacks: AtomicU64::new(0),
})
}
@@ -513,58 +513,12 @@ unsafe impl InstanceAllocatorImpl for PoolingInstanceAllocator {
#[cfg(feature = "async")]
fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack> {
cfg_if::cfg_if! {
if #[cfg(miri)] {
unimplemented!()
} else if #[cfg(unix)] {
self.stacks.allocate()
} else if #[cfg(windows)] {
if self.stack_size == 0 {
bail!("fiber stack allocation not supported")
}
// On windows, we don't use a stack pool as we use the native
// fiber implementation. We do still enforce the `total_stacks`
// limit, however.
let old_count = self.live_stacks.fetch_add(1, Ordering::AcqRel);
if old_count >= u64::from(self.limits.total_stacks) {
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
bail!(
"maximum concurrent fiber limit of {} reached",
self.limits.total_stacks
);
}
match wasmtime_fiber::FiberStack::new(self.stack_size) {
Ok(stack) => Ok(stack),
Err(e) => {
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
Err(anyhow::Error::from(e))
}
}
} else {
compile_error!("not implemented");
}
}
self.stacks.allocate()
}
#[cfg(feature = "async")]
unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack) {
cfg_if::cfg_if! {
if #[cfg(miri)] {
let _ = stack;
unimplemented!()
} else if #[cfg(unix)] {
self.stacks.deallocate(stack);
} else if #[cfg(windows)] {
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
// A no-op as we don't own the fiber stack on Windows.
let _ = stack;
} else {
compile_error!("not implemented");
}
}
self.stacks.deallocate(stack);
}
fn purge_module(&self, module: CompiledModuleId) {

crates/runtime/src/instance/allocator/pooling/generic_stack_pool.rs (66)

@@ -0,0 +1,66 @@
#![cfg_attr(not(asan), allow(dead_code))]
use crate::PoolingInstanceAllocatorConfig;
use anyhow::{bail, Result};
use std::sync::atomic::{AtomicU64, Ordering};
/// A generic implementation of a stack pool.
///
/// This implementation technically doesn't actually pool anything at this time.
/// Originally this was the implementation for non-Unix (e.g. Windows and
/// MIRI), but nowadays this is also used for fuzzing. For more documentation
/// on why this is used when fuzzing, see the `asan` module in the
/// `wasmtime-fiber` crate.
///
/// Currently the only purpose of `StackPool` is to limit the total number of
/// concurrent stacks while otherwise leveraging `wasmtime_fiber::FiberStack`
/// natively.
#[derive(Debug)]
pub struct StackPool {
stack_size: usize,
live_stacks: AtomicU64,
stack_limit: u64,
}
impl StackPool {
pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
Ok(StackPool {
stack_size: config.stack_size,
live_stacks: AtomicU64::new(0),
stack_limit: config.limits.total_stacks.into(),
})
}
pub fn is_empty(&self) -> bool {
self.live_stacks.load(Ordering::Acquire) == 0
}
pub fn allocate(&self) -> Result<wasmtime_fiber::FiberStack> {
if self.stack_size == 0 {
bail!("fiber stack allocation not supported")
}
let old_count = self.live_stacks.fetch_add(1, Ordering::AcqRel);
if old_count >= self.stack_limit {
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
bail!(
"maximum concurrent fiber limit of {} reached",
self.stack_limit
);
}
match wasmtime_fiber::FiberStack::new(self.stack_size) {
Ok(stack) => Ok(stack),
Err(e) => {
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
Err(anyhow::Error::from(e))
}
}
}
pub unsafe fn deallocate(&self, stack: &wasmtime_fiber::FiberStack) {
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
// Otherwise this is a no-op, as this pool doesn't own the fiber stack.
let _ = stack;
}
}

crates/runtime/src/instance/allocator/pooling/stack_pool.rs → crates/runtime/src/instance/allocator/pooling/unix_stack_pool.rs (2)

@@ -1,3 +1,5 @@
#![cfg_attr(asan, allow(dead_code))]
use super::{
index_allocator::{SimpleIndexAllocator, SlotId},
round_up_to_pow2,

crates/wasmtime/src/runtime/store.rs (12)

@@ -382,7 +382,7 @@ pub struct StoreOpaque {
#[cfg(feature = "async")]
struct AsyncState {
current_suspend: UnsafeCell<*const wasmtime_fiber::Suspend<Result<()>, (), Result<()>>>,
current_suspend: UnsafeCell<*mut wasmtime_fiber::Suspend<Result<()>, (), Result<()>>>,
current_poll_cx: UnsafeCell<*mut Context<'static>>,
}
@@ -501,7 +501,7 @@ impl<T> Store<T> {
table_limit: crate::DEFAULT_TABLE_LIMIT,
#[cfg(feature = "async")]
async_state: AsyncState {
current_suspend: UnsafeCell::new(ptr::null()),
current_suspend: UnsafeCell::new(ptr::null_mut()),
current_poll_cx: UnsafeCell::new(ptr::null_mut()),
},
fuel_reserve: 0,
@@ -2273,7 +2273,7 @@ impl<T> StoreContextMut<'_, T> {
#[cfg(feature = "async")]
pub struct AsyncCx {
current_suspend: *mut *const wasmtime_fiber::Suspend<Result<()>, (), Result<()>>,
current_suspend: *mut *mut wasmtime_fiber::Suspend<Result<()>, (), Result<()>>,
current_poll_cx: *mut *mut Context<'static>,
track_pkey_context_switch: bool,
}
@@ -2319,7 +2319,7 @@ impl AsyncCx {
// if this `Reset` is removed.
let suspend = *self.current_suspend;
let _reset = Reset(self.current_suspend, suspend);
*self.current_suspend = ptr::null();
*self.current_suspend = ptr::null_mut();
assert!(!suspend.is_null());
loop {
@@ -2419,7 +2419,9 @@ unsafe impl<T> wasmtime_runtime::Store for StoreInner<T> {
// self.async_cx() panicks when used with a non-async store, so
// wrap this in an option.
#[cfg(feature = "async")]
let async_cx = if self.async_support() {
let async_cx = if self.async_support()
&& matches!(self.limiter, Some(ResourceLimiterInner::Async(_)))
{
Some(self.async_cx().unwrap())
} else {
None

fuzz/Cargo.toml (6)

@@ -108,3 +108,9 @@ name = "cranelift-icache"
path = "fuzz_targets/cranelift-icache.rs"
test = false
doc = false
[[bin]]
name = "call_async"
path = "fuzz_targets/call_async.rs"
test = false
doc = false

fuzz/fuzz_targets/call_async.rs (39)

@@ -0,0 +1,39 @@
#![no_main]
use libfuzzer_sys::arbitrary::{Result, Unstructured};
use libfuzzer_sys::fuzz_target;
use wasmtime_fuzzing::{generators, oracles};
fuzz_target!(|data: &[u8]| {
// errors in `run_one` have to do with not enough input in `data`, which
// we ignore here since it doesn't affect how we'd like to fuzz.
let _ = run_one(data);
});
fn run_one(data: &[u8]) -> Result<()> {
let mut u = Unstructured::new(data);
let mut config: generators::Config = u.arbitrary()?;
// Try to ensure imports/exports/etc are generated by adding one to the
// minimums/maximums.
config.module_config.config.min_types = 1;
config.module_config.config.max_types += 1;
config.module_config.config.min_imports = 1;
config.module_config.config.max_imports += 1;
config.module_config.config.min_funcs = 1;
config.module_config.config.max_funcs += 1;
config.module_config.config.min_exports = 1;
config.module_config.config.max_exports += 1;
// Use the fuzz input to select an async strategy.
config.enable_async(&mut u)?;
let mut poll_amts = Vec::with_capacity(u.arbitrary_len::<u32>()?);
for _ in 0..poll_amts.capacity() {
poll_amts.push(u.int_in_range(0..=10_000)?);
}
let module = config.module_config.generate(&mut u, None)?;
oracles::call_async(&module.to_bytes(), &config, &poll_amts);
Ok(())
}
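
With the `call_async` target registered in `fuzz/Cargo.toml` above, it
should be runnable like the repository's other fuzz targets, presumably
via `cargo +nightly fuzz run call_async`; cargo-fuzz builds with
AddressSanitizer by default, which is what exercises the new `asan` paths
in `wasmtime-fiber`.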