
Split up `src/runtime.rs` in `wasmtime` (#2404)

This file has grown quite a lot with `Store` over time so this splits it
up into three separate files, one for each of the main types defined in
it: `Config`, `Engine`, and `Store`.
pull/2407/head
Alex Crichton, 4 years ago (committed by GitHub)
commit 01b7d88641
  1. 592   crates/wasmtime/src/config.rs
  2. 148   crates/wasmtime/src/engine.rs
  3. 2     crates/wasmtime/src/func.rs
  4. 15    crates/wasmtime/src/lib.rs
  5. 426   crates/wasmtime/src/store.rs
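
The crate's public API is unaffected by the move: the `lib.rs` hunk below swaps `pub use crate::runtime::*;` for re-exports of the new `config`, `engine`, and `store` modules, so downstream code keeps importing from the crate root. As a minimal sketch (using only types and methods visible in this diff, for the pre-1.0 wasmtime of this commit where `Engine::new` returns an `Engine` directly; not verified against a build of this revision):

```rust
use wasmtime::{Config, Engine, Store};

fn main() {
    // `Config`, `Engine`, and `Store` are still re-exported from the
    // crate root, so none of these paths change after the split.
    let engine = Engine::new(&Config::new());
    let store = Store::new(&engine);

    // The store holds a clone of the engine handle, so both refer to
    // the same underlying `EngineInner`.
    assert!(Engine::same(store.engine(), &engine));
}
```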

592   crates/wasmtime/src/runtime.rs → crates/wasmtime/src/config.rs

@@ -1,38 +1,25 @@
use crate::externals::MemoryCreator;
use crate::sig_registry::SignatureRegistry;
use crate::trampoline::{MemoryCreatorProxy, StoreInstanceHandle};
use crate::Module;
use crate::trampoline::MemoryCreatorProxy;
use anyhow::{bail, Result};
use std::cell::RefCell;
use std::cmp;
use std::convert::TryFrom;
use std::fmt;
use std::hash::{Hash, Hasher};
#[cfg(feature = "cache")]
use std::path::Path;
use std::rc::{Rc, Weak};
use std::sync::Arc;
use wasmparser::WasmFeatures;
#[cfg(feature = "cache")]
use wasmtime_cache::CacheConfig;
use wasmtime_environ::settings::{self, Configurable, SetError};
use wasmtime_environ::{isa, isa::TargetIsa, wasm, Tunables};
use wasmtime_environ::{isa, isa::TargetIsa, Tunables};
use wasmtime_jit::{native, CompilationStrategy, Compiler};
use wasmtime_profiling::{JitDumpAgent, NullProfilerAgent, ProfilingAgent, VTuneAgent};
use wasmtime_runtime::{
debug_builtins, InstanceHandle, RuntimeMemoryCreator, SignalHandler, StackMapRegistry,
VMExternRef, VMExternRefActivationsTable, VMInterrupts, VMSharedSignatureIndex,
};
// Runtime Environment
// Configuration
/// Global configuration options used to create an [`Engine`] and customize its
/// behavior.
/// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`]
/// [`Engine::new()`](crate::Engine::new)
#[derive(Clone)]
pub struct Config {
pub(crate) flags: settings::Builder,
@@ -103,10 +90,10 @@ impl Config {
}
/// Configures whether functions and loops will be interruptable via the
/// [`Store::interrupt_handle`] method.
/// [`Store::interrupt_handle`](crate::Store::interrupt_handle) method.
///
/// For more information see the documentation on
/// [`Store::interrupt_handle`].
/// [`Store::interrupt_handle`](crate::Store::interrupt_handle).
///
/// By default this option is `false`.
pub fn interruptable(&mut self, enable: bool) -> &mut Self {
@@ -630,7 +617,7 @@ impl Config {
self.isa_flags.clone().finish(settings::Flags::new(flags))
}
fn build_compiler(&self) -> Compiler {
pub(crate) fn build_compiler(&self) -> Compiler {
let isa = self.target_isa();
Compiler::new(isa, self.strategy, self.tunables.clone(), self.features)
}
@@ -725,566 +712,3 @@ pub enum ProfilingStrategy {
/// Collect profiling info using the "ittapi", used with `VTune` on Linux.
VTune,
}
// Engine
/// An `Engine` is a global context for the compilation and management of wasm
/// modules.
///
/// An engine can be safely shared across threads and is a cheap cloneable
/// handle to the actual engine. The engine itself will be deallocated once all
/// references to it have gone away.
///
/// Engines store global configuration preferences such as compilation settings,
/// enabled features, etc. You'll likely only need at most one of these for a
/// program.
///
/// ## Engines and `Clone`
///
/// Using `clone` on an `Engine` is a cheap operation. It will not create an
/// entirely new engine, but rather just a new reference to the existing engine.
/// In other words it's a shallow copy, not a deep copy.
///
/// ## Engines and `Default`
///
/// You can create an engine with default configuration settings using
/// `Engine::default()`. Be sure to consult the documentation of [`Config`] for
/// default settings.
#[derive(Clone)]
pub struct Engine {
inner: Arc<EngineInner>,
}
struct EngineInner {
config: Config,
compiler: Compiler,
}
impl Engine {
/// Creates a new [`Engine`] with the specified compilation and
/// configuration settings.
pub fn new(config: &Config) -> Engine {
debug_builtins::ensure_exported();
Engine {
inner: Arc::new(EngineInner {
config: config.clone(),
compiler: config.build_compiler(),
}),
}
}
/// Returns the configuration settings that this engine is using.
pub fn config(&self) -> &Config {
&self.inner.config
}
pub(crate) fn compiler(&self) -> &Compiler {
&self.inner.compiler
}
#[cfg(feature = "cache")]
pub(crate) fn cache_config(&self) -> &CacheConfig {
&self.config().cache_config
}
/// Returns whether the engines `a` and `b` refer to the same configuration.
pub fn same(a: &Engine, b: &Engine) -> bool {
Arc::ptr_eq(&a.inner, &b.inner)
}
}
impl Default for Engine {
fn default() -> Engine {
Engine::new(&Config::default())
}
}
// Store
/// A `Store` is a collection of WebAssembly instances and host-defined items.
///
/// All WebAssembly instances and items will be attached to and refer to a
/// `Store`. For example instances, functions, globals, and tables are all
/// attached to a `Store`. Instances are created by instantiating a [`Module`]
/// within a `Store`.
///
/// `Store` is not thread-safe and cannot be sent to other threads. All items
/// which refer to a `Store` additionally are not threadsafe and can only be
/// used on the original thread that they were created on.
///
/// A `Store` is not intended to be a long-lived object in a program. No form of
/// GC is implemented at this time so once an instance is created within a
/// `Store` it will not be deallocated until all references to the `Store` have
/// gone away (this includes all references to items in the store). This makes
/// `Store` unsuitable for creating an unbounded number of instances in it
/// because `Store` will never release this memory. It's recommended instead to
/// have a long-lived [`Engine`] and to create a `Store` for a more scoped
/// portion of your application.
///
/// # Stores and `Clone`
///
/// Using `clone` on a `Store` is a cheap operation. It will not create an
/// entirely new store, but rather just a new reference to the existing object.
/// In other words it's a shallow copy, not a deep copy.
///
/// ## Stores and `Default`
///
/// You can create a store with default configuration settings using
/// `Store::default()`. This will create a brand new [`Engine`] with default
/// configuration (see [`Config`] for more information).
#[derive(Clone)]
pub struct Store {
inner: Rc<StoreInner>,
}
pub(crate) struct StoreInner {
engine: Engine,
interrupts: Arc<VMInterrupts>,
signatures: RefCell<SignatureRegistry>,
instances: RefCell<Vec<InstanceHandle>>,
signal_handler: RefCell<Option<Box<SignalHandler<'static>>>>,
jit_code_ranges: RefCell<Vec<(usize, usize)>>,
externref_activations_table: VMExternRefActivationsTable,
stack_map_registry: StackMapRegistry,
}
struct HostInfoKey(VMExternRef);
impl PartialEq for HostInfoKey {
fn eq(&self, rhs: &Self) -> bool {
VMExternRef::eq(&self.0, &rhs.0)
}
}
impl Eq for HostInfoKey {}
impl Hash for HostInfoKey {
fn hash<H>(&self, hasher: &mut H)
where
H: Hasher,
{
VMExternRef::hash(&self.0, hasher);
}
}
impl Store {
/// Creates a new store to be associated with the given [`Engine`].
pub fn new(engine: &Engine) -> Store {
// Ensure that wasmtime_runtime's signal handlers are configured. Note
// that at the `Store` level it means we should perform this
// once-per-thread. Platforms like Unix, however, only require this
// once-per-program. In any case this is safe to call many times and
// each one that's not relevant just won't do anything.
wasmtime_runtime::init_traps();
Store {
inner: Rc::new(StoreInner {
engine: engine.clone(),
interrupts: Arc::new(Default::default()),
signatures: RefCell::new(Default::default()),
instances: RefCell::new(Vec::new()),
signal_handler: RefCell::new(None),
jit_code_ranges: RefCell::new(Vec::new()),
externref_activations_table: VMExternRefActivationsTable::new(),
stack_map_registry: StackMapRegistry::default(),
}),
}
}
pub(crate) fn from_inner(inner: Rc<StoreInner>) -> Store {
Store { inner }
}
/// Returns the [`Engine`] that this store is associated with.
pub fn engine(&self) -> &Engine {
&self.inner.engine
}
/// Returns an optional reference to a [`RuntimeMemoryCreator`].
pub(crate) fn memory_creator(&self) -> Option<&dyn RuntimeMemoryCreator> {
self.engine()
.config()
.memory_creator
.as_ref()
.map(|x| x as _)
}
pub(crate) fn signatures(&self) -> &RefCell<SignatureRegistry> {
&self.inner.signatures
}
pub(crate) fn lookup_shared_signature<'a>(
&'a self,
module: &'a wasmtime_environ::Module,
) -> impl Fn(wasm::SignatureIndex) -> VMSharedSignatureIndex + 'a {
move |index| {
self.signatures()
.borrow()
.lookup(&module.signatures[index])
.expect("signature not previously registered")
}
}
/// Returns whether or not the given address falls within the JIT code
/// managed by the compiler
pub(crate) fn is_in_jit_code(&self, addr: usize) -> bool {
self.inner
.jit_code_ranges
.borrow()
.iter()
.any(|(start, end)| *start <= addr && addr < *end)
}
pub(crate) fn register_module(&self, module: &Module) {
// All modules register their JIT code in a store for two reasons
// currently:
//
// * First we only catch signals/traps if the program counter falls
// within the jit code of an instantiated wasm module. This ensures
// we don't catch accidental Rust/host segfaults.
//
// * Second when generating a backtrace we'll use this mapping to
// only generate wasm frames for instruction pointers that fall
// within jit code.
self.register_jit_code(module);
// We need to know about all the stack maps of all instantiated modules
// so when performing a GC we know about all wasm frames that we find
// on the stack.
self.register_stack_maps(module);
// Signatures are loaded into our `SignatureRegistry` here
// once-per-module (and once-per-signature). This allows us to create
// a `Func` wrapper for any function in the module, which requires that
// we know about the signature and trampoline for all instances.
self.register_signatures(module);
}
fn register_jit_code(&self, module: &Module) {
let mut ranges = module.compiled_module().jit_code_ranges();
// Check if we already registered JIT code ranges by searching for
// the first range's start.
match ranges.next() {
None => (),
Some(first) => {
if !self.is_in_jit_code(first.0) {
// The range is not registered -- add all ranges (including
// first one) to the jit_code_ranges.
let mut jit_code_ranges = self.inner.jit_code_ranges.borrow_mut();
jit_code_ranges.push(first);
jit_code_ranges.extend(ranges);
}
}
}
}
fn register_stack_maps(&self, module: &Module) {
let module = &module.compiled_module();
self.stack_map_registry()
.register_stack_maps(module.stack_maps().map(|(func, stack_maps)| unsafe {
let ptr = (*func).as_ptr();
let len = (*func).len();
let start = ptr as usize;
let end = ptr as usize + len;
let range = start..end;
(range, stack_maps)
}));
}
fn register_signatures(&self, module: &Module) {
let trampolines = module.compiled_module().trampolines();
let module = module.compiled_module().module();
let mut signatures = self.signatures().borrow_mut();
for (index, wasm) in module.signatures.iter() {
signatures.register(wasm, trampolines[index]);
}
}
pub(crate) unsafe fn add_instance(&self, handle: InstanceHandle) -> StoreInstanceHandle {
self.inner.instances.borrow_mut().push(handle.clone());
StoreInstanceHandle {
store: self.clone(),
handle,
}
}
pub(crate) fn existing_instance_handle(&self, handle: InstanceHandle) -> StoreInstanceHandle {
debug_assert!(self
.inner
.instances
.borrow()
.iter()
.any(|i| i.vmctx_ptr() == handle.vmctx_ptr()));
StoreInstanceHandle {
store: self.clone(),
handle,
}
}
pub(crate) fn weak(&self) -> Weak<StoreInner> {
Rc::downgrade(&self.inner)
}
pub(crate) fn upgrade(weak: &Weak<StoreInner>) -> Option<Self> {
let inner = weak.upgrade()?;
Some(Self { inner })
}
pub(crate) fn signal_handler(&self) -> std::cell::Ref<'_, Option<Box<SignalHandler<'static>>>> {
self.inner.signal_handler.borrow()
}
pub(crate) fn signal_handler_mut(
&self,
) -> std::cell::RefMut<'_, Option<Box<SignalHandler<'static>>>> {
self.inner.signal_handler.borrow_mut()
}
pub(crate) fn interrupts(&self) -> &VMInterrupts {
&self.inner.interrupts
}
/// Returns whether the stores `a` and `b` refer to the same underlying
/// `Store`.
///
/// Because the `Store` type is reference counted multiple clones may point
/// to the same underlying storage, and this method can be used to determine
/// whether two stores are indeed the same.
pub fn same(a: &Store, b: &Store) -> bool {
Rc::ptr_eq(&a.inner, &b.inner)
}
/// Creates an [`InterruptHandle`] which can be used to interrupt the
/// execution of instances within this `Store`.
///
/// An [`InterruptHandle`] is a mechanism for ensuring that guest code
/// doesn't execute for too long. For example, it's used to prevent wasm
/// programs from executing indefinitely in infinite loops or recursive call
/// chains.
///
/// The [`InterruptHandle`] type is sendable to other threads so you can
/// interact with it even while the thread with this `Store` is executing
/// wasm code.
///
/// There's one method on an interrupt handle:
/// [`InterruptHandle::interrupt`]. This method is used to generate an
/// interrupt and cause wasm code to exit "soon".
///
/// ## When are interrupts delivered?
///
/// The term "interrupt" here refers to one of two different behaviors that
/// are interrupted in wasm:
///
/// * The head of every loop in wasm has a check to see if it's interrupted.
/// * The prologue of every function has a check to see if it's interrupted.
///
/// This interrupt mechanism makes no attempt to signal interrupts to
/// native code. For example if a host function is blocked, then sending
/// an interrupt will not interrupt that operation.
///
/// Interrupts are consumed as soon as possible when wasm itself starts
/// executing. This means that if you interrupt wasm code then it basically
/// guarantees that the next time wasm is executing on the target thread it
/// will return quickly (either normally if it were already in the process
/// of returning or with a trap from the interrupt). Once an interrupt
/// trap is generated then an interrupt is consumed, and further execution
/// will not be interrupted (unless another interrupt is set).
///
/// When implementing interrupts you'll want to ensure that the delivery of
/// interrupts into wasm code is also handled in your host imports and
/// functionality. Host functions need to either execute for bounded amounts
/// of time or you'll need to arrange for them to be interrupted as well.
///
/// ## Return Value
///
/// This function returns a `Result` since interrupts are not always
/// enabled. Interrupts are enabled via the [`Config::interruptable`]
/// method, and if this store's [`Config`] hasn't been configured to enable
/// interrupts then an error is returned.
///
/// ## Examples
///
/// ```
/// # use anyhow::Result;
/// # use wasmtime::*;
/// # fn main() -> Result<()> {
/// // Enable interruptable code via `Config` and then create an interrupt
/// // handle which we'll use later to interrupt running code.
/// let engine = Engine::new(Config::new().interruptable(true));
/// let store = Store::new(&engine);
/// let interrupt_handle = store.interrupt_handle()?;
///
/// // Compile and instantiate a small example with an infinite loop.
/// let module = Module::new(&engine, r#"
/// (func (export "run") (loop br 0))
/// "#)?;
/// let instance = Instance::new(&store, &module, &[])?;
/// let run = instance
/// .get_func("run")
/// .ok_or(anyhow::format_err!("failed to find `run` function export"))?
/// .get0::<()>()?;
///
/// // Spin up a thread to send us an interrupt in a second
/// std::thread::spawn(move || {
/// std::thread::sleep(std::time::Duration::from_secs(1));
/// interrupt_handle.interrupt();
/// });
///
/// let trap = run().unwrap_err();
/// assert!(trap.to_string().contains("wasm trap: interrupt"));
/// # Ok(())
/// # }
/// ```
pub fn interrupt_handle(&self) -> Result<InterruptHandle> {
if self.engine().config().tunables.interruptable {
Ok(InterruptHandle {
interrupts: self.inner.interrupts.clone(),
})
} else {
bail!("interrupts aren't enabled for this `Store`")
}
}
pub(crate) fn externref_activations_table(&self) -> &VMExternRefActivationsTable {
&self.inner.externref_activations_table
}
pub(crate) fn stack_map_registry(&self) -> &StackMapRegistry {
&self.inner.stack_map_registry
}
/// Perform garbage collection of `ExternRef`s.
pub fn gc(&self) {
// For this crate's API, we ensure that `set_stack_canary` invariants
// are upheld for all host-->Wasm calls, and we register every module
// used with this store in `self.inner.stack_map_registry`.
unsafe {
wasmtime_runtime::gc(
&self.inner.stack_map_registry,
&self.inner.externref_activations_table,
);
}
}
}
impl Default for Store {
fn default() -> Store {
Store::new(&Engine::default())
}
}
impl fmt::Debug for Store {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let inner = &*self.inner as *const StoreInner;
f.debug_struct("Store").field("inner", &inner).finish()
}
}
impl Drop for StoreInner {
fn drop(&mut self) {
for instance in self.instances.get_mut().iter() {
unsafe {
instance.dealloc();
}
}
}
}
/// A threadsafe handle used to interrupt instances executing within a
/// particular `Store`.
///
/// This structure is created by the [`Store::interrupt_handle`] method.
pub struct InterruptHandle {
interrupts: Arc<VMInterrupts>,
}
impl InterruptHandle {
/// Flags that execution within this handle's original [`Store`] should be
/// interrupted.
///
/// This will not immediately interrupt execution of wasm modules, but
/// rather it will cause wasm execution to be interrupted at loop headers
/// and function entries. For more information see
/// [`Store::interrupt_handle`].
pub fn interrupt(&self) {
self.interrupts.interrupt()
}
}
fn _assert_send_sync() {
fn _assert<T: Send + Sync>() {}
_assert::<Engine>();
_assert::<Config>();
_assert::<InterruptHandle>();
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Module;
use tempfile::TempDir;
#[test]
fn cache_accounts_for_opt_level() -> Result<()> {
let td = TempDir::new()?;
let config_path = td.path().join("config.toml");
std::fs::write(
&config_path,
&format!(
"
[cache]
enabled = true
directory = '{}'
",
td.path().join("cache").display()
),
)?;
let mut cfg = Config::new();
cfg.cranelift_opt_level(OptLevel::None)
.cache_config_load(&config_path)?;
let engine = Engine::new(&cfg);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 0);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 1);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
let mut cfg = Config::new();
cfg.cranelift_opt_level(OptLevel::Speed)
.cache_config_load(&config_path)?;
let engine = Engine::new(&cfg);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 0);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 1);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
let mut cfg = Config::new();
cfg.cranelift_opt_level(OptLevel::SpeedAndSize)
.cache_config_load(&config_path)?;
let engine = Engine::new(&cfg);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 0);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 1);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
// FIXME(#1523) need debuginfo on aarch64 before we run this test there
if !cfg!(target_arch = "aarch64") {
let mut cfg = Config::new();
cfg.debug_info(true).cache_config_load(&config_path)?;
let engine = Engine::new(&cfg);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 0);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 1);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
}
Ok(())
}
}
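
The half that stays in `config.rs` keeps the builder-style `Config` that `Engine::new` consumes; `interruptable` (shown in the hunk above) returns `&mut Self`, so calls chain. A small sketch of that flow, restricted to methods that appear in this diff and not verified against a build of this revision:

```rust
use wasmtime::{Config, Engine, Store};

fn main() {
    // Build a `Config`, enable interrupts, and hand it to `Engine::new`.
    let mut config = Config::new();
    config.interruptable(true);
    let engine = Engine::new(&config);

    // With `interruptable(true)` set, `Store::interrupt_handle` returns
    // `Ok`; without it, the call bails with an error.
    let store = Store::new(&engine);
    assert!(store.interrupt_handle().is_ok());
}
```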

148   crates/wasmtime/src/engine.rs

@@ -0,0 +1,148 @@
use crate::Config;
use std::sync::Arc;
#[cfg(feature = "cache")]
use wasmtime_cache::CacheConfig;
use wasmtime_jit::Compiler;
use wasmtime_runtime::debug_builtins;
/// An `Engine` is a global context for the compilation and management of wasm
/// modules.
///
/// An engine can be safely shared across threads and is a cheap cloneable
/// handle to the actual engine. The engine itself will be deallocated once all
/// references to it have gone away.
///
/// Engines store global configuration preferences such as compilation settings,
/// enabled features, etc. You'll likely only need at most one of these for a
/// program.
///
/// ## Engines and `Clone`
///
/// Using `clone` on an `Engine` is a cheap operation. It will not create an
/// entirely new engine, but rather just a new reference to the existing engine.
/// In other words it's a shallow copy, not a deep copy.
///
/// ## Engines and `Default`
///
/// You can create an engine with default configuration settings using
/// `Engine::default()`. Be sure to consult the documentation of [`Config`] for
/// default settings.
#[derive(Clone)]
pub struct Engine {
inner: Arc<EngineInner>,
}
struct EngineInner {
config: Config,
compiler: Compiler,
}
impl Engine {
/// Creates a new [`Engine`] with the specified compilation and
/// configuration settings.
pub fn new(config: &Config) -> Engine {
debug_builtins::ensure_exported();
Engine {
inner: Arc::new(EngineInner {
config: config.clone(),
compiler: config.build_compiler(),
}),
}
}
/// Returns the configuration settings that this engine is using.
pub fn config(&self) -> &Config {
&self.inner.config
}
pub(crate) fn compiler(&self) -> &Compiler {
&self.inner.compiler
}
#[cfg(feature = "cache")]
pub(crate) fn cache_config(&self) -> &CacheConfig {
&self.config().cache_config
}
/// Returns whether the engines `a` and `b` refer to the same configuration.
pub fn same(a: &Engine, b: &Engine) -> bool {
Arc::ptr_eq(&a.inner, &b.inner)
}
}
impl Default for Engine {
fn default() -> Engine {
Engine::new(&Config::default())
}
}
#[cfg(test)]
mod tests {
use crate::{Config, Engine, Module, OptLevel};
use anyhow::Result;
use tempfile::TempDir;
#[test]
fn cache_accounts_for_opt_level() -> Result<()> {
let td = TempDir::new()?;
let config_path = td.path().join("config.toml");
std::fs::write(
&config_path,
&format!(
"
[cache]
enabled = true
directory = '{}'
",
td.path().join("cache").display()
),
)?;
let mut cfg = Config::new();
cfg.cranelift_opt_level(OptLevel::None)
.cache_config_load(&config_path)?;
let engine = Engine::new(&cfg);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 0);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 1);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
let mut cfg = Config::new();
cfg.cranelift_opt_level(OptLevel::Speed)
.cache_config_load(&config_path)?;
let engine = Engine::new(&cfg);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 0);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 1);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
let mut cfg = Config::new();
cfg.cranelift_opt_level(OptLevel::SpeedAndSize)
.cache_config_load(&config_path)?;
let engine = Engine::new(&cfg);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 0);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 1);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
// FIXME(#1523) need debuginfo on aarch64 before we run this test there
if !cfg!(target_arch = "aarch64") {
let mut cfg = Config::new();
cfg.debug_info(true).cache_config_load(&config_path)?;
let engine = Engine::new(&cfg);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 0);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
Module::new(&engine, "(module (func))")?;
assert_eq!(engine.config().cache_config.cache_hits(), 1);
assert_eq!(engine.config().cache_config.cache_misses(), 1);
}
Ok(())
}
}
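
As the `Engine` docs above note, `clone` is a shallow copy of the inner `Arc`, and `Engine::same` is simply `Arc::ptr_eq` on that handle. A quick illustration, assuming nothing beyond the items defined in this file:

```rust
use wasmtime::Engine;

fn main() {
    // Cloning copies only the `Arc<EngineInner>` handle, so the clone and
    // the original compare equal under `Engine::same` ...
    let engine = Engine::default();
    let cloned = engine.clone();
    assert!(Engine::same(&engine, &cloned));

    // ... while two independently created engines are distinct.
    assert!(!Engine::same(&engine, &Engine::default()));
}
```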

2   crates/wasmtime/src/func.rs

@@ -1,4 +1,4 @@
use crate::runtime::StoreInner;
use crate::store::StoreInner;
use crate::trampoline::StoreInstanceHandle;
use crate::{Extern, ExternRef, FuncType, Memory, Store, Trap, Val, ValType};
use anyhow::{bail, ensure, Context as _, Result};

15   crates/wasmtime/src/lib.rs

@@ -234,6 +234,8 @@
#![doc(test(attr(deny(warnings))))]
#![doc(test(attr(allow(dead_code, unused_variables, unused_mut))))]
mod config;
mod engine;
mod externals;
mod frame_info;
mod func;
@@ -241,13 +243,15 @@ mod instance;
mod linker;
mod module;
mod r#ref;
mod runtime;
mod sig_registry;
mod store;
mod trampoline;
mod trap;
mod types;
mod values;
pub use crate::config::*;
pub use crate::engine::*;
pub use crate::externals::*;
pub use crate::frame_info::FrameInfo;
pub use crate::func::*;
@@ -255,7 +259,7 @@ pub use crate::instance::Instance;
pub use crate::linker::*;
pub use crate::module::Module;
pub use crate::r#ref::ExternRef;
pub use crate::runtime::*;
pub use crate::store::*;
pub use crate::trap::*;
pub use crate::types::*;
pub use crate::values::*;
@@ -269,3 +273,10 @@ cfg_if::cfg_if! {
// ... unknown os!
}
}
fn _assert_send_sync() {
fn _assert<T: Send + Sync>() {}
_assert::<Engine>();
_assert::<Config>();
_assert::<InterruptHandle>();
}

426   crates/wasmtime/src/store.rs

@@ -0,0 +1,426 @@
use crate::sig_registry::SignatureRegistry;
use crate::trampoline::StoreInstanceHandle;
use crate::Engine;
use crate::Module;
use anyhow::{bail, Result};
use std::cell::RefCell;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::rc::{Rc, Weak};
use std::sync::Arc;
use wasmtime_environ::wasm;
use wasmtime_runtime::{
InstanceHandle, RuntimeMemoryCreator, SignalHandler, StackMapRegistry, VMExternRef,
VMExternRefActivationsTable, VMInterrupts, VMSharedSignatureIndex,
};
/// A `Store` is a collection of WebAssembly instances and host-defined items.
///
/// All WebAssembly instances and items will be attached to and refer to a
/// `Store`. For example instances, functions, globals, and tables are all
/// attached to a `Store`. Instances are created by instantiating a [`Module`]
/// within a `Store`.
///
/// `Store` is not thread-safe and cannot be sent to other threads. All items
/// which refer to a `Store` additionally are not threadsafe and can only be
/// used on the original thread that they were created on.
///
/// A `Store` is not intended to be a long-lived object in a program. No form of
/// GC is implemented at this time so once an instance is created within a
/// `Store` it will not be deallocated until all references to the `Store` have
/// gone away (this includes all references to items in the store). This makes
/// `Store` unsuitable for creating an unbounded number of instances in it
/// because `Store` will never release this memory. It's recommended instead to
/// have a long-lived [`Engine`] and to create a `Store` for a more scoped
/// portion of your application.
///
/// # Stores and `Clone`
///
/// Using `clone` on a `Store` is a cheap operation. It will not create an
/// entirely new store, but rather just a new reference to the existing object.
/// In other words it's a shallow copy, not a deep copy.
///
/// ## Stores and `Default`
///
/// You can create a store with default configuration settings using
/// `Store::default()`. This will create a brand new [`Engine`] with default
/// configuration (see [`Config`](crate::Config) for more information).
#[derive(Clone)]
pub struct Store {
inner: Rc<StoreInner>,
}
pub(crate) struct StoreInner {
engine: Engine,
interrupts: Arc<VMInterrupts>,
signatures: RefCell<SignatureRegistry>,
instances: RefCell<Vec<InstanceHandle>>,
signal_handler: RefCell<Option<Box<SignalHandler<'static>>>>,
jit_code_ranges: RefCell<Vec<(usize, usize)>>,
externref_activations_table: VMExternRefActivationsTable,
stack_map_registry: StackMapRegistry,
}
struct HostInfoKey(VMExternRef);
impl PartialEq for HostInfoKey {
fn eq(&self, rhs: &Self) -> bool {
VMExternRef::eq(&self.0, &rhs.0)
}
}
impl Eq for HostInfoKey {}
impl Hash for HostInfoKey {
fn hash<H>(&self, hasher: &mut H)
where
H: Hasher,
{
VMExternRef::hash(&self.0, hasher);
}
}
impl Store {
/// Creates a new store to be associated with the given [`Engine`].
pub fn new(engine: &Engine) -> Store {
// Ensure that wasmtime_runtime's signal handlers are configured. Note
// that at the `Store` level it means we should perform this
// once-per-thread. Platforms like Unix, however, only require this
// once-per-program. In any case this is safe to call many times and
// each one that's not relevant just won't do anything.
wasmtime_runtime::init_traps();
Store {
inner: Rc::new(StoreInner {
engine: engine.clone(),
interrupts: Arc::new(Default::default()),
signatures: RefCell::new(Default::default()),
instances: RefCell::new(Vec::new()),
signal_handler: RefCell::new(None),
jit_code_ranges: RefCell::new(Vec::new()),
externref_activations_table: VMExternRefActivationsTable::new(),
stack_map_registry: StackMapRegistry::default(),
}),
}
}
pub(crate) fn from_inner(inner: Rc<StoreInner>) -> Store {
Store { inner }
}
/// Returns the [`Engine`] that this store is associated with.
pub fn engine(&self) -> &Engine {
&self.inner.engine
}
/// Returns an optional reference to a [`RuntimeMemoryCreator`].
pub(crate) fn memory_creator(&self) -> Option<&dyn RuntimeMemoryCreator> {
self.engine()
.config()
.memory_creator
.as_ref()
.map(|x| x as _)
}
pub(crate) fn signatures(&self) -> &RefCell<SignatureRegistry> {
&self.inner.signatures
}
pub(crate) fn lookup_shared_signature<'a>(
&'a self,
module: &'a wasmtime_environ::Module,
) -> impl Fn(wasm::SignatureIndex) -> VMSharedSignatureIndex + 'a {
move |index| {
self.signatures()
.borrow()
.lookup(&module.signatures[index])
.expect("signature not previously registered")
}
}
/// Returns whether or not the given address falls within the JIT code
/// managed by the compiler
pub(crate) fn is_in_jit_code(&self, addr: usize) -> bool {
self.inner
.jit_code_ranges
.borrow()
.iter()
.any(|(start, end)| *start <= addr && addr < *end)
}
pub(crate) fn register_module(&self, module: &Module) {
// All modules register their JIT code in a store for two reasons
// currently:
//
// * First we only catch signals/traps if the program counter falls
// within the jit code of an instantiated wasm module. This ensures
// we don't catch accidental Rust/host segfaults.
//
// * Second when generating a backtrace we'll use this mapping to
// only generate wasm frames for instruction pointers that fall
// within jit code.
self.register_jit_code(module);
// We need to know about all the stack maps of all instantiated modules
// so when performing a GC we know about all wasm frames that we find
// on the stack.
self.register_stack_maps(module);
// Signatures are loaded into our `SignatureRegistry` here
// once-per-module (and once-per-signature). This allows us to create
// a `Func` wrapper for any function in the module, which requires that
// we know about the signature and trampoline for all instances.
self.register_signatures(module);
}
fn register_jit_code(&self, module: &Module) {
let mut ranges = module.compiled_module().jit_code_ranges();
// Check if we already registered JIT code ranges by searching for
// the first range's start.
match ranges.next() {
None => (),
Some(first) => {
if !self.is_in_jit_code(first.0) {
// The range is not registered -- add all ranges (including
// first one) to the jit_code_ranges.
let mut jit_code_ranges = self.inner.jit_code_ranges.borrow_mut();
jit_code_ranges.push(first);
jit_code_ranges.extend(ranges);
}
}
}
}
fn register_stack_maps(&self, module: &Module) {
let module = &module.compiled_module();
self.stack_map_registry()
.register_stack_maps(module.stack_maps().map(|(func, stack_maps)| unsafe {
let ptr = (*func).as_ptr();
let len = (*func).len();
let start = ptr as usize;
let end = ptr as usize + len;
let range = start..end;
(range, stack_maps)
}));
}
fn register_signatures(&self, module: &Module) {
let trampolines = module.compiled_module().trampolines();
let module = module.compiled_module().module();
let mut signatures = self.signatures().borrow_mut();
for (index, wasm) in module.signatures.iter() {
signatures.register(wasm, trampolines[index]);
}
}
pub(crate) unsafe fn add_instance(&self, handle: InstanceHandle) -> StoreInstanceHandle {
self.inner.instances.borrow_mut().push(handle.clone());
StoreInstanceHandle {
store: self.clone(),
handle,
}
}
pub(crate) fn existing_instance_handle(&self, handle: InstanceHandle) -> StoreInstanceHandle {
debug_assert!(self
.inner
.instances
.borrow()
.iter()
.any(|i| i.vmctx_ptr() == handle.vmctx_ptr()));
StoreInstanceHandle {
store: self.clone(),
handle,
}
}
pub(crate) fn weak(&self) -> Weak<StoreInner> {
Rc::downgrade(&self.inner)
}
pub(crate) fn upgrade(weak: &Weak<StoreInner>) -> Option<Self> {
let inner = weak.upgrade()?;
Some(Self { inner })
}
pub(crate) fn signal_handler(&self) -> std::cell::Ref<'_, Option<Box<SignalHandler<'static>>>> {
self.inner.signal_handler.borrow()
}
pub(crate) fn signal_handler_mut(
&self,
) -> std::cell::RefMut<'_, Option<Box<SignalHandler<'static>>>> {
self.inner.signal_handler.borrow_mut()
}
pub(crate) fn interrupts(&self) -> &VMInterrupts {
&self.inner.interrupts
}
/// Returns whether the stores `a` and `b` refer to the same underlying
/// `Store`.
///
/// Because the `Store` type is reference counted multiple clones may point
/// to the same underlying storage, and this method can be used to determine
/// whether two stores are indeed the same.
pub fn same(a: &Store, b: &Store) -> bool {
Rc::ptr_eq(&a.inner, &b.inner)
}
/// Creates an [`InterruptHandle`] which can be used to interrupt the
/// execution of instances within this `Store`.
///
/// An [`InterruptHandle`] is a mechanism for ensuring that guest code
/// doesn't execute for too long. For example, it's used to prevent wasm
/// programs from executing indefinitely in infinite loops or recursive call
/// chains.
///
/// The [`InterruptHandle`] type is sendable to other threads so you can
/// interact with it even while the thread with this `Store` is executing
/// wasm code.
///
/// There's one method on an interrupt handle:
/// [`InterruptHandle::interrupt`]. This method is used to generate an
/// interrupt and cause wasm code to exit "soon".
///
/// ## When are interrupts delivered?
///
/// The term "interrupt" here refers to one of two different behaviors that
/// are interrupted in wasm:
///
/// * The head of every loop in wasm has a check to see if it's interrupted.
/// * The prologue of every function has a check to see if it's interrupted.
///
/// This interrupt mechanism makes no attempt to signal interrupts to
/// native code. For example if a host function is blocked, then sending
/// an interrupt will not interrupt that operation.
///
/// Interrupts are consumed as soon as possible when wasm itself starts
/// executing. This means that if you interrupt wasm code then it basically
/// guarantees that the next time wasm is executing on the target thread it
/// will return quickly (either normally if it were already in the process
/// of returning or with a trap from the interrupt). Once an interrupt
/// trap is generated then an interrupt is consumed, and further execution
/// will not be interrupted (unless another interrupt is set).
///
/// When implementing interrupts you'll want to ensure that the delivery of
/// interrupts into wasm code is also handled in your host imports and
/// functionality. Host functions need to either execute for bounded amounts
/// of time or you'll need to arrange for them to be interrupted as well.
///
/// ## Return Value
///
/// This function returns a `Result` since interrupts are not always
/// enabled. Interrupts are enabled via the
/// [`Config::interruptable`](crate::Config::interruptable) method, and if
/// this store's [`Config`](crate::Config) hasn't been configured to enable
/// interrupts then an error is returned.
///
/// ## Examples
///
/// ```
/// # use anyhow::Result;
/// # use wasmtime::*;
/// # fn main() -> Result<()> {
/// // Enable interruptable code via `Config` and then create an interrupt
/// // handle which we'll use later to interrupt running code.
/// let engine = Engine::new(Config::new().interruptable(true));
/// let store = Store::new(&engine);
/// let interrupt_handle = store.interrupt_handle()?;
///
/// // Compile and instantiate a small example with an infinite loop.
/// let module = Module::new(&engine, r#"
/// (func (export "run") (loop br 0))
/// "#)?;
/// let instance = Instance::new(&store, &module, &[])?;
/// let run = instance
/// .get_func("run")
/// .ok_or(anyhow::format_err!("failed to find `run` function export"))?
/// .get0::<()>()?;
///
/// // Spin up a thread to send us an interrupt in a second
/// std::thread::spawn(move || {
/// std::thread::sleep(std::time::Duration::from_secs(1));
/// interrupt_handle.interrupt();
/// });
///
/// let trap = run().unwrap_err();
/// assert!(trap.to_string().contains("wasm trap: interrupt"));
/// # Ok(())
/// # }
/// ```
pub fn interrupt_handle(&self) -> Result<InterruptHandle> {
if self.engine().config().tunables.interruptable {
Ok(InterruptHandle {
interrupts: self.inner.interrupts.clone(),
})
} else {
bail!("interrupts aren't enabled for this `Store`")
}
}
pub(crate) fn externref_activations_table(&self) -> &VMExternRefActivationsTable {
&self.inner.externref_activations_table
}
pub(crate) fn stack_map_registry(&self) -> &StackMapRegistry {
&self.inner.stack_map_registry
}
/// Perform garbage collection of `ExternRef`s.
pub fn gc(&self) {
// For this crate's API, we ensure that `set_stack_canary` invariants
// are upheld for all host-->Wasm calls, and we register every module
// used with this store in `self.inner.stack_map_registry`.
unsafe {
wasmtime_runtime::gc(
&self.inner.stack_map_registry,
&self.inner.externref_activations_table,
);
}
}
}
impl Default for Store {
fn default() -> Store {
Store::new(&Engine::default())
}
}
impl fmt::Debug for Store {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let inner = &*self.inner as *const StoreInner;
f.debug_struct("Store").field("inner", &inner).finish()
}
}
impl Drop for StoreInner {
fn drop(&mut self) {
for instance in self.instances.get_mut().iter() {
unsafe {
instance.dealloc();
}
}
}
}
/// A threadsafe handle used to interrupt instances executing within a
/// particular `Store`.
///
/// This structure is created by the [`Store::interrupt_handle`] method.
pub struct InterruptHandle {
interrupts: Arc<VMInterrupts>,
}
impl InterruptHandle {
/// Flags that execution within this handle's original [`Store`] should be
/// interrupted.
///
/// This will not immediately interrupt execution of wasm modules, but
/// rather it will cause wasm execution to be interrupted at loop headers
/// and function entries. For more information see
/// [`Store::interrupt_handle`].
pub fn interrupt(&self) {
self.interrupts.interrupt()
}
}
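
Likewise for `Store`: `clone` copies the `Rc<StoreInner>` handle, `Store::same` compares those pointers, and `gc` can be called at any time to collect unreachable `ExternRef`s. A short sketch against the public methods in this file (not verified against a build of this revision):

```rust
use wasmtime::{Engine, Store};

fn main() {
    // Two clones of one store share the same `Rc<StoreInner>`.
    let store = Store::new(&Engine::default());
    let cloned = store.clone();
    assert!(Store::same(&store, &cloned));

    // A store created separately is a different store, even when it
    // shares the same engine.
    assert!(!Store::same(&store, &Store::new(store.engine())));

    // Trigger a collection of `ExternRef`s tracked by this store.
    store.gc();
}
```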