Browse Source

Make module ids unique per-process, not per-engine (#8758)

* Make module ids unique per-process, not per-engine

Currently all instances of `wasmtime::Module` have a unique 64-bit id
embedded into them. This ID was originally only used for affinity in the
pooling allocator as a quick check of module equality. This use case
only required engine-local ids so the initial implementation had an ID
allocator per-engine.

Later, however, this id was reused for `wasmtime::ModuleExport` which
was intended to skip the string lookup for an export at runtime. This
also stored a module id but it did not store an engine identifier. This
meant that it's possible to mix these up and pass the wrong export to
the wrong engine. This behavior can lead to a runtime panic in Wasmtime.

This commit fixes this by making the module identifier be global
per-process instead of per-engine. This mirrors how store IDs are
allocated: they're per-process rather than per-engine. The same
reasoning for why store IDs are unlikely to be exhausted applies here
too, since a 64-bit space of identifiers is effectively inexhaustible.

* Fix warnings

* Fix tests
pull/8761/head
Alex Crichton 5 months ago
committed by GitHub
parent
commit
af59c4d568
No known key found for this signature in database GPG Key ID: B5690EEEBB952194
  1. 8
      crates/wasmtime/src/engine.rs
  2. 5
      crates/wasmtime/src/runtime/instantiate.rs
  3. 8
      crates/wasmtime/src/runtime/module.rs
  4. 2
      crates/wasmtime/src/runtime/store/data.rs
  5. 2
      crates/wasmtime/src/runtime/vm.rs
  6. 32
      crates/wasmtime/src/runtime/vm/instance/allocator/pooling/index_allocator.rs
  7. 40
      crates/wasmtime/src/runtime/vm/module_id.rs
  8. 23
      tests/all/module.rs

8
crates/wasmtime/src/engine.rs

@ -61,8 +61,6 @@ struct EngineInner {
signatures: TypeRegistry,
#[cfg(feature = "runtime")]
epoch: AtomicU64,
#[cfg(feature = "runtime")]
unique_id_allocator: crate::runtime::vm::CompiledModuleIdAllocator,
/// One-time check of whether the compiler's settings, if present, are
/// compatible with the native host.
@ -129,8 +127,6 @@ impl Engine {
signatures: TypeRegistry::new(),
#[cfg(feature = "runtime")]
epoch: AtomicU64::new(0),
#[cfg(feature = "runtime")]
unique_id_allocator: crate::runtime::vm::CompiledModuleIdAllocator::new(),
#[cfg(any(feature = "cranelift", feature = "winch"))]
compatible_with_native_host: OnceLock::new(),
config,
@ -639,10 +635,6 @@ impl Engine {
self.inner.epoch.fetch_add(1, Ordering::Relaxed);
}
pub(crate) fn unique_id_allocator(&self) -> &crate::runtime::vm::CompiledModuleIdAllocator {
&self.inner.unique_id_allocator
}
/// Returns a [`std::hash::Hash`] that can be used to check precompiled WebAssembly compatibility.
///
/// The outputs of [`Engine::precompile_module`] and [`Engine::precompile_component`]

5
crates/wasmtime/src/runtime/instantiate.rs

@ -4,7 +4,7 @@
//! steps.
use crate::prelude::*;
use crate::runtime::vm::{CompiledModuleId, CompiledModuleIdAllocator, MmapVec};
use crate::runtime::vm::{CompiledModuleId, MmapVec};
use crate::{code_memory::CodeMemory, profiling_agent::ProfilingAgent};
use alloc::sync::Arc;
use anyhow::Result;
@ -50,7 +50,6 @@ impl CompiledModule {
code_memory: Arc<CodeMemory>,
info: CompiledModuleInfo,
profiler: &dyn ProfilingAgent,
id_allocator: &CompiledModuleIdAllocator,
) -> Result<Self> {
let mut ret = Self {
module: Arc::new(info.module),
@ -60,7 +59,7 @@ impl CompiledModule {
dbg_jit_registration: None,
code_memory,
meta: info.meta,
unique_id: id_allocator.alloc(),
unique_id: CompiledModuleId::new(),
func_names: info.func_names,
};
ret.register_debug_and_profiling(profiler)?;

8
crates/wasmtime/src/runtime/module.rs

@ -478,12 +478,8 @@ impl Module {
info: CompiledModuleInfo,
serializable: bool,
) -> Result<Self> {
let module = CompiledModule::from_artifacts(
code.code_memory().clone(),
info,
engine.profiler(),
engine.unique_id_allocator(),
)?;
let module =
CompiledModule::from_artifacts(code.code_memory().clone(), info, engine.profiler())?;
// Validate the module can be used with the current instance allocator.
let offsets = VMOffsets::new(HostPtr, module.module());

2
crates/wasmtime/src/runtime/store/data.rs

@ -209,7 +209,7 @@ pub struct StoreId(NonZeroU64);
impl StoreId {
/// Allocates a new unique identifier for a store that has never before been
/// used in this process.
fn allocate() -> StoreId {
pub fn allocate() -> StoreId {
static NEXT_ID: AtomicU64 = AtomicU64::new(0);
// Only allow 2^63 stores at which point we start panicking to prevent

2
crates/wasmtime/src/runtime/vm.rs

@ -74,7 +74,7 @@ pub use crate::runtime::vm::vmcontext::{
pub use send_sync_ptr::SendSyncPtr;
mod module_id;
pub use module_id::{CompiledModuleId, CompiledModuleIdAllocator};
pub use module_id::CompiledModuleId;
mod cow;
pub use crate::runtime::vm::cow::{MemoryImage, MemoryImageSlot, ModuleMemoryImages};

32
crates/wasmtime/src/runtime/vm/instance/allocator/pooling/index_allocator.rs

@ -485,7 +485,6 @@ impl List {
#[cfg(test)]
mod test {
use super::*;
use crate::runtime::vm::CompiledModuleIdAllocator;
use wasmtime_environ::EntityRef;
#[test]
@ -503,9 +502,8 @@ mod test {
#[test]
fn test_affinity_allocation_strategy() {
let id_alloc = CompiledModuleIdAllocator::new();
let id1 = MemoryInModule(id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id2 = MemoryInModule(id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id1 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
let id2 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
let state = ModuleAffinityIndexAllocator::new(100, 100);
let index1 = state.alloc(Some(id1)).unwrap();
@ -560,8 +558,7 @@ mod test {
#[test]
fn clear_affine() {
let id_alloc = CompiledModuleIdAllocator::new();
let id = id_alloc.alloc();
let id = CompiledModuleId::new();
let memory_index = DefinedMemoryIndex::new(0);
for max_unused_warm_slots in [0, 1, 2] {
@ -589,11 +586,11 @@ mod test {
use rand::Rng;
let mut rng = rand::thread_rng();
let id_alloc = CompiledModuleIdAllocator::new();
let ids =
std::iter::repeat_with(|| MemoryInModule(id_alloc.alloc(), DefinedMemoryIndex::new(0)))
.take(10)
.collect::<Vec<_>>();
let ids = std::iter::repeat_with(|| {
MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0))
})
.take(10)
.collect::<Vec<_>>();
let state = ModuleAffinityIndexAllocator::new(1000, 1000);
let mut allocated: Vec<SlotId> = vec![];
let mut last_id = vec![None; 1000];
@ -634,10 +631,9 @@ mod test {
#[test]
fn test_affinity_threshold() {
let id_alloc = CompiledModuleIdAllocator::new();
let id1 = MemoryInModule(id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id2 = MemoryInModule(id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id3 = MemoryInModule(id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id1 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
let id2 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
let id3 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
let state = ModuleAffinityIndexAllocator::new(10, 2);
// Set some slot affinities
@ -673,7 +669,7 @@ mod test {
// LRU is 1, so that should be picked
assert_eq!(
state.alloc(Some(MemoryInModule(
id_alloc.alloc(),
CompiledModuleId::new(),
DefinedMemoryIndex::new(0)
))),
Some(SlotId(1))
@ -682,7 +678,7 @@ mod test {
// Pick another LRU entry, this time 2
assert_eq!(
state.alloc(Some(MemoryInModule(
id_alloc.alloc(),
CompiledModuleId::new(),
DefinedMemoryIndex::new(0)
))),
Some(SlotId(2))
@ -691,7 +687,7 @@ mod test {
// This should preserve slot `0` and pick up something new
assert_eq!(
state.alloc(Some(MemoryInModule(
id_alloc.alloc(),
CompiledModuleId::new(),
DefinedMemoryIndex::new(0)
))),
Some(SlotId(3))

40
crates/wasmtime/src/runtime/vm/module_id.rs

@ -1,43 +1,19 @@
//! Unique IDs for modules in the runtime.
use core::{
num::NonZeroU64,
sync::atomic::{AtomicU64, Ordering},
};
use core::num::NonZeroU64;
/// A unique identifier (within an engine or similar) for a compiled
/// module.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct CompiledModuleId(NonZeroU64);
/// An allocator for compiled module IDs.
pub struct CompiledModuleIdAllocator {
next: AtomicU64,
}
impl CompiledModuleIdAllocator {
/// Create a compiled-module ID allocator.
impl CompiledModuleId {
/// Allocates a new ID which will be unique within this process.
pub fn new() -> Self {
Self {
next: AtomicU64::new(1),
}
}
/// Allocate a new ID.
pub fn alloc(&self) -> CompiledModuleId {
// Note: why is `Relaxed` OK here?
//
// The only requirement we have is that IDs are unique. We
// don't care how one module's ID compares to another, i.e.,
// what order they come in. `Relaxed` means that this
// `fetch_add` operation does not have any particular
// synchronization (ordering) with respect to any other memory
// access in the program. However, `fetch_add` is always
// atomic with respect to other accesses to this variable
// (`self.next`). So we will always hand out separate, unique
// IDs correctly, just in some possibly arbitrary order (which
// is fine).
let id = self.next.fetch_add(1, Ordering::Relaxed);
CompiledModuleId(NonZeroU64::new(id).unwrap())
// As an implementation detail this is implemented on the same
// allocator as stores. It's ok if there are "holes" in the store id
// space as it's not required to be compact, it's just used for
// uniqueness.
CompiledModuleId(crate::store::StoreId::allocate().as_raw())
}
}

23
tests/all/module.rs

@ -330,3 +330,26 @@ fn tail_call_defaults() -> Result<()> {
}
Ok(())
}
/// Regression test: a `ModuleExport` obtained from a module in one engine
/// must not resolve against an instance created in a different engine.
///
/// Module ids are unique per-process, so the export index taken from the
/// second engine's module can never match a module compiled in the first
/// engine; the lookup must return `None` rather than panicking.
#[test]
fn cross_engine_module_exports() -> Result<()> {
    let engine_a = Engine::default();
    let engine_b = Engine::default();

    // An empty module in engine A, and a module with one export in engine B.
    let module_a = Module::new(&engine_a, "(module)")?;
    let module_b = Module::new(
        &engine_b,
        r#"
(module
(func (export "x"))
)
"#,
    )?;

    // Grab an export index out of engine B's module...
    let export = module_b.get_export_index("x").unwrap();

    // ...then attempt to resolve it against an instance from engine A.
    // With per-process module ids this is detected and yields `None`.
    let mut store = Store::new(&engine_a, ());
    let instance = Instance::new(&mut store, &module_a, &[])?;
    assert!(instance.get_module_export(&mut store, &export).is_none());
    Ok(())
}

Loading…
Cancel
Save