
Split out `TypeRegistry`'s open-coded slab arena into a reusable type (#7986)

First of all, it is just a nice separation of concerns.

Second of all, as I design the GC rooting APIs for Wasmtime's upcoming Wasm GC
support, I want this same thing and I'd rather not open-code it multiple times.
pull/7996/head
Nick Fitzgerald 9 months ago
committed by GitHub
parent
commit
1d8a0983bc
Changed files:

1. Cargo.lock (5)
2. Cargo.toml (3)
3. crates/slab/Cargo.toml (12)
4. crates/slab/src/lib.rs (450)
5. crates/wasmtime/Cargo.toml (3)
6. crates/wasmtime/src/runtime/type_registry.rs (188)
7. scripts/publish.rs (1)

Cargo.lock (5)

@@ -3277,6 +3277,7 @@ dependencies = [
"wasmtime-jit-debug",
"wasmtime-jit-icache-coherence",
"wasmtime-runtime",
"wasmtime-slab",
"wasmtime-winch",
"wat",
"windows-sys 0.52.0",
@@ -3661,6 +3662,10 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "wasmtime-slab"
version = "19.0.0"
[[package]]
name = "wasmtime-types"
version = "19.0.0"

Cargo.toml (3)

@@ -121,7 +121,7 @@ members = [
"examples/component/wasm",
"fuzz",
"winch",
"winch/codegen",
"winch/codegen", "crates/slab",
]
exclude = [
'docs/rust_wasi_markdown_parser',
@@ -173,6 +173,7 @@ wasmtime-component-util = { path = "crates/component-util", version = "=19.0.0"
wasmtime-component-macro = { path = "crates/component-macro", version = "=19.0.0" }
wasmtime-asm-macros = { path = "crates/asm-macros", version = "=19.0.0" }
wasmtime-versioned-export-macros = { path = "crates/versioned-export-macros", version = "=19.0.0" }
wasmtime-slab = { path = "crates/slab", version = "=19.0.0" }
component-test-util = { path = "crates/misc/component-test-util" }
component-fuzz-util = { path = "crates/misc/component-fuzz-util" }
wiggle = { path = "crates/wiggle", version = "=19.0.0", default-features = false }

crates/slab/Cargo.toml (12)

@@ -0,0 +1,12 @@
[package]
authors = ["The Wasmtime Project Developers"]
description = "Uni-typed slab with a free list for use in Wasmtime"
edition.workspace = true
license = "Apache-2.0 WITH LLVM-exception"
name = "wasmtime-slab"
repository = "https://github.com/bytecodealliance/wasmtime"
version.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

crates/slab/src/lib.rs (450)

@@ -0,0 +1,450 @@
//! A very simple, uniformly-typed slab arena that supports deallocation and
//! reusing deallocated entries' space.
//!
//! The free list of vacant entries in the slab is stored inline in the slab's
//! existing storage.
//!
//! # Example
//!
//! ```
//! use wasmtime_slab::{Id, Slab};
//!
//! let mut slab = Slab::new();
//!
//! // Insert some values into the slab.
//! let rza = slab.alloc("Robert Fitzgerald Diggs");
//! let gza = slab.alloc("Gary Grice");
//! let bill = slab.alloc("Bill Gates");
//!
//! // Alloced elements can be accessed infallibly via indexing (and missing and
//! // deallocated entries will panic).
//! assert_eq!(slab[rza], "Robert Fitzgerald Diggs");
//!
//! // Alternatively, the `get` and `get_mut` methods provide fallible lookup.
//! if let Some(genius) = slab.get(gza) {
//! println!("The gza gza genius: {}", genius);
//! }
//! if let Some(val) = slab.get_mut(bill) {
//! *val = "Bill Gates doesn't belong in this set...";
//! }
//!
//! // We can remove values from the slab.
//! slab.dealloc(bill);
//!
//! // Allocate a new entry.
//! let bill = slab.alloc("Bill Murray");
//! ```
//!
//! # Using `Id`s with the Wrong `Slab`
//!
//! `Slab` does NOT check that `Id`s used to access previously-allocated values
//! came from the current `Slab` instance (as opposed to a different `Slab`
//! instance). Using `Id`s from a different `Slab` is safe, but will yield an
//! unrelated value, if any at all.
//!
//! If you desire checking that an `Id` came from the correct `Slab` instance,
//! it should be easy to layer that functionality on top of this crate by
//! wrapping `Slab` and `Id` in types that additionally maintain a slab instance
//! identifier.
//!
//! # The ABA Problem
//!
//! This `Slab` type does NOT protect against ABA bugs, such as the following
//! sequence:
//!
//! * Value `A` is allocated into the slab, yielding id `i`.
//!
//! * `A` is deallocated, and so `i`'s associated entry is added to the slab's
//! free list.
//!
//! * Value `B` is allocated into the slab, reusing `i`'s associated entry,
//! yielding id `i`.
//!
//! * The "original" id `i` is used to access the arena, expecting the
//! deallocated value `A`, but getting the new value `B`.
//!
//! That is, it does not detect or protect against the memory-safe version of
//! use-after-free bugs.
//!
//! If you need to protect against ABA bugs, it should be easy to layer that
//! functionality on top of this crate by wrapping `Slab` with something like
//! the following:
//!
//! ```rust
//! pub struct GenerationalId {
//! id: wasmtime_slab::Id,
//! generation: u32,
//! }
//!
//! struct GenerationalEntry<T> {
//! value: T,
//! generation: u32,
//! }
//!
//! pub struct GenerationalSlab<T> {
//! slab: wasmtime_slab::Slab<GenerationalEntry<T>>,
//! generation: u32,
//! }
//!
//! impl<T> GenerationalSlab<T> {
//! pub fn alloc(&mut self, value: T) -> GenerationalId {
//! let generation = self.generation;
//! let id = self.slab.alloc(GenerationalEntry { value, generation });
//! GenerationalId { id, generation }
//! }
//!
//! pub fn get(&self, id: GenerationalId) -> Option<&T> {
//! let entry = self.slab.get(id.id)?;
//!
//! // Check that the entry's generation matches the id's generation,
//! // else we have an ABA bug. (Alternatively, return `None` instead
//! // of panicking.)
//! assert_eq!(id.generation, entry.generation);
//!
//! Some(&entry.value)
//! }
//!
//! pub fn dealloc(&mut self, id: GenerationalId) {
//! // Check that the entry's generation matches the id's generation,
//! // else we have an ABA bug. (Alternatively, silently return on
//! // double-free instead of panicking.)
//! assert_eq!(id.generation, self.slab[id.id].generation);
//!
//! self.slab.dealloc(id.id);
//!
//! // Increment our generation whenever we deallocate so that any new
//! // value placed in this same entry will have a different generation
//! // and we can detect ABA bugs.
//! self.generation += 1;
//! }
//! }
//! ```
#![forbid(unsafe_code)]
#![deny(missing_docs, missing_debug_implementations)]
use std::num::NonZeroU32;
/// An identifier for an allocated value inside a `Slab`.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct Id(EntryIndex);
impl std::fmt::Debug for Id {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("Id").field(&self.0.index()).finish()
}
}
impl Id {
/// Get the raw underlying representation of this `Id`.
#[inline]
pub fn into_raw(self) -> u32 {
u32::try_from(self.0.index()).unwrap()
}
/// Construct an `Id` from its raw underlying representation.
///
/// `raw` should be a value that was previously created via
/// `Id::into_raw`. May panic if given arbitrary values.
#[inline]
pub fn from_raw(raw: u32) -> Self {
let raw = usize::try_from(raw).unwrap();
Self(EntryIndex::new(raw))
}
}
/// A simple, uni-typed slab arena.
pub struct Slab<T> {
/// The slab's entries, each is either occupied and holding a `T` or vacant
/// and is a link in the free list.
entries: Vec<Entry<T>>,
/// The index of the first free entry in the free list.
free: Option<EntryIndex>,
/// The number of occupied entries in this slab.
len: u32,
}
impl<T> std::fmt::Debug for Slab<T>
where
T: std::fmt::Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_map().entries(self.iter()).finish()
}
}
enum Entry<T> {
/// An occupied entry holding a `T`.
Occupied(T),
/// A vacant entry.
Free {
/// A link in the slab's free list, pointing to the next free entry, if
/// any.
next_free: Option<EntryIndex>,
},
}
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct EntryIndex(NonZeroU32);
impl EntryIndex {
#[inline]
fn new(index: usize) -> Self {
assert!(index <= Slab::<()>::MAX_CAPACITY);
let x = u32::try_from(index + 1).unwrap();
Self(NonZeroU32::new(x).unwrap())
}
#[inline]
fn index(&self) -> usize {
let index = self.0.get() - 1;
usize::try_from(index).unwrap()
}
}
impl<T> Default for Slab<T> {
#[inline]
fn default() -> Self {
Self {
entries: Vec::default(),
free: None,
len: 0,
}
}
}
impl<T> std::ops::Index<Id> for Slab<T> {
type Output = T;
#[inline]
fn index(&self, id: Id) -> &Self::Output {
self.get(id)
.expect("id from different slab or value was deallocated")
}
}
impl<T> std::ops::IndexMut<Id> for Slab<T> {
#[inline]
fn index_mut(&mut self, id: Id) -> &mut Self::Output {
self.get_mut(id)
.expect("id from different slab or value was deallocated")
}
}
impl<T> Slab<T> {
/// The maximum capacity any `Slab` can have: `u32::MAX - 1`.
pub const MAX_CAPACITY: usize = (u32::MAX - 1) as usize;
/// Construct a new, empty slab.
#[inline]
pub fn new() -> Self {
Slab::default()
}
/// Construct a new, empty slab, pre-reserving space for at least `capacity`
/// elements.
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
let mut slab = Self::new();
slab.reserve(capacity);
slab
}
/// Ensure that there is space for at least `additional` elements in this
/// slab.
///
/// # Panics
///
/// Panics if the new capacity exceeds `Self::MAX_CAPACITY`.
pub fn reserve(&mut self, additional: usize) {
let cap = self.capacity();
let len = self.len();
assert!(cap >= len);
if cap - len >= additional {
// Already have `additional` capacity available.
return;
}
self.entries.reserve(additional);
// Maintain the invariant that `i <= MAX_CAPACITY` for all indices `i`
// in `self.entries`.
assert!(self.entries.capacity() <= Self::MAX_CAPACITY);
}
fn double_capacity(&mut self) {
// Double our capacity to amortize the cost of resizing. But make sure
// we add some amount of minimum additional capacity, since doubling
// zero capacity isn't useful.
const MIN_CAPACITY: usize = 16;
let additional = std::cmp::max(self.entries.capacity(), MIN_CAPACITY);
self.reserve(additional);
}
/// What is the capacity of this slab? That is, how many entries can it
/// contain within its current underlying storage?
#[inline]
pub fn capacity(&self) -> usize {
self.entries.capacity()
}
/// How many values are currently allocated within this slab?
#[inline]
pub fn len(&self) -> usize {
usize::try_from(self.len).unwrap()
}
/// Are there zero allocated values within this slab?
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Try to allocate a `T` value within this slab.
///
/// If there is no available capacity, ownership of the given value is
/// returned via `Err(value)`.
#[inline]
pub fn try_alloc(&mut self, value: T) -> Result<Id, T> {
if let Some(index) = self.try_alloc_index() {
let next_free = match self.entries[index.index()] {
Entry::Free { next_free } => next_free,
Entry::Occupied { .. } => unreachable!(),
};
self.free = next_free;
self.entries[index.index()] = Entry::Occupied(value);
self.len += 1;
Ok(Id(index))
} else {
Err(value)
}
}
#[inline]
fn try_alloc_index(&mut self) -> Option<EntryIndex> {
self.free.take().or_else(|| {
if self.entries.len() < self.entries.capacity() {
let index = EntryIndex::new(self.entries.len());
self.entries.push(Entry::Free { next_free: None });
Some(index)
} else {
None
}
})
}
/// Allocate a `T` value within this slab, allocating additional underlying
/// storage if there is no available capacity.
///
/// # Panics
///
/// Panics if allocating this value requires reallocating the underlying
/// storage, and the new capacity exceeds `Slab::MAX_CAPACITY`.
#[inline]
pub fn alloc(&mut self, value: T) -> Id {
self.try_alloc(value)
.unwrap_or_else(|value| self.alloc_slow(value))
}
/// Get the `Id` that will be returned for the next allocation in this slab.
#[inline]
pub fn next_id(&self) -> Id {
let index = self.free.unwrap_or_else(|| EntryIndex::new(self.len()));
Id(index)
}
#[inline(never)]
#[cold]
fn alloc_slow(&mut self, value: T) -> Id {
// Reserve additional capacity, since we didn't have space for the
// allocation.
self.double_capacity();
// After which the allocation will succeed.
self.try_alloc(value).ok().unwrap()
}
/// Get a shared borrow of the value associated with `id`.
///
/// Returns `None` if the value has since been deallocated.
///
/// If `id` comes from a different `Slab` instance, this method may panic,
/// return `None`, or return an arbitrary value.
#[inline]
pub fn get(&self, id: Id) -> Option<&T> {
match self
.entries
.get(id.0.index())
.expect("id from different slab")
{
Entry::Occupied(x) => Some(x),
Entry::Free { .. } => None,
}
}
/// Get an exclusive borrow of the value associated with `id`.
///
/// Returns `None` if the value has since been deallocated.
///
/// If `id` comes from a different `Slab` instance, this method may panic,
/// return `None`, or return an arbitrary value.
#[inline]
pub fn get_mut(&mut self, id: Id) -> Option<&mut T> {
match self
.entries
.get_mut(id.0.index())
.expect("id from different slab")
{
Entry::Occupied(x) => Some(x),
Entry::Free { .. } => None,
}
}
/// Does this slab contain an allocated value for `id`?
#[inline]
pub fn contains(&self, id: Id) -> bool {
match self.entries.get(id.0.index()) {
Some(Entry::Occupied(_)) => true,
None | Some(Entry::Free { .. }) => false,
}
}
/// Iterate over all values currently allocated within this `Slab`.
///
/// Yields pairs of an `Id` and the `Id`'s associated value.
///
/// Iteration order is undefined.
#[inline]
pub fn iter(&self) -> impl Iterator<Item = (Id, &T)> + '_ {
assert!(self.entries.len() <= Self::MAX_CAPACITY);
self.entries
.iter()
.enumerate()
.filter_map(|(i, e)| match e {
Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
Entry::Free { .. } => None,
})
}
/// Deallocate the value associated with the given `id`.
///
/// If `id` comes from a different `Slab` instance, this method may panic,
/// do nothing, or deallocate an arbitrary value.
#[inline]
pub fn dealloc(&mut self, id: Id) {
match self
.entries
.get_mut(id.0.index())
.expect("id from a different slab")
{
Entry::Free { .. } => panic!("attempt to deallocate an entry that is already vacant"),
e @ Entry::Occupied(_) => {
let next_free = std::mem::replace(&mut self.free, Some(id.0));
*e = Entry::Free { next_free };
self.len -= 1;
}
}
}
}
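
To make the free-list behavior described in the crate docs concrete, here is a small usage sketch written only against the `Slab` API added above; the string values and the standalone `main` are illustrative, not part of the change. It exercises two documented properties: `try_alloc` hands the value back when the slab has no spare capacity, and a deallocated entry (and therefore its `Id`) is reused by a later allocation, which is exactly the ABA-style reuse the generational-wrapper example guards against.

use wasmtime_slab::{Id, Slab};

fn main() {
    // A fresh slab has no capacity and an empty free list, so the fallible
    // allocator returns ownership of the value instead of growing storage.
    let mut fixed: Slab<&str> = Slab::new();
    assert_eq!(fixed.try_alloc("no room yet"), Err("no room yet"));

    // The infallible `alloc` grows the underlying storage as needed.
    let mut slab = Slab::new();
    let a = slab.alloc("first");
    let b = slab.alloc("second");
    assert_eq!(slab.len(), 2);
    assert_eq!(slab[b], "second");

    // Deallocating pushes the entry onto the inline free list...
    slab.dealloc(a);
    assert!(!slab.contains(a));

    // ...and `next_id` reports that the vacant slot will be handed out next.
    assert_eq!(slab.next_id(), a);

    // The next allocation reuses that slot, so the new `Id` compares equal
    // to the stale one -- the memory-safe "ABA" hazard the docs describe.
    let c = slab.alloc("third");
    assert_eq!(c, a);
    assert_eq!(slab[c], "third");

    // `Id`s round-trip through their raw `u32` representation.
    assert_eq!(Id::from_raw(c.into_raw()), c);
}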

crates/wasmtime/Cargo.toml (3)

@@ -32,6 +32,7 @@ wasmtime-cranelift = { workspace = true, optional = true }
wasmtime-winch = { workspace = true, optional = true }
wasmtime-component-macro = { workspace = true, optional = true }
wasmtime-component-util = { workspace = true, optional = true }
wasmtime-slab = { workspace = true, optional = true }
target-lexicon = { workspace = true }
wasmparser = { workspace = true }
wasm-encoder = { workspace = true, optional = true }
@@ -170,4 +171,4 @@ coredump = ["dep:wasm-encoder", "runtime"]
debug-builtins = ["wasmtime-runtime?/debug-builtins"]
# Enable support for executing compiled Wasm modules.
runtime = ["dep:wasmtime-runtime", "dep:wasmtime-jit-icache-coherence"]
runtime = ["dep:wasmtime-runtime", "dep:wasmtime-jit-icache-coherence", "dep:wasmtime-slab"]

crates/wasmtime/src/runtime/type_registry.rs (188)

@@ -23,6 +23,7 @@ use wasmtime_environ::{
WasmFuncType,
};
use wasmtime_runtime::VMSharedTypeIndex;
use wasmtime_slab::{Id as SlabId, Slab};
// ### Notes on the Lifetime Management of Types
//
@@ -154,8 +155,13 @@ impl Drop for TypeCollection {
}
#[inline]
fn entry_index(index: VMSharedTypeIndex) -> usize {
usize::try_from(index.bits()).unwrap()
fn shared_type_index_to_slab_id(index: VMSharedTypeIndex) -> SlabId {
SlabId::from_raw(index.bits())
}
#[inline]
fn slab_id_to_shared_type_index(id: SlabId) -> VMSharedTypeIndex {
VMSharedTypeIndex::new(id.into_raw())
}
/// A Wasm type that has been registered in the engine's `TypeRegistry`.
@@ -168,7 +174,7 @@ fn entry_index(index: VMSharedTypeIndex) -> usize {
/// Dereferences to its underlying `WasmFuncType`.
pub struct RegisteredType {
engine: Engine,
entry: OccupiedEntry,
entry: Entry,
}
impl Debug for RegisteredType {
@@ -271,9 +277,9 @@ impl RegisteredType {
/// registry.
pub fn root(engine: &Engine, index: VMSharedTypeIndex) -> Option<RegisteredType> {
let entry = {
let i = entry_index(index);
let id = shared_type_index_to_slab_id(index);
let inner = engine.signatures().0.read().unwrap();
let e = inner.entries.get(i)?.as_occupied()?;
let e = inner.entries.get(id)?;
// NB: make sure to incref while the lock is held to prevent:
//
@@ -293,7 +299,7 @@ impl RegisteredType {
///
/// It is the caller's responsibility to ensure that the entry's reference
/// count has already been incremented.
fn from_parts(engine: Engine, entry: OccupiedEntry) -> Self {
fn from_parts(engine: Engine, entry: Entry) -> Self {
debug_assert!(entry.0.registrations.load(Acquire) != 0);
RegisteredType { engine, entry }
}
@@ -311,7 +317,7 @@ impl RegisteredType {
/// A Wasm function type, its `VMSharedTypeIndex`, and its registration count.
#[derive(Debug)]
struct OccupiedEntryInner {
struct EntryInner {
ty: WasmFuncType,
index: VMSharedTypeIndex,
registrations: AtomicUsize,
@@ -321,9 +327,9 @@ struct OccupiedEntryInner {
/// function type, so that this can be a hash consing key in
/// `TypeRegistryInner::map`.
#[derive(Clone, Debug)]
struct OccupiedEntry(Arc<OccupiedEntryInner>);
struct Entry(Arc<EntryInner>);
impl Deref for OccupiedEntry {
impl Deref for Entry {
type Target = WasmFuncType;
fn deref(&self) -> &Self::Target {
@@ -331,27 +337,27 @@ impl Deref for OccupiedEntry {
}
}
impl PartialEq for OccupiedEntry {
impl PartialEq for Entry {
fn eq(&self, other: &Self) -> bool {
self.0.ty == other.0.ty
}
}
impl Eq for OccupiedEntry {}
impl Eq for Entry {}
impl Hash for OccupiedEntry {
impl Hash for Entry {
fn hash<H: Hasher>(&self, state: &mut H) {
self.0.ty.hash(state);
}
}
impl Borrow<WasmFuncType> for OccupiedEntry {
impl Borrow<WasmFuncType> for Entry {
fn borrow(&self) -> &WasmFuncType {
&self.0.ty
}
}
impl OccupiedEntry {
impl Entry {
/// Increment the registration count.
fn incref(&self, why: &str) {
let old_count = self.0.registrations.fetch_add(1, AcqRel);
@@ -377,69 +383,15 @@ impl OccupiedEntry {
}
}
#[derive(Debug)]
enum RegistryEntry {
/// An occupied entry containing a registered type.
Occupied(OccupiedEntry),
/// A vacant entry that is additionally a link in the free list of all
/// vacant entries.
Vacant {
/// The next link in the free list of all vacant entries, if any.
next_vacant: Option<VMSharedTypeIndex>,
},
}
impl RegistryEntry {
fn is_vacant(&self) -> bool {
matches!(self, Self::Vacant { .. })
}
fn is_occupied(&self) -> bool {
matches!(self, Self::Occupied(_))
}
fn as_occupied(&self) -> Option<&OccupiedEntry> {
match self {
Self::Occupied(o) => Some(o),
Self::Vacant { .. } => None,
}
}
fn unwrap_occupied(&self) -> &OccupiedEntry {
match self {
Self::Occupied(o) => o,
Self::Vacant { .. } => panic!("unwrap_occupied on vacant entry"),
}
}
fn unwrap_next_vacant(&self) -> Option<VMSharedTypeIndex> {
match self {
Self::Vacant { next_vacant } => *next_vacant,
Self::Occupied(_) => panic!("unwrap_next_vacant on occupied entry"),
}
}
}
#[derive(Debug, Default)]
struct TypeRegistryInner {
// A map from the Wasm function type to a `VMSharedTypeIndex`, for all
// the Wasm function types we have already registered.
map: HashSet<OccupiedEntry>,
map: HashSet<Entry>,
// A map from `VMSharedTypeIndex::bits()` to the type index's associated
// Wasm type.
entries: Vec<RegistryEntry>,
// The head of the free list of the entries that are vacant and can
// therefore (along with their associated `VMSharedTypeIndex`) be reused.
//
// This is a size optimization, and arguably not strictly necessary for
// correctness, but is necessary to avoid unbounded memory growth: if we did
// not reuse entries/indices, we would have holes in our `self.entries` list
// and, as we load and unload new Wasm modules, `self.entries` would keep
// growing indefinitely.
first_vacant: Option<VMSharedTypeIndex>,
entries: Slab<Entry>,
// An explicit stack of entries that we are in the middle of dropping. Used
// to avoid recursion when dropping a type that is holding the last
@@ -470,9 +422,9 @@ impl TypeRegistryInner {
EngineOrModuleTypeIndex::Module(_) => Err(()),
EngineOrModuleTypeIndex::Engine(id) => {
let id = VMSharedTypeIndex::new(id);
let i = entry_index(id);
let id = shared_type_index_to_slab_id(id);
assert!(
self.entries[i].is_occupied(),
self.entries.contains(id),
"canonicalized in a different engine? {ty:?}"
);
Ok(())
@@ -507,42 +459,6 @@ impl TypeRegistryInner {
debug_assert!(self.is_canonicalized(ty))
}
/// Allocate a vacant entry, either from the free list, or creating a new
/// entry.
fn alloc_vacant_entry(&mut self) -> VMSharedTypeIndex {
match self.first_vacant.take() {
// Pop a vacant entry off the free list when we can.
Some(index) => {
let i = entry_index(index);
let entry = &mut self.entries[i];
self.first_vacant = entry.unwrap_next_vacant();
index
}
// Otherwise, allocate a new entry.
None => {
debug_assert_eq!(self.entries.len(), self.map.len());
let len = self.entries.len();
let len = u32::try_from(len).unwrap();
// Keep `index_map`'s length under `u32::MAX` because
// `u32::MAX` is reserved for `VMSharedTypeIndex`'s
// default value.
assert!(
len < std::u32::MAX,
"Invariant check: self.entries.len() < std::u32::MAX"
);
let index = VMSharedTypeIndex::new(len);
self.entries
.push(RegistryEntry::Vacant { next_vacant: None });
index
}
}
}
/// Add a new type to this registry.
///
/// The type must be canonicalized and must not already exist in the
@@ -550,7 +466,7 @@ impl TypeRegistryInner {
///
/// Initializes the new entry's registration count to one, and callers
/// should not further increment the registration count.
fn register_new(&mut self, ty: WasmFuncType) -> OccupiedEntry {
fn register_new(&mut self, ty: WasmFuncType) -> Entry {
assert!(
self.is_canonicalized(&ty),
"ty is not already canonicalized: {ty:?}"
@@ -562,8 +478,8 @@ impl TypeRegistryInner {
ty.trace::<_, ()>(&mut |idx| match idx {
EngineOrModuleTypeIndex::Engine(id) => {
let id = VMSharedTypeIndex::new(id);
let i = entry_index(id);
let e = self.entries[i].unwrap_occupied();
let i = shared_type_index_to_slab_id(id);
let e = &self.entries[i];
e.incref("new type references existing type in TypeRegistryInner::register_new");
Ok(())
}
@@ -571,9 +487,10 @@ impl TypeRegistryInner {
})
.unwrap();
let index = self.alloc_vacant_entry();
let id = self.entries.next_id();
let index = slab_id_to_shared_type_index(id);
log::trace!("create {index:?} = {ty:?} (registrations -> 1)");
let entry = OccupiedEntry(Arc::new(OccupiedEntryInner {
let entry = Entry(Arc::new(EntryInner {
ty,
index,
registrations: AtomicUsize::new(1),
@@ -581,15 +498,14 @@ impl TypeRegistryInner {
let is_new_entry = self.map.insert(entry.clone());
assert!(is_new_entry);
let i = entry_index(index);
assert!(self.entries[i].is_vacant());
self.entries[i] = RegistryEntry::Occupied(entry.clone());
let id = self.entries.alloc(entry.clone());
assert_eq!(id, shared_type_index_to_slab_id(index));
entry
}
/// Register the given canonicalized type, incrementing its reference count.
fn register_canonicalized(&mut self, ty: WasmFuncType) -> OccupiedEntry {
fn register_canonicalized(&mut self, ty: WasmFuncType) -> Entry {
assert!(
self.is_canonicalized(&ty),
"type is not already canonicalized: {ty:?}"
@@ -607,8 +523,8 @@ impl TypeRegistryInner {
fn unregister_type_collection(&mut self, collection: &TypeCollection) {
for (_, id) in collection.types.iter() {
let i = entry_index(*id);
let e = self.entries[i].unwrap_occupied();
let i = shared_type_index_to_slab_id(*id);
let e = &self.entries[i];
if e.decref("TypeRegistryInner::unregister_type_collection") {
self.unregister_entry(*id);
}
@@ -626,9 +542,9 @@ impl TypeRegistryInner {
debug_assert!(self.drop_stack.is_empty());
self.drop_stack.push(index);
while let Some(id) = self.drop_stack.pop() {
let i = entry_index(id);
let entry = self.entries[i].unwrap_occupied();
while let Some(index) = self.drop_stack.pop() {
let slab_id = shared_type_index_to_slab_id(index);
let entry = &self.entries[slab_id];
// We need to double check whether the entry is still at zero
// registrations: Between the time that we observed a zero and
@@ -643,7 +559,7 @@ impl TypeRegistryInner {
let registrations = entry.0.registrations.load(Acquire);
if registrations != 0 {
log::trace!(
"{id:?} was concurrently resurrected and no longer has zero \
"{index:?} was concurrently resurrected and no longer has zero \
registrations (registrations -> {registrations})"
);
continue;
@@ -654,15 +570,15 @@ impl TypeRegistryInner {
entry
.0
.ty
.trace::<_, ()>(&mut |idx| match idx {
EngineOrModuleTypeIndex::Engine(child_id) => {
let child_id = VMSharedTypeIndex::new(child_id);
let child_index = entry_index(child_id);
let child_entry = self.entries[child_index].unwrap_occupied();
.trace::<_, ()>(&mut |child_index| match child_index {
EngineOrModuleTypeIndex::Engine(child_index) => {
let child_index = VMSharedTypeIndex::new(child_index);
let child_slab_id = shared_type_index_to_slab_id(child_index);
let child_entry = &self.entries[child_slab_id];
if child_entry.decref(
"referenced by unregistered type in TypeCollection::unregister_entry",
) {
self.drop_stack.push(child_id);
self.drop_stack.push(child_index);
}
Ok(())
}
@@ -672,12 +588,9 @@ impl TypeRegistryInner {
})
.unwrap();
log::trace!("removing {id:?} from registry");
log::trace!("removing {index:?} from registry");
self.map.remove(entry);
self.entries[i] = RegistryEntry::Vacant {
next_vacant: self.first_vacant.take(),
};
self.first_vacant = Some(id);
self.entries.dealloc(slab_id);
}
}
}
@@ -692,7 +605,7 @@ impl Drop for TypeRegistryInner {
"type registry not empty: still have registered types in self.map"
);
assert!(
self.entries.iter().all(|e| e.is_vacant()),
self.entries.is_empty(),
"type registry not empty: not all entries are vacant"
);
}
@@ -720,9 +633,8 @@ impl TypeRegistry {
/// constructor if you need to ensure that property and you don't have some
/// other mechanism already keeping the type registered.
pub fn borrow(&self, index: VMSharedTypeIndex) -> Option<impl Deref<Target = WasmFuncType>> {
let i = entry_index(index);
let id = shared_type_index_to_slab_id(index);
let inner = self.0.read().unwrap();
let e = inner.entries.get(i)?;
e.as_occupied().cloned()
inner.entries.get(id).cloned()
}
}
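
The heart of this file's change is that a `VMSharedTypeIndex` and a slab `Id` are two encodings of the same `u32`, and that `Slab::MAX_CAPACITY` being `u32::MAX - 1` keeps `u32::MAX` available for `VMSharedTypeIndex`'s reserved default value, the invariant the deleted `alloc_vacant_entry` used to assert by hand. The following standalone sketch walks through that round trip; `MockSharedTypeIndex` is a hypothetical stand-in for the real `VMSharedTypeIndex`, mirroring only the `new`/`bits` calls that appear in the diff.

use wasmtime_slab::{Id as SlabId, Slab};

/// Hypothetical stand-in for `VMSharedTypeIndex`: a plain `u32` newtype with
/// the same `new`/`bits` shape used by the conversion helpers above.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct MockSharedTypeIndex(u32);

impl MockSharedTypeIndex {
    fn new(bits: u32) -> Self {
        Self(bits)
    }
    fn bits(self) -> u32 {
        self.0
    }
}

fn shared_type_index_to_slab_id(index: MockSharedTypeIndex) -> SlabId {
    SlabId::from_raw(index.bits())
}

fn slab_id_to_shared_type_index(id: SlabId) -> MockSharedTypeIndex {
    MockSharedTypeIndex::new(id.into_raw())
}

fn main() {
    let mut entries: Slab<&str> = Slab::new();

    // Mirror the `register_new` sequence: predict the slab id, derive the
    // shared type index from it, then allocate and check the prediction held.
    let predicted = entries.next_id();
    let index = slab_id_to_shared_type_index(predicted);
    let id = entries.alloc("registered type entry");
    assert_eq!(id, predicted);

    // The conversion is a lossless round trip over the same `u32`.
    assert_eq!(shared_type_index_to_slab_id(index), id);

    // Because `Slab::MAX_CAPACITY` is `u32::MAX - 1`, a raw id never reaches
    // `u32::MAX`, which stays reserved as the default "no index" value.
    assert!(id.into_raw() < u32::MAX);
}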

scripts/publish.rs (1)

@@ -46,6 +46,7 @@ const CRATES_TO_PUBLISH: &[&str] = &[
// wasmtime
"wasmtime-asm-macros",
"wasmtime-versioned-export-macros",
"wasmtime-slab",
"wasmtime-component-util",
"wasmtime-wit-bindgen",
"wasmtime-component-macro",
