
Merge pull request from GHSA-44mr-8vmm-wjhg

This ensures that memories, even with zero contents, still have the
necessary virtual mappings as required by the code generator to report
out-of-bounds reads/writes.
commit e60c374290 (tag: release-2.0.0)
Author: Alex Crichton
Committed by: GitHub
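Some background on why the mappings matter: for memories compiled with a static memory plan, Wasmtime's code generator can elide explicit bounds checks entirely, relying on the slot's virtual-memory reservation plus guard region to turn any out-of-bounds access into a fault that the runtime reports as a trap. A minimal sketch of that soundness condition, using hypothetical names rather than Wasmtime's actual internals:

/// Illustrative model only (not Wasmtime code): a pooling-allocator slot
/// described by how much virtual address space is reserved for the memory
/// and how large the inaccessible guard region after it is.
struct Slot {
    reservation: u64,
    guard: u64,
}

impl Slot {
    /// A 32-bit wasm access computes `base + index + offset`, where the
    /// dynamic `index` can be anything up to `u32::MAX`. Skipping the
    /// bounds check is only sound if every such address still lands in
    /// the reserved-plus-guard range, so it faults instead of silently
    /// touching a neighboring slot.
    fn unchecked_access_is_sound(&self, max_offset: u64) -> bool {
        u64::from(u32::MAX) + max_offset < self.reservation + self.guard
    }
}

fn main() {
    // E.g. a 4 GiB reservation with a 2 GiB guard covers all accesses
    // whose static offsets are below 2 GiB...
    let full = Slot { reservation: 4 << 30, guard: 2 << 30 };
    assert!(full.unchecked_access_is_sound(0x7fff_ffff));

    // ...whereas reserving nothing for a zero-page memory (the pre-fix
    // behavior with `memory_pages: 0`) makes no unchecked access sound.
    let empty = Slot { reservation: 0, guard: 0 };
    assert!(!empty.unchecked_access_is_sound(0));
}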
2 changed files:
  crates/runtime/src/instance/allocator/pooling.rs | 34
  tests/all/pooling_allocator.rs | 41

diff --git a/crates/runtime/src/instance/allocator/pooling.rs b/crates/runtime/src/instance/allocator/pooling.rs
--- a/crates/runtime/src/instance/allocator/pooling.rs
+++ b/crates/runtime/src/instance/allocator/pooling.rs
@@ -19,8 +19,8 @@ use std::convert::TryFrom;
 use std::mem;
 use std::sync::Mutex;
 use wasmtime_environ::{
-    DefinedMemoryIndex, DefinedTableIndex, HostPtr, Module, PrimaryMap, Tunables, VMOffsets,
-    WASM_PAGE_SIZE,
+    DefinedMemoryIndex, DefinedTableIndex, HostPtr, MemoryStyle, Module, PrimaryMap, Tunables,
+    VMOffsets, WASM_PAGE_SIZE,
 };

 mod index_allocator;
@@ -386,6 +386,20 @@ impl InstancePool {
             .defined_memory_index(memory_index)
             .expect("should be a defined memory since we skipped imported ones");

+        match plan.style {
+            MemoryStyle::Static { bound } => {
+                let bound = bound * u64::from(WASM_PAGE_SIZE);
+                if bound < self.memories.static_memory_bound {
+                    return Err(InstantiationError::Resource(anyhow!(
+                        "static bound of {bound:x} bytes incompatible with \
+                         reservation of {:x} bytes",
+                        self.memories.static_memory_bound,
+                    )));
+                }
+            }
+            MemoryStyle::Dynamic { .. } => {}
+        }
+
         let memory = unsafe {
             std::slice::from_raw_parts_mut(
                 self.memories.get_base(instance_index, defined_index),
@@ -658,6 +672,7 @@ struct MemoryPool {
     initial_memory_offset: usize,
     max_memories: usize,
     max_instances: usize,
+    static_memory_bound: u64,
 }

 impl MemoryPool {
@@ -679,15 +694,11 @@ impl MemoryPool {
             );
         }

-        let memory_size = if instance_limits.memory_pages > 0 {
-            usize::try_from(
-                u64::from(tunables.static_memory_bound) * u64::from(WASM_PAGE_SIZE)
-                    + tunables.static_memory_offset_guard_size,
-            )
-            .map_err(|_| anyhow!("memory reservation size exceeds addressable memory"))?
-        } else {
-            0
-        };
+        let static_memory_bound =
+            u64::from(tunables.static_memory_bound) * u64::from(WASM_PAGE_SIZE);
+        let memory_size =
+            usize::try_from(static_memory_bound + tunables.static_memory_offset_guard_size)
+                .map_err(|_| anyhow!("memory reservation size exceeds addressable memory"))?;

         assert!(
             memory_size % crate::page_size() == 0,
@@ -745,6 +756,7 @@ impl MemoryPool {
             max_memories,
             max_instances,
             max_memory_size: (instance_limits.memory_pages as usize) * (WASM_PAGE_SIZE as usize),
+            static_memory_bound,
         };

         Ok(pool)
diff --git a/tests/all/pooling_allocator.rs b/tests/all/pooling_allocator.rs
--- a/tests/all/pooling_allocator.rs
+++ b/tests/all/pooling_allocator.rs
@@ -721,3 +721,44 @@ configured maximum of 16 bytes; breakdown of allocation requirement:
     Ok(())
 }

+#[test]
+fn zero_memory_pages_disallows_oob() -> Result<()> {
+    let mut config = Config::new();
+    config.allocation_strategy(InstanceAllocationStrategy::Pooling {
+        strategy: PoolingAllocationStrategy::NextAvailable,
+        instance_limits: InstanceLimits {
+            count: 1,
+            memory_pages: 0,
+            ..Default::default()
+        },
+    });
+
+    let engine = Engine::new(&config)?;
+    let module = Module::new(
+        &engine,
+        r#"
+            (module
+                (memory 0)
+
+                (func (export "load") (param i32) (result i32)
+                    local.get 0
+                    i32.load)
+                (func (export "store") (param i32)
+                    local.get 0
+                    local.get 0
+                    i32.store)
+            )
+        "#,
+    )?;
+
+    let mut store = Store::new(&engine, ());
+    let instance = Instance::new(&mut store, &module, &[])?;
+    let load32 = instance.get_typed_func::<i32, i32, _>(&mut store, "load")?;
+    let store32 = instance.get_typed_func::<i32, (), _>(&mut store, "store")?;
+
+    for i in 0..31 {
+        assert!(load32.call(&mut store, 1 << i).is_err());
+        assert!(store32.call(&mut store, 1 << i).is_err());
+    }
+    Ok(())
+}
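One note on the probe pattern in this test: `1 << i` for `i` in `0..31` sweeps power-of-two addresses from 1 byte up to 1 GiB while staying in positive `i32` range, and since the memory has zero pages, every probe is out of bounds; each call must therefore return a trap rather than quietly reading or writing whatever previously happened to be mapped at that offset in the pool.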
