Browse Source

Fix libcall relocations for precompiled modules (#5608)

* Fix libcall relocations for precompiled modules

This commit fixes some asserts and support for relocation libcalls in
precompiled modules loaded from disk. In doing so this reworks how mmaps
are managed for files from disk. All non-file-backed `Mmap` entries are
read/write but file-backed versions were readonly. This commit changes
this such that all `Mmap` objects, even if they're file-backed, start as
read/write. The file-based versions all use copy-on-write to preserve
the private-ness of the mapping.

This is not intended to change any functionality. Instead it may leave
some more memory writable after a module is loaded, but the text
section, for example, is still left as read/execute when loading is
finished. Additionally this makes modules compiled in memory more
consistent with modules loaded from disk.

* Update a comment

* Force images to become readonly during publish

This marks compiled images as entirely readonly during the
`CodeMemory::publish` step which happens just before the text section
becomes executable. This ensures that all images, no matter where they
come from, are guaranteed frozen before they start executing.
pull/5638/head
Alex Crichton 2 years ago
committed by GitHub
parent
commit
4ad86752de
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
  1. 25
      crates/jit/src/code_memory.rs
  2. 110
      crates/runtime/src/mmap.rs
  3. 22
      crates/runtime/src/mmap_vec.rs
  4. 46
      tests/all/module.rs

25
crates/jit/src/code_memory.rs

@ -237,8 +237,22 @@ impl CodeMemory {
// both the actual unwinding tables as well as the validity of the
// pointers we pass in itself.
unsafe {
// First, if necessary, apply relocations. This can happen for
// things like libcalls which happen late in the lowering process
// that don't go through the Wasm-based libcalls layer that's
// indirected through the `VMContext`. Note that most modules won't
// have relocations, so this typically doesn't do anything.
self.apply_relocations()?;
// Next freeze the contents of this image by making all of the
// memory readonly. Nothing after this point should ever be modified
// so commit everything. For a compiled-in-memory image this will
// mean IPIs to evict writable mappings from other cores. For
// loaded-from-disk images this shouldn't result in IPIs so long as
// there weren't any relocations because nothing should have
// otherwise written to the image at any point either.
self.mmap.make_readonly(0..self.mmap.len())?;
let text = self.text();
// Clear the newly allocated code from cache if the processor requires it
@ -248,9 +262,7 @@ impl CodeMemory {
icache_coherence::clear_cache(text.as_ptr().cast(), text.len())
.expect("Failed cache clear");
// Switch the executable portion from read/write to
// read/execute, notably not using read/write/execute to prevent
// modifications.
// Switch the executable portion from readonly to read/execute.
self.mmap
.make_executable(self.text.clone(), self.enable_branch_protection)
.expect("unable to make memory executable");
@ -273,13 +285,6 @@ impl CodeMemory {
return Ok(());
}
// Mmaps currently all start as readonly so before updating relocations
// the mapping needs to be made writable first. Note that this isn't
// reset back to readonly since the `make_executable` call, which
// happens after this, will implicitly remove the writable bit and leave
// it as just read/execute.
self.mmap.make_writable(self.text.clone())?;
for (offset, libcall) in self.relocations.iter() {
let offset = self.text.start + offset;
let libcall = match libcall {

110
crates/runtime/src/mmap.rs

@ -67,7 +67,7 @@ impl Mmap {
rustix::mm::mmap(
ptr::null_mut(),
len,
rustix::mm::ProtFlags::READ,
rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
rustix::mm::MapFlags::PRIVATE,
&file,
0,
@ -109,12 +109,16 @@ impl Mmap {
.len();
let len = usize::try_from(len).map_err(|_| anyhow!("file too large to map"))?;
// Create a file mapping that allows PAGE_EXECUTE_READ which
// we'll be using for mapped text sections in ELF images later.
// Create a file mapping that allows PAGE_EXECUTE_WRITECOPY.
// This enables up to these permissions, but we won't leave all
// of these permissions active at all times. Execution is
// necessary for the generated code from Cranelift and the
// WRITECOPY part is needed for possibly resolving relocations,
// but otherwise writes don't happen.
let mapping = CreateFileMappingW(
file.as_raw_handle() as isize,
ptr::null_mut(),
PAGE_EXECUTE_READ,
PAGE_EXECUTE_WRITECOPY,
0,
0,
ptr::null(),
@ -124,9 +128,16 @@ impl Mmap {
.context("failed to create file mapping");
}
// Create a view for the entire file using `FILE_MAP_EXECUTE`
// here so that we can later change the text section to execute.
let ptr = MapViewOfFile(mapping, FILE_MAP_READ | FILE_MAP_EXECUTE, 0, 0, len);
// Create a view for the entire file using all our requisite
// permissions so that we can change the virtual permissions
// later on.
let ptr = MapViewOfFile(
mapping,
FILE_MAP_READ | FILE_MAP_EXECUTE | FILE_MAP_COPY,
0,
0,
len,
);
let err = io::Error::last_os_error();
CloseHandle(mapping);
if ptr.is_null() {
@ -140,10 +151,10 @@ impl Mmap {
file: Some(Arc::new(file)),
};
// Protect the entire file as PAGE_READONLY to start (i.e.
// Protect the entire file as PAGE_WRITECOPY to start (i.e.
// remove the execute bit)
let mut old = 0;
if VirtualProtect(ret.ptr as *mut _, ret.len, PAGE_READONLY, &mut old) == 0 {
if VirtualProtect(ret.ptr as *mut _, ret.len, PAGE_WRITECOPY, &mut old) == 0 {
return Err(io::Error::last_os_error())
.context("failed change pages to `PAGE_READONLY`");
}
@ -340,7 +351,6 @@ impl Mmap {
/// Return the allocated memory as a mutable slice of u8.
pub fn as_mut_slice(&mut self) -> &mut [u8] {
debug_assert!(!self.is_readonly());
unsafe { slice::from_raw_parts_mut(self.ptr as *mut u8, self.len) }
}
@ -364,53 +374,6 @@ impl Mmap {
self.len() == 0
}
/// Returns whether the underlying mapping is readonly, meaning that
/// attempts to write will fault.
pub fn is_readonly(&self) -> bool {
    // File-backed-ness doubles as the readonly flag: only mappings created
    // from a file are mapped without write permission.
    // NOTE(review): this assumes file-backed mappings are never later made
    // writable -- confirm against `make_writable` callers.
    self.file.is_some()
}
/// Makes the specified `range` within this `Mmap` to be read/write.
///
/// # Safety
///
/// Callers must guarantee that nothing is concurrently executing or
/// relying on the immutability of the memory in `range` while the
/// protection change takes effect.
pub unsafe fn make_writable(&self, range: Range<usize>) -> Result<()> {
    // Validate that `range` is well-formed and lies entirely within this
    // mapping before touching any protection flags.
    assert!(range.start <= self.len());
    assert!(range.end <= self.len());
    assert!(range.start <= range.end);
    // Protection changes are page-granular, so an unaligned start would
    // silently affect bytes before `range.start` as well.
    assert!(
        range.start % crate::page_size() == 0,
        "changing of protections isn't page-aligned",
    );
    let base = self.as_ptr().add(range.start) as *mut _;
    let len = range.end - range.start;
    // On Windows when we have a file mapping we need to specifically use
    // `PAGE_WRITECOPY` to ensure that pages are COW'd into place because
    // we don't want our modifications to go back to the original file.
    #[cfg(windows)]
    {
        use std::io;
        use windows_sys::Win32::System::Memory::*;
        // `old` receives the previous protection; `VirtualProtect`
        // requires a non-null pointer even though the value is unused.
        let mut old = 0;
        let result = if self.file.is_some() {
            VirtualProtect(base, len, PAGE_WRITECOPY, &mut old)
        } else {
            VirtualProtect(base, len, PAGE_READWRITE, &mut old)
        };
        if result == 0 {
            return Err(io::Error::last_os_error().into());
        }
    }
    #[cfg(not(windows))]
    {
        use rustix::mm::{mprotect, MprotectFlags};
        // On POSIX a plain `mprotect` suffices; the mapping is created
        // with `MapFlags::PRIVATE` so file-backed pages copy-on-write.
        mprotect(base, len, MprotectFlags::READ | MprotectFlags::WRITE)?;
    }
    Ok(())
}
/// Makes the specified `range` within this `Mmap` to be read/execute.
pub unsafe fn make_executable(
&self,
@ -471,6 +434,39 @@ impl Mmap {
Ok(())
}
/// Makes the specified `range` within this `Mmap` to be readonly.
///
/// # Safety
///
/// Callers must ensure nothing still expects to write to `range` once
/// the protection change lands, since subsequent stores will fault.
pub unsafe fn make_readonly(&self, range: Range<usize>) -> Result<()> {
    // Validate that `range` is well-formed and lies entirely within this
    // mapping before touching any protection flags.
    assert!(range.start <= self.len());
    assert!(range.end <= self.len());
    assert!(range.start <= range.end);
    // Protection changes are page-granular, so an unaligned start would
    // silently affect bytes before `range.start` as well.
    assert!(
        range.start % crate::page_size() == 0,
        "changing of protections isn't page-aligned",
    );
    let base = self.as_ptr().add(range.start) as *mut _;
    let len = range.end - range.start;
    #[cfg(windows)]
    {
        use std::io;
        use windows_sys::Win32::System::Memory::*;
        // `old` receives the previous protection; `VirtualProtect`
        // requires a non-null pointer even though the value is unused.
        let mut old = 0;
        let result = VirtualProtect(base, len, PAGE_READONLY, &mut old);
        if result == 0 {
            return Err(io::Error::last_os_error().into());
        }
    }
    #[cfg(not(windows))]
    {
        use rustix::mm::{mprotect, MprotectFlags};
        mprotect(base, len, MprotectFlags::READ)?;
    }
    Ok(())
}
/// Returns the underlying file that this mmap is mapping, if present.
pub fn original_file(&self) -> Option<&Arc<File>> {
self.file.as_ref()

22
crates/runtime/src/mmap_vec.rs

@ -68,11 +68,6 @@ impl MmapVec {
Ok(MmapVec::new(mmap, len))
}
/// Returns whether the original mmap was created from a readonly mapping.
pub fn is_readonly(&self) -> bool {
    // Delegates to the underlying `Mmap`, which reports readonly based on
    // whether the mapping is file-backed.
    self.mmap.is_readonly()
}
/// Splits the collection into two at the given index.
///
/// Returns a separate `MmapVec` which shares the underlying mapping, but
@ -95,24 +90,28 @@ impl MmapVec {
return ret;
}
/// Makes the specified `range` within this `mmap` to be read/write.
///
/// The `range` is relative to this `MmapVec`'s view and is translated to
/// an absolute range in the underlying mapping before delegating to
/// `Mmap::make_writable`.
///
/// # Safety
///
/// See `Mmap::make_writable`: callers must ensure nothing relies on the
/// memory in `range` remaining read-only.
pub unsafe fn make_writable(&self, range: Range<usize>) -> Result<()> {
    // Validate the range against this view before translating it,
    // matching the checks performed by `make_executable` and
    // `make_readonly`. Without these, an out-of-view range would silently
    // change protections on another `MmapVec` sharing the same mapping.
    assert!(range.start <= range.end);
    assert!(range.end <= self.range.len());
    self.mmap
        .make_writable(range.start + self.range.start..range.end + self.range.start)
}
/// Marks `range` (relative to this view) of the mapping as read/execute.
///
/// # Safety
///
/// See `Mmap::make_executable`; the memory in `range` must hold valid
/// code and must not be written concurrently.
pub unsafe fn make_executable(
    &self,
    range: Range<usize>,
    enable_branch_protection: bool,
) -> Result<()> {
    // Reject malformed or out-of-view ranges up front.
    assert!(range.start <= range.end);
    assert!(range.end <= self.range.len());
    // Translate view-relative offsets into offsets within the whole
    // underlying mapping.
    let start = self.range.start + range.start;
    let end = self.range.start + range.end;
    self.mmap.make_executable(start..end, enable_branch_protection)
}
/// Marks `range` (relative to this view) of the mapping as read-only.
pub unsafe fn make_readonly(&self, range: Range<usize>) -> Result<()> {
    // Reject malformed or out-of-view ranges up front.
    assert!(range.start <= range.end);
    assert!(range.end <= self.range.len());
    // Translate view-relative offsets into offsets within the whole
    // underlying mapping.
    let start = self.range.start + range.start;
    let end = self.range.start + range.end;
    self.mmap.make_readonly(start..end)
}
/// Returns the underlying file that this mmap is mapping, if present.
pub fn original_file(&self) -> Option<&Arc<File>> {
self.mmap.original_file()
@ -135,7 +134,6 @@ impl Deref for MmapVec {
impl DerefMut for MmapVec {
fn deref_mut(&mut self) -> &mut [u8] {
debug_assert!(!self.is_readonly());
// SAFETY: The underlying mmap is protected behind an `Arc` which means
there can be many references to it. We are guaranteed, though,
// that each reference to the underlying `mmap` has a disjoint `range`

46
tests/all/module.rs

@ -168,3 +168,49 @@ fn serialize_not_overly_massive() -> Result<()> {
Ok(())
}
// Compiles a module with SSE4.1 disabled in Cranelift, which forces wasm
// instructions such as `f32.ceil` through libcalls rather than native
// instructions. SIMD is turned off as well because the SIMD proposal
// otherwise requires SSE4.1 to be enabled.
//
// The same artifact is then loaded three different ways -- compiled
// in-process, deserialized from bytes, and deserialized from a file -- and
// each resulting module is checked to instantiate and execute correctly.
#[test]
#[cfg_attr(not(target_arch = "x86_64"), ignore)]
fn missing_sse_and_floats_still_works() -> Result<()> {
    let mut config = Config::new();
    config.wasm_simd(false);
    unsafe {
        config.cranelift_flag_set("has_sse41", "false");
    }
    let engine = Engine::new(&config)?;

    let wat = r#"
        (module
            (func (export "f32.ceil") (param f32) (result f32)
                local.get 0
                f32.ceil)
        )
    "#;
    let compiled = Module::new(&engine, wat)?;

    // Round-trip the module through its serialized form, both in memory
    // and via a file on disk.
    let bytes = compiled.serialize()?;
    let from_bytes = unsafe { Module::deserialize(&engine, &bytes)? };
    let tmpdir = tempfile::TempDir::new()?;
    let path = tmpdir.path().join("module.cwasm");
    std::fs::write(&path, &bytes)?;
    let from_file = unsafe { Module::deserialize_file(&engine, &path)? };

    for module in [compiled, from_bytes, from_file] {
        let mut store = Store::new(&engine, ());
        let instance = Instance::new(&mut store, &module, &[])?;
        let ceil = instance.get_typed_func::<f32, f32>(&mut store, "f32.ceil")?;
        for input in [1.0f32, 2.3, -1.3] {
            assert_eq!(ceil.call(&mut store, input)?, input.ceil());
        }
    }
    Ok(())
}

Loading…
Cancel
Save