use std::alloc::{self, Layout};
use std::sync;

use nix::sys::mman::ProtFlags;

use crate::helpers::ToU64;

static ALLOCATOR: sync::Mutex<MachineAlloc> = sync::Mutex::new(MachineAlloc::empty());

/// A distinct allocator for the `MiriMachine`, allowing us to manage its
/// memory separately from that of Miri itself.
#[derive(Debug)]
pub struct MachineAlloc {
    /// Page-sized, page-aligned allocations that are handed out in 8-byte chunks.
    pages: Vec<*mut u8>,
    /// Allocations too large or too strictly aligned to share a page, with their rounded-up sizes.
    huge_allocs: Vec<(*mut u8, usize)>,
    /// Bookkeeping for each page: one byte per 8-byte chunk, nonzero while in use.
    allocated: Vec<Box<[u8]>>,
    /// The system page size, queried when the allocator is enabled.
    page_size: usize,
    /// Whether to serve allocations ourselves; if not, we defer to the global allocator.
    enabled: bool,
}

// SAFETY: We only point to heap-allocated data.
unsafe impl Send for MachineAlloc {}

impl MachineAlloc {
    // Allocation-related methods

    /// Creates the allocator in its disabled state, with no pages and a
    /// placeholder page size of 4k (the real value is queried in `enable`).
    const fn empty() -> Self {
        Self {
            pages: Vec::new(),
            huge_allocs: Vec::new(),
            allocated: Vec::new(),
            page_size: 4096,
            enabled: false,
        }
    }

    /// SAFETY: There must be no existing `MiriAllocBytes`.
    pub unsafe fn enable() {
        let mut alloc = ALLOCATOR.lock().unwrap();
        alloc.enabled = true;
        // This needs to specifically be the system page size!
        alloc.page_size = unsafe {
            let ret = libc::sysconf(libc::_SC_PAGE_SIZE);
            if ret > 0 {
                ret.try_into().unwrap()
            } else {
                4096 // fallback
            }
        }
    }

    /// Returns a vector of page addresses managed by the allocator.
    #[expect(dead_code)]
    pub fn pages() -> Vec<u64> {
        let alloc = ALLOCATOR.lock().unwrap();
        alloc.pages.iter().map(|p| p.addr().to_u64()).collect()
    }

    /// Allocates a new page, along with the bookkeeping slice that tracks
    /// which of its 8-byte chunks are in use.
    fn add_page(&mut self) {
        let page_layout =
            unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
        let page_ptr = unsafe { alloc::alloc(page_layout) };
        if page_ptr.is_null() {
            panic!("failed to allocate a page for the machine allocator")
        }
        // One bookkeeping byte per 8-byte chunk of the page.
        self.allocated.push(vec![0u8; self.page_size / 8].into_boxed_slice());
        self.pages.push(page_ptr);
    }

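    /// Rounds a layout up to this allocator's granularity: the size becomes a
    /// multiple of 8 bytes and the alignment at least 8, so that one
    /// bookkeeping byte corresponds to exactly one 8-byte chunk of a page.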
    #[inline]
    fn normalized_layout(layout: Layout) -> (usize, usize) {
        let align = if layout.align() < 8 { 8 } else { layout.align() };
        let size = layout.size().next_multiple_of(8);
        (size, align)
    }

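    /// Rounds a layout up for an allocation that gets its own pages: the size
    /// becomes a whole number of pages and the alignment at least one page.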
    #[inline]
    fn huge_normalized_layout(&self, layout: Layout) -> (usize, usize) {
        let size = layout.size().next_multiple_of(self.page_size);
        let align = std::cmp::max(layout.align(), self.page_size);
        (size, align)
    }

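    /// Allocates memory for the machine if the allocator is enabled, and
    /// falls back to the global allocator otherwise.
    ///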
    /// SAFETY: See alloc::alloc()
    #[inline]
    pub unsafe fn alloc(layout: Layout) -> *mut u8 {
        let mut alloc = ALLOCATOR.lock().unwrap();
        unsafe { if alloc.enabled { alloc.alloc_inner(layout) } else { alloc::alloc(layout) } }
    }

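    /// Same as `alloc`, but zeroes the memory before returning it.
    ///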
    /// SAFETY: See alloc::alloc_zeroed()
    pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
        let mut alloc = ALLOCATOR.lock().unwrap();
        if alloc.enabled {
            let ptr = unsafe { alloc.alloc_inner(layout) };
            if !ptr.is_null() {
                unsafe {
                    ptr.write_bytes(0, layout.size());
                }
            }
            ptr
        } else {
            unsafe { alloc::alloc_zeroed(layout) }
        }
    }

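    /// Serves a request from the allocator's own pages: small requests are
    /// placed first-fit into an existing page (adding a new page if none has
    /// room), while larger or more strictly aligned ones get their own pages.
    ///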
    /// SAFETY: See alloc::alloc()
    unsafe fn alloc_inner(&mut self, layout: Layout) -> *mut u8 {
        let (size, align) = MachineAlloc::normalized_layout(layout);

        if align > self.page_size || size > self.page_size {
            unsafe { self.alloc_multi_page(layout) }
        } else {
            // Scan every page for a free, aligned run of chunks.
            for (page, pinfo) in std::iter::zip(&mut self.pages, &mut self.allocated) {
                for idx in (0..self.page_size).step_by(align) {
                    if pinfo.len() < idx / 8 + size / 8 {
                        break;
                    }
                    if pinfo[idx / 8..idx / 8 + size / 8].iter().all(|v| *v == 0) {
                        // Mark the chunks as in use.
                        pinfo[idx / 8..idx / 8 + size / 8].fill(255);
                        unsafe {
                            let ret = page.add(idx);
                            if ret.addr() >= page.addr() + self.page_size {
                                panic!("Returning {} from page {}", ret.addr(), page.addr());
                            }
                            return ret;
                        }
                    }
                }
            }

            // We get here only if there's no space in our existing pages
            self.add_page();
            unsafe { self.alloc_inner(layout) }
        }
    }

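    /// Allocates a dedicated, page-aligned run of pages for a request that
    /// cannot share a page with other allocations.
    ///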
    /// SAFETY: See alloc::alloc()
    unsafe fn alloc_multi_page(&mut self, layout: Layout) -> *mut u8 {
        let (size, align) = self.huge_normalized_layout(layout);

        let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
        let ret = unsafe { alloc::alloc(layout) };
        self.huge_allocs.push((ret, size));
        ret
    }

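    /// Deallocates a pointer with the layout it was allocated with, routing it
    /// to the machine allocator or the global one depending on `enabled`.
    ///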
    /// SAFETY: See alloc::dealloc()
    pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
        let mut alloc = ALLOCATOR.lock().unwrap();
        unsafe {
            if alloc.enabled {
                alloc.dealloc_inner(ptr, layout);
            } else {
                alloc::dealloc(ptr, layout);
            }
        }
    }

    /// SAFETY: See alloc::dealloc()
    unsafe fn dealloc_inner(&mut self, ptr: *mut u8, layout: Layout) {
        let (size, align) = MachineAlloc::normalized_layout(layout);

        if size == 0 || ptr.is_null() {
            return;
        }

        // Offset of the pointer within its page, and the page's base address.
        let ptr_idx = ptr.addr() % self.page_size;
        let page_addr = ptr.addr() - ptr_idx;

        if align > self.page_size || size > self.page_size {
            unsafe {
                self.dealloc_multi_page(ptr, layout);
            }
        } else {
            // Find the page this pointer belongs to and mark its chunks as free.
            let pinfo = std::iter::zip(&mut self.pages, &mut self.allocated)
                .find(|(page, _)| page.addr() == page_addr);
            let Some((_, pinfo)) = pinfo else {
                panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.pages)
            };

            // Everything is always aligned to at least 8 bytes, so this is ok.
            pinfo[ptr_idx / 8..ptr_idx / 8 + size / 8].fill(0);
        }

        // Return any pages that are now completely unused to the global allocator.
        let mut free = vec![];
        let page_layout =
            unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
        for (idx, pinfo) in self.allocated.iter().enumerate() {
            if pinfo.iter().all(|p| *p == 0) {
                free.push(idx);
            }
        }
        // Remove the highest indices first so the lower ones stay valid.
        free.reverse();
        for idx in free {
            let _ = self.allocated.remove(idx);
            unsafe {
                alloc::dealloc(self.pages.remove(idx), page_layout);
            }
        }
    }

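    /// Frees an allocation that was served by `alloc_multi_page`.
    ///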
    /// SAFETY: See alloc::dealloc()
    unsafe fn dealloc_multi_page(&mut self, ptr: *mut u8, layout: Layout) {
        let (idx, _) = self
            .huge_allocs
            .iter()
            .enumerate()
            .find(|pg| ptr.addr() == pg.1.0.addr())
            .expect("Freeing unallocated pages");
        let ptr = self.huge_allocs.remove(idx).0;
        let (size, align) = self.huge_normalized_layout(layout);
        unsafe {
            let layout = Layout::from_size_align_unchecked(size, align);
            alloc::dealloc(ptr, layout);
        }
    }

    // Protection-related methods

    /// Protects all owned memory, preventing accesses.
    ///
    /// SAFETY: Accessing memory after this point will result in a segfault
    /// unless it is first unprotected.
    #[expect(dead_code)]
    pub unsafe fn prepare_ffi() -> Result<(), nix::errno::Errno> {
        let mut alloc = ALLOCATOR.lock().unwrap();
        unsafe {
            alloc.mprotect(ProtFlags::PROT_NONE)?;
        }
        Ok(())
    }

    /// Unprotects all owned memory by setting it to RW. Erroring here is very
    /// likely unrecoverable, so this panics if applying those permissions
    /// fails.
    #[expect(dead_code)]
    pub fn unprep_ffi() {
        let mut alloc = ALLOCATOR.lock().unwrap();
        let default_flags = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE;
        unsafe {
            alloc.mprotect(default_flags).unwrap();
        }
    }

    /// Applies `prot` to every page managed by the allocator.
    ///
    /// SAFETY: Accessing memory in violation of the protection flags will
    /// trigger a segfault.
    unsafe fn mprotect(&mut self, prot: ProtFlags) -> Result<(), nix::errno::Errno> {
        for &pg in &self.pages {
            unsafe {
                // We already know only non-null ptrs are pushed to self.pages
                let addr: std::ptr::NonNull<std::ffi::c_void> =
                    std::ptr::NonNull::new_unchecked(pg.cast());
                nix::sys::mman::mprotect(addr, self.page_size, prot)?;
            }
        }
        for &(hpg, size) in &self.huge_allocs {
            unsafe {
                let addr = std::ptr::NonNull::new_unchecked(hpg.cast());
                nix::sys::mman::mprotect(addr, size, prot)?;
            }
        }
        Ok(())
    }
}
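
// A minimal usage sketch of the allocator, for illustration only. It assumes
// the precondition of `enable()` holds in the test process (no `MiriAllocBytes`
// exist yet) and only exercises the small-allocation path; the test names are
// illustrative.
#[cfg(test)]
mod tests {
    use std::alloc::Layout;

    use super::MachineAlloc;

    #[test]
    fn small_alloc_roundtrip() {
        // SAFETY: No `MiriAllocBytes` exist in this test process.
        unsafe { MachineAlloc::enable() };

        let layout = Layout::from_size_align(16, 8).unwrap();
        // SAFETY: `layout` has nonzero size.
        let ptr = unsafe { MachineAlloc::alloc_zeroed(layout) };
        assert!(!ptr.is_null());

        // SAFETY: `ptr` points to 16 freshly allocated, zeroed bytes.
        unsafe {
            assert_eq!(*ptr, 0);
            ptr.write_bytes(0xab, layout.size());
        }

        // SAFETY: `ptr` was allocated above with this exact layout.
        unsafe { MachineAlloc::dealloc(ptr, layout) };
    }
}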