From e2a261204cfb7a6a097158e3c9e0c3db07ddef25 Mon Sep 17 00:00:00 2001 From: Joshua Liebow-Feeser Date: Fri, 23 Feb 2018 18:17:23 -0800 Subject: [PATCH] elfmalloc: Use mmap-alloc's large-align feature for aligns over one page - Use mmap-alloc's large-align feature, which enables passing alignments which are larger than the page size - Propagate allocation failures (rather than asserting/unwrapping/etc) in more places --- elfmalloc/Cargo.toml | 6 +- elfmalloc/src/alloc_impl.rs | 16 ++-- elfmalloc/src/bin/bench.rs | 7 +- elfmalloc/src/frontends.rs | 151 +++++++++++++++++------------------ elfmalloc/src/general.rs | 152 ++++++++++++++++++------------------ elfmalloc/src/rust_alloc.rs | 84 ++++++++++---------- elfmalloc/src/slag.rs | 138 +++++++++++++++----------------- elfmalloc/src/sources.rs | 117 +++++++++++++-------------- elfmalloc/src/utils.rs | 121 ++++++++++++++-------------- elfmalloc/travis.sh | 4 +- 10 files changed, 393 insertions(+), 403 deletions(-) diff --git a/elfmalloc/Cargo.toml b/elfmalloc/Cargo.toml index 3d33cca..8b80afb 100644 --- a/elfmalloc/Cargo.toml +++ b/elfmalloc/Cargo.toml @@ -1,4 +1,4 @@ -# Copyright 2017 the authors. See the 'Copyright and license' section of the +# Copyright 2017-2018 the authors. See the 'Copyright and license' section of the # README.md file at the top-level directory of this repository. # # Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or @@ -49,11 +49,11 @@ alloc-fmt = { path = "../alloc-fmt" } alloc-tls = { path = "../alloc-tls" } bagpipe = { path = "../bagpipe" } bsalloc = "0.1.0" -lazy_static = "1.0.0" +lazy_static = { version = "1.0.0", features = ["spin_no_std"] } libc = "0.2" log = "0.3.8" malloc-bind = { path = "../malloc-bind" } -mmap-alloc = { path = "../mmap-alloc" } +mmap-alloc = { path = "../mmap-alloc", features = ["large-align"] } num_cpus = "1.5" smallvec = "0.4.3" sysconf = "0.3.1" diff --git a/elfmalloc/src/alloc_impl.rs b/elfmalloc/src/alloc_impl.rs index 53be8df..7092fcc 100644 --- a/elfmalloc/src/alloc_impl.rs +++ b/elfmalloc/src/alloc_impl.rs @@ -1,4 +1,4 @@ -// Copyright 2017 the authors. See the 'Copyright and license' section of the +// Copyright 2017-2018 the authors. See the 'Copyright and license' section of the // README.md file at the top-level directory of this repository. // // Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or @@ -25,6 +25,8 @@ use super::general::global; use std::mem; #[cfg(feature = "c-api")] use std::intrinsics::unlikely; +#[cfg(feature = "c-api")] +use std::ptr; #[cfg(feature = "c-api")] use self::libc::{size_t, c_void}; @@ -39,10 +41,10 @@ unsafe impl<'a> Alloc for &'a ElfMallocGlobal { // two up to 1MiB are aligned to their size. Past that size, only page-alignment is // guaranteed. 
if l.size().is_power_of_two() || l.align() <= mem::size_of::() { - Ok(global::alloc(l.size())) + global::alloc(l.size()) } else { - Ok(global::alloc(l.size().next_power_of_two())) - } + global::alloc(l.size().next_power_of_two()) + }.ok_or(AllocErr::Exhausted { request: l }) } unsafe fn dealloc(&mut self, p: *mut u8, _l: Layout) { @@ -50,14 +52,14 @@ unsafe impl<'a> Alloc for &'a ElfMallocGlobal { } unsafe fn realloc(&mut self, p: *mut u8, _l1: Layout, l2: Layout) -> Result<*mut u8, AllocErr> { - Ok(global::aligned_realloc(p, l2.size(), l2.align())) + global::aligned_realloc(p, l2.size(), l2.align()).ok_or(AllocErr::Exhausted { request: l2 }) } } #[cfg(feature = "c-api")] unsafe impl Malloc for ElfMallocGlobal { unsafe fn c_malloc(&self, size: size_t) -> *mut c_void { - let p = global::alloc(size as usize) as *mut c_void; + let p = global::alloc(size as usize).unwrap_or(ptr::null_mut()) as *mut c_void; alloc_debug_assert_eq!((p as usize) % MIN_ALIGN, 0, "object does not have the required alignment of {}: {:?}", @@ -82,7 +84,7 @@ unsafe impl Malloc for ElfMallocGlobal { "object does not have the required alignment of {}: {:?}", MIN_ALIGN, p); - global::realloc(p as *mut u8, new_size as usize) as *mut c_void + global::realloc(p as *mut u8, new_size as usize).unwrap_or(ptr::null_mut()) as *mut c_void } } diff --git a/elfmalloc/src/bin/bench.rs b/elfmalloc/src/bin/bench.rs index fe8a004..2ffd520 100644 --- a/elfmalloc/src/bin/bench.rs +++ b/elfmalloc/src/bin/bench.rs @@ -1,4 +1,4 @@ -// Copyright 2017 the authors. See the 'Copyright and license' section of the +// Copyright 2017-2018 the authors. See the 'Copyright and license' section of the // README.md file at the top-level directory of this repository. // // Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or @@ -101,11 +101,12 @@ unsafe impl Send for ElfClone {} impl AllocLike for ElfClone { type Item = T; fn create() -> Self { - ElfClone(DynamicAllocator::new(), marker::PhantomData) + ElfClone(DynamicAllocator::new().unwrap(), marker::PhantomData) } unsafe fn allocate(&mut self) -> *mut T { - self.0.alloc(mem::size_of::()) as *mut T + // TODO: Do something other than unwrap? + self.0.alloc(mem::size_of::()).unwrap() as *mut T } unsafe fn deallocate(&mut self, item: *mut T) { diff --git a/elfmalloc/src/frontends.rs b/elfmalloc/src/frontends.rs index 63b99b3..defc5b6 100644 --- a/elfmalloc/src/frontends.rs +++ b/elfmalloc/src/frontends.rs @@ -1,4 +1,4 @@ -// Copyright 2017 the authors. See the 'Copyright and license' section of the +// Copyright 2017-2018 the authors. See the 'Copyright and license' section of the // README.md file at the top-level directory of this repository. // // Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or @@ -12,9 +12,11 @@ //! a particular object size. These object-specific allocators can be used as a specialized //! allocator, but their main use-case is as a building block for a general dynamic allocator. This //! latter task is implemented in the `general` module. 
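A minimal sketch of the error-propagation pattern introduced in the `alloc_impl.rs` hunk above, using illustrative stand-ins rather than the crate's real types (`raw_alloc`, `Exhausted`, and the 1 MiB cutoff are made up for the sketch): the raw allocation path now yields `Option<*mut u8>`, and the `Alloc` boundary turns `None` into an error value instead of unwrapping.

```rust
#[derive(Debug)]
struct Exhausted {
    request: usize,
}

// Stand-in for `global::alloc(size)`: pretend requests over 1 MiB fail.
fn raw_alloc(size: usize) -> Option<*mut u8> {
    if size > (1 << 20) {
        return None;
    }
    let mut buf = vec![0u8; size];
    let ptr = buf.as_mut_ptr();
    std::mem::forget(buf); // leak on purpose; the sketch only needs a pointer
    Some(ptr)
}

// Same shape as `global::alloc(l.size()).ok_or(AllocErr::Exhausted { request: l })`.
fn alloc_or_err(size: usize) -> Result<*mut u8, Exhausted> {
    raw_alloc(size).ok_or(Exhausted { request: size })
}

fn main() {
    assert!(alloc_or_err(64).is_ok());
    assert!(alloc_or_err(2 << 20).is_err());
}
```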
+extern crate alloc; +use alloc::allocator::{Alloc, Layout}; use super::slag::*; use super::sources::MmapSource; -use super::utils::{likely, OwnedArray, LazyInitializable, mmap}; +use super::utils::{self, likely, OwnedArray, LazyInitializable, MMAP}; use super::alloc_type::AllocType; use std::marker::PhantomData; use std::mem; @@ -22,7 +24,7 @@ use std::ptr; use std::cmp; pub trait Frontend: LazyInitializable + Clone { - unsafe fn alloc(&mut self) -> *mut u8; + unsafe fn alloc(&mut self) -> Option<*mut u8>; unsafe fn free(&mut self, item: *mut u8); } @@ -57,20 +59,20 @@ impl Drop for LocalCache { } impl Clone for LocalCache { fn clone(&self) -> LocalCache { - LocalCache::new(self.alloc.clone()) + LocalCache::new(self.alloc.clone()).expect("failed to allocate new LocalCache") } } impl LocalCache { - fn new(mut alloc: SlagAllocator) -> Self { + fn new(mut alloc: SlagAllocator) -> Option { unsafe { - let stack = PtrStack::new((*alloc.m).n_objects); - let iter = alloc.refresh(); - LocalCache { + let stack = PtrStack::new((*alloc.m).n_objects)?; + let iter = alloc.refresh()?; + Some(LocalCache { alloc: alloc, vals: stack, iter: iter, - } + }) } } } @@ -84,14 +86,14 @@ impl Frontend for LocalCache { } } - unsafe fn alloc(&mut self) -> *mut u8 { + unsafe fn alloc(&mut self) -> Option<*mut u8> { self.vals .pop() .or_else(|| self.iter.next()) - .unwrap_or_else(|| { - let next_iter = self.alloc.refresh(); + .or_else(|| { + let next_iter = self.alloc.refresh()?; self.iter = next_iter; - self.iter.next().expect("New iterator should have values") + Some(self.iter.next().expect("New iterator should have values")) }) } } @@ -115,16 +117,16 @@ pub struct MagazineCache { impl LazyInitializable for MagazineCache { type Params = (*mut Metadata, usize, CA, RevocablePipe); - fn init(&(meta, decommit, ref page_alloc, ref avail): &Self::Params) -> Self { - let salloc = SlagAllocator::partial_new(meta, decommit, page_alloc.clone(), avail.clone()); + fn init(&(meta, decommit, ref page_alloc, ref avail): &Self::Params) -> Option { + let salloc = SlagAllocator::partial_new(meta, decommit, page_alloc.clone(), avail.clone())?; Self::new(salloc) } } impl LazyInitializable for LocalCache { type Params = (*mut Metadata, usize, CA, RevocablePipe); - fn init(&(meta, decommit, ref page_alloc, ref avail): &Self::Params) -> Self { - let salloc = SlagAllocator::partial_new(meta, decommit, page_alloc.clone(), avail.clone()); + fn init(&(meta, decommit, ref page_alloc, ref avail): &Self::Params) -> Option { + let salloc = SlagAllocator::partial_new(meta, decommit, page_alloc.clone(), avail.clone())?; Self::new(salloc) } } @@ -148,26 +150,26 @@ impl Drop for MagazineCache { impl Clone for MagazineCache { fn clone(&self) -> Self { - MagazineCache::new_sized(self.alloc.clone(), self.stack_size) + MagazineCache::new_sized(self.alloc.clone(), self.stack_size).expect("failed to allocate new MagazineCache") } } impl MagazineCache { - pub fn new_sized(mut alloc: SlagAllocator, magazine_size: usize) -> Self { + pub fn new_sized(mut alloc: SlagAllocator, magazine_size: usize) -> Option { alloc_assert!(magazine_size > 0); - let s = PtrStack::new(magazine_size); - let iter = unsafe { alloc.refresh() }; - let buckets = Coalescer::new(magazine_size * 2); - MagazineCache { + let s = PtrStack::new(magazine_size)?; + let iter = unsafe { alloc.refresh()? 
}; + let buckets = Coalescer::new(magazine_size * 2)?; + Some(MagazineCache { stack_size: magazine_size, s: s, iter: iter, alloc: alloc, coalescer: buckets, - } + }) } - pub fn new(alloc: SlagAllocator) -> Self { + pub fn new(alloc: SlagAllocator) -> Option { use std::cmp; let object_size = unsafe { (*alloc.m).object_size }; const CUTOFF: usize = 32 << 10; @@ -183,11 +185,11 @@ impl MagazineCache { /// /// This amounts to getting memory from the current alloc iterator. If the iterator is /// exhausted, a new `Slag` is acquired. - unsafe fn slag_alloc(&mut self) -> *mut u8 { + unsafe fn slag_alloc(&mut self) -> Option<*mut u8> { for _ in 0..2 { match self.iter.next() { - Some(ptr) => return ptr, - None => self.iter = self.alloc.refresh(), + Some(ptr) => return Some(ptr), + None => self.iter = self.alloc.refresh()?, } } alloc_panic!( @@ -225,10 +227,10 @@ impl MagazineCache { } impl Frontend for MagazineCache { - unsafe fn alloc(&mut self) -> *mut u8 { + unsafe fn alloc(&mut self) -> Option<*mut u8> { if let Some(ptr) = self.s.pop() { trace_event!(cache_alloc); - ptr + Some(ptr) } else { trace_event!(slag_alloc); self.slag_alloc() @@ -275,11 +277,11 @@ impl Default for RemoteFreeCell { } impl Coalescer { - fn new(size: usize) -> Self { - Coalescer( - OwnedArray::new(size.next_power_of_two()), - PtrStack::new(size), - ) + fn new(size: usize) -> Option { + Some(Coalescer( + OwnedArray::new(size.next_power_of_two())?, + PtrStack::new(size)?, + )) } fn bucket_num(&self, word: usize) -> usize { @@ -345,11 +347,11 @@ struct PtrStack { } impl PtrStack { - fn new(max_objects: usize) -> PtrStack { - PtrStack { - data: OwnedArray::new(max_objects), + fn new(max_objects: usize) -> Option { + Some(PtrStack { + data: OwnedArray::new(max_objects)?, top: 0, - } + }) } unsafe fn push(&mut self, item: *mut u8) { @@ -436,15 +438,16 @@ mod magazine { } impl Magazine { - unsafe fn new(size: usize) -> *mut Magazine { - let page_size = mmap::page_size(); + unsafe fn new(size: usize) -> Option<*mut Magazine> { + let page_size = utils::page_size(); alloc_debug_assert!(page_size.is_power_of_two()); let bytes = mem::size_of::() + mem::size_of::<*mut u8>() * size; let rem = bytes & (page_size - 1); let n_pages = (bytes >> page_size.trailing_zeros()) + cmp::min(1, rem); let region_size = n_pages * page_size; alloc_debug_assert!(bytes <= region_size); - let mem = mmap::map(region_size) as *mut Magazine; + let layout = Layout::from_size_align(region_size, page_size).unwrap(); + let mem = (&*MMAP).alloc(layout).map(|ptr| ptr as *mut Magazine).ok()?; ptr::write( mem, Magazine { @@ -454,12 +457,11 @@ mod magazine { base: mem.offset(1) as *mut u8, }, ); - mem + Some(mem) } - unsafe fn default() -> *mut Magazine { - let res = Magazine::new(3068); - res + unsafe fn default() -> Option<*mut Magazine> { + Magazine::new(3068) } /// Unmap the memory associated with the `Magazine`. @@ -468,8 +470,9 @@ mod magazine { /// nonempty. unsafe fn destroy(slf: *mut Magazine) { alloc_debug_assert_eq!(((*slf).base as *mut Magazine).offset(-1), slf); - alloc_debug_assert_eq!(slf as usize % mmap::page_size(), 0); - mmap::unmap(slf as *mut u8, (*slf).mapped) + alloc_debug_assert_eq!(slf as usize % utils::page_size(), 0); + let layout = Layout::from_size_align((*slf).mapped, utils::page_size()).unwrap(); + (&*MMAP).dealloc(slf as *mut u8, layout); } fn push(&mut self, item: *mut u8) -> bool { @@ -582,15 +585,15 @@ mod magazine { /// Allocate a full `Magazine` from the `Depot`, constructing a new one if none are /// present. 
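A sketch of the size computation in the reworked `Magazine::new` above: the header plus `size` pointer slots is rounded up to a whole number of pages, and the resulting `Layout` is what gets handed to the mmap-based allocator. `Header` and the standalone function are illustrative stand-ins, not the crate's real definitions.

```rust
use std::alloc::Layout;
use std::mem;

// Placeholder for the real `Magazine` header fields.
struct Header {
    top: usize,
    cap: usize,
    mapped: usize,
}

fn magazine_layout(size: usize, page_size: usize) -> Layout {
    assert!(page_size.is_power_of_two());
    // Same arithmetic as `Magazine::new`: bytes needed, rounded up to pages.
    let bytes = mem::size_of::<Header>() + mem::size_of::<*mut u8>() * size;
    let rem = bytes & (page_size - 1);
    let n_pages = (bytes >> page_size.trailing_zeros()) + std::cmp::min(1, rem);
    let region_size = n_pages * page_size;
    debug_assert!(bytes <= region_size);
    Layout::from_size_align(region_size, page_size).unwrap()
}
```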
- fn alloc_empty(&mut self) -> *mut Magazine { - let res = self.empty.pop_mut().unwrap_or_else(|| { + fn alloc_empty(&mut self) -> Option<*mut Magazine> { + let res = self.empty.pop_mut().or_else(|| { unsafe { Magazine::default() } }); - unsafe { - alloc_debug_assert_eq!((*res).top, 0) - }; + if let Some(res) = res { + unsafe { alloc_debug_assert_eq!((*res).top, 0) }; + } res } @@ -611,7 +614,7 @@ mod magazine { /// If the `Depot` is at capacity, `None` is returned and `m` is not freed. fn swap_full(&mut self, m: *mut Magazine) -> Option<*mut Magazine> { if self.free_full(m) { - Some(self.alloc_empty()) + self.alloc_empty() } else { None } @@ -631,32 +634,32 @@ mod magazine { impl LazyInitializable for DepotCache { type Params = (FE::Params, Depot); - fn init(&(ref backing, ref depot): &(FE::Params, Depot)) -> DepotCache { - Self::new(FE::init(backing.clone()), depot.clone()) + fn init(&(ref backing, ref depot): &(FE::Params, Depot)) -> Option> { + Self::new(FE::init(backing.clone())?, depot.clone()) } } impl DepotCache { - fn new(backing: FE, mut depot: Depot) -> DepotCache { - let m1 = depot.alloc_full().unwrap_or_else(|| depot.alloc_empty()); - let m2 = depot.alloc_empty(); - DepotCache { + fn new(backing: FE, mut depot: Depot) -> Option> { + let m1 = depot.alloc_full().or_else(|| depot.alloc_empty())?; + let m2 = depot.alloc_empty()?; + Some(DepotCache { backing: backing, depot: depot, m1: m1, m2: m2, - } + }) } } impl Frontend for DepotCache { - unsafe fn alloc(&mut self) -> *mut u8 { + unsafe fn alloc(&mut self) -> Option<*mut u8> { if let Some(p) = (*self.m1).pop() { - return p; + return Some(p); } mem::swap(&mut self.m1, &mut self.m2); if let Some(p) = (*self.m1).pop() { - return p; + return Some(p); } if let Some(m) = self.depot.swap_empty(self.m1) { @@ -664,11 +667,11 @@ mod magazine { } else { let cap = (*self.m1).cap; for _ in 0..cap { - let _r = (*self.m1).push(self.backing.alloc()); + let _r = (*self.m1).push(self.backing.alloc()?); alloc_debug_assert!(_r); } } - (*self.m1).pop().expect("new full magazine is empty") + Some((*self.m1).pop().expect("new full magazine is empty")) } unsafe fn free(&mut self, item: *mut u8) { @@ -770,7 +773,7 @@ impl AllocBuilder { } /// Build a `LocalAllocator` from the current configuration. - pub fn build_local(&self) -> LocalAllocator { + pub fn build_local(&self) -> Option> { LocalAllocator::new_standalone( self.cutoff_factor, self.page_size, @@ -781,7 +784,7 @@ impl AllocBuilder { } /// Build a `MagazineAllocator` from the current configuration. - pub fn build_magazine(&self) -> MagazineAllocator { + pub fn build_magazine(&self) -> Option> { MagazineAllocator::new_standalone( self.cutoff_factor, self.page_size, @@ -807,15 +810,15 @@ macro_rules! 
typed_wrapper { target_overhead: usize, eager_decommit: usize, max_objects: usize) - -> Self { - let pa = PageAlloc::new(page_size, target_overhead, 8, AllocType::SmallSlag); + -> Option { + let pa = PageAlloc::new(page_size, target_overhead, 8, AllocType::SmallSlag)?; let slag = SlagAllocator::new(max_objects, mem::size_of::(), 0, - cutoff_factor, eager_decommit, pa); - $name($wrapped::new(slag), PhantomData) + cutoff_factor, eager_decommit, pa)?; + Some($name($wrapped::new(slag)?, PhantomData)) } - pub unsafe fn alloc(&mut self) -> *mut T { - self.0.alloc() as *mut T + pub unsafe fn alloc(&mut self) -> Option<*mut T> { + self.0.alloc().map(|ptr| ptr as *mut T) } pub unsafe fn free(&mut self, item: *mut T) { diff --git a/elfmalloc/src/general.rs b/elfmalloc/src/general.rs index 56cb6a2..78ee015 100644 --- a/elfmalloc/src/general.rs +++ b/elfmalloc/src/general.rs @@ -1,4 +1,4 @@ -// Copyright 2017 the authors. See the 'Copyright and license' section of the +// Copyright 2017-2018 the authors. See the 'Copyright and license' section of the // README.md file at the top-level directory of this repository. // // Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or @@ -42,6 +42,8 @@ //! instead want something more specialized, such as the `LocalAllocator` and `MagazineAllocator` //! object-specific allocators. +extern crate alloc; +use alloc::allocator::{Alloc, Layout}; use std::cmp; use std::ptr; use std::mem; @@ -53,7 +55,7 @@ use super::slag::{compute_metadata, CoarseAllocator, DirtyFn, Metadata, PageAllo Slag, PageCleanup}; #[allow(unused_imports)] use super::frontends::{MagazineCache, LocalCache, DepotCache, Depot, Frontend}; -use super::utils::{mmap, Lazy, TypedArray, likely}; +use super::utils::{self, Lazy, TypedArray, likely}; use super::alloc_type::AllocType; type Source = MmapSource; @@ -156,8 +158,8 @@ pub(crate) mod global { // methods. unsafe impl Sync for GlobalAllocProvider {} impl GlobalAllocProvider { - fn new() -> GlobalAllocProvider { - GlobalAllocProvider { inner: Some(ElfMalloc::new()) } + fn new() -> Option { + Some(GlobalAllocProvider { inner: Some(ElfMalloc::new()?) }) } } @@ -210,14 +212,14 @@ pub(crate) mod global { } pub unsafe fn get_layout(item: *mut u8) -> (usize /* size */, usize /* alignment */) { - let m_block = match get_type(item) { + let page_size = match get_type(item) { // TODO(ezrosent): this duplicates some work.. AllocType::SmallSlag | AllocType::Large => { with_local_or_clone(|h| { (*h.get()) .alloc .small_pages - .backing_memory() + .page_size() }) } AllocType::BigSlag => { @@ -225,11 +227,11 @@ pub(crate) mod global { (*h.get()) .alloc .large_pages - .backing_memory() + .page_size() }) } }; - super::elfmalloc_get_layout(m_block, item) + super::elfmalloc_get_layout(page_size, item) } fn new_handle() -> GlobalAllocator { @@ -246,7 +248,7 @@ pub(crate) mod global { } lazy_static! { - static ref ELF_HEAP: GlobalAllocProvider = GlobalAllocProvider::new(); + static ref ELF_HEAP: GlobalAllocProvider = GlobalAllocProvider::new().expect("could not create heap"); static ref DESTRUCTOR_CHAN: Mutex> = { // Background thread code: block on a channel waiting for memory reclamation messages // (Husks). 
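A small sketch of where the new fallible constructors stop propagating: interfaces that cannot return an error (the lazy global heap here, and the `Clone`/`Default` impls elsewhere in the patch) call `.expect(..)`, while everything else returns `Option`. `Heap` and its field are stand-ins for `GlobalAllocProvider`; the 2015-edition `extern crate` style matches the crate.

```rust
#[macro_use]
extern crate lazy_static;

struct Heap {
    page_size: usize,
}

impl Heap {
    // Stand-in for the now-fallible `GlobalAllocProvider::new()`.
    fn new() -> Option<Heap> {
        Some(Heap { page_size: 4096 })
    }
}

lazy_static! {
    // Failure to build the global heap is treated as unrecoverable here,
    // mirroring `GlobalAllocProvider::new().expect("could not create heap")`.
    static ref ELF_HEAP: Heap = Heap::new().expect("could not create heap");
}

fn main() {
    assert_eq!(ELF_HEAP.page_size, 4096);
}
```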
@@ -283,16 +285,16 @@ pub(crate) mod global { } } - pub unsafe fn alloc(size: usize) -> *mut u8 { + pub unsafe fn alloc(size: usize) -> Option<*mut u8> { alloc_tls_fast_with!(LOCAL_ELF_HEAP, h, { (*h.get()).alloc.alloc(size) }) .unwrap_or_else(|| super::large_alloc::alloc(size)) } - pub unsafe fn realloc(item: *mut u8, new_size: usize) -> *mut u8 { + pub unsafe fn realloc(item: *mut u8, new_size: usize) -> Option<*mut u8> { aligned_realloc(item, new_size, mem::size_of::()) } - pub unsafe fn aligned_realloc(item: *mut u8, new_size: usize, new_alignment: usize) -> *mut u8 { + pub unsafe fn aligned_realloc(item: *mut u8, new_size: usize, new_alignment: usize) -> Option<*mut u8> { with_local_or_clone(|h| (*h.get()).alloc.realloc(item, new_size, new_alignment)) } @@ -319,8 +321,8 @@ where type Key; /// Create and initialize the map. - fn init T>(start: Self::Key, n_classes: usize, f: F) -> Self { - Self::init_conserve(start, n_classes, f).1 + fn init T>(start: Self::Key, n_classes: usize, f: F) -> Option { + Some(Self::init_conserve(start, n_classes, f)?.1) } /// Create and initialize the map, handing back ownership of the constructor. @@ -328,7 +330,7 @@ where start: Self::Key, n_classes: usize, f: F, - ) -> (F, Self); + ) -> Option<(F, Self)>; /// Get an unchecked raw pointer to the class corresponding to `k`. unsafe fn get_raw(&self, k: Self::Key) -> *mut T; @@ -384,19 +386,19 @@ struct TieredSizeClasses { impl AllocMap for TieredSizeClasses { type Key = usize; - fn init_conserve T>(start: usize, n_classes: usize, f: F) -> (F, Self) { + fn init_conserve T>(start: usize, n_classes: usize, f: F) -> Option<(F, Self)> { let n_small_classes = cmp::min((ELFMALLOC_SMALL_CUTOFF / MULTIPLE) - (start / MULTIPLE), n_classes / 2); let n_medium_classes = n_classes - n_small_classes; - let (f2, small_classes) = Multiples::init_conserve(start, n_small_classes, f); + let (f2, small_classes) = Multiples::init_conserve(start, n_small_classes, f)?; // mutability is unnecessary when we don't execute the 'let word_objs = f3(8)' line #[allow(unused_mut)] let (mut f3, medium_classes) = - PowersOfTwo::init_conserve(small_classes.max_key() + 1, n_medium_classes, f2); + PowersOfTwo::init_conserve(small_classes.max_key() + 1, n_medium_classes, f2)?; #[cfg(any(not(feature = "c-api"), not(any(target_os = "macos", all(windows, target_pointer_width = "64")))))] let word_objs = f3(8); - ( + Some(( f3, TieredSizeClasses { // When compiling for the C API, the minimum alignment is 16 on Mac and 64-bit Windows. 
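A sketch of the `init_conserve` pattern used by the size-class maps above: the backing array is the fallible part, the per-class constructor closure is threaded through, and the closure is handed back so the next tier can reuse it. The `Vec`-based helpers are illustrative stand-ins for `TypedArray` and the real map types.

```rust
// Stand-in for the `TypedArray::new(n_classes)?` call that makes map
// construction fallible in the patch.
fn try_reserve<T>(n: usize) -> Option<Vec<T>> {
    Some(Vec::with_capacity(n))
}

fn init_conserve<T, F>(start: usize, n_classes: usize, step: usize, mut f: F) -> Option<(F, Vec<T>)>
where
    F: FnMut(usize) -> T,
{
    // Fail the whole map if the backing array can't be built.
    let mut classes = try_reserve(n_classes)?;
    let mut size = start;
    for _ in 0..n_classes {
        classes.push(f(size));
        size += step;
    }
    // Hand the constructor closure back, as `init_conserve` does, so the next
    // tier (e.g. the power-of-two classes) can keep using it.
    Some((f, classes))
}
```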
@@ -407,7 +409,7 @@ impl AllocMap for TieredSizeClasses { small_objs: small_classes, medium_objs: medium_classes, }, - ) + )) } unsafe fn get_raw(&self, n: usize) -> *mut T { @@ -469,7 +471,7 @@ impl Clone for Multiples { fn clone(&self) -> Self { Multiples::init(self.starting_size, self.classes.len(), |size| unsafe { self.get(size).clone() - }) + }).expect("could not initialize Multiples") } } @@ -481,13 +483,13 @@ fn round_up(n: usize) -> usize { impl AllocMap for Multiples { type Key = usize; - fn init_conserve T>(start: usize, n_classes: usize, mut f: F) -> (F, Self) { + fn init_conserve T>(start: usize, n_classes: usize, mut f: F) -> Option<(F, Self)> { alloc_debug_assert!(n_classes >= 1); let starting_size = round_up(start); let res = Multiples { starting_size: starting_size, max_size: n_classes * MULTIPLE + starting_size - MULTIPLE, - classes: TypedArray::new(n_classes), + classes: TypedArray::new(n_classes)?, }; let mut cur_size = res.starting_size; for p in res.classes.iter() { @@ -497,7 +499,7 @@ impl AllocMap for Multiples { cur_size += MULTIPLE; } alloc_debug_assert_eq!(res.max_size, cur_size - MULTIPLE); - (f, res) + Some((f, res)) } #[cfg_attr(feature = "cargo-clippy", allow(inline_always))] @@ -536,7 +538,7 @@ impl Clone for PowersOfTwo { fn clone(&self) -> Self { PowersOfTwo::init(self.starting_size, self.classes.len(), |size| unsafe { self.get(size).clone() - }) + }).expect("failed to initialize PowersOfTwo") } } @@ -555,12 +557,12 @@ impl Drop for DynamicAllocator { } impl PowersOfTwo { - fn new(start_from: usize, n_classes: usize) -> PowersOfTwo { - PowersOfTwo { + fn new(start_from: usize, n_classes: usize) -> Option> { + Some(PowersOfTwo { starting_size: start_from.next_power_of_two(), max_size: 0, // currently uninitialized - classes: TypedArray::new(n_classes), - } + classes: TypedArray::new(n_classes)?, + }) } } @@ -570,8 +572,8 @@ impl AllocMap for PowersOfTwo { start: usize, n_classes: usize, mut f: F, - ) -> (F, Self) { - let mut res = Self::new(start, n_classes); + ) -> Option<(F, Self)> { + let mut res = Self::new(start, n_classes)?; let mut cur_size = res.starting_size; unsafe { for item in res.classes.iter() { @@ -580,7 +582,7 @@ impl AllocMap for PowersOfTwo { } } res.max_size = cur_size / 2; - (f, res) + Some((f, res)) } #[cfg_attr(feature = "cargo-clippy", allow(inline_always))] @@ -617,17 +619,17 @@ pub struct DynamicAllocator(ElfMalloc, TieredSizeClasses Self { - DynamicAllocator(ElfMalloc::new()) + pub fn new() -> Option { + Some(DynamicAllocator(ElfMalloc::new()?)) } - pub unsafe fn alloc(&mut self, size: usize) -> *mut u8 { + pub unsafe fn alloc(&mut self, size: usize) -> Option<*mut u8> { self.0.alloc(size) } pub unsafe fn free(&mut self, item: *mut u8) { self.0.free(item) } - pub unsafe fn realloc(&mut self, item: *mut u8, new_size: usize) -> *mut u8 { + pub unsafe fn realloc(&mut self, item: *mut u8, new_size: usize) -> Option<*mut u8> { self.0.realloc(item, new_size, mem::size_of::()) } @@ -636,7 +638,7 @@ impl DynamicAllocator { item: *mut u8, new_size: usize, new_alignment: usize, - ) -> *mut u8 { + ) -> Option<*mut u8> { self.0.realloc(item, new_size, new_alignment) } } @@ -678,7 +680,7 @@ struct ElfMalloc>> { impl Default for DynamicAllocator { fn default() -> Self { - Self::new() + Self::new().expect("could not create new DynamicAllocator") } } @@ -689,8 +691,8 @@ const ELFMALLOC_SMALL_CUTOFF: usize = ELFMALLOC_SMALL_PAGE_SIZE / 4; impl ElfMalloc, TieredSizeClasses>>> { - fn new() -> Self { - let pa_large = PageAlloc::new(ELFMALLOC_PAGE_SIZE, 
1 << 20, 8, AllocType::BigSlag); + fn new() -> Option { + let pa_large = PageAlloc::new(ELFMALLOC_PAGE_SIZE, 1 << 20, 8, AllocType::BigSlag)?; // The small pages are allocated in groups where the first page is aligned to // ELFMALLOC_PAGE_SIZE; this page will be stamped with AllocType::SmallSlag, allowing type // lookups to work as expected. @@ -700,7 +702,7 @@ impl 8, ELFMALLOC_PAGE_SIZE, AllocType::SmallSlag, - ); + )?; Self::new_internal(0.6, pa_small, pa_large, 8, 25) } } @@ -727,7 +729,7 @@ impl>, Key fn clone(&self) -> Self { let new_map = AM::init(self.start_from, self.n_classes, |size: usize| unsafe { self.allocs.get(size).clone() - }); + }).expect("could not initialize AM"); ElfMalloc { small_pages: self.small_pages.clone(), large_pages: self.large_pages.clone(), @@ -739,10 +741,10 @@ impl>, Key } } -unsafe fn elfmalloc_get_layout(m_block: &M, item: *mut u8) -> (usize, usize) { +unsafe fn elfmalloc_get_layout(page_size: usize, item: *mut u8) -> (usize, usize) { match get_type(item) { AllocType::SmallSlag | AllocType::BigSlag => { - let meta = (*Slag::find(item, m_block.page_size())).get_metadata(); + let meta = (*Slag::find(item, page_size)).get_metadata(); ( meta.object_size, if meta.object_size.is_power_of_two() { @@ -752,7 +754,7 @@ unsafe fn elfmalloc_get_layout(m_block: &M, item: *mut u8) -> ( }, ) } - AllocType::Large => (large_alloc::get_size(item), mmap::page_size()), + AllocType::Large => (large_alloc::get_size(item), utils::page_size()), } } @@ -765,16 +767,17 @@ impl>, Key pa_large: PageAlloc, start_from: usize, n_classes: usize, - ) -> Self { - use self::mmap::map; - let mut meta_pointer = map(mem::size_of::() * n_classes) as *mut Metadata; - let small_page_size = pa_small.backing_memory().page_size(); + ) -> Option { + use utils::MMAP; + let layout = Layout::new::().repeat(n_classes).unwrap().0; + let mut meta_pointer = unsafe { (&*MMAP).alloc(layout).map(|ptr| ptr as *mut Metadata).ok()? }; + let small_page_size = pa_small.page_size(); let am = AM::init(start_from, n_classes, |size: usize| { let (u_size, pa, ty) = if size < ELFMALLOC_SMALL_CUTOFF { (small_page_size, pa_small.clone(), AllocType::SmallSlag) } else { ( - pa_large.backing_memory().page_size(), + pa_large.page_size(), pa_large.clone(), AllocType::BigSlag, ) @@ -786,7 +789,7 @@ impl>, Key m_ptr, compute_metadata( size, - pa.backing_memory().page_size(), + pa.page_size(), 0, cutoff_factor, u_size, @@ -794,7 +797,7 @@ impl>, Key ), ); } - let clean = PageCleanup::new(pa.backing_memory().page_size()); + let clean = PageCleanup::new(pa.page_size(), pa.page_size()); // TODO(ezrosent); new_size(8) is a good default, but a better one would take // num_cpus::get() into account when picking this size, as in principle this will run // into scaling limits at some point. 
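A sketch of the metadata-array allocation in `new_internal` above. The patch builds the layout with the old `alloc::allocator` `Layout::repeat` call; the equivalent spelled with `from_size_align` is shown here, and the global allocator stands in for the shared `MMAP` handle. `Metadata` is a placeholder struct, not the crate's real one.

```rust
use std::alloc::Layout;
use std::mem;

// Placeholder metadata record.
struct Metadata {
    object_size: usize,
    n_objects: usize,
}

fn metadata_array_layout(n_classes: usize) -> Layout {
    assert!(n_classes > 0);
    Layout::from_size_align(
        mem::size_of::<Metadata>() * n_classes,
        mem::align_of::<Metadata>(),
    )
    .unwrap()
}

// Mirrors `(&*MMAP).alloc(layout).map(|ptr| ptr as *mut Metadata).ok()?`,
// with the std global allocator standing in for the mmap-backed one.
fn alloc_metadata_array(n_classes: usize) -> Option<*mut Metadata> {
    let layout = metadata_array_layout(n_classes);
    let ptr = unsafe { std::alloc::alloc(layout) };
    if ptr.is_null() {
        None
    } else {
        Some(ptr as *mut Metadata)
    }
}
```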
@@ -812,16 +815,15 @@ impl>, Key { ObjectAlloc::new((params, Depot::default())) } - }); - let max_size = am.max_key(); - ElfMalloc { + })?; + Some(ElfMalloc { small_pages: pa_small.clone(), large_pages: pa_large.clone(), + max_size: am.max_key(), allocs: am, - max_size: max_size, start_from: start_from, n_classes: n_classes, - } + }) } #[inline] @@ -839,18 +841,18 @@ impl>, Key } match get_type(item) { AllocType::SmallSlag => { - alloc_debug_assert_eq!(self.small_pages.backing_memory().page_size(), ELFMALLOC_SMALL_PAGE_SIZE); + alloc_debug_assert_eq!(self.small_pages.page_size(), ELFMALLOC_SMALL_PAGE_SIZE); Some(ELFMALLOC_SMALL_PAGE_SIZE) }, AllocType::BigSlag => { - alloc_debug_assert_eq!(self.large_pages.backing_memory().page_size(), ELFMALLOC_PAGE_SIZE); + alloc_debug_assert_eq!(self.large_pages.page_size(), ELFMALLOC_PAGE_SIZE); Some(ELFMALLOC_PAGE_SIZE) }, AllocType::Large => None, } } - unsafe fn alloc(&mut self, bytes: usize) -> *mut u8 { + unsafe fn alloc(&mut self, bytes: usize) -> Option<*mut u8> { if likely(bytes <= self.max_size) { self.allocs.get_mut(bytes).alloc() } else { @@ -863,7 +865,7 @@ impl>, Key item: *mut u8, mut new_size: usize, new_alignment: usize, - ) -> *mut u8 { + ) -> Option<*mut u8> { if item.is_null() { let alloc_size = if new_alignment <= mem::size_of::() { new_size @@ -874,16 +876,16 @@ impl>, Key } if new_size == 0 { self.free(item); - return ptr::null_mut(); + return None; } let (old_size, old_alignment) = global::get_layout(item); if old_alignment >= new_alignment && old_size >= new_size { - return item; + return Some(item); } if new_alignment > mem::size_of::() { new_size = new_size.next_power_of_two(); } - let new_mem = self.alloc(new_size); + let new_mem = self.alloc(new_size)?; ptr::copy_nonoverlapping(item, new_mem, ::std::cmp::min(old_size, new_size)); self.free(item); #[cfg(debug_assertions)] @@ -891,7 +893,7 @@ impl>, Key let (size, _) = global::get_layout(new_mem); alloc_debug_assert!(new_size <= size, "Realloc for {} got memory with size {}", new_size, size); } - new_mem + Some(new_mem) } unsafe fn free(&mut self, item: *mut u8) { @@ -922,6 +924,8 @@ mod large_alloc { use super::super::sources::{MemorySource, MmapSource}; use super::{ELFMALLOC_PAGE_SIZE, ELFMALLOC_SMALL_CUTOFF, round_to_page}; use super::super::alloc_type::AllocType; + use utils::{self, MMAP}; + use super::{Alloc, Layout}; // For debugging, we keep around a thread-local map of pointers to lengths. This helps us // scrutinize if various header data is getting propagated correctly. @@ -929,9 +933,6 @@ mod large_alloc { thread_local! { pub static SEEN_PTRS: RefCell> = RefCell::new(HashMap::new()); } - use super::mmap::unmap; - #[cfg(debug_assertions)] - use super::mmap::page_size; #[repr(C)] #[derive(Copy, Clone)] @@ -941,12 +942,12 @@ mod large_alloc { region_size: usize, } - pub unsafe fn alloc(size: usize) -> *mut u8 { + pub unsafe fn alloc(size: usize) -> Option<*mut u8> { // TODO(ezrosent) round up to page size let region_size = size + ELFMALLOC_PAGE_SIZE; // We need a pointer aligned to the SMALL_CUTOFF, so we use an `MmapSource` to map the // memory. See the comment in get_page_size. 
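A simplified sketch of the large-object scheme used by `large_alloc`: a fixed-size header region is reserved in front of the returned pointer and enough bookkeeping is stamped there for `free`/`get_size` to recover the mapping. The real code maps through an `MmapSource` aligned to `ELFMALLOC_SMALL_CUTOFF` and records an `AllocType` tag as well; the 4 KiB header page, the bare `usize` header, and the use of the std allocator are simplifications.

```rust
use std::alloc::{alloc, dealloc, Layout};

const HEADER_PAGE: usize = 4096; // stands in for ELFMALLOC_PAGE_SIZE

unsafe fn large_alloc(size: usize) -> Option<*mut u8> {
    let region_size = size + HEADER_PAGE;
    let layout = Layout::from_size_align(region_size, HEADER_PAGE).ok()?;
    let base = alloc(layout);
    if base.is_null() {
        return None;
    }
    (base as *mut usize).write(region_size); // stamp the header
    Some(base.add(HEADER_PAGE)) // hand out the address one header-page in
}

unsafe fn large_free(item: *mut u8) {
    let base = item.sub(HEADER_PAGE);
    let region_size = (base as *const usize).read();
    dealloc(base, Layout::from_size_align(region_size, HEADER_PAGE).unwrap());
}
```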
- let src = MmapSource::new(ELFMALLOC_SMALL_CUTOFF); + let src = MmapSource::new(ELFMALLOC_SMALL_CUTOFF)?; let n_pages = region_size / ELFMALLOC_SMALL_CUTOFF + cmp::min(1, region_size % ELFMALLOC_SMALL_CUTOFF); let mem = src.carve(n_pages).expect("[lage_alloc::alloc] mmap failed"); let res = mem.offset(ELFMALLOC_PAGE_SIZE as isize); @@ -969,7 +970,7 @@ mod large_alloc { alloc_debug_assert_eq!(get_commitment(res), (size + ELFMALLOC_PAGE_SIZE, mem)); #[cfg(test)] SEEN_PTRS.with(|hs| hs.borrow_mut().insert(mem, region_size)); // end extra debugging information - res + Some(res) } pub unsafe fn free(item: *mut u8) { @@ -985,11 +986,11 @@ mod large_alloc { { ptr::write_volatile(item, 10); alloc_debug_assert_eq!( - base_ptr as usize % page_size(), + base_ptr as usize % utils::page_size(), 0, "base_ptr ({:?}) not a multiple of the page size ({})", base_ptr, - page_size() + utils::page_size() ); } #[cfg(test)] @@ -1005,7 +1006,8 @@ mod large_alloc { }); } // end extra debugging information - unmap(base_ptr, size); + let layout = Layout::from_size_align(size, utils::page_size()).unwrap(); + (&*MMAP).dealloc(base_ptr, layout); } pub unsafe fn get_size(item: *mut u8) -> usize { @@ -1056,7 +1058,7 @@ mod tests { alloc_assert!(align >= 512); }); test_and_free(4 << 20, |size, align| { - alloc_assert_eq!((size, align), (4 << 20, mmap::page_size())) + alloc_assert_eq!((size, align), (4 << 20, utils::page_size())) }); } diff --git a/elfmalloc/src/rust_alloc.rs b/elfmalloc/src/rust_alloc.rs index 3e3bec9..470ca55 100644 --- a/elfmalloc/src/rust_alloc.rs +++ b/elfmalloc/src/rust_alloc.rs @@ -1,4 +1,4 @@ -// Copyright 2017 the authors. See the 'Copyright and license' section of the +// Copyright 2017-2018 the authors. See the 'Copyright and license' section of the // README.md file at the top-level directory of this repository. 
// // Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or @@ -78,7 +78,7 @@ use super::general::{Multiples, PowersOfTwo, ObjectAlloc, MULTIPLE, AllocMap}; use super::slag::{PageAlloc, Metadata, RevocablePipe, compute_metadata, SlagPipe, PageCleanup}; #[allow(unused_imports)] use super::frontends::{Depot, Frontend}; -use super::utils::{mmap, Lazy, LazyInitializable}; +use super::utils::{self, MMAP, Lazy, LazyInitializable}; use super::sources::MemorySource; use super::bagpipe::bag::WeakBag; use super::sources::MmapSource; @@ -110,28 +110,28 @@ impl PageSource { target_size: usize, pipe_size: usize, page_size: usize, - ) -> PageSource { - let m = M::new(page_size); - PageSource { + ) -> Option> { + let m = M::new(page_size)?; + Some(PageSource { cutoff_bytes: cutoff_bytes, target_size: target_size, - pages: SlagPipe::new_size_cleanup(pipe_size, PageCleanup::new(m.page_size())), + pages: SlagPipe::new_size_cleanup(pipe_size, PageCleanup::new(page_size, page_size)), source: m, - } + }) } unsafe fn free(&mut self, p: *mut u8, old_size: usize) { + let page_size = self.source.page_size(); if self.pages.size_guess() >= self.target_size as isize { - mmap::unmap(p, self.source.page_size()); - return; - } - if old_size >= self.cutoff_bytes { - mmap::uncommit( - p.offset(self.cutoff_bytes as isize), - self.source.page_size() - self.cutoff_bytes, - ); + let layout = Layout::from_size_align(page_size, page_size).unwrap(); + (&*MMAP).dealloc(p, layout); + } else { + if old_size >= self.cutoff_bytes { + let layout = Layout::from_size_align(self.source.page_size() - self.cutoff_bytes, page_size).unwrap(); + (&*MMAP).uncommit(p.offset(self.cutoff_bytes as isize), layout); + } + self.pages.push_mut(p); } - self.pages.push_mut(p); } unsafe fn alloc(&mut self) -> Option<*mut u8> { @@ -167,15 +167,15 @@ impl PageFrontend { max_overhead: usize, pipe_size: usize, parent: PageSource, - ) -> PageFrontend { + ) -> Option> { alloc_debug_assert!(size <= parent.source.page_size()); alloc_debug_assert!(size.is_power_of_two()); - PageFrontend { + Some(PageFrontend { parent: parent, - pages: SlagPipe::new_size_cleanup(pipe_size, PageCleanup::new(size)), + pages: SlagPipe::new_size_cleanup(pipe_size, PageCleanup::new(size, size)), local_size: size, max_overhead: max_overhead, - } + }) } unsafe fn alloc(&mut self) -> Option<*mut u8> { @@ -195,7 +195,7 @@ impl PageFrontend { impl LazyInitializable for PageFrontend { type Params = (usize, usize, usize, PageSource); - fn init(&(size, max_overhead, pipe_size, ref parent): &Self::Params) -> Self { + fn init(&(size, max_overhead, pipe_size, ref parent): &Self::Params) -> Option { PageFrontend::new(size, max_overhead, pipe_size, parent.clone()) } } @@ -272,23 +272,18 @@ unsafe impl Alloc for ElfMalloc { case_analyze!( self, l, - small Ok( - self.small + small self.small .get_mut(if l.align() > mem::size_of::() { l.size().next_power_of_two() } else { l.size() }) - .alloc(), - ); + .alloc().ok_or(AllocErr::Exhausted { request: l }); medium match self.large.get_mut(l.size()).alloc() { Some(p) => Ok(p), None => Err(AllocErr::Exhausted { request: l }), }; - large match mmap::fallible_map(l.size()) { - Some(p) => Ok(p), - None => Err(AllocErr::Exhausted { request: l }), - }; + large (&*MMAP).alloc(l); ) } @@ -305,7 +300,8 @@ unsafe impl Alloc for ElfMalloc { (l.size() + (MULTIPLE - 1)) & !(MULTIPLE - 1) }).free(item); medium self.large.get_mut(l.size()).free(item); - large mmap::unmap(item, l.size());) + large (&*MMAP).dealloc(item, l); + ); } #[inline(always)] 
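A sketch of the decision made in the reworked `PageSource::free` above: once the cached pipe of pages is over its target size a page is returned to the OS outright; otherwise only the tail beyond `cutoff_bytes` is uncommitted and the page is recycled. The enum is illustrative; the real code calls `dealloc`/`uncommit` on the shared `MMAP` handle directly.

```rust
enum FreeAction {
    Unmap,                         // dealloc(p, page_size)
    UncommitTailAndRecycle(usize), // uncommit(p + cutoff, page_size - cutoff), then recycle
    Recycle,                       // just push the page back onto the pipe
}

fn free_action(
    pipe_len: isize,
    target_size: isize,
    old_size: usize,
    cutoff_bytes: usize,
    page_size: usize,
) -> FreeAction {
    if pipe_len >= target_size {
        FreeAction::Unmap
    } else if old_size >= cutoff_bytes {
        FreeAction::UncommitTailAndRecycle(page_size - cutoff_bytes)
    } else {
        FreeAction::Recycle
    }
}
```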
@@ -373,7 +369,7 @@ impl Default for ElfMallocBuilder { impl ElfMallocBuilder { pub fn page_size(&mut self, page_size: usize) -> &mut ElfMallocBuilder { - self.page_size = cmp::max(mmap::page_size(), page_size); + self.page_size = cmp::max(utils::page_size(), page_size); self } pub fn target_pa_size(&mut self, target_pa_size: usize) -> &mut ElfMallocBuilder { @@ -397,12 +393,12 @@ impl ElfMallocBuilder { self } - pub fn build(&self) -> ElfMalloc { - let pa = PageAlloc::::new(self.page_size, self.target_pa_size, self.large_pipe_size, AllocType::SmallSlag); + pub fn build(&self) -> Option> { + let pa = PageAlloc::::new(self.page_size, self.target_pa_size, self.large_pipe_size, AllocType::SmallSlag)?; let n_small_classes = (self.page_size / 4) / MULTIPLE; alloc_assert!(n_small_classes > 0); - let mut meta_pointers = mmap::map(mem::size_of::() * n_small_classes) as - *mut Metadata; + let layout = Layout::new::().repeat(n_small_classes).unwrap().0; + let mut meta_pointers = unsafe { (&*MMAP).alloc(layout).ok()? as *mut Metadata }; let small_classes = Multiples::init(MULTIPLE, n_small_classes, |size: usize| { let meta = meta_pointers; unsafe { @@ -423,7 +419,7 @@ impl ElfMallocBuilder { meta, usize::max_value(), /* no eager decommit */ pa.clone(), - RevocablePipe::new_size_cleanup(self.small_pipe_size, PageCleanup::new(self.page_size)), + RevocablePipe::new_size_cleanup(self.small_pipe_size, PageCleanup::new(self.page_size, self.page_size)), ); #[cfg(not(feature = "magazine_layer"))] { @@ -433,7 +429,7 @@ impl ElfMallocBuilder { { ObjectAlloc::new((params, Depot::default())) } - }); + })?; let next_size_class = (small_classes.max_key() + 1).next_power_of_two(); let max_size = self.max_object_size.next_power_of_two(); let n_classes = max_size.trailing_zeros() - next_size_class.trailing_zeros(); @@ -442,22 +438,22 @@ impl ElfMallocBuilder { self.large_obj_target_size, self.large_pipe_size, max_size, - ); + )?; let large_classes = PowersOfTwo::init(next_size_class, n_classes as usize, |size: usize| { let target_size: usize = cmp::max(1, self.target_pipe_overhead / size); Lazy::>::new( (size, target_size, self.small_pipe_size, p_source.clone()), ) - }); + })?; alloc_debug_assert!(small_classes.max_key().is_power_of_two()); - ElfMalloc { + Some(ElfMalloc { small: small_classes, large: large_classes, - } + }) } - pub fn build_owned(&self) -> OwnedElfMalloc { - OwnedElfMalloc(self.build()) + pub fn build_owned(&self) -> Option> { + Some(OwnedElfMalloc(self.build()?)) } } @@ -513,7 +509,7 @@ mod global { lazy_static! { static ref GLOBAL_HANDLE: ElfCloner = ElfCloner(ElfMallocBuilder::default() .page_size(16 << 10) - .build()); + .build().expect("failed to build global handle")); /// We still have a crossbeam dependency, which means that we may have to reclaim a /// thread's cached memory after it is destroyed. See the comments in `general::global` for diff --git a/elfmalloc/src/slag.rs b/elfmalloc/src/slag.rs index 012192b..da5b45c 100644 --- a/elfmalloc/src/slag.rs +++ b/elfmalloc/src/slag.rs @@ -1,4 +1,4 @@ -// Copyright 2017 the authors. See the 'Copyright and license' section of the +// Copyright 2017-2018 the authors. See the 'Copyright and license' section of the // README.md file at the top-level directory of this repository. // // Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or @@ -59,12 +59,14 @@ //! See the `frontends` module for how the slag subsystem is used to construct allocators. //! //! 
[1]: https://arxiv.org/abs/1503.09006 +extern crate alloc; +use alloc::allocator::{Alloc, Layout}; use std::mem; use std::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering}; use super::bagpipe::bag::{Revocable, WeakBag}; use super::bagpipe::{BagPipe, BagCleanup}; use super::bagpipe::queue::{FAAQueueLowLevel, RevocableFAAQueue}; -use super::utils::{mmap, LazyInitializable, unlikely}; +use super::utils::{self, MMAP, LazyInitializable, unlikely}; use super::alloc_type::AllocType; use super::sources::MemorySource; use std::marker::PhantomData; @@ -75,20 +77,23 @@ pub type SlagPipe = BagPipe, PageCleanup>; pub type RevocablePipe = BagPipe, PageCleanup>; #[derive(Copy, Clone)] -pub struct PageCleanup(usize, PhantomData); +pub struct PageCleanup { + size: usize, + align: usize, + _marker: PhantomData, +} impl PageCleanup { - pub fn new(page_size: usize) -> PageCleanup { - PageCleanup(page_size, PhantomData) + pub fn new(page_size: usize, align: usize) -> PageCleanup { + PageCleanup { size: page_size, align, _marker: PhantomData } } } impl BagCleanup for PageCleanup { type Item = *mut T; fn cleanup(&self, it: *mut T) { - unsafe { - mmap::unmap(it as *mut u8, self.0); - } + let layout = Layout::from_size_align(self.size, self.align).unwrap(); + unsafe { (&*MMAP).dealloc(it as *mut u8, layout) }; } } @@ -99,26 +104,17 @@ pub trait CoarseAllocator where Self: Clone, { - /// The concrete type representing backing memory for the allocator. - type Block: MemorySource; - - /// The start of a new block of memory of size `backing_memory().page_size()`. - /// - /// Furthermore, all memory returned by `alloc` must satisfy - /// `c.backing_memory().contains(c.alloc())`*. - /// - /// *That is, if that code actually compiled and didn't have a lifetime issue. - unsafe fn alloc(&mut self) -> *mut u8; + /// Allocate a new block of memory of size `page_size()`. + unsafe fn alloc(&mut self) -> Option<*mut u8>; /// Free a page of memory back to the allocator. /// - /// If `item` is not contained in `self.backing_memory()`, the behavior of `free` is undefined. /// The `uncommit` flag is a hint to the allocator to uncommit the memory. It need not be /// observed. unsafe fn free(&mut self, item: *mut u8, uncommit: bool); /// Get access to the backing memory for the allocator. - fn backing_memory(&self) -> &Self::Block; + fn page_size(&self) -> usize; } @@ -905,22 +901,22 @@ where // bagpipes of byte slices of size creek.page_size clean: SlagPipe, dirty: SlagPipe, - aligned_source: C, pages_per: usize, + page_size: usize, ty: AllocType, _marker: PhantomData, } impl LazyInitializable for PageAlloc { type Params = (usize, usize, usize, usize, AllocType); - fn init(&(page_size, target_overhead, pipe_size, aligned_source, ty): &Self::Params) -> Self { + fn init(&(page_size, target_overhead, pipe_size, aligned_source, ty): &Self::Params) -> Option { Self::new_aligned(page_size, target_overhead, pipe_size, aligned_source, ty) } } impl PageAlloc { /// Create a new `PageAlloc`. 
- pub fn new(page_size: usize, target_overhead: usize, pipe_size: usize, ty: AllocType) -> Self { + pub fn new(page_size: usize, target_overhead: usize, pipe_size: usize, ty: AllocType) -> Option { Self::new_aligned(page_size, target_overhead, pipe_size, page_size, ty) } @@ -930,42 +926,39 @@ impl PageAlloc { pipe_size: usize, align: usize, ty: AllocType, - ) -> Self { + ) -> Option { alloc_debug_assert!(align >= page_size); alloc_debug_assert!(page_size.is_power_of_two()); alloc_debug_assert!(align.is_power_of_two()); let pages_per = align / page_size; - let clean = PageCleanup::new(page_size); - let creek = C::new(page_size); - let creek_2 = if pages_per > 1 { - C::new(align) + let clean = PageCleanup::new(page_size, align); + let creek = C::new(if pages_per == 1 { + page_size } else { - creek.clone() - }; - PageAlloc { - target_overhead: target_overhead, - creek: creek, - pages_per: pages_per, - aligned_source: creek_2, + align + })?; + Some(PageAlloc { + target_overhead, + creek, + pages_per, + page_size, clean: SlagPipe::new_size_cleanup(2, clean), dirty: SlagPipe::new_size_cleanup(pipe_size, clean), - ty: ty, + ty, _marker: PhantomData, - } + }) } /// Get more clean pages from the backing memory. /// /// One of these pages is returned to the caller for allocation. The rest are added to the /// clean `BagPipe`. - fn refresh_pages(&mut self) -> *mut u8 { + fn refresh_pages(&mut self) -> Option<*mut u8> { // If we are using a higher alignment, just allocate a single higher-aligned page. If not, // allocate two pages. let npages = cmp::max(self.pages_per, 2); - let creek = &self.aligned_source; - let pages = creek - .carve(if self.pages_per == 1 { 2 } else { 1 }) - .expect("out of memory!"); + let pages = self.creek + .carve(if self.pages_per == 1 { 2 } else { 1 })?; let page_size = self.creek.page_size(); // Write the required AllocType to the aligned boundary. 
In some settings this is // unnecessary, but refresh_pages is not called in the hot path and the cost of writing @@ -976,48 +969,47 @@ impl PageAlloc { pages.offset(page_size as isize * (i as isize)) }); self.clean.bulk_add(iter); - pages + Some(pages) } } impl CoarseAllocator for PageAlloc { - type Block = C; - - fn backing_memory(&self) -> &C { - &self.creek + fn page_size(&self) -> usize { + self.page_size } - unsafe fn alloc(&mut self) -> *mut u8 { + unsafe fn alloc(&mut self) -> Option<*mut u8> { if let Ok(ptr) = self.dirty.try_pop_mut() { trace_event!(grabbed_dirty); - return ptr; - } - if let Ok(ptr) = self.clean.try_pop_mut() { + Some(ptr) + } else if let Ok(ptr) = self.clean.try_pop_mut() { trace_event!(grabbed_clean); D::dirty(ptr); - return ptr; + Some(ptr) + } else { + self.refresh_pages() } - self.refresh_pages() } unsafe fn free(&mut self, ptr: *mut u8, decommit: bool) { - use self::mmap::uncommit; use std::cmp; - let minor_page_size = mmap::page_size() as isize; + let minor_page_size = utils::page_size() as isize; if self.dirty.size_guess() >= self.target_overhead as isize { - uncommit(ptr, self.backing_memory().page_size()); + let layout = Layout::from_size_align(self.page_size, self.page_size).unwrap(); + (&*MMAP).uncommit(ptr, layout); self.clean.push_mut(ptr); return; } if decommit { let uncommit_len = cmp::max( 0, - self.backing_memory().page_size() as isize - minor_page_size, + self.page_size as isize - minor_page_size, ) as usize; if uncommit_len == 0 { self.dirty.push_mut(ptr); } else { - uncommit(ptr.offset(minor_page_size), uncommit_len); + let layout = Layout::from_size_align(uncommit_len, minor_page_size as usize).unwrap(); + (&*MMAP).uncommit(ptr.offset(minor_page_size), layout); self.dirty.push_mut(ptr); } } else { @@ -1073,18 +1065,18 @@ impl SlagAllocator { decommit: usize, mut pa: CA, avail: RevocablePipe, - ) -> Self { - let first_slag = unsafe { pa.alloc() } as *mut Slag; + ) -> Option { + let first_slag = unsafe { pa.alloc()? } as *mut Slag; unsafe { Slag::init(first_slag, meta.as_ref().expect("metadata null")); }; - SlagAllocator { + Some(SlagAllocator { m: meta, slag: first_slag, pages: pa, available: avail, eager_decommit_threshold: decommit, - } + }) } pub fn new( max_objects: usize, @@ -1093,33 +1085,33 @@ impl SlagAllocator { cutoff_factor: f64, eager_decommit: usize, mut pa: CA, - ) -> Self { + ) -> Option { // This is a bit wasteful as one metadata object consumes will wind up consuming a page. In // the dynamic allocator these are packed more tightly. let meta = Box::into_raw(Box::new(compute_metadata( object_size, - pa.backing_memory().page_size(), + pa.page_size(), index, cutoff_factor, max_objects, AllocType::SmallSlag, ))); - let first_slag = unsafe { pa.alloc() } as *mut Slag; + let first_slag = unsafe { pa.alloc()? } as *mut Slag; unsafe { Slag::init(first_slag, meta.as_ref().expect("metadata null")); }; - let cleanup = PageCleanup::new(pa.backing_memory().page_size()); - SlagAllocator { + let cleanup = PageCleanup::new(pa.page_size(), pa.page_size()); + Some(SlagAllocator { m: meta, slag: first_slag, pages: pa, available: RevocablePipe::new_size_cleanup(8, cleanup), eager_decommit_threshold: eager_decommit, - } + }) } /// Re-initialize a non-empty `AllocIter`; potentially getting a new `Slag` to do so. 
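A sketch of the allocation order in `CoarseAllocator::alloc` for `PageAlloc` above: try a dirty (still-committed) page first, then a clean one, and only then fall back to carving fresh pages from the source — the step that can now fail and makes the whole call return `Option`. `Vec`s stand in for the real `BagPipe`s.

```rust
struct Pages {
    dirty: Vec<*mut u8>,
    clean: Vec<*mut u8>,
}

impl Pages {
    // Stand-in for `refresh_pages`, i.e. `self.creek.carve(..)?`.
    fn refresh(&mut self) -> Option<*mut u8> {
        None
    }

    fn alloc(&mut self) -> Option<*mut u8> {
        self.dirty
            .pop()
            .or_else(|| self.clean.pop())
            .or_else(|| self.refresh())
    }
}
```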
- pub unsafe fn refresh(&mut self) -> AllocIter { + pub unsafe fn refresh(&mut self) -> Option { let s_ref = &*self.slag; let meta = &*self.m; let (_claimed, was) = s_ref.rc.unclaim(); @@ -1156,7 +1148,7 @@ impl SlagAllocator { _claimed, "claiming slag either during initialization or due to being over cutoff" ); - s_ref.refresh(meta) + Some(s_ref.refresh(meta)) } else { // we need a new slag! // first we try and get a slag from the available slagpipe. If it is empty, then we get @@ -1168,7 +1160,7 @@ impl SlagAllocator { slab } Err(_) => { - let new_raw = self.pages.alloc() as *mut Slag; + let new_raw = self.pages.alloc()? as *mut Slag; if (*new_raw).meta.load(Ordering::Relaxed) != self.m { Slag::init(new_raw, meta); } @@ -1179,7 +1171,7 @@ impl SlagAllocator { let s_ref = self.slag.as_mut().expect("s_ref_2"); // let s_ref = &*self.slag; let claimed = s_ref.rc.claim(); alloc_debug_assert!(claimed, "claiming new slag after refresh"); - s_ref.refresh(meta) + Some(s_ref.refresh(meta)) } } @@ -1262,7 +1254,7 @@ impl SlagAllocator { impl Clone for SlagAllocator { fn clone(&self) -> Self { let mut new_page_handle = self.pages.clone(); - let first_slag = unsafe { new_page_handle.alloc() as *mut Slag }; + let first_slag = unsafe { new_page_handle.alloc().expect("failed to allocate slag") as *mut Slag }; unsafe { Slag::init( first_slag, diff --git a/elfmalloc/src/sources.rs b/elfmalloc/src/sources.rs index 5681cd0..8c23e3d 100644 --- a/elfmalloc/src/sources.rs +++ b/elfmalloc/src/sources.rs @@ -1,4 +1,4 @@ -// Copyright 2017 the authors. See the 'Copyright and license' section of the +// Copyright 2017-2018 the authors. See the 'Copyright and license' section of the // README.md file at the top-level directory of this repository. // // Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or @@ -6,17 +6,21 @@ // copied, modified, or distributed except according to those terms. //! Low-level data-structures for getting more memory from the system. +extern crate alloc; +extern crate mmap_alloc; +use alloc::allocator::{Alloc, Layout}; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, AtomicPtr, Ordering}; use std::mem; -use super::utils::{likely, mmap}; +use super::utils::{likely, MMAP}; /// A generator of chunks of memory providing an `sbrk`-like interface. pub trait MemorySource where Self: Clone, { - fn new(page_size: usize) -> Self; + /// Create a new `MemorySource` or return `None` if allocation failed. + fn new(page_size: usize) -> Option; /// The smallest unit of memory that can be `carve`d. fn page_size(&self) -> usize; /// Return `npages` fresh pages from the `Creek`. Each of these pages is aligned to @@ -44,8 +48,9 @@ pub struct MmapSource { unsafe impl Send for MmapSource {} impl MemorySource for MmapSource { - fn new(page_size: usize) -> MmapSource { - MmapSource { page_size: page_size.next_power_of_two() } + fn new(page_size: usize) -> Option { + alloc_assert!(page_size.is_power_of_two()); + Some(MmapSource { page_size }) } fn page_size(&self) -> usize { self.page_size @@ -53,57 +58,27 @@ impl MemorySource for MmapSource { fn carve(&self, npages: usize) -> Option<*mut u8> { trace!("carve({:?})", npages); - // faster mod for power-of-2 sizes. - fn mod_size(x: usize, n: usize) -> usize { - x & (n - 1) - } - // There is a faster path available when our local page size is less than or equal to the - // system one. 
- let system_page_size = mmap::page_size(); - if self.page_size <= system_page_size { - return mmap::fallible_map(npages * self.page_size); - } - // We want to return pages aligned to our page size, which is larger than the - // system page size. As a result, we want to allocate an extra page to guarantee a slice of - // the memory that is aligned to the larger page size. - let target_size = npages * self.page_size; - let req_size = target_size + self.page_size; - mmap::fallible_map(req_size).and_then(|mem| { - let mem_num = mem as usize; - - alloc_debug_assert_eq!(mod_size(mem_num, system_page_size), 0); - - // region at the end that is not needed. - let rem1 = mod_size(mem_num, self.page_size); - // region at the beginning that is not needed. - let rem2 = self.page_size - rem1; - unsafe { - let res = mem.offset(rem2 as isize); - alloc_debug_assert_eq!(mod_size(res as usize, self.page_size), 0); - if rem1 > 0 { - mmap::unmap(res.offset(target_size as isize), rem1); - } - if rem2 > 0 { - mmap::unmap(mem, rem2); - } - Some(res) - } - }) + // Even if self.page_size is larger than the system page size, that's OK + // because we're using mmap-alloc's large-align feature, which allows + // allocations to be aligned to any alignment, even larger than the page size. + unsafe { (&*MMAP).alloc(Layout::from_size_align(npages * self.page_size, self.page_size).unwrap()).ok() } } } -/// Base address and size of a memory map. +/// Base address and layout of a memory map. /// -/// This could also just be a `*mut [u8]`, but having two fields is more explicit. We need a new -/// type because the `Drop` implementation calls `unmap`. +/// We need a new type because the `Drop` implementation calls `unmap`. #[derive(Debug)] -struct MapAddr(*mut u8, usize); +struct MapAddr{ + base: *mut u8, + layout: Layout, +} impl Drop for MapAddr { fn drop(&mut self) { unsafe { - mmap::unmap(self.0, self.1); + (&*MMAP).dealloc(self.base, self.layout.clone()); } } } @@ -160,7 +135,7 @@ impl MemorySource for Creek { .as_ref() .unwrap() .fetch_add(npages, Ordering::Relaxed); - if likely((new_bump + npages) * self.page_size < self.map_info.1) { + if likely((new_bump + npages) * self.page_size < self.map_info.layout.size()) { Some(self.base.offset((new_bump * self.page_size) as isize)) } else { None @@ -175,23 +150,39 @@ impl MemorySource for Creek { /// Page size and heap size should be powers of two. The allocator may want to reserve some /// pages for itself (or for alignment reasons), as a result it is a good idea to have /// heap_size be much larger than page_size. 
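A sketch of the simplified `carve` above: with mmap-alloc's `large-align` feature the source no longer over-maps and trims by hand, it just requests `npages * page_size` bytes aligned to `page_size`, even when that alignment exceeds the system page size. The std global allocator stands in for the crate's shared `MMAP: MapAlloc` handle here.

```rust
use std::alloc::{alloc, Layout};

fn carve(page_size: usize, npages: usize) -> Option<*mut u8> {
    assert!(npages > 0 && page_size.is_power_of_two());
    // Mirrors `(&*MMAP).alloc(Layout::from_size_align(npages * self.page_size,
    // self.page_size).unwrap()).ok()` in the patch.
    let layout = Layout::from_size_align(npages * page_size, page_size).ok()?;
    let ptr = unsafe { alloc(layout) };
    if ptr.is_null() {
        None
    } else {
        Some(ptr)
    }
}
```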
- fn new(page_size: usize) -> Self { - use self::mmap::fallible_map; - let get_heap = || { + fn new(page_size: usize) -> Option { + // let get_heap = || { + // let mut heap_size: usize = 2 << 40; + // while heap_size > (1 << 30) { + // let layout = Layout::from_size_align(heap_size, page_size).unwrap(); + // if let Ok(heap) = (&*MMAP).alloc(layout) { + // return (heap, heap_size); + // } + // heap_size /= 2; + // } + // return None; + // }; + // lots of stuff breaks if this isn't true + alloc_assert!(page_size.is_power_of_two()); + alloc_assert!(page_size > mem::size_of::()); + + // first, let's grab some memory; + let (orig_base, heap_size) = { let mut heap_size: usize = 2 << 40; - while heap_size > (1 << 30) { - if let Some(heap) = fallible_map(heap_size) { - return (heap, heap_size); + loop { + if heap_size <= (1 << 30) { + return None; + } + let layout = Layout::from_size_align(heap_size, page_size).unwrap(); + if let Ok(heap) = unsafe { (&*MMAP).alloc(layout) } { + break (heap, heap_size); } heap_size /= 2; } - alloc_panic!("unable to map heap") }; - // lots of stuff breaks if this isn't true - alloc_assert!(page_size.is_power_of_two()); - alloc_assert!(page_size > mem::size_of::()); - // first, let's grab some memory; - let (orig_base, heap_size) = get_heap(); + + + // let (orig_base, heap_size) = get_heap(); info!("created heap of size {}", heap_size); let orig_addr = orig_base as usize; let (slush_addr, real_addr) = { @@ -213,12 +204,12 @@ impl MemorySource for Creek { }; (base as *mut u8, (base + page_size) as *mut u8) }; - Creek { + Some(Creek { page_size: page_size, - map_info: Arc::new(MapAddr(orig_base, heap_size)), + map_info: Arc::new(MapAddr{base: orig_base, layout: Layout::from_size_align(heap_size, page_size).unwrap()}), base: real_addr, bump: AtomicPtr::new(slush_addr as *mut AtomicUsize), - } + }) } } @@ -227,7 +218,7 @@ impl MemoryBlock for Creek { check_bump!(self); let it_num = it as usize; let base_num = self.base as usize; - it_num >= base_num && it_num < base_num + self.map_info.1 + it_num >= base_num && it_num < base_num + self.map_info.layout.size() } } diff --git a/elfmalloc/src/utils.rs b/elfmalloc/src/utils.rs index 4b1acca..4f437af 100644 --- a/elfmalloc/src/utils.rs +++ b/elfmalloc/src/utils.rs @@ -1,4 +1,4 @@ -// Copyright 2017 the authors. See the 'Copyright and license' section of the +// Copyright 2017-2018 the authors. See the 'Copyright and license' section of the // README.md file at the top-level directory of this repository. // // Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or @@ -6,50 +6,58 @@ // copied, modified, or distributed except according to those terms. //! Some basic utilities used throughout the allocator code. 
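A sketch of the reservation loop in the rewritten `Creek::new` above: start by asking for a 2 TiB region and halve the request until the map succeeds, giving up (and returning `None` rather than panicking) once the request would drop to 1 GiB or below. The closure stands in for the `(&*MMAP).alloc(..)` call on a `Layout::from_size_align(heap_size, page_size)` layout.

```rust
fn reserve_heap<F>(mut try_map: F) -> Option<(*mut u8, usize)>
where
    F: FnMut(usize) -> Option<*mut u8>,
{
    let mut heap_size: usize = 2 << 40;
    loop {
        if heap_size <= (1 << 30) {
            // Propagated instead of the old alloc_panic!("unable to map heap").
            return None;
        }
        if let Some(base) = try_map(heap_size) {
            return Some((base, heap_size));
        }
        heap_size /= 2;
    }
}
```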
-use std::cmp; +extern crate alloc; +extern crate mmap_alloc; +extern crate sysconf; +use alloc::allocator::{Alloc, Layout}; use std::ops::{Deref, DerefMut}; use std::cell::UnsafeCell; -pub mod mmap { - extern crate mmap_alloc; - extern crate sysconf; - use self::mmap_alloc::MapAllocBuilder; - use super::super::alloc::allocator::{Alloc, Layout}; +lazy_static!{ pub static ref MMAP: mmap_alloc::MapAlloc = mmap_alloc::MapAlloc::default(); } - pub fn page_size() -> usize { - self::sysconf::page::pagesize() - } - - pub fn map(size: usize) -> *mut u8 { - fallible_map(size).expect("mmap should not fail") - } - - pub fn fallible_map(size: usize) -> Option<*mut u8> { - unsafe { - if let Ok(s) = MapAllocBuilder::default() - .exec(true) - .build() - .alloc(Layout::from_size_align(size, 1).unwrap()) { - Some(s) - } else { - None - } - } - } - - pub unsafe fn unmap(p: *mut u8, len: usize) { - MapAllocBuilder::default().exec(true).build().dealloc( - p, - Layout::from_size_align(len, 1).unwrap(), - ) - } - pub unsafe fn uncommit(p: *mut u8, len: usize) { - MapAllocBuilder::default().exec(true).build().uncommit( - p, - Layout::from_size_align(len, 1).unwrap(), - ) - } +pub fn page_size() -> usize { + self::sysconf::page::pagesize() } +// pub mod mmap { +// extern crate mmap_alloc; +// extern crate sysconf; +// use self::mmap_alloc::MapAllocBuilder; +// use super::super::alloc::allocator::{Alloc, Layout}; + +// pub fn page_size() -> usize { +// self::sysconf::page::pagesize() +// } + +// pub fn map(size: usize, align: usize) -> *mut u8 { +// fallible_map(size, align).expect("mmap should not fail") +// } + +// pub fn fallible_map(size: usize, align: usize) -> Option<*mut u8> { +// unsafe { +// if let Ok(s) = MapAllocBuilder::default() +// .exec(true) +// .build() +// .alloc(Layout::from_size_align(size, align).unwrap()) { +// Some(s) +// } else { +// None +// } +// } +// } + +// pub unsafe fn unmap(p: *mut u8, size: usize, align: usize) { +// MapAllocBuilder::default().exec(true).build().dealloc( +// p, +// Layout::from_size_align(size, align).unwrap(), +// ) +// } +// pub unsafe fn uncommit(p: *mut u8, size: usize, align: usize) { +// MapAllocBuilder::default().exec(true).build().uncommit( +// p, +// Layout::from_size_align(size, align).unwrap(), +// ) +// } +// } // we use the unlikely intrinsic if it is available. @@ -73,9 +81,9 @@ pub unsafe fn likely(b: bool) -> bool { /// A `LazyInitializable` type can be constructed from `Params`. /// /// Types that implement this trate can be wrapped in the `Lazy` construct. -pub trait LazyInitializable { +pub trait LazyInitializable: Sized { type Params; - fn init(p: &Self::Params) -> Self; + fn init(p: &Self::Params) -> Option; } /// A `Lazy` instance of a type `T` keeps `T::Params` strict but only initializes the value with @@ -120,7 +128,7 @@ impl Deref for Lazy { fn deref(&self) -> &T { let state = unsafe { &mut *self.val.get() }; if unsafe { unlikely(state.is_none()) } { - *state = Some(T::init(&self.params)); + *state = T::init(&self.params); } state.as_ref().unwrap() } @@ -132,7 +140,7 @@ impl DerefMut for Lazy { fn deref_mut(&mut self) -> &mut T { let state = unsafe { &mut *self.val.get() }; if unsafe { unlikely(state.is_none()) } { - *state = Some(T::init(&self.params)); + *state = T::init(&self.params); } state.as_mut().unwrap() } @@ -152,23 +160,17 @@ pub struct TypedArray { // TODO: replace with non-null once that stabilizes. 
data: *mut T, len: usize, - mapped: usize, } impl TypedArray { - pub fn new(size: usize) -> TypedArray { - use std::mem::size_of; - let page_size = mmap::page_size(); - let bytes = size_of::() * size; - let rem = bytes % page_size; - let n_pages = bytes / page_size + cmp::min(1, rem); - let region_size = n_pages * page_size; - let mem = mmap::map(region_size); - TypedArray { + pub fn new(size: usize) -> Option> { + alloc_assert!(::std::mem::size_of::() > 0); + let layout = Layout::new::().repeat(size).unwrap().0; + let mem = unsafe { (&*MMAP).alloc(layout).ok()? }; + Some(TypedArray { data: mem as *mut T, len: size, - mapped: region_size, - } + }) } pub fn iter(&self) -> TypedArrayIter { @@ -189,7 +191,8 @@ impl TypedArray { } pub unsafe fn destroy(&self) { - mmap::unmap(self.data as *mut u8, self.mapped); + let layout = Layout::new::().repeat(self.len).unwrap().0; + (&*MMAP).dealloc(self.data as *mut u8, layout); } } @@ -197,8 +200,8 @@ impl TypedArray { pub struct OwnedArray(TypedArray); impl OwnedArray { - pub fn new(size: usize) -> OwnedArray { - OwnedArray(TypedArray::new(size)) + pub fn new(size: usize) -> Option> { + Some(OwnedArray(TypedArray::new(size)?)) } } diff --git a/elfmalloc/travis.sh b/elfmalloc/travis.sh index 86eb005..b9f26bc 100755 --- a/elfmalloc/travis.sh +++ b/elfmalloc/travis.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2017 the authors. See the 'Copyright and license' section of the +# Copyright 2017-2018 the authors. See the 'Copyright and license' section of the # README.md file at the top-level directory of this repository. # # Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or @@ -17,6 +17,6 @@ exit 0 travis-cargo --only nightly build RUST_BACKTRACE=1 travis-cargo --only nightly test -for feature in prime_schedules huge_segments no_lazy_region nightly; do +for feature in prime_schedules huge_segments no_lazy_region local_cache use_default_allocator print_stats magazine_layout c-api nightly; do RUST_BACKTRACE=1 travis-cargo --only nightly test -- --features "$feature" done
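For reference, a sketch of the reworked `TypedArray::new`/`destroy` pair from `utils.rs`: the array layout is derived from the element type (the patch uses the old `Layout::repeat`; `Layout::array` is the equivalent spelling used here) and construction is fallible. The std global allocator stands in for the shared `MMAP` handle, and `len` is assumed to be nonzero.

```rust
use std::alloc::{alloc, dealloc, Layout};
use std::mem;

struct TypedArray<T> {
    data: *mut T,
    len: usize,
}

impl<T> TypedArray<T> {
    fn new(len: usize) -> Option<TypedArray<T>> {
        // Mirrors the patch's `alloc_assert!(::std::mem::size_of::<T>() > 0)`.
        assert!(mem::size_of::<T>() > 0 && len > 0);
        let layout = Layout::array::<T>(len).ok()?;
        let data = unsafe { alloc(layout) } as *mut T;
        if data.is_null() {
            None
        } else {
            Some(TypedArray { data, len })
        }
    }

    unsafe fn destroy(&self) {
        // The layout is recomputed from the element type and length, so the
        // separate `mapped` field is no longer needed.
        dealloc(self.data as *mut u8, Layout::array::<T>(self.len).unwrap());
    }
}
```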