switch to Talc memory allocator
Compared to linked_list_allocator, I measured better performance.
The difference is small, but large enough to justify switching crates.
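
The benchmark itself is not part of this commit. As a rough illustration only, a host-side micro-benchmark for comparing global allocators might time a burst of small allocations under whichever #[global_allocator] the binary is built with; the sizes and iteration counts below are arbitrary assumptions, not the figures behind this change.

use std::time::Instant;

fn main() {
	let start = Instant::now();
	let mut live: Vec<Vec<u8>> = Vec::with_capacity(1024);
	for i in 0..100_000usize {
		// mix of sizes so the allocator has to split and merge free blocks
		live.push(vec![0u8; 16 + (i % 240)]);
		if live.len() == 1024 {
			// free everything in one burst and start over
			live.clear();
		}
	}
	println!("allocation burst took {:?}", start.elapsed());
}

Building and running this once per allocator under test gives only a coarse comparison; it does not model the kernel's own allocation pattern.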
stlankes committed Aug 7, 2023
1 parent 02ef0d7 commit 64ef8b5
Showing 7 changed files with 87 additions and 291 deletions.
3 changes: 3 additions & 0 deletions Cargo.lock

Cargo.lock is a generated file; its diff is not rendered here.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -89,7 +89,7 @@ num = { version = "0.4", default-features = false }
num-traits = { version = "0.2", default-features = false }
num-derive = "0.4"
zerocopy = "0.6"
talc = { version = "2", default-features = false }
talc = { version = "2" }
time = { version = "0.3", default-features = false }
pci_types = { version = "0.5" }

14 changes: 13 additions & 1 deletion src/lib.rs
@@ -114,9 +114,21 @@ fn trivial_test() {
	panic!("Test called");
}

#[cfg(target_os = "none")]
static mut ARENA: [u8; 0x2000] = [0; 0x2000];

#[cfg(target_os = "none")]
#[global_allocator]
static ALLOCATOR: LockedAllocator = LockedAllocator::empty();
static mut ALLOCATOR: LockedAllocator = LockedAllocator(
	talc::Talc::new(unsafe {
		// if we're in a hosted environment, the Rust runtime may allocate before
		// main() is called, so we need to initialize the arena automatically
		talc::InitOnOom::new(talc::Span::from_slice(
			ARENA.as_slice() as *const [u8] as *mut [u8]
		))
	})
	.lock(),
);

/// Interface to allocate memory from system heap
///
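The 0x2000-byte ARENA above only has to cover allocations made before the kernel's memory subsystem is up; afterwards the real heap can be handed to the allocator through LockedAllocator::init, which is added in src/mm/allocator.rs below. A minimal sketch of such a handover, assuming the crate context of src/lib.rs; the function name and its call site during boot are illustrative assumptions, not part of this diff.

// Hypothetical helper, not part of this commit.
unsafe fn switch_to_real_heap(heap_start: *mut u8, heap_size: usize) {
	// Safety: the caller guarantees that [heap_start, heap_start + heap_size)
	// is mapped, writable, and used for nothing else.
	ALLOCATOR.init(heap_start, heap_size);
}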
70 changes: 70 additions & 0 deletions src/mm/allocator.rs
@@ -0,0 +1,70 @@
//! Implementation of the HermitCore Allocator for dynamically allocating heap memory
//! in the kernel.

use core::alloc::{GlobalAlloc, Layout};

use align_address::Align;
use hermit_sync::RawInterruptTicketMutex;
use talc::{InitOnOom, Span, Talck};

use crate::HW_DESTRUCTIVE_INTERFERENCE_SIZE;

pub struct LockedAllocator(pub Talck<RawInterruptTicketMutex, InitOnOom>);

impl LockedAllocator {
	#[inline]
	fn align_layout(layout: Layout) -> Layout {
		let size = layout.size().align_up(HW_DESTRUCTIVE_INTERFERENCE_SIZE);
		let align = layout.align().max(HW_DESTRUCTIVE_INTERFERENCE_SIZE);
		Layout::from_size_align(size, align).unwrap()
	}

	pub unsafe fn init(&self, heap_bottom: *mut u8, heap_size: usize) {
		let arena = Span::from_base_size(heap_bottom, heap_size);
		unsafe {
			self.0.talc().init(arena);
		}
	}
}

/// To avoid false sharing, the global memory allocator aligns
/// all requests to a cache line.
unsafe impl GlobalAlloc for LockedAllocator {
	unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
		let layout = Self::align_layout(layout);
		unsafe { self.0.alloc(layout) }
	}

	unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
		let layout = Self::align_layout(layout);
		unsafe { self.0.dealloc(ptr, layout) }
	}
}

#[cfg(all(test, not(target_os = "none")))]
mod tests {
	use core::mem;

	use super::*;

	#[test]
	fn empty() {
		let mut arena: [u8; 0x1000] = [0; 0x1000];
		let allocator: LockedAllocator = LockedAllocator(
			talc::Talc::new(unsafe {
				talc::InitOnOom::new(talc::Span::from_slice(
					arena.as_slice() as *const [u8] as *mut [u8]
				))
			})
			.lock(),
		);

		let layout = Layout::from_size_align(1, 1).unwrap();
		// we have 4 KiB of memory
		assert!(unsafe { !allocator.alloc(layout.clone()).is_null() });

		let layout = Layout::from_size_align(0x1000, mem::align_of::<usize>()).unwrap();
		let addr = unsafe { allocator.alloc(layout) };
		assert!(addr.is_null());
	}
}
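
The align_layout helper above rounds every request up to HW_DESTRUCTIVE_INTERFERENCE_SIZE so that independently allocated objects never share a cache line. A small host-side illustration of that rounding, assuming a 64-byte interference size (the real constant is target-dependent and defined in the kernel crate); the bit math reimplements the align_up call from the align_address crate.

use core::alloc::Layout;

// Assumed value for illustration only.
const HW_DESTRUCTIVE_INTERFERENCE_SIZE: usize = 64;

// Same rounding as LockedAllocator::align_layout, written with plain bit math.
fn align_layout(layout: Layout) -> Layout {
	let size = (layout.size() + HW_DESTRUCTIVE_INTERFERENCE_SIZE - 1)
		& !(HW_DESTRUCTIVE_INTERFERENCE_SIZE - 1);
	let align = layout.align().max(HW_DESTRUCTIVE_INTERFERENCE_SIZE);
	Layout::from_size_align(size, align).unwrap()
}

fn main() {
	let small = Layout::from_size_align(1, 1).unwrap();
	let padded = align_layout(small);
	// a 1-byte request becomes a 64-byte block with 64-byte alignment
	assert_eq!((padded.size(), padded.align()), (64, 64));
	println!("{small:?} -> {padded:?}");
}

The padding trades a little memory for fewer cache lines bouncing between cores that allocate independently.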
99 changes: 0 additions & 99 deletions src/mm/allocator/bootstrap.rs

This file was deleted.

48 changes: 0 additions & 48 deletions src/mm/allocator/bump.rs

This file was deleted.

