Page allocator enhancements #36

Merged: 13 commits, Feb 12, 2024
2 changes: 1 addition & 1 deletion security-monitor/src/core/heap_allocator/allocator.rs
@@ -120,7 +120,7 @@ impl HeapAllocator {
match self.alloc_dynamic(layout) {
Some(address) => address,
None => {
debug!("Heap allocation failed. No more pages in the memory tracker");
debug!("Heap allocation failed. Could not add more memory to the heap because there are no more free pages in the page allocator");
panic!("Out of memory");
}
}
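Reviewer note: the new log line reflects how this heap grows. When `alloc_dynamic` cannot satisfy a request, the heap first tries to extend itself with pages from the page allocator and only reports out-of-memory when that source is also exhausted. A minimal, self-contained sketch of that fallback; the toy `Heap` type and its fields are assumptions, not the security monitor's real allocator:

```rust
/// Toy model of the growth path: `free_bytes` stands in for the heap's free
/// list, `free_pages` for the page allocator's remaining inventory.
struct Heap {
    free_bytes: usize,
    free_pages: usize,
}

impl Heap {
    const PAGE_BYTES: usize = 4096;

    fn alloc_dynamic(&mut self, size: usize) -> Option<usize> {
        if self.free_bytes < size {
            // Try to grow the heap by one page before giving up.
            if self.free_pages == 0 {
                // "no more free pages in the page allocator"
                return None;
            }
            self.free_pages -= 1;
            self.free_bytes += Self::PAGE_BYTES;
        }
        if self.free_bytes >= size {
            self.free_bytes -= size;
            Some(size) // placeholder for the allocated address
        } else {
            None
        }
    }
}

fn main() {
    let mut heap = Heap { free_bytes: 0, free_pages: 1 };
    assert!(heap.alloc_dynamic(100).is_some()); // grows by one page, then succeeds
    assert!(heap.alloc_dynamic(8192).is_none()); // exhausted -> caller panics with "Out of memory"
}
```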
18 changes: 9 additions & 9 deletions security-monitor/src/core/initialization/mod.rs
@@ -5,7 +5,7 @@ use crate::core::control_data::{ControlData, HardwareHart, CONTROL_DATA};
use crate::core::interrupt_controller::InterruptController;
use crate::core::memory_layout::{ConfidentialMemoryAddress, MemoryLayout};
use crate::core::memory_protector::{HypervisorMemoryProtector, PageSize};
- use crate::core::page_allocator::{MemoryTracker, Page, UnAllocated};
+ use crate::core::page_allocator::{Page, PageAllocator, UnAllocated};
use crate::error::{Error, HardwareFeatures, InitType, NOT_INITIALIZED_HART, NOT_INITIALIZED_HARTS};
use alloc::vec::Vec;
use core::mem::size_of;
@@ -54,7 +54,7 @@ fn init_security_monitor(flattened_device_tree_address: *const u8) -> Result<(),
// TODO: make sure the system has enough physical memory
let (confidential_memory_start, confidential_memory_end) = initialize_memory_layout(&fdt)?;

- // Create page tokens, heap, memory tracker
+ // Creates page tokens, heap, page allocator
initalize_security_monitor_state(confidential_memory_start, confidential_memory_end)?;

let number_of_harts = verify_harts(&fdt)?;
@@ -145,7 +145,7 @@ fn initalize_security_monitor_state(
// start allocating objects on heap, e.g., page tokens. We have to first
// initialize the global allocator, which permits us to use heap. To initialize heap
// we need to decide what is the confidential memory address range and split this memory
- // into regions owned by heap allocator and page allocator (memory tracker).
+ // into regions owned by heap allocator and page allocator.
let confidential_memory_size = confidential_memory_start.offset_from(confidential_memory_end);
let number_of_pages = usize::try_from(confidential_memory_size)? / PageSize::smallest().in_bytes();
// calculate if we have enough memory in the system to store page tokens. In the worst case we
@@ -160,14 +160,14 @@
let heap_end_address = MemoryLayout::read().confidential_address_at_offset(&mut heap_start_address, heap_size_in_bytes)?;
crate::core::heap_allocator::init_heap(heap_start_address, heap_size_in_bytes);

- // Memory tracker starts directly after the heap
+ // PageAllocator's memory starts directly after the HeapAllocator's memory
let page_allocator_start_address = heap_end_address;
assert!(page_allocator_start_address.is_aligned_to(PageSize::smallest().in_bytes()));
- // Memory tracker takes ownership of the rest of the confidential memory.
+ // PageAllocator takes ownership of the rest of the confidential memory.
let page_allocator_end_address = confidential_memory_end;
- // It is safe to construct the memory tracker because we own the corresponding memory region and pass this
- // ownership to the memory tracker.
- unsafe { MemoryTracker::initialize(page_allocator_start_address, page_allocator_end_address)? };
+ // It is safe to construct the PageAllocator because we own the corresponding memory region and pass this
+ // ownership to the PageAllocator.
+ unsafe { PageAllocator::initialize(page_allocator_start_address, page_allocator_end_address)? };
unsafe { InterruptController::initialize()? };

CONTROL_DATA.call_once(|| RwLock::new(ControlData::new()));
@@ -179,7 +179,7 @@ fn prepare_harts(number_of_harts: usize) -> Result<(), Error> {
// we need to allocate stack for the dumped state of each physical HART.
let mut harts_states = Vec::with_capacity(number_of_harts);
for hart_id in 0..number_of_harts {
- let stack = MemoryTracker::acquire_continous_pages(1, PageSize::Size2MiB)?.remove(0);
+ let stack = PageAllocator::acquire_continous_pages(1, PageSize::Size2MiB)?.remove(0);
let hypervisor_memory_protector = HypervisorMemoryProtector::create();
debug!("HART[{}] stack {:x}-{:x}", hart_id, stack.start_address(), stack.end_address());
harts_states.insert(hart_id, HardwareHart::init(hart_id, stack, hypervisor_memory_protector));
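For review context, the layout computed here is: the heap region comes first, then the PageAllocator owns everything from `heap_end_address` up to `confidential_memory_end`. A hedged sketch of that arithmetic with plain `usize` addresses standing in for `ConfidentialMemoryAddress`; the function and its error strings are illustrative only:

```rust
const SMALLEST_PAGE_BYTES: usize = 4096;

/// Splits confidential memory into (heap region, page allocator region).
fn split_confidential_memory(
    start: usize,
    end: usize,
    heap_size_in_bytes: usize,
) -> Result<((usize, usize), (usize, usize)), &'static str> {
    let heap_end = start
        .checked_add(heap_size_in_bytes)
        .filter(|&addr| addr <= end)
        .ok_or("heap does not fit in confidential memory")?;
    // Mirrors the assert! above: the PageAllocator's region starts directly
    // after the heap and must be aligned to the smallest page size.
    if heap_end % SMALLEST_PAGE_BYTES != 0 {
        return Err("page allocator start address is not page-aligned");
    }
    Ok(((start, heap_end), (heap_end, end)))
}
```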
4 changes: 4 additions & 0 deletions security-monitor/src/core/memory_protector/mmu/page_size.rs
@@ -49,4 +49,8 @@ impl PageSize {
pub fn smallest() -> PageSize {
PageSize::Size4KiB
}

+ pub fn all_from_largest_to_smallest() -> alloc::vec::Vec<PageSize> {
+     alloc::vec![Self::Size128TiB, Self::Size512GiB, Self::Size1GiB, Self::Size2MiB, Self::Size4KiB]
+ }
}
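The new helper enumerates the page sizes in descending order, which is the natural order for greedily tiling a region with the largest pages that fit. A sketch of that use; the byte values are the standard sizes for these page levels, but the function itself is hypothetical, not part of this PR:

```rust
/// Returns (page size in bytes, count) pairs covering `size` bytes, largest
/// pages first, mirroring PageSize::all_from_largest_to_smallest().
fn tile_region(mut size: usize) -> Vec<(usize, usize)> {
    // 128 TiB, 512 GiB, 1 GiB, 2 MiB, 4 KiB
    let page_sizes = [1usize << 47, 1 << 39, 1 << 30, 1 << 21, 1 << 12];
    let mut tiles = Vec::new();
    for bytes in page_sizes {
        let count = size / bytes;
        if count > 0 {
            tiles.push((bytes, count));
            size %= bytes;
        }
    }
    tiles
}

fn main() {
    // 1 GiB + 2 MiB + 4 KiB decomposes into exactly one page of each size.
    let tiles = tile_region((1 << 30) + (1 << 21) + (1 << 12));
    assert_eq!(tiles, vec![(1 << 30, 1), (1 << 21, 1), (1 << 12, 1)]);
}
```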
8 changes: 4 additions & 4 deletions security-monitor/src/core/memory_protector/mmu/page_table.rs
@@ -7,7 +7,7 @@ use crate::core::memory_protector::mmu::page_table_entry::{
};
use crate::core::memory_protector::mmu::page_table_memory::PageTableMemory;
use crate::core::memory_protector::mmu::paging_system::{PageTableLevel, PagingSystem};
- use crate::core::page_allocator::{MemoryTracker, SharedPage};
+ use crate::core::page_allocator::{PageAllocator, SharedPage};
use crate::error::Error;
use alloc::boxed::Box;
use alloc::vec::Vec;
@@ -65,7 +65,7 @@ impl PageTable {
} else if PageTableBits::is_leaf(entry_raw) {
let address = NonConfidentialMemoryAddress::new(PageTableAddress::decode(entry_raw))?;
let page_size = paging_system.page_size(level);
- let page = MemoryTracker::acquire_continous_pages(1, page_size)?
+ let page = PageAllocator::acquire_continous_pages(1, page_size)?
.remove(0)
.copy_from_non_confidential_memory(address)
.map_err(|_| Error::PageTableCorrupted())?;
@@ -163,7 +163,7 @@ impl PageTable {
self.page_table_memory.set_entry(index, &entry);
let entry_to_remove = core::mem::replace(&mut self.entries[index], entry);
if let PageTableEntry::Leaf(page, _, _) = entry_to_remove {
- MemoryTracker::release_page(page.deallocate());
+ PageAllocator::release_page(page.deallocate());
}
}
}
@@ -174,7 +174,7 @@ impl Drop for PageTable {
// that own a page.
self.entries.drain(..).for_each(|entry| {
if let PageTableEntry::Leaf(page, _, _) = entry {
- MemoryTracker::release_page(page.deallocate());
+ PageAllocator::release_page(page.deallocate());
}
});
}
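Both call sites above follow the same ownership rule: a `Leaf` entry owns its page token, and whoever removes the entry must return the deallocated page to the `PageAllocator`. A condensed stand-alone sketch of that RAII pattern; the simplified types below are stand-ins, not the PR's actual definitions:

```rust
struct Page; // stand-in for Page<UnAllocated>

enum PageTableEntry {
    Leaf(Page),
    NotMapped,
}

struct PageTable {
    entries: Vec<PageTableEntry>,
}

impl Drop for PageTable {
    fn drop(&mut self) {
        // Hand every owned page back to the allocator, as the Drop impl
        // in this diff does via PageAllocator::release_page.
        self.entries.drain(..).for_each(|entry| {
            if let PageTableEntry::Leaf(page) = entry {
                release_page(page);
            }
        });
    }
}

// Stand-in for PageAllocator::release_page.
fn release_page(_page: Page) {}

fn main() {
    let table = PageTable { entries: vec![PageTableEntry::Leaf(Page), PageTableEntry::NotMapped] };
    drop(table); // releases the one Leaf page
}
```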
security-monitor/src/core/memory_protector/mmu/page_table_memory.rs
@@ -6,7 +6,7 @@ use crate::core::memory_layout::{MemoryLayout, NonConfidentialMemoryAddress};
use crate::core::memory_protector::mmu::page_table_entry::PageTableEntry;
use crate::core::memory_protector::mmu::paging_system::PageTableLevel;
use crate::core::memory_protector::mmu::PageSize;
- use crate::core::page_allocator::{Allocated, MemoryTracker, Page};
+ use crate::core::page_allocator::{Allocated, Page, PageAllocator};
use crate::error::Error;
use alloc::vec::Vec;
use core::ops::Range;
@@ -26,7 +26,7 @@ impl PageTableMemory {
address: NonConfidentialMemoryAddress, paging_system: PagingSystem, level: PageTableLevel,
) -> Result<Self, Error> {
let number_of_pages = paging_system.configuration_pages(level);
- let pages = MemoryTracker::acquire_continous_pages(number_of_pages, Self::PAGE_SIZE)?
+ let pages = PageAllocator::acquire_continous_pages(number_of_pages, Self::PAGE_SIZE)?
.into_iter()
.enumerate()
.map(|(i, page)| {
@@ -42,7 +42,7 @@

pub(super) fn empty(paging_system: PagingSystem, level: PageTableLevel) -> Result<Self, Error> {
let number_of_pages = paging_system.configuration_pages(level);
- let pages = MemoryTracker::acquire_continous_pages(number_of_pages, Self::PAGE_SIZE)?.into_iter().map(|f| f.zeroize()).collect();
+ let pages = PageAllocator::acquire_continous_pages(number_of_pages, Self::PAGE_SIZE)?.into_iter().map(|f| f.zeroize()).collect();
let number_of_entries = paging_system.entries(level);
let entry_size = paging_system.entry_size();
Ok(Self { pages, number_of_entries, entry_size })
@@ -99,6 +99,6 @@ impl PageTableMemory {
impl Drop for PageTableMemory {
fn drop(&mut self) {
let deallocated_pages: Vec<_> = self.pages.drain(..).map(|p| p.deallocate()).collect();
- MemoryTracker::release_pages(deallocated_pages);
+ PageAllocator::release_pages(deallocated_pages);
}
}
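Worth noting for reviewers: `empty()` zeroizes freshly acquired pages before exposing them as page-table memory, so a reused page cannot leak stale confidential data into a new table. A minimal sketch of that acquire-then-zeroize step; `acquire_page` is a stand-in for `PageAllocator::acquire_continous_pages`, not a real API:

```rust
/// Pretend allocator handing out 4 KiB pages that still hold old data.
fn acquire_page() -> Vec<u8> {
    vec![0xAA; 4096]
}

/// Mirrors the shape of PageTableMemory::empty(): acquire pages, zeroize
/// each one, and only then collect them into the table's backing memory.
fn empty_table_memory(number_of_pages: usize) -> Vec<Vec<u8>> {
    (0..number_of_pages)
        .map(|_| acquire_page())
        .map(|mut page| {
            page.fill(0); // zeroize, like Page::zeroize() in the diff
            page
        })
        .collect()
}

fn main() {
    assert!(empty_table_memory(2).iter().all(|p| p.iter().all(|&b| b == 0)));
}
```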
2 changes: 1 addition & 1 deletion security-monitor/src/core/page_allocator/mod.rs
@@ -2,7 +2,7 @@
// SPDX-FileContributor: Wojciech Ozga <woz@zurich.ibm.com>, IBM Research - Zurich
// SPDX-License-Identifier: Apache-2.0
pub use page::{Allocated, Page, PageState, UnAllocated};
- pub use page_allocator::MemoryTracker;
+ pub use page_allocator::PageAllocator;
pub use shared_page::SharedPage;

mod page;
5 changes: 2 additions & 3 deletions security-monitor/src/core/page_allocator/page.rs
@@ -85,9 +85,8 @@ impl Page<UnAllocated> {
}

impl Page<Allocated> {
- /// Clears the entire memory content by writing 0s to it and then converts
- /// the Page from Allocated to UnAllocated so it can be returned to the
- /// memory tracker.
+ /// Clears the entire memory content by writing 0s to it and then converts the Page from Allocated to UnAllocated so it can be returned
+ /// to the page allocator.
pub fn deallocate(mut self) -> Page<UnAllocated> {
self.clear();
Page { address: self.address, size: self.size, _marker: PhantomData }
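The reworded doc comment describes a typestate guarantee that is easy to miss in the diff: only a `Page<UnAllocated>` can be handed back to the page allocator, and the only way to turn a `Page<Allocated>` into one is `deallocate()`, which clears the memory first. A condensed sketch of the pattern, with simplified fields rather than the PR's full definition:

```rust
use core::marker::PhantomData;

struct UnAllocated;
struct Allocated;

struct Page<S> {
    address: usize,
    size: usize,
    _marker: PhantomData<S>,
}

impl Page<Allocated> {
    /// Zeroing happens before the state change, so the allocator never
    /// receives a page that still holds confidential data.
    fn deallocate(mut self) -> Page<UnAllocated> {
        self.clear();
        Page { address: self.address, size: self.size, _marker: PhantomData }
    }

    fn clear(&mut self) {
        // In the real code this writes zeros over the page's memory.
    }
}

fn main() {
    let page = Page::<Allocated> { address: 0x8000_0000, size: 4096, _marker: PhantomData };
    let _unallocated: Page<UnAllocated> = page.deallocate(); // ready for release_page()
}
```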