From a7f89643d579688c0532641adc3d1744d1577e53 Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Fri, 9 Feb 2024 06:47:24 -0600 Subject: [PATCH 01/13] Renamed memory tracker to page allocator. Reimplemented page allocator internal logic to simplify the reasoning. Signed-off-by: Wojciech Ozga --- .../src/core/heap_allocator/allocator.rs | 2 +- .../src/core/initialization/mod.rs | 18 +- .../core/memory_protector/mmu/page_size.rs | 4 + .../core/memory_protector/mmu/page_table.rs | 8 +- .../memory_protector/mmu/page_table_memory.rs | 8 +- .../src/core/page_allocator/mod.rs | 2 +- .../src/core/page_allocator/page.rs | 5 +- .../src/core/page_allocator/page_allocator.rs | 243 ++++++++++-------- 8 files changed, 166 insertions(+), 124 deletions(-) diff --git a/security-monitor/src/core/heap_allocator/allocator.rs b/security-monitor/src/core/heap_allocator/allocator.rs index 9ffbf6b..48b7007 100644 --- a/security-monitor/src/core/heap_allocator/allocator.rs +++ b/security-monitor/src/core/heap_allocator/allocator.rs @@ -120,7 +120,7 @@ impl HeapAllocator { match self.alloc_dynamic(layout) { Some(address) => address, None => { - debug!("Heap allocation failed. No more pages in the memory tracker"); + debug!("Heap allocation failed. Could not add more memory to the heap because there are no more free pages in the page allocator"); panic!("Out of memory"); } } diff --git a/security-monitor/src/core/initialization/mod.rs b/security-monitor/src/core/initialization/mod.rs index d45b19b..5fca1a3 100644 --- a/security-monitor/src/core/initialization/mod.rs +++ b/security-monitor/src/core/initialization/mod.rs @@ -5,7 +5,7 @@ use crate::core::control_data::{ControlData, HardwareHart, CONTROL_DATA}; use crate::core::interrupt_controller::InterruptController; use crate::core::memory_layout::{ConfidentialMemoryAddress, MemoryLayout}; use crate::core::memory_protector::{HypervisorMemoryProtector, PageSize}; -use crate::core::page_allocator::{MemoryTracker, Page, UnAllocated}; +use crate::core::page_allocator::{Page, PageAllocator, UnAllocated}; use crate::error::{Error, HardwareFeatures, InitType, NOT_INITIALIZED_HART, NOT_INITIALIZED_HARTS}; use alloc::vec::Vec; use core::mem::size_of; @@ -54,7 +54,7 @@ fn init_security_monitor(flattened_device_tree_address: *const u8) -> Result<(), // TODO: make sure the system has enough physical memory let (confidential_memory_start, confidential_memory_end) = initialize_memory_layout(&fdt)?; - // Create page tokens, heap, memory tracker + // Creates page tokens, heap, page allocator initalize_security_monitor_state(confidential_memory_start, confidential_memory_end)?; let number_of_harts = verify_harts(&fdt)?; @@ -145,7 +145,7 @@ fn initalize_security_monitor_state( // start allocating objects on heap, e.g., page tokens. We have to first // initialize the global allocator, which permits us to use heap. To initialize heap // we need to decide what is the confidential memory address range and split this memory - // into regions owned by heap allocator and page allocator (memory tracker). + // into regions owned by heap allocator and page allocator. let confidential_memory_size = confidential_memory_start.offset_from(confidential_memory_end); let number_of_pages = usize::try_from(confidential_memory_size)? / PageSize::smallest().in_bytes(); // calculate if we have enough memory in the system to store page tokens. 
In the worst case we @@ -160,14 +160,14 @@ fn initalize_security_monitor_state( let heap_end_address = MemoryLayout::read().confidential_address_at_offset(&mut heap_start_address, heap_size_in_bytes)?; crate::core::heap_allocator::init_heap(heap_start_address, heap_size_in_bytes); - // Memory tracker starts directly after the heap + // PageAllocator's memory starts directly after the HeapAllocator's memory let page_allocator_start_address = heap_end_address; assert!(page_allocator_start_address.is_aligned_to(PageSize::smallest().in_bytes())); - // Memory tracker takes ownership of the rest of the confidential memory. + // PageAllocator takes ownership of the rest of the confidential memory. let page_allocator_end_address = confidential_memory_end; - // It is safe to construct the memory tracker because we own the corresponding memory region and pass this - // ownership to the memory tracker. - unsafe { MemoryTracker::initialize(page_allocator_start_address, page_allocator_end_address)? }; + // It is safe to construct the PageAllocator because we own the corresponding memory region and pass this + // ownership to the PageAllocator. + unsafe { PageAllocator::initialize(page_allocator_start_address, page_allocator_end_address)? }; unsafe { InterruptController::initialize()? }; CONTROL_DATA.call_once(|| RwLock::new(ControlData::new())); @@ -179,7 +179,7 @@ fn prepare_harts(number_of_harts: usize) -> Result<(), Error> { // we need to allocate stack for the dumped state of each physical HART. let mut harts_states = Vec::with_capacity(number_of_harts); for hart_id in 0..number_of_harts { - let stack = MemoryTracker::acquire_continous_pages(1, PageSize::Size2MiB)?.remove(0); + let stack = PageAllocator::acquire_continous_pages(1, PageSize::Size2MiB)?.remove(0); let hypervisor_memory_protector = HypervisorMemoryProtector::create(); debug!("HART[{}] stack {:x}-{:x}", hart_id, stack.start_address(), stack.end_address()); harts_states.insert(hart_id, HardwareHart::init(hart_id, stack, hypervisor_memory_protector)); diff --git a/security-monitor/src/core/memory_protector/mmu/page_size.rs b/security-monitor/src/core/memory_protector/mmu/page_size.rs index c9c88a0..25f2b8e 100644 --- a/security-monitor/src/core/memory_protector/mmu/page_size.rs +++ b/security-monitor/src/core/memory_protector/mmu/page_size.rs @@ -49,4 +49,8 @@ impl PageSize { pub fn smallest() -> PageSize { PageSize::Size4KiB } + + pub fn all_from_largest_to_smallest() -> alloc::vec::Vec { + alloc::vec![Self::Size128TiB, Self::Size512GiB, Self::Size1GiB, Self::Size2MiB, Self::Size4KiB] + } } diff --git a/security-monitor/src/core/memory_protector/mmu/page_table.rs b/security-monitor/src/core/memory_protector/mmu/page_table.rs index c6bf498..c561e6e 100644 --- a/security-monitor/src/core/memory_protector/mmu/page_table.rs +++ b/security-monitor/src/core/memory_protector/mmu/page_table.rs @@ -7,7 +7,7 @@ use crate::core::memory_protector::mmu::page_table_entry::{ }; use crate::core::memory_protector::mmu::page_table_memory::PageTableMemory; use crate::core::memory_protector::mmu::paging_system::{PageTableLevel, PagingSystem}; -use crate::core::page_allocator::{MemoryTracker, SharedPage}; +use crate::core::page_allocator::{PageAllocator, SharedPage}; use crate::error::Error; use alloc::boxed::Box; use alloc::vec::Vec; @@ -65,7 +65,7 @@ impl PageTable { } else if PageTableBits::is_leaf(entry_raw) { let address = NonConfidentialMemoryAddress::new(PageTableAddress::decode(entry_raw))?; let page_size = paging_system.page_size(level); - let page 
= MemoryTracker::acquire_continous_pages(1, page_size)? + let page = PageAllocator::acquire_continous_pages(1, page_size)? .remove(0) .copy_from_non_confidential_memory(address) .map_err(|_| Error::PageTableCorrupted())?; @@ -163,7 +163,7 @@ impl PageTable { self.page_table_memory.set_entry(index, &entry); let entry_to_remove = core::mem::replace(&mut self.entries[index], entry); if let PageTableEntry::Leaf(page, _, _) = entry_to_remove { - MemoryTracker::release_page(page.deallocate()); + PageAllocator::release_page(page.deallocate()); } } } @@ -174,7 +174,7 @@ impl Drop for PageTable { // that own a page. self.entries.drain(..).for_each(|entry| { if let PageTableEntry::Leaf(page, _, _) = entry { - MemoryTracker::release_page(page.deallocate()); + PageAllocator::release_page(page.deallocate()); } }); } diff --git a/security-monitor/src/core/memory_protector/mmu/page_table_memory.rs b/security-monitor/src/core/memory_protector/mmu/page_table_memory.rs index 84a81f1..620d04c 100644 --- a/security-monitor/src/core/memory_protector/mmu/page_table_memory.rs +++ b/security-monitor/src/core/memory_protector/mmu/page_table_memory.rs @@ -6,7 +6,7 @@ use crate::core::memory_layout::{MemoryLayout, NonConfidentialMemoryAddress}; use crate::core::memory_protector::mmu::page_table_entry::PageTableEntry; use crate::core::memory_protector::mmu::paging_system::PageTableLevel; use crate::core::memory_protector::mmu::PageSize; -use crate::core::page_allocator::{Allocated, MemoryTracker, Page}; +use crate::core::page_allocator::{Allocated, Page, PageAllocator}; use crate::error::Error; use alloc::vec::Vec; use core::ops::Range; @@ -26,7 +26,7 @@ impl PageTableMemory { address: NonConfidentialMemoryAddress, paging_system: PagingSystem, level: PageTableLevel, ) -> Result { let number_of_pages = paging_system.configuration_pages(level); - let pages = MemoryTracker::acquire_continous_pages(number_of_pages, Self::PAGE_SIZE)? + let pages = PageAllocator::acquire_continous_pages(number_of_pages, Self::PAGE_SIZE)? 
.into_iter() .enumerate() .map(|(i, page)| { @@ -42,7 +42,7 @@ impl PageTableMemory { pub(super) fn empty(paging_system: PagingSystem, level: PageTableLevel) -> Result { let number_of_pages = paging_system.configuration_pages(level); - let pages = MemoryTracker::acquire_continous_pages(number_of_pages, Self::PAGE_SIZE)?.into_iter().map(|f| f.zeroize()).collect(); + let pages = PageAllocator::acquire_continous_pages(number_of_pages, Self::PAGE_SIZE)?.into_iter().map(|f| f.zeroize()).collect(); let number_of_entries = paging_system.entries(level); let entry_size = paging_system.entry_size(); Ok(Self { pages, number_of_entries, entry_size }) @@ -99,6 +99,6 @@ impl PageTableMemory { impl Drop for PageTableMemory { fn drop(&mut self) { let deallocated_pages: Vec<_> = self.pages.drain(..).map(|p| p.deallocate()).collect(); - MemoryTracker::release_pages(deallocated_pages); + PageAllocator::release_pages(deallocated_pages); } } diff --git a/security-monitor/src/core/page_allocator/mod.rs b/security-monitor/src/core/page_allocator/mod.rs index f3d9008..bfadac8 100644 --- a/security-monitor/src/core/page_allocator/mod.rs +++ b/security-monitor/src/core/page_allocator/mod.rs @@ -2,7 +2,7 @@ // SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich // SPDX-License-Identifier: Apache-2.0 pub use page::{Allocated, Page, PageState, UnAllocated}; -pub use page_allocator::MemoryTracker; +pub use page_allocator::PageAllocator; pub use shared_page::SharedPage; mod page; diff --git a/security-monitor/src/core/page_allocator/page.rs b/security-monitor/src/core/page_allocator/page.rs index 10d0cd9..11bd7f1 100644 --- a/security-monitor/src/core/page_allocator/page.rs +++ b/security-monitor/src/core/page_allocator/page.rs @@ -85,9 +85,8 @@ impl Page { } impl Page { - /// Clears the entire memory content by writing 0s to it and then converts - /// the Page from Allocated to UnAllocated so it can be returned to the - /// memory tracker. + /// Clears the entire memory content by writing 0s to it and then converts the Page from Allocated to UnAllocated so it can be returned + /// to the page allocator. pub fn deallocate(mut self) -> Page { self.clear(); Page { address: self.address, size: self.size, _marker: PhantomData } diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index ae98ebc..622fc8f 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -8,32 +8,31 @@ use crate::error::Error; use alloc::collections::BTreeMap; use alloc::vec; use alloc::vec::Vec; -use core::ops::Range; use spin::{Once, RwLock, RwLockWriteGuard}; -const NOT_INITIALIZED_PAGE_ALLOCATOR: &str = "Bug. Could not access memory tracker because it is not initialized"; +const NOT_INITIALIZED_PAGE_ALLOCATOR: &str = "Bug. Could not access page allocator because it is not initialized"; -/// A static global structure containing unallocated pages. Once<> guarantees -/// that it the memory tracker can only be initialized once. -static PAGE_ALLOCATOR: Once> = Once::new(); +/// A static global structure containing unallocated pages. Once<> guarantees that it the PageAllocator can only be initialized once. +static PAGE_ALLOCATOR: Once> = Once::new(); -/// Memory tracker allocates pages of confidential memory. It guarantees that a single page is not -/// allocated twice. It does so by giving away page tokens that represent ownership of a page in a -/// confidental memory. 
Page tokens are created when constructing the memory tracker. -pub struct MemoryTracker { +/// PageAllocator jobs is to pass ownership of free pages residing in the confidential memory. It guarantees that a physical page is not +/// allocated twice. It does so by giving away page tokens that represent ownership of a physical page located in a confidental memory. +/// PageAllocator constructor creates page tokens (maintaining an invariant that there are no two page tokens describing the same physical +/// address). +pub struct PageAllocator { map: BTreeMap>>, } -impl<'a> MemoryTracker { - /// Initializes the global instance of a `MemoryTracker`. Returns error if it has already been initialized. +impl<'a> PageAllocator { + /// Initializes the global instance of a `PageAllocator`. Returns error if it has already been initialized. /// /// # Arguments: /// - /// See the `MemoryTracker::new` for requirements on arguments. + /// See the `PageAllocator::new` for requirements on arguments. /// /// # Safety /// - /// See the `MemoryTracker::new` for safety requirements. + /// See the `PageAllocator::new` for safety requirements. pub unsafe fn initialize(memory_start: ConfidentialMemoryAddress, memory_end: *const usize) -> Result<(), Error> { let page_allocator = unsafe { Self::new(memory_start, memory_end) }?; assure_not!(PAGE_ALLOCATOR.is_completed(), Error::Reinitialization())?; @@ -41,19 +40,24 @@ impl<'a> MemoryTracker { Ok(()) } - /// Constructs the memory tracker over the memory region defined by start and end addresses. + /// Constructs the PageAllocator over the memory region defined by start and end addresses. /// It creates page tokens of unallocated pages. /// /// # Arguments: /// /// `memory_start` address must be aligned to the smallest page size. - /// `memory_end` does not belong to the memory region owned by the memory tracker. The total memory - /// size of the memory tracker must be a multiply of the smallest page size. + /// `memory_end` does not belong to the memory region owned by the PageAllocator. The total memory + /// size assigned to the PageAllocator must be a multiply of the smallest page size. + /// + /// # Guarantees + /// + /// * The map contains keys for every possible page size. + /// * There are no two page tokens that describe the same memory address. /// /// # Safety /// /// This function must only be called only once during the system lifecycle. The caller must guarantee - /// that the memory tracker becomes the exclusive owner of the memory region described by the input + /// that the PageAllocator becomes the exclusive owner of the memory region described by the input /// arguments. 
unsafe fn new(memory_start: ConfidentialMemoryAddress, memory_end: *const usize) -> Result { debug!("Memory tracker {:x}-{:x}", memory_start.as_usize(), memory_end as usize); @@ -62,127 +66,162 @@ impl<'a> MemoryTracker { let mut map = BTreeMap::new(); let memory_layout = MemoryLayout::read(); - let mut page_address = memory_start; - for page_size in &[PageSize::Size1GiB, PageSize::Size2MiB, PageSize::Size4KiB] { - let free_memory_in_bytes = usize::try_from(page_address.offset_from(memory_end))?; - let number_of_new_pages = free_memory_in_bytes / page_size.in_bytes(); - let new_pages = (0..number_of_new_pages) - .map(|i| { - let page_offset_in_bytes = i * page_size.in_bytes(); - let address = - memory_layout.confidential_address_at_offset_bounded(&mut page_address, page_offset_in_bytes, memory_end)?; - // Safety: It is safe to create this page token here if: - // 1) this `MemoryTracker` constructor is guaranteed to be called only once - // during the system lifetime - // 2) all pages created here are guaranteed to be disjoined. - let new_page = Page::::init(address, page_size.clone()); - Ok(new_page) - }) - .collect::, Error>>()?; - debug!("Created {} page tokens of size {:?}", new_pages.len(), page_size); - let pages_size_in_bytes = new_pages.len() * page_size.in_bytes(); - map.insert(page_size.clone(), new_pages); - - match memory_layout.confidential_address_at_offset_bounded(&mut page_address, pages_size_in_bytes, memory_end) { - Ok(ptr) => page_address = ptr, - Err(_) => break, - } + + let mut next_address = Ok(memory_start); + for page_size in PageSize::all_from_largest_to_smallest() { + let page_tokens = match next_address { + Ok(ref mut address) => { + let page_tokens = Self::create_page_tokens(address, memory_end, page_size)?; + let occupied_memory_in_bytes = page_tokens.len() * page_size.in_bytes(); + next_address = memory_layout.confidential_address_at_offset_bounded(address, occupied_memory_in_bytes, memory_end); + page_tokens + } + Err(_) => Vec::<_>::with_capacity(512), + }; + debug!("Created {} page tokens of size {:?}", page_tokens.len(), page_size); + map.insert(page_size.clone(), page_tokens); } Ok(Self { map }) } + /// Creates page tokens of the given size over the given memory region. + unsafe fn create_page_tokens( + memory_start: &mut ConfidentialMemoryAddress, memory_end: *const usize, page_size: PageSize, + ) -> Result>, Error> { + let memory_layout = MemoryLayout::read(); + let free_memory_in_bytes = usize::try_from(memory_start.offset_from(memory_end))?; + let number_of_new_pages = free_memory_in_bytes / page_size.in_bytes(); + (0..number_of_new_pages) + .map(|page_number| { + let page_offset_in_bytes = page_number * page_size.in_bytes(); + let address = memory_layout.confidential_address_at_offset_bounded(memory_start, page_offset_in_bytes, memory_end)?; + // Safety: It is safe to create this page token here if: + // 1) this `MemoryTracker` constructor is guaranteed to be called only once + // during the system lifetime + // 2) all pages created here are guaranteed to be disjoined. + Ok(Page::::init(address, page_size.clone())) + }) + .collect::, Error>>() + } + + /// Returns page tokens that all together have ownership over a continous unallocated memory region of the requested size. Returns error + /// if could not obtain write access to the global instance of the page allocator or if there is not enough page tokens satisfying the + /// requested criteria. 
pub fn acquire_continous_pages(number_of_pages: usize, page_size: PageSize) -> Result>, Error> { - let pages = Self::try_write(|tracker| Ok(tracker.acquire(number_of_pages, page_size)))?; + let pages = Self::try_write(|page_allocator| Ok(page_allocator.acquire(number_of_pages, page_size)))?; assure_not!(pages.is_empty(), Error::OutOfMemory())?; Ok(pages) } + /// Consumes the page tokens given by the caller, allowing for their further acquisition. This is equivalent of deallocation of the + /// physical memory region owned by the returned page tokens. + /// + /// TODO: to prevent fragmentation, run a procedure that will try to combine page tokens of smaller sizes into page tokens of bigger + /// sizes. Otherwise, after long run, the security monitor's might start occupying to much memory (due to large number of page tokens) + /// and being slow. pub fn release_pages(pages: Vec>) { - let _ = Self::try_write(|tracker| { + let _ = Self::try_write(|page_allocator| { Ok(pages.into_iter().for_each(|page| { - tracker.map.get_mut(&page.size()).and_then(|v| Some(v.push(page))); + page_allocator.map.get_mut(&page.size()).and_then(|v| Some(v.push(page))); })) }) - .inspect_err(|_| debug!("Memory leak: failed to store released pages in the memory tracker")); + .inspect_err(|_| debug!("Memory leak: failed to store released pages in the page allocator")); } pub fn release_page(page: Page) { Self::release_pages(vec![page]) } + /// Returns vector of unallocated page tokens representing a continous memory region. If it failes to find allocation within free pages + /// of the requested size, it divides larger page tokens. Empty vector is returns if there are not enough page tokens in the system that + /// meet the requested criteria. fn acquire(&mut self, number_of_pages: usize, page_size: PageSize) -> Vec> { - self.find_allocation(number_of_pages, page_size) - .and_then(|range| self.map.get_mut(&page_size).and_then(|pages| Some(pages.drain(range).collect()))) - .unwrap_or(vec![]) + let mut available_pages = self.acquire_continous_pages_of_given_size(number_of_pages, page_size); + // it might be that there is not enough page tokens of the requested page size. In such a case, let's try to divide page tokens of + // larger page sizes and try the allocation again. + if available_pages.is_empty() { + self.divide_pages(page_size); + available_pages = self.acquire_continous_pages_of_given_size(number_of_pages, page_size); + } + available_pages } - // this function will divide larger pages when it failes to find allocation within free pages of the requested size. - fn find_allocation(&mut self, number_of_pages: usize, page_size: PageSize) -> Option> { - if self.find_allocation_within_page_size(number_of_pages, page_size).is_none() { - self.divide_pages(page_size); + /// Tries to allocate a continous chunk of physical memory composed of the requested number of pages. Returns a vector of unallocated + /// page tokens, all of them having the same size, or an empty vector if the allocation fails. + fn acquire_continous_pages_of_given_size(&mut self, number_of_pages: usize, page_size: PageSize) -> Vec> { + // Below unwrap is safe because the PageAllocator constructor guarantees that the map contains keys for every possible page size. + let pages = self.map.get_mut(&page_size).unwrap(); + if pages.len() < number_of_pages { + // early return because there is not enough page tokens for the requested page size. 
+ return vec![]; } - self.find_allocation_within_page_size(number_of_pages, page_size) + + // Checks if consecutive pages at the given range compose a continous memory region. The assumption is that pages are sorted. + // Thus, it is enough to look check if all neighboring page tokens compose a continous memory region. + let is_memory_region_continous = |pages: &mut Vec>, start_index: usize, end_index: usize| { + (start_index..(end_index - 1)) + .map(|page_index| pages[page_index].end_address() == pages[page_index + 1].start_address()) + .fold(true, |accumulator, value| accumulator && value) + }; + + let mut allocated_pages = Vec::with_capacity(number_of_pages); + let last_possible_index = pages.len() - number_of_pages; + (0..last_possible_index) + .find(|&allocation_start_index| { + let allocation_end_index = allocation_start_index + number_of_pages; + is_memory_region_continous(pages, allocation_start_index, allocation_end_index) + }) + .inspect(|allocation_start_index| { + // we found allocation, lets return page tokens to the caller + (0..number_of_pages).for_each(|_| { + // `Vec::push` appends to the end of the vector, so we preserve the order of pages. `Vec::remove` removes the page token + // at the given index and shifts left all other page tokens, so we preserve the order of pages in the map + allocated_pages.push(pages.remove(*allocation_start_index)) + }) + }); + allocated_pages } - fn divide_pages(&mut self, page_size: PageSize) -> bool { - let mut result = false; - let mut page_size_to_divide = page_size.larger(); - while let Some(fs) = page_size_to_divide { - if fs == page_size { + /// Tries to divide existing page tokens, so that the PageAllocator has page tokens of the requested page size. + fn divide_pages(&mut self, page_size: PageSize) { + let mut page_size_to_divide_next = page_size.larger(); + while let Some(page_size_to_divide_now) = page_size_to_divide_next { + if page_size_to_divide_now == page_size { break; } - if self.divide_page(fs) { - page_size_to_divide = fs.smaller(); - result = true; + if self.divide_page(page_size_to_divide_now) { + // as soon as we manage to find and divide a larger page token, we start to to iterate back over smaller page sizes and + // divide them into even smaller page tokens. Eventually, we end up with the requested page_size that will exit the while + // loop. + page_size_to_divide_next = page_size_to_divide_now.smaller(); } else { - page_size_to_divide = fs.larger(); - } - } - result - } - - /// Tries to divide a page of size 'from' into smaller pages - fn divide_page(&mut self, from: PageSize) -> bool { - if let Some(to) = from.smaller() { - if let Some(page) = self.map.get_mut(&from).and_then(|pages| pages.pop()) { - if let Some(ref mut pages) = self.map.get_mut(&to) { - pages.append(&mut page.divide()); - return true; - } + // in the case when there is no more page tokens in the system, the page_size_to_divide becomes eventually None and + // exits the while loop. + page_size_to_divide_next = page_size_to_divide_now.larger(); } } - false } - fn find_allocation_within_page_size(&mut self, number_of_pages: usize, page_size: PageSize) -> Option> { - if let Some(pages) = self.map.get_mut(&page_size) { - if pages.len() < number_of_pages { - return None; - } - // check if there is a continous region of requested pages - let are_pages_continous = |pages: &mut Vec>, j: usize| { - // should we do below calculation using pointers and its `byte_offset_from` method? 
- pages[j].end_address() == pages[j + 1].start_address() - }; - - for i in 0..(pages.len() - number_of_pages) { - for j in i..(i + number_of_pages) { - if !are_pages_continous(pages, j) { - // this is not a continous allocation - break; - } - if j == i + number_of_pages - 1 { - return Some(Range { start: i, end: i + number_of_pages }); - } - } - } - } - None + /// Tries to divide a page of the given size into smaller pages. Returns false if there is no page of the given size of the given size + /// is the smallest possible page size supported by the architecture. + fn divide_page(&mut self, from_size: PageSize) -> bool { + from_size + .smaller() + .and_then(|to_size| { + // Below unwraps are safe because the PageAllocator constructor guarantees that the map contains keys for every possible + // page size. + self.map.get_mut(&from_size).unwrap().pop().and_then(|page| { + self.map.get_mut(&to_size).unwrap().append(&mut page.divide()); + Some(true) + }) + }) + .unwrap_or(false) } + /// returns a mutable reference to the PageAllocator after obtaining a lock on the mutex fn try_write(op: O) -> Result - where O: FnOnce(&mut RwLockWriteGuard<'static, MemoryTracker>) -> Result { + where O: FnOnce(&mut RwLockWriteGuard<'static, PageAllocator>) -> Result { op(&mut PAGE_ALLOCATOR.get().expect(NOT_INITIALIZED_PAGE_ALLOCATOR).write()) } } From a54c157413869878f4c28af43387abe6b2300d6d Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Fri, 9 Feb 2024 14:49:24 +0100 Subject: [PATCH 02/13] Update security-monitor/src/core/page_allocator/page_allocator.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Lennard Gäher <33029057+lgaeher@users.noreply.github.com> --- security-monitor/src/core/page_allocator/page_allocator.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index 622fc8f..717d838 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -15,9 +15,9 @@ const NOT_INITIALIZED_PAGE_ALLOCATOR: &str = "Bug. Could not access page allocat /// A static global structure containing unallocated pages. Once<> guarantees that it the PageAllocator can only be initialized once. static PAGE_ALLOCATOR: Once> = Once::new(); -/// PageAllocator jobs is to pass ownership of free pages residing in the confidential memory. It guarantees that a physical page is not -/// allocated twice. It does so by giving away page tokens that represent ownership of a physical page located in a confidental memory. -/// PageAllocator constructor creates page tokens (maintaining an invariant that there are no two page tokens describing the same physical +/// The `PageAllocator`'s job is to pass ownership of free pages residing in the confidential memory. It guarantees that a physical page is not +/// allocated twice. It does so by giving away `Page` tokens that represent ownership of a physical page located in the confidental memory as described by `MemoryLayout`. +/// `PageAllocator`'s constructor creates page tokens (maintaining an invariant that there are no two page tokens describing the same physical /// address). 
pub struct PageAllocator { map: BTreeMap<PageSize, Vec<Page<UnAllocated>>>, } From 4071f5b903979d8e74f91a62e9da6f991b53cff1 Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Fri, 9 Feb 2024 14:49:33 +0100 Subject: [PATCH 03/13] Update security-monitor/src/core/page_allocator/page_allocator.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Lennard Gäher <33029057+lgaeher@users.noreply.github.com> --- security-monitor/src/core/page_allocator/page_allocator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index 717d838..7fb43b4 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -46,8 +46,8 @@ impl<'a> PageAllocator { /// # Arguments: /// /// `memory_start` address must be aligned to the smallest page size. - /// `memory_end` does not belong to the memory region owned by the PageAllocator. The total memory - /// size assigned to the PageAllocator must be a multiply of the smallest page size. + /// `memory_end` is one-past-the end of the memory region owned by the `PageAllocator`. The total memory + /// size assigned to the `PageAllocator` must be a multiple of the smallest page size. /// /// # Guarantees /// From 57b3e732e5308a00487bd317ada1b87d22cab0d2 Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Fri, 9 Feb 2024 14:49:40 +0100 Subject: [PATCH 04/13] Update security-monitor/src/core/page_allocator/page_allocator.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Lennard Gäher <33029057+lgaeher@users.noreply.github.com> --- security-monitor/src/core/page_allocator/page_allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index 7fb43b4..8623d80 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -56,7 +56,7 @@ impl<'a> PageAllocator { /// /// # Safety /// - /// This function must only be called only once during the system lifecycle. The caller must guarantee + /// This function must only be called once during the system lifecycle. The caller must guarantee /// that the PageAllocator becomes the exclusive owner of the memory region described by the input /// arguments.
unsafe fn new(memory_start: ConfidentialMemoryAddress, memory_end: *const usize) -> Result<Self, Error> { From 396da46bc8b2e928aa74c5268d8a690f138a2fa0 Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Fri, 9 Feb 2024 14:49:49 +0100 Subject: [PATCH 05/13] Update security-monitor/src/core/page_allocator/page_allocator.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Lennard Gäher <33029057+lgaeher@users.noreply.github.com> --- security-monitor/src/core/page_allocator/page_allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index 8623d80..7fe1168 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -99,7 +99,7 @@ impl<'a> PageAllocator { // Safety: It is safe to create this page token here if: // 1) this `MemoryTracker` constructor is guaranteed to be called only once // during the system lifetime - // 2) all pages created here are guaranteed to be disjoined. + // 2) all pages created here are guaranteed to be disjoint. Ok(Page::<UnAllocated>::init(address, page_size.clone())) }) .collect::<Result<Vec<_>, Error>>() From e5f18ab9b051d655d5c179febd8f2444789b0cc9 Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Fri, 9 Feb 2024 14:50:02 +0100 Subject: [PATCH 06/13] Update security-monitor/src/core/page_allocator/page_allocator.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Lennard Gäher <33029057+lgaeher@users.noreply.github.com> --- security-monitor/src/core/page_allocator/page_allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index 7fe1168..936407a 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -106,7 +106,7 @@ impl<'a> PageAllocator { } /// Returns page tokens that all together have ownership over a continous unallocated memory region of the requested size. Returns error - /// if could not obtain write access to the global instance of the page allocator or if there is not enough page tokens satisfying the + /// if it could not obtain write access to the global instance of the page allocator or if there are not enough page tokens satisfying the /// requested criteria.
pub fn acquire_continous_pages(number_of_pages: usize, page_size: PageSize) -> Result<Vec<Page<UnAllocated>>, Error> { let pages = Self::try_write(|page_allocator| Ok(page_allocator.acquire(number_of_pages, page_size)))?; From 431492d95c6a8ccece0210b17441b73b6b972b20 Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Fri, 9 Feb 2024 14:50:16 +0100 Subject: [PATCH 07/13] Update security-monitor/src/core/page_allocator/page_allocator.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Lennard Gäher <33029057+lgaeher@users.noreply.github.com> --- security-monitor/src/core/page_allocator/page_allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index 936407a..973ee2b 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -114,7 +114,7 @@ impl<'a> PageAllocator { Ok(pages) } - /// Consumes the page tokens given by the caller, allowing for their further acquisition. This is equivalent of deallocation of the + /// Consumes the page tokens given by the caller, allowing for their further acquisition. This is equivalent to deallocation of the /// physical memory region owned by the returned page tokens. /// /// TODO: to prevent fragmentation, run a procedure that will try to combine page tokens of smaller sizes into page tokens of bigger From 832b0947fa25512f0a81107add8097331e9fd5a6 Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Fri, 9 Feb 2024 14:50:34 +0100 Subject: [PATCH 08/13] Update security-monitor/src/core/page_allocator/page_allocator.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Lennard Gäher <33029057+lgaeher@users.noreply.github.com> --- security-monitor/src/core/page_allocator/page_allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index 973ee2b..2dac6de 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -134,7 +134,7 @@ impl<'a> PageAllocator { } /// Returns vector of unallocated page tokens representing a continous memory region. If it failes to find allocation within free pages - /// of the requested size, it divides larger page tokens. Empty vector is returns if there are not enough page tokens in the system that + /// of the requested size, it divides larger page tokens. Empty vector is returned if there are not enough page tokens in the system that /// meet the requested criteria.
fn acquire(&mut self, number_of_pages: usize, page_size: PageSize) -> Vec> { let mut available_pages = self.acquire_continous_pages_of_given_size(number_of_pages, page_size); From d78c4101c3a3955a8e54be38a377ef88e96da7df Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Fri, 9 Feb 2024 14:50:42 +0100 Subject: [PATCH 09/13] Update security-monitor/src/core/page_allocator/page_allocator.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Lennard Gäher <33029057+lgaeher@users.noreply.github.com> --- security-monitor/src/core/page_allocator/page_allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index 2dac6de..82f46ff 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -203,7 +203,7 @@ impl<'a> PageAllocator { } } - /// Tries to divide a page of the given size into smaller pages. Returns false if there is no page of the given size of the given size + /// Tries to divide a page of the given size into smaller pages. Returns false if there is no page of the given size or the given size /// is the smallest possible page size supported by the architecture. fn divide_page(&mut self, from_size: PageSize) -> bool { from_size From 227344ede6110acb722c7fa696505bfd90cfc2fd Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Fri, 9 Feb 2024 14:50:49 +0100 Subject: [PATCH 10/13] Update security-monitor/src/core/page_allocator/page_allocator.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Lennard Gäher <33029057+lgaeher@users.noreply.github.com> --- security-monitor/src/core/page_allocator/page_allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index 82f46ff..40a8f65 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -196,7 +196,7 @@ impl<'a> PageAllocator { // loop. page_size_to_divide_next = page_size_to_divide_now.smaller(); } else { - // in the case when there is no more page tokens in the system, the page_size_to_divide becomes eventually None and + // in the case when there are no more page tokens in the system, the page_size_to_divide becomes eventually None and // exits the while loop. 
page_size_to_divide_next = page_size_to_divide_now.larger(); } From 3a18cb6a949c4f74c7517008ed11eb34bfb6709e Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Fri, 9 Feb 2024 14:50:58 +0100 Subject: [PATCH 11/13] Update security-monitor/src/core/page_allocator/page_allocator.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Lennard Gäher <33029057+lgaeher@users.noreply.github.com> --- security-monitor/src/core/page_allocator/page_allocator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index 40a8f65..5c10108 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -157,8 +157,8 @@ impl<'a> PageAllocator { return vec![]; } - // Checks if consecutive pages at the given range compose a continous memory region. The assumption is that pages are sorted. - // Thus, it is enough to look check if all neighboring page tokens compose a continous memory region. + // Checks if consecutive pages at the given range compose a continuous memory region. The assumption is that pages are sorted. + // Thus, it is enough to check if all neighbouring page tokens compose a continuous memory region. let is_memory_region_continous = |pages: &mut Vec>, start_index: usize, end_index: usize| { (start_index..(end_index - 1)) .map(|page_index| pages[page_index].end_address() == pages[page_index + 1].start_address()) From 1c1eab93607799d167b68e3af79e60e0c8dbf06c Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Mon, 12 Feb 2024 09:23:13 -0600 Subject: [PATCH 12/13] Changed the algorithm of creating page tokens to make sure that pages are aligned to their size as required by the riscv spec Signed-off-by: Wojciech Ozga --- .../src/core/page_allocator/page_allocator.rs | 175 +++++++++++------- security-monitor/src/error.rs | 1 - 2 files changed, 106 insertions(+), 70 deletions(-) diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index 5c10108..7115801 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -10,104 +10,143 @@ use alloc::vec; use alloc::vec::Vec; use spin::{Once, RwLock, RwLockWriteGuard}; -const NOT_INITIALIZED_PAGE_ALLOCATOR: &str = "Bug. Could not access page allocator because it is not initialized"; - -/// A static global structure containing unallocated pages. Once<> guarantees that it the PageAllocator can only be initialized once. +/// A static global structure containing unallocated pages. Once<> guarantees that the PageAllocator can only be initialized once. static PAGE_ALLOCATOR: Once> = Once::new(); -/// The `PageAllocator`'s job is to pass ownership of free pages residing in the confidential memory. It guarantees that a physical page is not -/// allocated twice. It does so by giving away `Page` tokens that represent ownership of a physical page located in the confidental memory as described by `MemoryLayout`. -/// `PageAllocator`'s constructor creates page tokens (maintaining an invariant that there are no two page tokens describing the same physical -/// address). +/// The `PageAllocator`'s job is to pass ownership of free pages residing in the confidential memory. It guarantees that a physical page is +/// not allocated twice. 
It does so by giving away `Page` tokens that represent ownership of a physical page located in the confidental +/// memory as described by `MemoryLayout`. `PageAllocator`'s constructor creates page tokens (maintaining an invariant that there are no two +/// page tokens describing the same physical address). pub struct PageAllocator { map: BTreeMap>>, } impl<'a> PageAllocator { - /// Initializes the global instance of a `PageAllocator`. Returns error if it has already been initialized. + // Usually there are 512 pages of size x that can fit in a single page of size y, where y is next page size larger than x (e.g., 2MiB + // and 4KiB). + const EXPECTED_NUMBER_OF_TOKENS_PER_SIZE: usize = 512; + const NOT_INITIALIZED: &str = "Bug. Could not access page allocator because it is not initialized"; + + /// Initializes the global instance of a `PageAllocator`. Returns error if the `PageAllocator` has already been initialized. /// - /// # Arguments: + /// # Arguments /// - /// See the `PageAllocator::new` for requirements on arguments. + /// See the `PageAllocator::add_memory_region` for requirements on arguments. /// /// # Safety /// - /// See the `PageAllocator::new` for safety requirements. + /// See the `PageAllocator::add_memory_region` for safety requirements. pub unsafe fn initialize(memory_start: ConfidentialMemoryAddress, memory_end: *const usize) -> Result<(), Error> { - let page_allocator = unsafe { Self::new(memory_start, memory_end) }?; assure_not!(PAGE_ALLOCATOR.is_completed(), Error::Reinitialization())?; + let mut page_allocator = Self::empty(); + page_allocator.add_memory_region(memory_start, memory_end); PAGE_ALLOCATOR.call_once(|| RwLock::new(page_allocator)); Ok(()) } - /// Constructs the PageAllocator over the memory region defined by start and end addresses. - /// It creates page tokens of unallocated pages. + /// Constructs an empty page allocator that contains no tokens. /// - /// # Arguments: + /// # Guarantees /// - /// `memory_start` address must be aligned to the smallest page size. - /// `memory_end` is one-past-the end of the memory region owned by the `PageAllocator`. The total memory - /// size assigned to the `PageAllocator` must be a multiple of the smallest page size. + /// * The PageAllocator's map contains keys for every possible page size. + fn empty() -> Self { + let mut map = BTreeMap::new(); + for page_size in PageSize::all_from_largest_to_smallest() { + let page_tokens = Vec::<_>::with_capacity(Self::EXPECTED_NUMBER_OF_TOKENS_PER_SIZE); + map.insert(page_size.clone(), page_tokens); + } + Self { map } + } + + /// Adds a physial memory region to the PageAllocator. The ownership over this memory region is passed from the caller to the + /// PageAllocator. This function constructs page tokens over this memory region and stores them in the PageAllocator. + /// + /// # Arguments + /// + /// `memory_region_start` address must be aligned to the smallest page size and lower than `memory_region_end`. + /// `memory_region_end` address must be aligned to the smallest page size. This address is one-past-the end of the memory region whose + /// ownership is given to the `PageAllocator`. /// /// # Guarantees /// - /// * The map contains keys for every possible page size. /// * There are no two page tokens that describe the same memory address. /// /// # Safety /// - /// This function must only be called once during the system lifecycle. 
The caller must guarantee - /// that the PageAllocator becomes the exclusive owner of the memory region described by the input - /// arguments. - unsafe fn new(memory_start: ConfidentialMemoryAddress, memory_end: *const usize) -> Result { - debug!("Memory tracker {:x}-{:x}", memory_start.as_usize(), memory_end as usize); - assert!(memory_start.is_aligned_to(PageSize::smallest().in_bytes())); - assert!(memory_start.offset_from(memory_end) as usize % PageSize::smallest().in_bytes() == 0); + /// The caller must guarantee that he passes the ownership to the memory region described by the input arguments to the PageAllocator. + unsafe fn add_memory_region(&mut self, memory_region_start: ConfidentialMemoryAddress, memory_region_end: *const usize) { + debug!("Memory tracker: adding memory region: 0x{:x} - 0x{:x}", memory_region_start.as_usize(), memory_region_end as usize); + assert!(memory_region_start.is_aligned_to(PageSize::smallest().in_bytes())); + assert!(memory_region_end.is_aligned_to(PageSize::smallest().in_bytes())); + assert!(memory_region_start.as_usize() < memory_region_end as usize); - let mut map = BTreeMap::new(); + // Our strategy is to create as few page tokens as possible to keep the memory overhead as low as possible. Therefore, we prefer to + // create page tokens for the largest page size when possible. We use a greedy approach. We look for the largest possible page that + // can be accomodated for the given address and create a page token for it. We start with the smallest possible page size and then + // keep increasing it until we find the largest possible page size. Then, we keep decreasing the page size until we reach the end of + // the memory region. let memory_layout = MemoryLayout::read(); + let mut memory_address = Some(memory_region_start); + let mut page_size = PageSize::smallest(); - let mut next_address = Ok(memory_start); - for page_size in PageSize::all_from_largest_to_smallest() { - let page_tokens = match next_address { - Ok(ref mut address) => { - let page_tokens = Self::create_page_tokens(address, memory_end, page_size)?; - let occupied_memory_in_bytes = page_tokens.len() * page_size.in_bytes(); - next_address = memory_layout.confidential_address_at_offset_bounded(address, occupied_memory_in_bytes, memory_end); - page_tokens - } - Err(_) => Vec::<_>::with_capacity(512), - }; - debug!("Created {} page tokens of size {:?}", page_tokens.len(), page_size); - map.insert(page_size.clone(), page_tokens); - } + // We might have to create a few tokens of 4KiB until we reach the address at which we can fit a 2MiB page. Then, we might have to + // create a few tokens for 2MiB pages until we get the address where 1 GiB page would fit. Consider the following example, + // where we first create 7x 4 KiB tokens (++), then 3x 2 MiB tokens (**), and only then start creating 1 GiB tokens (##). + // + // ++ ++ ++ ++ ++ ++ ++ *********************** *********************** *********************** #### + // || | | | | | | | || | | | | | | | || | | | | | | | || | | | | | | | || ... + // ^memory_region_start ^2 MiB ^2 MiB ^2 MiB ^1GiB + // + // At certain point we will not be able to fit more page tokens of the highest size (1GiB in our example) because remaining space + // will be lower than the used page size. We might, however, still fit tokens of smaller sizes. This will be a analogous (but + // opposite) situation to the one presented above. According to the following example, we will fit 3x 2 MiB (**) and 4x 4 KiB (++) + // page tokens to the remaining memory region. 
+ // + // *********************** *********************** *********************** ++ ++ ++ ++ + // || | | | | | | | || | | | | | | | || | | | | | | | || | | | | | | | || ... + // ^1 GiB ^2 MiB ^2 MiB ^2 MiB ^memory_region_end - Ok(Self { map }) - } + // According to the RISC-V spec, pages must be aligned to their size. + let is_address_page_aligned = + |address: &ConfidentialMemoryAddress, page_size: &PageSize| address.is_aligned_to(page_size.in_bytes()); + // Page can be created only if all bytes are belonging to the given memory region + let can_create_page = |address: &ConfidentialMemoryAddress, page_size: &PageSize| { + let page_last_address = page_size.in_bytes() - 1; + memory_layout.confidential_address_at_offset_bounded(&address, page_last_address, memory_region_end).is_ok() + }; - /// Creates page tokens of the given size over the given memory region. - unsafe fn create_page_tokens( - memory_start: &mut ConfidentialMemoryAddress, memory_end: *const usize, page_size: PageSize, - ) -> Result>, Error> { - let memory_layout = MemoryLayout::read(); - let free_memory_in_bytes = usize::try_from(memory_start.offset_from(memory_end))?; - let number_of_new_pages = free_memory_in_bytes / page_size.in_bytes(); - (0..number_of_new_pages) - .map(|page_number| { - let page_offset_in_bytes = page_number * page_size.in_bytes(); - let address = memory_layout.confidential_address_at_offset_bounded(memory_start, page_offset_in_bytes, memory_end)?; - // Safety: It is safe to create this page token here if: - // 1) this `MemoryTracker` constructor is guaranteed to be called only once - // during the system lifetime - // 2) all pages created here are guaranteed to be disjoint. - Ok(Page::::init(address, page_size.clone())) - }) - .collect::, Error>>() + while let Some(address) = memory_address.take() { + // Let's find the largest possible size of a page that could align to this address. + while let Some(larger_size) = page_size.larger().filter(|larger_size| is_address_page_aligned(&address, &larger_size)) { + page_size = larger_size; + } + // Now let's find the largest size of a page that really fits in the given memory region. We do not have to check the alignment, + // because the smallest pages sizes are multiplies of the larger page sizes. + while let Some(smaller_size) = page_size.smaller().filter(|smaller_size| !can_create_page(&address, &smaller_size)) { + page_size = smaller_size; + } + // The following line ensures that the while loop will complete because, regardless of whether we manage to create a page token + // or not, we will increment the `memory_address` in each loop so that it eventually passes the end of the given memory region. + memory_address = memory_layout.confidential_address_at_offset_bounded(&address, page_size.in_bytes(), memory_region_end).ok(); + // If the next memory address (`memory_address`) is still in the memory range, then we are sure we can create the page token. + // Otherwise, we must check the boundary condition: Are we creating the last page token over a memory whose last byte + // (`address`+`page_size.in_bytes()`) is next to the end of the memory region (`memory_region_end`)? + if memory_address.is_some() || can_create_page(&address, &page_size) { + let new_page_token = Page::::init(address, page_size.clone()); + // Below unwrap is safe because the PageAllocator constructor guarantees the initialization of the map for all possible page + // sizes. 
+ self.map.get_mut(&page_size).unwrap().push(new_page_token); + } + } + + self.map.iter().for_each(|(page_size, tokens)| { + debug!("Created {} page tokens of size {:?}", tokens.len(), page_size); + }) } /// Returns page tokens that all together have ownership over a continous unallocated memory region of the requested size. Returns error - /// if it could not obtain write access to the global instance of the page allocator or if there are not enough page tokens satisfying the - /// requested criteria. + /// if it could not obtain write access to the global instance of the page allocator or if there are not enough page tokens satisfying + /// the requested criteria. pub fn acquire_continous_pages(number_of_pages: usize, page_size: PageSize) -> Result>, Error> { let pages = Self::try_write(|page_allocator| Ok(page_allocator.acquire(number_of_pages, page_size)))?; assure_not!(pages.is_empty(), Error::OutOfMemory())?; @@ -134,8 +173,8 @@ impl<'a> PageAllocator { } /// Returns vector of unallocated page tokens representing a continous memory region. If it failes to find allocation within free pages - /// of the requested size, it divides larger page tokens. Empty vector is returned if there are not enough page tokens in the system that - /// meet the requested criteria. + /// of the requested size, it divides larger page tokens. Empty vector is returned if there are not enough page tokens in the system + /// that meet the requested criteria. fn acquire(&mut self, number_of_pages: usize, page_size: PageSize) -> Vec> { let mut available_pages = self.acquire_continous_pages_of_given_size(number_of_pages, page_size); // it might be that there is not enough page tokens of the requested page size. In such a case, let's try to divide page tokens of @@ -160,9 +199,7 @@ impl<'a> PageAllocator { // Checks if consecutive pages at the given range compose a continuous memory region. The assumption is that pages are sorted. // Thus, it is enough to check if all neighbouring page tokens compose a continuous memory region. 
let is_memory_region_continous = |pages: &mut Vec>, start_index: usize, end_index: usize| { - (start_index..(end_index - 1)) - .map(|page_index| pages[page_index].end_address() == pages[page_index + 1].start_address()) - .fold(true, |accumulator, value| accumulator && value) + (start_index..(end_index - 1)).all(|page_index| pages[page_index].end_address() == pages[page_index + 1].start_address()) }; let mut allocated_pages = Vec::with_capacity(number_of_pages); @@ -222,6 +259,6 @@ impl<'a> PageAllocator { /// returns a mutable reference to the PageAllocator after obtaining a lock on the mutex fn try_write(op: O) -> Result where O: FnOnce(&mut RwLockWriteGuard<'static, PageAllocator>) -> Result { - op(&mut PAGE_ALLOCATOR.get().expect(NOT_INITIALIZED_PAGE_ALLOCATOR).write()) + op(&mut PAGE_ALLOCATOR.get().expect(Self::NOT_INITIALIZED).write()) } } diff --git a/security-monitor/src/error.rs b/security-monitor/src/error.rs index 2a64afc..2f40862 100644 --- a/security-monitor/src/error.rs +++ b/security-monitor/src/error.rs @@ -64,7 +64,6 @@ pub enum Error { ReachedMaxNumberOfRemoteHartRequests(), #[error("Sending interrupt error")] InterruptSendingError(), - // SBI HSM extension related errors #[error("Cannot start a confidential hart because it is not in the Stopped state.")] CannotStartNotStoppedHart(), From 97b2231998c37ea4bc927ad844cb484be53f3862 Mon Sep 17 00:00:00 2001 From: Wojciech Ozga Date: Mon, 12 Feb 2024 09:52:35 -0600 Subject: [PATCH 13/13] minor changes in comments Signed-off-by: Wojciech Ozga --- security-monitor/src/core/page_allocator/page_allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security-monitor/src/core/page_allocator/page_allocator.rs b/security-monitor/src/core/page_allocator/page_allocator.rs index 7115801..056d7fe 100644 --- a/security-monitor/src/core/page_allocator/page_allocator.rs +++ b/security-monitor/src/core/page_allocator/page_allocator.rs @@ -121,7 +121,7 @@ impl<'a> PageAllocator { page_size = larger_size; } // Now let's find the largest size of a page that really fits in the given memory region. We do not have to check the alignment, - // because the smallest pages sizes are multiplies of the larger page sizes. + // because the larger pages sizes are multiplies of the smaller page sizes. while let Some(smaller_size) = page_size.smaller().filter(|smaller_size| !can_create_page(&address, &smaller_size)) { page_size = smaller_size; }
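Illustrative sketch (not part of the patches above): the long comment added in PATCH 12 describes a greedy strategy for creating page tokens, namely always emitting the largest page that is both aligned to the current address (as the RISC-V spec requires) and still fully contained in the remaining memory region. The self-contained Rust model below is a simplified restatement of that idea, not the security monitor's code: it uses plain usize addresses instead of ConfidentialMemoryAddress and MemoryLayout bounds checks, only three page sizes, and the hypothetical names SketchPageSize and sketch_token_layout.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum SketchPageSize {
    Size4KiB,
    Size2MiB,
    Size1GiB,
}

impl SketchPageSize {
    fn in_bytes(self) -> usize {
        match self {
            Self::Size4KiB => 4 * 1024,
            Self::Size2MiB => 2 * 1024 * 1024,
            Self::Size1GiB => 1024 * 1024 * 1024,
        }
    }

    fn larger(self) -> Option<Self> {
        match self {
            Self::Size4KiB => Some(Self::Size2MiB),
            Self::Size2MiB => Some(Self::Size1GiB),
            Self::Size1GiB => None,
        }
    }

    fn smaller(self) -> Option<Self> {
        match self {
            Self::Size4KiB => None,
            Self::Size2MiB => Some(Self::Size4KiB),
            Self::Size1GiB => Some(Self::Size2MiB),
        }
    }
}

/// Walks the region [start, end) and returns (address, size) pairs, always picking the largest
/// page size that is aligned to the current address and fully contained in the remaining region.
/// Both start and end must be aligned to the smallest page size.
fn sketch_token_layout(start: usize, end: usize) -> Vec<(usize, SketchPageSize)> {
    assert!(start % SketchPageSize::Size4KiB.in_bytes() == 0);
    assert!(end % SketchPageSize::Size4KiB.in_bytes() == 0);
    assert!(start <= end);
    let mut tokens = Vec::new();
    let mut address = start;
    let mut size = SketchPageSize::Size4KiB;
    while address < end {
        // Grow the candidate size while the current address is aligned to the next larger size.
        while let Some(larger) = size.larger().filter(|s| address % s.in_bytes() == 0) {
            size = larger;
        }
        // Shrink the candidate size while a page of that size would run past the end of the region.
        while address + size.in_bytes() > end {
            match size.smaller() {
                Some(smaller) => size = smaller,
                // Unreachable given the alignment asserts above; kept so the loop cannot spin forever.
                None => return tokens,
            }
        }
        tokens.push((address, size));
        address += size.in_bytes();
    }
    tokens
}

fn main() {
    // Hypothetical 10 MiB region starting 4 KiB past a 2 MiB boundary: the sketch emits 511 pages
    // of 4 KiB up to the next 2 MiB boundary, then 4 pages of 2 MiB, then one final 4 KiB page,
    // mirroring the ASCII diagrams in the PATCH 12 comment.
    let start = 0x20_1000;
    let tokens = sketch_token_layout(start, start + 10 * 1024 * 1024);
    println!("{} tokens, first: {:?}, last: {:?}", tokens.len(), tokens.first(), tokens.last());
}

The sketch keeps the carried-over size between iterations, so after the first large page is emitted the inner loops terminate quickly. The real code in PATCH 12 performs the same walk but goes through MemoryLayout::confidential_address_at_offset_bounded so that every computed address is bounds checked against the end of the region, rather than using the raw pointer arithmetic shown here.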