From 06d9dbf1f44bf1ce64893b3692536f3805261d03 Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Tue, 22 Aug 2023 19:07:02 -0700 Subject: [PATCH] Allow page allocation requests to specify a desired alignment (#1029) * The new `AllocationRequest::AlignedTo` variant can accept an alignment value, specified in number of 4K pages (not bytes). * This is needed to support easier allocation of huge pages, which have a large alignment requirement, e.g., a 2MiB huge page requires a contiguous 512-page allocation aligned to that same boundary of 512 normal 4K pages (512 * 4KiB = 2MiB). --- Cargo.lock | 10 +++ Cargo.toml | 1 + .../test_aligned_page_allocation/Cargo.toml | 15 ++++ .../test_aligned_page_allocation/src/lib.rs | 45 ++++++++++++ kernel/memory/src/lib.rs | 4 ++ kernel/memory_structs/src/lib.rs | 10 +++ kernel/page_allocator/src/lib.rs | 68 ++++++++++++++----- theseus_features/Cargo.toml | 2 + 8 files changed, 139 insertions(+), 16 deletions(-) create mode 100644 applications/test_aligned_page_allocation/Cargo.toml create mode 100644 applications/test_aligned_page_allocation/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index d526ba33f7..b97a4b5912 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3757,6 +3757,15 @@ dependencies = [ "sync_irq", ] +[[package]] +name = "test_aligned_page_allocation" +version = "0.1.0" +dependencies = [ + "app_io", + "log", + "memory", +] + [[package]] name = "test_async" version = "0.1.0" @@ -4047,6 +4056,7 @@ dependencies = [ "seconds_counter", "shell", "swap", + "test_aligned_page_allocation", "test_async", "test_backtrace", "test_block_io", diff --git a/Cargo.toml b/Cargo.toml index 721c060cb5..56bdae404f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,6 +79,7 @@ exclude = [ ## Exclude application crates used for testing specific Theseus functionality. ## TODO: move these to a specific "tests" folder so we can exclude that entire folder. 
+ "applications/test_aligned_page_allocation", "applications/test_backtrace", "applications/test_block_io", "applications/test_channel", diff --git a/applications/test_aligned_page_allocation/Cargo.toml b/applications/test_aligned_page_allocation/Cargo.toml new file mode 100644 index 0000000000..a81b8f9f54 --- /dev/null +++ b/applications/test_aligned_page_allocation/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "test_aligned_page_allocation" +version = "0.1.0" +description = "Tests the `AllocationRequest::AlignedTo` variant, which is needed for huge pages" +authors = ["Kevin Boos "] +edition = "2021" + +[dependencies] +log = "0.4.8" + +[dependencies.memory] +path = "../../kernel/memory" + +[dependencies.app_io] +path = "../../kernel/app_io" diff --git a/applications/test_aligned_page_allocation/src/lib.rs b/applications/test_aligned_page_allocation/src/lib.rs new file mode 100644 index 0000000000..ebe99e7c5f --- /dev/null +++ b/applications/test_aligned_page_allocation/src/lib.rs @@ -0,0 +1,45 @@ +//! A set of basic tests for the [`AllocationRequest::AlignedTo`] variant. + +#![no_std] + +extern crate alloc; + +use alloc::{ + vec::Vec, + string::String, +}; +use app_io::println; +use memory::AllocationRequest; + +static TEST_SET: [usize; 9] = [1, 2, 4, 8, 27, 48, 256, 512, 1024]; + +pub fn main(_args: Vec) -> isize { + match rmain() { + Ok(_) => 0, + Err(e) => { + println!("Error: {}", e); + -1 + } + } +} + +fn rmain() -> Result<(), &'static str> { + for num_pages in TEST_SET.into_iter() { + for alignment in TEST_SET.into_iter() { + println!("Attempting to allocate {num_pages} pages with alignment of {alignment} 4K pages..."); + match memory::allocate_pages_deferred( + AllocationRequest::AlignedTo { alignment_4k_pages: alignment }, + num_pages, + ) { + Ok((ap, _action)) => { + assert_eq!(ap.start().number() % alignment, 0); + assert_eq!(ap.size_in_pages(), num_pages); + println!(" Success: {ap:?}"); + } + Err(e) => println!(" !! 
FAILURE: {e:?}"), + } + } + } + + Ok(()) +} diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs index 600e3063da..a3161bd58e 100644 --- a/kernel/memory/src/lib.rs +++ b/kernel/memory/src/lib.rs @@ -26,6 +26,8 @@ pub use memory_structs::*; pub use page_allocator::{ AllocatedPages, AllocationRequest, + allocate_pages_deferred, + allocate_pages_by_bytes_deferred, allocate_pages, allocate_pages_at, allocate_pages_by_bytes, @@ -37,6 +39,8 @@ pub use page_allocator::{ pub use frame_allocator::{ AllocatedFrames, UnmappedFrames, + allocate_frames_deferred, + allocate_frames_by_bytes_deferred, allocate_frames, allocate_frames_at, allocate_frames_by_bytes, diff --git a/kernel/memory_structs/src/lib.rs b/kernel/memory_structs/src/lib.rs index d7511fcd80..3563eae66a 100644 --- a/kernel/memory_structs/src/lib.rs +++ b/kernel/memory_structs/src/lib.rs @@ -7,6 +7,7 @@ #![no_std] #![feature(step_trait)] +#![feature(int_roundings)] #![allow(incomplete_features)] #![feature(adt_const_params)] @@ -287,6 +288,15 @@ macro_rules! implement_page_frame { number: addr.value() / PAGE_SIZE, } } + + #[doc = "Returns a new `" $TypeName "` that is aligned up from this \ + `" $TypeName "` to the nearest multiple of `alignment_4k_pages`."] + #[doc(alias = "next_multiple_of")] + pub const fn align_up(&self, alignment_4k_pages: usize) -> $TypeName { + $TypeName { + number: self.number.next_multiple_of(alignment_4k_pages) + } + } } impl fmt::Debug for $TypeName { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index fb632a56a5..cdb476c768 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -38,7 +38,7 @@ use static_array_rb_tree::*; /// Certain regions are pre-designated for special usage, specifically the kernel's initial identity mapping. 
-/// They will be allocated from if an address within them is specifically requested; +/// They will be allocated from if an address within them is specifically requested, /// otherwise, they will only be allocated from as a "last resort" if all other non-designated address ranges are exhausted. /// /// Any virtual addresses **less than or equal** to this address are considered "designated". @@ -536,10 +536,15 @@ fn find_specific_chunk( /// If no range is specified, this function first attempts to find a suitable chunk /// that is **not** within the designated regions, /// and only allocates from the designated regions as a backup option. +/// +/// If an alignment is specified (in terms of number of 4KiB pages), then the starting page +/// in the allocated range must be aligned to that number of pages. +/// If no specific alignment is needed, the default alignment of 1 page should be used. fn find_any_chunk( list: &mut StaticArrayRBTree, num_pages: usize, within_range: Option<&PageRange>, + alignment_4k_pages: usize, ) -> Result<(AllocatedPages, DeferredAllocAction<'static>), AllocationError> { let designated_low_end = DESIGNATED_PAGES_LOW_END.get() .ok_or(AllocationError::NotInitialized)?; @@ -555,7 +560,8 @@ fn find_any_chunk( if let Some(chunk) = elem { // Use max and min below to ensure that the range of pages we allocate from // is within *both* the current chunk's bounds and the range's bounds. - let lowest_possible_start_page = *max(chunk.start(), range.start()); + let lowest_possible_start_page = max(chunk.start(), range.start()) + .align_up(alignment_4k_pages); let highest_possible_end_page = *min(chunk.end(), range.end()); if lowest_possible_start_page + num_pages <= highest_possible_end_page { return adjust_chosen_chunk( @@ -589,7 +595,8 @@ fn find_any_chunk( while let Some(chunk) = cursor.get().map(|w| w.deref()) { // Use max and min below to ensure that the range of pages we allocate from // is within *both* the current chunk's bounds and the range's bounds. 
- let lowest_possible_start_page = *max(chunk.start(), range.start()); + let lowest_possible_start_page = max(chunk.start(), range.start()) + .align_up(alignment_4k_pages); let highest_possible_end_page = *min(chunk.end(), range.end()); if lowest_possible_start_page + num_pages <= highest_possible_end_page { return adjust_chosen_chunk( @@ -621,8 +628,14 @@ fn find_any_chunk( Inner::Array(ref mut arr) => { for elem in arr.iter_mut() { if let Some(chunk) = elem { - if num_pages <= chunk.size_in_pages() { - return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::Array(elem)); + let lowest_possible_start_page = chunk.start().align_up(alignment_4k_pages); + if lowest_possible_start_page + num_pages <= *chunk.end() { + return adjust_chosen_chunk( + lowest_possible_start_page, + num_pages, + &chunk.clone(), + ValueRefMut::Array(elem), + ); } } } @@ -644,8 +657,14 @@ fn find_any_chunk( // The first iterates over the lower designated region, from higher addresses to lower, down to zero. let mut cursor = tree.upper_bound_mut(Bound::Included(designated_low_end)); while let Some(chunk) = cursor.get().map(|w| w.deref()) { - if num_pages < chunk.size_in_pages() { - return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::RBTree(cursor)); + let lowest_possible_start_page = chunk.start().align_up(alignment_4k_pages); + if lowest_possible_start_page + num_pages <= *chunk.end() { + return adjust_chosen_chunk( + lowest_possible_start_page, + num_pages, + &chunk.clone(), + ValueRefMut::RBTree(cursor), + ); } cursor.move_prev(); } @@ -657,8 +676,14 @@ fn find_any_chunk( // we already iterated over non-designated pages in the first match statement above, so we're out of memory. 
break; } - if num_pages < chunk.size_in_pages() { - return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::RBTree(cursor)); + let lowest_possible_start_page = chunk.start().align_up(alignment_4k_pages); + if lowest_possible_start_page + num_pages <= *chunk.end() { + return adjust_chosen_chunk( + lowest_possible_start_page, + num_pages, + &chunk.clone(), + ValueRefMut::RBTree(cursor), + ); } cursor.move_prev(); } @@ -729,23 +754,31 @@ fn adjust_chosen_chunk( } -/// Possible options when requested pages from the page allocator. +/// Possible options when requesting pages from the page allocator. pub enum AllocationRequest<'r> { - /// The allocated pages can be located at any virtual address. - Any, /// The allocated pages must start exactly at the given `VirtualAddress`. AtVirtualAddress(VirtualAddress), + /// The allocated pages may be located at any virtual address, + /// but the starting page must be aligned to a multiple of `alignment_4k_pages`. + /// An alignment of `1` page is equivalent to specifying no alignment requirement. + /// + /// Note: alignment is specified in number of 4KiB pages, not number of bytes. + AlignedTo { alignment_4k_pages: usize }, /// The allocated pages can be located anywhere within the given range. WithinRange(&'r PageRange), + /// The allocated pages can be located at any virtual address + /// and have no special alignment requirements beyond a single page. + Any, } + /// The core page allocation routine that allocates the given number of virtual pages, /// optionally at the requested starting `VirtualAddress`. /// /// This simply reserves a range of virtual addresses, it does not allocate /// actual physical memory frames nor do any memory mapping. /// Thus, the returned `AllocatedPages` aren't directly usable until they are mapped to physical frames. -/// +/// /// Allocation is based on a red-black tree and is thus `O(log(n))`. 
/// Fragmentation isn't cleaned up until we're out of address space, but that's not really a big deal. /// @@ -780,11 +813,14 @@ pub fn allocate_pages_deferred( AllocationRequest::AtVirtualAddress(vaddr) => { find_specific_chunk(&mut locked_list, Page::containing_address(vaddr), num_pages) } - AllocationRequest::Any => { - find_any_chunk(&mut locked_list, num_pages, None) + AllocationRequest::AlignedTo { alignment_4k_pages } => { + find_any_chunk(&mut locked_list, num_pages, None, alignment_4k_pages) } AllocationRequest::WithinRange(range) => { - find_any_chunk(&mut locked_list, num_pages, Some(range)) + find_any_chunk(&mut locked_list, num_pages, Some(range), 1) + } + AllocationRequest::Any => { + find_any_chunk(&mut locked_list, num_pages, None, 1) } }; res.map_err(From::from) // convert from AllocationError to &str diff --git a/theseus_features/Cargo.toml b/theseus_features/Cargo.toml index 15d3ee2d6c..b4f504aea4 100644 --- a/theseus_features/Cargo.toml +++ b/theseus_features/Cargo.toml @@ -47,6 +47,7 @@ hello = { path = "../applications/hello", optional = true } raw_mode = { path = "../applications/raw_mode", optional = true } print_fault_log = { path = "../applications/print_fault_log", optional = true } seconds_counter = { path = "../applications/seconds_counter", optional = true } +test_aligned_page_allocation = { path = "../applications/test_aligned_page_allocation", optional = true } test_async = { path = "../applications/test_async", optional = true } test_backtrace = { path = "../applications/test_backtrace", optional = true } test_block_io = { path = "../applications/test_block_io", optional = true } @@ -146,6 +147,7 @@ theseus_tests = [ "hello", "raw_mode", "seconds_counter", + "test_aligned_page_allocation", "test_async", "test_backtrace", "test_block_io",