Allow page allocation requests to specify a desired alignment (theseus-os#1029)

* The new `AllocationRequest::AlignedTo` variant accepts an
  alignment value, specified as a number of 4K pages (not bytes).

* This is needed to support easier allocation of huge pages,
  which have a large alignment requirement: e.g., a 2MiB huge page
  requires a contiguous 512-page allocation aligned to that
  same boundary of 512 normal 4K pages (512 * 4KiB = 2MiB).
  A usage sketch follows below.
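A usage sketch, modeled on the test app added in this commit (the helper name is hypothetical; this code is not part of the diff itself): reserving the backing pages for one 2MiB huge page.

    use memory::AllocationRequest;

    // Hypothetical helper: reserve 512 contiguous 4K pages, aligned to a
    // 512-page (2MiB) boundary, as a 2MiB huge-page mapping requires.
    fn reserve_2mib_backing() -> Result<(), &'static str> {
        let (pages, _deferred_action) = memory::allocate_pages_deferred(
            AllocationRequest::AlignedTo { alignment_4k_pages: 512 },
            512, // 512 pages * 4KiB = 2MiB
        )?;
        // The starting page number is now a multiple of 512.
        assert_eq!(pages.start().number() % 512, 0);
        Ok(())
    }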
kevinaboos authored and tsoutsman committed Sep 6, 2023
1 parent 379c2f3 commit 06d9dbf
Showing 8 changed files with 139 additions and 16 deletions.
10 changes: 10 additions & 0 deletions Cargo.lock


1 change: 1 addition & 0 deletions Cargo.toml
@@ -79,6 +79,7 @@ exclude = [

    ## Exclude application crates used for testing specific Theseus functionality.
    ## TODO: move these to a specific "tests" folder so we can exclude that entire folder.
    "applications/test_aligned_page_allocation",
    "applications/test_backtrace",
    "applications/test_block_io",
    "applications/test_channel",
15 changes: 15 additions & 0 deletions applications/test_aligned_page_allocation/Cargo.toml
@@ -0,0 +1,15 @@
[package]
name = "test_aligned_page_allocation"
version = "0.1.0"
description = "Tests the `AllocationRequest::AlignedTo` variant, which is needed for huge pages"
authors = ["Kevin Boos <kevinaboos@gmail.com>"]
edition = "2021"

[dependencies]
log = "0.4.8"

[dependencies.memory]
path = "../../kernel/memory"

[dependencies.app_io]
path = "../../kernel/app_io"
45 changes: 45 additions & 0 deletions applications/test_aligned_page_allocation/src/lib.rs
@@ -0,0 +1,45 @@
//! A set of basic tests for the [`AllocationRequest::AlignedTo`] variant.

#![no_std]

extern crate alloc;

use alloc::{
    vec::Vec,
    string::String,
};
use app_io::println;
use memory::AllocationRequest;

static TEST_SET: [usize; 9] = [1, 2, 4, 8, 27, 48, 256, 512, 1024];

pub fn main(_args: Vec<String>) -> isize {
    match rmain() {
        Ok(_) => 0,
        Err(e) => {
            println!("Error: {}", e);
            -1
        }
    }
}

fn rmain() -> Result<(), &'static str> {
    for num_pages in TEST_SET.into_iter() {
        for alignment in TEST_SET.into_iter() {
            println!("Attempting to allocate {num_pages} pages with alignment of {alignment} 4K pages...");
            match memory::allocate_pages_deferred(
                AllocationRequest::AlignedTo { alignment_4k_pages: alignment },
                num_pages,
            ) {
                Ok((ap, _action)) => {
                    assert_eq!(ap.start().number() % alignment, 0);
                    assert_eq!(ap.size_in_pages(), num_pages);
                    println!(" Success: {ap:?}");
                }
                Err(e) => println!(" !! FAILURE: {e:?}"),
            }
        }
    }

    Ok(())
}
4 changes: 4 additions & 0 deletions kernel/memory/src/lib.rs
@@ -26,6 +26,8 @@ pub use memory_structs::*;
pub use page_allocator::{
    AllocatedPages,
    AllocationRequest,
    allocate_pages_deferred,
    allocate_pages_by_bytes_deferred,
    allocate_pages,
    allocate_pages_at,
    allocate_pages_by_bytes,
@@ -37,6 +39,8 @@ pub use page_allocator::{
pub use frame_allocator::{
    AllocatedFrames,
    UnmappedFrames,
    allocate_frames_deferred,
    allocate_frames_by_bytes_deferred,
    allocate_frames,
    allocate_frames_at,
    allocate_frames_by_bytes,
10 changes: 10 additions & 0 deletions kernel/memory_structs/src/lib.rs
@@ -7,6 +7,7 @@

#![no_std]
#![feature(step_trait)]
#![feature(int_roundings)]
#![allow(incomplete_features)]
#![feature(adt_const_params)]

@@ -287,6 +288,15 @@ macro_rules! implement_page_frame {
                number: addr.value() / PAGE_SIZE,
            }
        }

        #[doc = "Returns a new `" $TypeName "` that is aligned up from this \
            `" $TypeName "` to the nearest multiple of `alignment_4k_pages`."]
        #[doc(alias = "next_multiple_of")]
        pub const fn align_up(&self, alignment_4k_pages: usize) -> $TypeName {
            $TypeName {
                number: self.number.next_multiple_of(alignment_4k_pages)
            }
        }
    }
    impl fmt::Debug for $TypeName {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
68 changes: 52 additions & 16 deletions kernel/page_allocator/src/lib.rs
@@ -38,7 +38,7 @@ use static_array_rb_tree::*;


/// Certain regions are pre-designated for special usage, specifically the kernel's initial identity mapping.
/// They will be allocated from if an address within them is specifically requested;
/// otherwise, they will only be allocated from as a "last resort" if all other non-designated address ranges are exhausted.
///
/// Any virtual addresses **less than or equal** to this address are considered "designated".
@@ -536,10 +536,15 @@ fn find_specific_chunk(
/// If no range is specified, this function first attempts to find a suitable chunk
/// that is **not** within the designated regions,
/// and only allocates from the designated regions as a backup option.
///
/// If an alignment is specified (in terms of number of 4KiB pages), then the starting page
/// in the allocated range must be aligned to that number of pages.
/// If no specific alignment is needed, the default alignment of 1 page should be used.
fn find_any_chunk(
    list: &mut StaticArrayRBTree<Chunk>,
    num_pages: usize,
    within_range: Option<&PageRange>,
    alignment_4k_pages: usize,
) -> Result<(AllocatedPages, DeferredAllocAction<'static>), AllocationError> {
    let designated_low_end = DESIGNATED_PAGES_LOW_END.get()
        .ok_or(AllocationError::NotInitialized)?;
@@ -555,7 +560,8 @@ fn find_any_chunk(
            if let Some(chunk) = elem {
                // Use max and min below to ensure that the range of pages we allocate from
                // is within *both* the current chunk's bounds and the range's bounds.
                let lowest_possible_start_page = *max(chunk.start(), range.start());
                let lowest_possible_start_page = max(chunk.start(), range.start())
                    .align_up(alignment_4k_pages);
                let highest_possible_end_page = *min(chunk.end(), range.end());
                if lowest_possible_start_page + num_pages <= highest_possible_end_page {
                    return adjust_chosen_chunk(
@@ -589,7 +595,8 @@
            while let Some(chunk) = cursor.get().map(|w| w.deref()) {
                // Use max and min below to ensure that the range of pages we allocate from
                // is within *both* the current chunk's bounds and the range's bounds.
                let lowest_possible_start_page = *max(chunk.start(), range.start());
                let lowest_possible_start_page = max(chunk.start(), range.start())
                    .align_up(alignment_4k_pages);
                let highest_possible_end_page = *min(chunk.end(), range.end());
                if lowest_possible_start_page + num_pages <= highest_possible_end_page {
                    return adjust_chosen_chunk(
@@ -621,8 +628,14 @@
        Inner::Array(ref mut arr) => {
            for elem in arr.iter_mut() {
                if let Some(chunk) = elem {
                    if num_pages <= chunk.size_in_pages() {
                        return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::Array(elem));
                    let lowest_possible_start_page = chunk.start().align_up(alignment_4k_pages);
                    if lowest_possible_start_page + num_pages <= *chunk.end() {
                        return adjust_chosen_chunk(
                            lowest_possible_start_page,
                            num_pages,
                            &chunk.clone(),
                            ValueRefMut::Array(elem),
                        );
                    }
                }
            }
@@ -644,8 +657,14 @@
            // The first iterates over the lower designated region, from higher addresses to lower, down to zero.
            let mut cursor = tree.upper_bound_mut(Bound::Included(designated_low_end));
            while let Some(chunk) = cursor.get().map(|w| w.deref()) {
                if num_pages < chunk.size_in_pages() {
                    return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::RBTree(cursor));
                let lowest_possible_start_page = chunk.start().align_up(alignment_4k_pages);
                if lowest_possible_start_page + num_pages <= *chunk.end() {
                    return adjust_chosen_chunk(
                        lowest_possible_start_page,
                        num_pages,
                        &chunk.clone(),
                        ValueRefMut::RBTree(cursor),
                    );
                }
                cursor.move_prev();
            }
@@ -657,8 +676,14 @@
                // we already iterated over non-designated pages in the first match statement above, so we're out of memory.
                break;
            }
            if num_pages < chunk.size_in_pages() {
                return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::RBTree(cursor));
            let lowest_possible_start_page = chunk.start().align_up(alignment_4k_pages);
            if lowest_possible_start_page + num_pages <= *chunk.end() {
                return adjust_chosen_chunk(
                    lowest_possible_start_page,
                    num_pages,
                    &chunk.clone(),
                    ValueRefMut::RBTree(cursor),
                );
            }
            cursor.move_prev();
@@ -729,23 +754,31 @@ fn adjust_chosen_chunk(
}


/// Possible options when requested pages from the page allocator.
/// Possible options when requesting pages from the page allocator.
pub enum AllocationRequest<'r> {
    /// The allocated pages can be located at any virtual address.
    Any,
    /// The allocated pages must start exactly at the given `VirtualAddress`.
    AtVirtualAddress(VirtualAddress),
    /// The allocated pages may be located at any virtual address,
    /// but the starting page must be aligned to a multiple of `alignment_4k_pages`.
    /// An alignment of `1` page is equivalent to specifying no alignment requirement.
    ///
    /// Note: alignment is specified in number of 4KiB pages, not number of bytes.
    AlignedTo { alignment_4k_pages: usize },
    /// The allocated pages can be located anywhere within the given range.
    WithinRange(&'r PageRange),
    /// The allocated pages can be located at any virtual address
    /// and have no special alignment requirements beyond a single page.
    Any,
}


/// The core page allocation routine that allocates the given number of virtual pages,
/// optionally at the requested starting `VirtualAddress`.
///
/// This simply reserves a range of virtual addresses; it does not allocate
/// actual physical memory frames nor do any memory mapping.
/// Thus, the returned `AllocatedPages` aren't directly usable until they are mapped to physical frames.
///
/// Allocation is based on a red-black tree and is thus `O(log(n))`.
/// Fragmentation isn't cleaned up until we're out of address space, but that's not really a big deal.
///
@@ -780,11 +813,14 @@ pub fn allocate_pages_deferred(
        AllocationRequest::AtVirtualAddress(vaddr) => {
            find_specific_chunk(&mut locked_list, Page::containing_address(vaddr), num_pages)
        }
        AllocationRequest::Any => {
            find_any_chunk(&mut locked_list, num_pages, None)
        AllocationRequest::AlignedTo { alignment_4k_pages } => {
            find_any_chunk(&mut locked_list, num_pages, None, alignment_4k_pages)
        }
        AllocationRequest::WithinRange(range) => {
            find_any_chunk(&mut locked_list, num_pages, Some(range))
            find_any_chunk(&mut locked_list, num_pages, Some(range), 1)
        }
        AllocationRequest::Any => {
            find_any_chunk(&mut locked_list, num_pages, None, 1)
        }
    };
    res.map_err(From::from) // convert from AllocationError to &str
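The heart of the new allocation logic is the per-chunk fit check: round the chunk's start page up to the requested alignment, then test whether `num_pages` still fit before the chunk's end. A standalone sketch with hypothetical names, not code from this commit:

    // Returns the aligned start page if `num_pages` fit in the chunk, else None.
    fn aligned_fit(chunk_start: usize, chunk_end: usize, num_pages: usize, alignment_4k_pages: usize) -> Option<usize> {
        let start = chunk_start.next_multiple_of(alignment_4k_pages);
        if start + num_pages <= chunk_end {
            Some(start)
        } else {
            None
        }
    }

    fn main() {
        // A chunk spanning pages 100..=1200 satisfies a 512-aligned,
        // 512-page request starting at page 512.
        assert_eq!(aligned_fit(100, 1200, 512, 512), Some(512));
        // A chunk spanning pages 600..=1200 does not: 600 rounds up to 1024,
        // and 1024 + 512 > 1200.
        assert_eq!(aligned_fit(600, 1200, 512, 512), None);
    }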
2 changes: 2 additions & 0 deletions theseus_features/Cargo.toml
@@ -47,6 +47,7 @@ hello = { path = "../applications/hello", optional = true }
raw_mode = { path = "../applications/raw_mode", optional = true }
print_fault_log = { path = "../applications/print_fault_log", optional = true }
seconds_counter = { path = "../applications/seconds_counter", optional = true }
test_aligned_page_allocation = { path = "../applications/test_aligned_page_allocation", optional = true }
test_async = { path = "../applications/test_async", optional = true }
test_backtrace = { path = "../applications/test_backtrace", optional = true }
test_block_io = { path = "../applications/test_block_io", optional = true }
@@ -146,6 +147,7 @@ theseus_tests = [
"hello",
"raw_mode",
"seconds_counter",
"test_aligned_page_allocation",
"test_async",
"test_backtrace",
"test_block_io",
