From 7f546f5c3cb7e9c9b962f6af2e692895214cb52b Mon Sep 17 00:00:00 2001 From: NIMogen Date: Tue, 12 Sep 2023 14:20:22 -0700 Subject: [PATCH 1/9] restored changes to page allocator, mapper, page table entry, and flags --- Cargo.lock | 18 + applications/test_1gb_huge_pages/Cargo.toml | 16 + applications/test_1gb_huge_pages/src/lib.rs | 62 +++ applications/test_huge_pages/Cargo.toml | 22 + applications/test_huge_pages/src/lib.rs | 95 ++++ kernel/memory/src/paging/mapper.rs | 407 +++++++++++++----- kernel/nano_core/linker_higher_half-x86_64.ld | 5 +- kernel/page_allocator/src/lib.rs | 249 ++++++++++- kernel/page_table_entry/src/lib.rs | 36 ++ kernel/pte_flags/src/pte_flags_x86_64.rs | 14 +- 10 files changed, 807 insertions(+), 117 deletions(-) create mode 100644 applications/test_1gb_huge_pages/Cargo.toml create mode 100644 applications/test_1gb_huge_pages/src/lib.rs create mode 100644 applications/test_huge_pages/Cargo.toml create mode 100644 applications/test_huge_pages/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 2d8455475b..bff4ad05d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3749,6 +3749,14 @@ dependencies = [ "sync_irq", ] +[[package]] +name = "test_1gb_huge_pages" +version = "0.1.0" +dependencies = [ + "app_io", + "memory", +] + [[package]] name = "test_aligned_page_allocation" version = "0.1.0" @@ -3816,6 +3824,16 @@ dependencies = [ "root", ] +[[package]] +name = "test_huge_pages" +version = "0.1.0" +dependencies = [ + "app_io", + "frame_allocator", + "memory", + "page_allocator", +] + [[package]] name = "test_identity_mapping" version = "0.1.0" diff --git a/applications/test_1gb_huge_pages/Cargo.toml b/applications/test_1gb_huge_pages/Cargo.toml new file mode 100644 index 0000000000..49ffde7bdc --- /dev/null +++ b/applications/test_1gb_huge_pages/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "test_1gb_huge_pages" +version = "0.1.0" +authors = ["Noah Mogensen "] +description = "Application for testing allocation, mapping, and memory access for 1gb huge pages." + +[dependencies] + +[dependencies.memory] +path = "../../kernel/memory" + +[dependencies.app_io] +path = "../../kernel/app_io" + +# [dependencies.application_main_fn] +# path = "../../compiler_plugins" diff --git a/applications/test_1gb_huge_pages/src/lib.rs b/applications/test_1gb_huge_pages/src/lib.rs new file mode 100644 index 0000000000..b3b51e97ac --- /dev/null +++ b/applications/test_1gb_huge_pages/src/lib.rs @@ -0,0 +1,62 @@ +#![no_std] +#[macro_use] extern crate alloc; +#[macro_use] extern crate app_io; + +extern crate memory; + +use alloc::vec::Vec; +use alloc::string::String; +use memory::{ + PteFlags, + VirtualAddress, PhysicalAddress, + allocate_frames_at, allocate_pages_at +}; + +pub fn main(_args: Vec) -> isize { + println!("NOTE: Before running, make sure you Theseus has been run with enough memory (make orun QEMU_MEMORY=3G)."); + let kernel_mmi_ref = memory::get_kernel_mmi_ref() + .ok_or("KERNEL_MMI was not yet initialized!") + .unwrap(); + + // Now the same for 1gb pages + let mut aligned_1gb_page = allocate_pages_at( + VirtualAddress::new_canonical(0x40000000), + 512*512).expect("Failed to allocate range for 1GiB page. Make sure you have enough memory for the kernel (compile with make orun QEMU_MEMORY=3G)."); + aligned_1gb_page.to_1gb_allocated_pages(); + + let aligned_4k_frames = allocate_frames_at( + PhysicalAddress::new_canonical(0x40000000), //0x1081A000 + 512 * 512).expect("Failed to allocate enough frames at desired address. 
Make sure you have enough memory for the kernel (compile with make orun QEMU_MEMORY=3G)."); + + let mut mapped_1gb_page = kernel_mmi_ref.lock().page_table.map_allocated_pages_to( + aligned_1gb_page, + aligned_4k_frames, + PteFlags::new().valid(true).writable(true), + ).expect("test_huge_pages: call to map_allocated_pages failed for 1GiB page"); + + kernel_mmi_ref.lock().page_table.dump_pte(mapped_1gb_page.start_address()); + + // See if address can be translated + let translated = kernel_mmi_ref.lock() + .page_table.translate(VirtualAddress::new_canonical(0x40000000)) + .unwrap(); + println!("Virtual address 0x40000000-> {}", translated); + + let val = mapped_1gb_page + .as_type_mut::(0) + .expect("test huge pages: call to as_type_mut() on mapped 2Mb page failed"); + println!("Value at offset of 1GiB huge page before updating is {}", *val); + + *val = 35; + + let updated_val = mapped_1gb_page + .as_type::(0) + .expect("test huge pages: call to as_type() on mapped 2Mb page failed"); + println!("Value at offset of 1GiB huge page after updating is {}", *updated_val); + + // Dump entries to see if dropping has unmapped everything properly + drop(mapped_1gb_page); + kernel_mmi_ref.lock().page_table.dump_pte(VirtualAddress::new_canonical(0x40000000)); + + 0 +} diff --git a/applications/test_huge_pages/Cargo.toml b/applications/test_huge_pages/Cargo.toml new file mode 100644 index 0000000000..77ebd7db4c --- /dev/null +++ b/applications/test_huge_pages/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "test_huge_pages" +version = "0.1.0" +authors = ["Noah Mogensen "] +description = "Application for testing allocation, mapping, and memory access for 2mb huge pages." + +[dependencies] + +[dependencies.memory] +path = "../../kernel/memory" + +[dependencies.frame_allocator] +path = "../../kernel/frame_allocator" + +[dependencies.page_allocator] +path = "../../kernel/page_allocator" + +[dependencies.app_io] +path = "../../kernel/app_io" + +# [dependencies.application_main_fn] +# path = "../../compiler_plugins" diff --git a/applications/test_huge_pages/src/lib.rs b/applications/test_huge_pages/src/lib.rs new file mode 100644 index 0000000000..ef5b6b244f --- /dev/null +++ b/applications/test_huge_pages/src/lib.rs @@ -0,0 +1,95 @@ +#![no_std] +#[macro_use] extern crate alloc; +#[macro_use] extern crate app_io; + +extern crate memory; +extern crate frame_allocator; +extern crate page_allocator; + +use alloc::vec::Vec; +use alloc::string::String; +use memory::{ + Page, PteFlags, + VirtualAddress, PhysicalAddress, + Page1G, Page2M, + allocate_frames_at, allocate_pages_at +}; +// For checking valid test addresses, if necessary +// use frame_allocator::dump_frame_allocator_state; +// use page_allocator::dump_page_allocator_state; + +pub fn main(_args: Vec) -> isize { + println!("NOTE: Before running, make sure you Theseus has been run with enough memory (make orun QEMU_MEMORY=3G)."); + + let kernel_mmi_ref = memory::get_kernel_mmi_ref() + .ok_or("KERNEL_MMI was not yet initialized!") + .unwrap(); + + // Preliminary tests + let page1g = Page::::containing_address_1gb(VirtualAddress::zero()); + println!("Size of page 1 is {:?}", page1g.page_size()); + let page2m = Page::::containing_address_2mb(VirtualAddress::zero()); + println!("Size of page 2 is {:?}", page2m.page_size()); + + + // match page1g.page_size() { + // PAGE_1GB_SIZE => println!("Page 1 recognized as 1GiB"), + // _ => println!("Page 1 not recognized as 1GiB"), + // } + // match page2m.page_size() { + // PAGE_2MB_SIZE => println!("Page 2 recognized as 
2MiB"), + // _ => println!("Page 2 not recognized as 2MiB"), + // } + // let _allocated_1g = allocate_1gb_pages(1) + // .ok_or("test_huge_pages: could not allocate 1GiB page") + // .unwrap(); + // let _allocated_2m = allocate_2mb_pages(1) + // .ok_or("test_huge_pages: could not allocate 2MiB page") + // .unwrap(); + // println!("Huge pages successfully allocated!"); + // // end preliminary tests + + let mut aligned_2mb_page = allocate_pages_at( + VirtualAddress::new_canonical(0x60000000), + 512).expect("Could not allocate pages. Make sure you have enough memory for the kernel (compile with make orun QEMU_MEMORY=3G)."); + aligned_2mb_page.to_2mb_allocated_pages(); + + // frame allocator has not been modified to deal with huge frames yet + let aligned_4k_frames = allocate_frames_at( + PhysicalAddress::new_canonical(0x60000000), + 512).expect("Could not allocate frames. Make sure you have enough memory for the kernel (compile with make orun QEMU_MEMORY=3G)."); + + let mut mapped_2mb_page = kernel_mmi_ref.lock().page_table.map_allocated_pages_to( + aligned_2mb_page, + aligned_4k_frames, + PteFlags::new().valid(true).writable(true), + ).expect("test_huge_pages: call to map_allocated_pages failed"); + + kernel_mmi_ref.lock().page_table.dump_pte(mapped_2mb_page.start_address()); + + // See if address can be translated + let translated = kernel_mmi_ref.lock() + .page_table.translate(mapped_2mb_page.start_address()) + .unwrap(); + println!("Virtual address {} -> {}", mapped_2mb_page.start_address(), translated); + + + // Testing mem access with huge pages + let val = mapped_2mb_page + .as_type_mut::(0) + .expect("test huge pages: call to as_type_mut() on mapped 2Mb page failed"); + println!("Value at offset of 2Mib huge page before updating is {}", *val); + + *val = 35; + + let updated_val = mapped_2mb_page + .as_type::(0) + .expect("test huge pages: call to as_type() on mapped 2Mb page failed"); + println!("Value at offset of 2Mib huge page after updating is {}", *updated_val); + + // Dump entries to see if dropping has unmapped everything properly + drop(mapped_2mb_page); + kernel_mmi_ref.lock().page_table.dump_pte(VirtualAddress::new_canonical(0x20000000)); + + 0 +} diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index e5d03bb1d7..930d4a04db 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -18,7 +18,8 @@ use core::{ slice, }; use log::{error, warn, debug, trace}; -use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, Frame, FrameRange, AllocatedPages, AllocatedFrames, UnmappedFrames}; +use memory_structs::{Page4K, Page2M, Page1G, MemChunkSize}; +use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, Frame, PageRange, FrameRange, AllocatedPages, AllocatedFrames, UnmappedFrames}; use crate::paging::{ get_current_p4, table::{P4, UPCOMING_P4, Table, Level4}, @@ -205,31 +206,81 @@ impl Mapper { // Only the lowest-level P1 entry can be considered exclusive, and only when // we are mapping it exclusively (i.e., owned `AllocatedFrames` are passed in). 
- let actual_flags = flags + let mut actual_flags = flags .valid(true) .exclusive(Frames::OWNED); let pages_count = pages.size_in_pages(); let frames_count = frames.borrow().size_in_frames(); - if pages_count != frames_count { - error!("map_allocated_pages_to(): pages {:?} count {} must equal frames {:?} count {}!", - pages, pages_count, frames.borrow(), frames_count - ); - return Err("map_allocated_pages_to(): page count must equal frame count"); - } - // iterate over pages and frames in lockstep - for (page, frame) in pages.range().clone().into_iter().zip(frames.borrow().into_iter()) { - let p3 = self.p4_mut().next_table_create(page.p4_index(), higher_level_flags); - let p2 = p3.next_table_create(page.p3_index(), higher_level_flags); - let p1 = p2.next_table_create(page.p2_index(), higher_level_flags); - if !p1[page.p1_index()].is_unused() { - error!("map_allocated_pages_to(): page {:#X} -> frame {:#X}, page was already in use!", page.start_address(), frame.start_address()); - return Err("map_allocated_pages_to(): page was already in use"); - } + // Select correct mapping method. + // The different branches are mostly the same. For huge pages an additional flag is set, and + // the frame is mapped to the page table level corresponding page size. + match pages.page_size() { + MemChunkSize::Normal4K => { + // This check is dependent on the page size until size-awareness is added to Frames + if pages_count != frames_count { + error!("map_allocated_pages_to(): pages {:?} count {} must equal frames {:?} count {}!", + pages, pages_count, frames.borrow(), frames_count + ); + return Err("map_allocated_pages_to(): page count must equal frame count"); + } + + // iterate over pages and frames in lockstep + for (page, frame) in pages.range().clone().into_iter().zip(frames.borrow().into_iter()) { + let p3 = self.p4_mut().next_table_create(page.p4_index(), higher_level_flags); + let p2 = p3.next_table_create(page.p3_index(), higher_level_flags); + let p1 = p2.next_table_create(page.p2_index(), higher_level_flags); + if !p1[page.p1_index()].is_unused() { + error!("map_allocated_pages_to(): page {:#X} -> frame {:#X}, page was already in use!", page.start_address(), frame.start_address()); + return Err("map_allocated_pages_to(): page was already in use"); + } + + p1[page.p1_index()].set_entry(frame, actual_flags); + } + } + MemChunkSize::Huge2M => { + if pages_count * 512 != frames_count { + error!("map_allocated_pages_to(): pages {:?} count {} must equal frames {:?} count {}!", + pages, pages_count, frames.borrow(), frames_count + ); + return Err("map_allocated_pages_to(): page count must equal frame count"); + } + // Temporarily define a custom step over the page range until correct behaviour is implemented for huge pages + for (page, frame) in pages.range_2mb().clone().into_iter().zip(frames.borrow().into_iter().step_by(512)) { + actual_flags = actual_flags.huge(true); + let p3 = self.p4_mut().next_table_create(page.p4_index(), higher_level_flags); + let p2 = p3.next_table_create(page.p3_index(), higher_level_flags); + + if !p2[page.p2_index()].is_unused() { + error!("map_allocated_pages_to(): page {:#X} -> frame {:#X}, page was already in use!", page.start_address(), frame.start_address()); + return Err("map_allocated_pages_to(): page was already in use"); + } + + p2[page.p2_index()].set_entry(frame, actual_flags); + } + } + MemChunkSize::Huge1G => { + if pages_count * (512 * 512) != frames_count { + error!("map_allocated_pages_to(): pages {:?} count {} must equal frames {:?} count {}!", + pages, 
pages_count, frames.borrow(), frames_count + ); + return Err("map_allocated_pages_to(): page count must equal frame count"); + } + // Temporarily define a custom step over the page range until correct behaviour is implemented for huge pages + for (page, frame) in pages.range_1gb().clone().into_iter().zip(frames.borrow().into_iter().step_by(512 * 512)) { + actual_flags = actual_flags.huge(true); + let p3 = self.p4_mut().next_table_create(page.p4_index(), higher_level_flags); + + if !p3[page.p3_index()].is_unused() { + error!("map_allocated_pages_to(): page {:#X} -> frame {:#X}, page was already in use!", page.start_address(), frame.start_address()); + return Err("map_allocated_pages_to(): page was already in use"); + } - p1[page.p1_index()].set_entry(frame, actual_flags); + p3[page.p3_index()].set_entry(frame, actual_flags); + } + } } Ok(( @@ -281,22 +332,31 @@ impl Mapper { .valid(true) .exclusive(true); - for page in pages.range().clone() { - let af = frame_allocator::allocate_frames(1).ok_or("map_allocated_pages(): couldn't allocate new frame, out of memory")?; - - let p3 = self.p4_mut().next_table_create(page.p4_index(), higher_level_flags); - let p2 = p3.next_table_create(page.p3_index(), higher_level_flags); - let p1 = p2.next_table_create(page.p2_index(), higher_level_flags); - - if !p1[page.p1_index()].is_unused() { - error!("map_allocated_pages(): page {:#X} -> frame {:#X}, page was already in use!", - page.start_address(), af.start_address() - ); - return Err("map_allocated_pages(): page was already in use"); - } - - p1[page.p1_index()].set_entry(af.as_allocated_frame(), actual_flags); - core::mem::forget(af); // we currently forget frames allocated here since we don't yet have a way to track them. + match pages.page_size() { + MemChunkSize::Normal4K => { + for page in pages.range().clone() { + let af = frame_allocator::allocate_frames(1).ok_or("map_allocated_pages(): couldn't allocate new frame, out of memory")?; + let p3 = self.p4_mut().next_table_create(page.p4_index(), higher_level_flags); + let p2 = p3.next_table_create(page.p3_index(), higher_level_flags); + let p1 = p2.next_table_create(page.p2_index(), higher_level_flags); + + if !p1[page.p1_index()].is_unused() { + error!("map_allocated_pages(): page {:#X} -> frame {:#X}, page was already in use!", + page.start_address(), af.start_address() + ); + return Err("map_allocated_pages(): page was already in use"); + } + + p1[page.p1_index()].set_entry(af.as_allocated_frame(), actual_flags); + core::mem::forget(af); // we currently forget frames allocated here since we don't yet have a way to track them. 
+ } + } + MemChunkSize::Huge2M => { + todo!("Mapping 2MiB huge pages to randomly-allocated huge frames is not yet supported") + } + MemChunkSize::Huge1G => { + todo!("Mapping 1GiB huge pages to randomly-allocated huge frames is not yet supported") + } } Ok(MappedPages { @@ -537,18 +597,44 @@ impl MappedPages { return Ok(()); } - for page in self.pages.range().clone() { - let p1 = active_table_mapper.p4_mut() - .next_table_mut(page.p4_index()) - .and_then(|p3| p3.next_table_mut(page.p3_index())) - .and_then(|p2| p2.next_table_mut(page.p2_index())) - .ok_or("mapping code does not support huge pages")?; - - p1[page.p1_index()].set_flags(new_flags); + match self.pages.page_size() { + MemChunkSize::Normal4K => { + for page in self.pages.range().clone() { + let p1 = active_table_mapper.p4_mut() + .next_table_mut(page.p4_index()) + .and_then(|p3| p3.next_table_mut(page.p3_index())) + .and_then(|p2| p2.next_table_mut(page.p2_index())) + .ok_or("BUG: remap() - could not get p1 entry for 4kb page")?; + + p1[page.p1_index()].set_flags(new_flags); + + tlb_flush_virt_addr(page.start_address()); + } + } + MemChunkSize::Huge2M => { + for page in self.pages.range_2mb().clone().into_iter() { + let p2 = active_table_mapper.p4_mut() + .next_table_mut(page.p4_index()) + .and_then(|p3| p3.next_table_mut(page.p3_index())) + .ok_or("BUG: remap() - could not get p1 entry for 2mb page")?; - tlb_flush_virt_addr(page.start_address()); + p2[page.p2_index()].set_flags(new_flags); + + tlb_flush_virt_addr(page.start_address()); + } + } + MemChunkSize::Huge1G => { + for page in self.pages.range_1gb().clone().into_iter() { + let p3 = active_table_mapper.p4_mut() + .next_table_mut(page.p4_index()) + .ok_or("BUG: remap() - could not get p1 entry for 1gb page")?; + + p3[page.p3_index()].set_flags(new_flags); + + tlb_flush_virt_addr(page.start_address()); + } + } } - if let Some(func) = BROADCAST_TLB_SHOOTDOWN_FUNC.get() { func(self.pages.range().clone()); } @@ -608,75 +694,191 @@ impl MappedPages { "BUG: MappedPages::unmap(): current P4 must equal original P4, \ cannot unmap MappedPages from a different page table than they were originally mapped to!" ); - } + } let mut first_frame_range: Option = None; // this is what we'll return let mut current_frame_range: Option = None; - for page in self.pages.range().clone() { - let p1 = active_table_mapper.p4_mut() - .next_table_mut(page.p4_index()) - .and_then(|p3| p3.next_table_mut(page.p3_index())) - .and_then(|p2| p2.next_table_mut(page.p2_index())) - .ok_or("mapping code does not support huge pages")?; - let pte = &mut p1[page.p1_index()]; - if pte.is_unused() { - return Err("unmap(): page not mapped"); - } - - let unmapped_frames = pte.set_unmapped(); - tlb_flush_virt_addr(page.start_address()); - - // Here, create (or extend) a contiguous ranges of frames here based on the `unmapped_frames` - // freed from the newly-unmapped P1 PTE entry above. - match unmapped_frames { - UnmapResult::Exclusive(newly_unmapped_frames) => { - let newly_unmapped_frames = INTO_UNMAPPED_FRAMES_FUNC.get() - .ok_or("BUG: Mapper::unmap(): the `INTO_UNMAPPED_FRAMES_FUNC` callback was not initialized") - .map(|into_func| into_func(newly_unmapped_frames.deref().clone()))?; - - if let Some(mut curr_frames) = current_frame_range.take() { - match curr_frames.merge(newly_unmapped_frames) { - Ok(()) => { - // Here, the newly unmapped frames were contiguous with the current frame_range, - // and we successfully merged them into a single range of AllocatedFrames. 
- current_frame_range = Some(curr_frames); - } - Err(newly_unmapped_frames) => { - // Here, the newly unmapped frames were **NOT** contiguous with the current_frame_range, - // so we "finish" the current_frame_range (it's already been "taken") and start a new one - // based on the newly unmapped frames. + // Select the correct unmapping behaviour based on page size. + // The different branches mostly have the same logic, differing + // only in what level is unmapped and what unmapping function is used. + match self.pages.page_size() { + MemChunkSize::Normal4K => { + for page in self.pages.range().clone() { + let p1 = active_table_mapper.p4_mut() + .next_table_mut(page.p4_index()) + .and_then(|p3| p3.next_table_mut(page.p3_index())) + .and_then(|p2| p2.next_table_mut(page.p2_index())) + .ok_or("BUG: could not get p1 entry in unmap()")?; + let pte = &mut p1[page.p1_index()]; + if pte.is_unused() { + return Err("unmap(): page not mapped"); + } + + let unmapped_frames = pte.set_unmapped(); + tlb_flush_virt_addr(page.start_address()); + + // Here, create (or extend) a contiguous ranges of frames here based on the `unmapped_frames` + // freed from the newly-unmapped P1 PTE entry above. + match unmapped_frames { + UnmapResult::Exclusive(newly_unmapped_frames) => { + let newly_unmapped_frames = INTO_UNMAPPED_FRAMES_FUNC.get() + .ok_or("BUG: Mapper::unmap(): the `INTO_UNMAPPED_FRAMES_FUNC` callback was not initialized") + .map(|into_func| into_func(newly_unmapped_frames.deref().clone()))?; + + if let Some(mut curr_frames) = current_frame_range.take() { + match curr_frames.merge(newly_unmapped_frames) { + Ok(()) => { + // Here, the newly unmapped frames were contiguous with the current frame_range, + // and we successfully merged them into a single range of AllocatedFrames. + current_frame_range = Some(curr_frames); + } + Err(newly_unmapped_frames) => { + // Here, the newly unmapped frames were **NOT** contiguous with the current_frame_range, + // so we "finish" the current_frame_range (it's already been "taken") and start a new one + // based on the newly unmapped frames. + current_frame_range = Some(newly_unmapped_frames); + + // If this is the first frame range we've unmapped, don't drop it -- save it as the return value. + if first_frame_range.is_none() { + first_frame_range = Some(curr_frames); + } else { + // If this is NOT the first frame range we've unmapped, then go ahead and drop it now, + // otherwise there will not be any other opportunity for it to be dropped. + // + // TODO: here in the future, we could add it to the optional input list (see this function's doc comments) + // of AllocatedFrames to return, i.e., `Option<&mut Vec>`. + trace!("MappedPages::unmap(): dropping additional non-contiguous frames {:?}", curr_frames); + // curr_frames is dropped here + } + } + } + } else { + // This was the first frames we unmapped, so start a new current_frame_range. current_frame_range = Some(newly_unmapped_frames); - - // If this is the first frame range we've unmapped, don't drop it -- save it as the return value. - if first_frame_range.is_none() { - first_frame_range = Some(curr_frames); - } else { - // If this is NOT the first frame range we've unmapped, then go ahead and drop it now, - // otherwise there will not be any other opportunity for it to be dropped. - // - // TODO: here in the future, we could add it to the optional input list (see this function's doc comments) - // of AllocatedFrames to return, i.e., `Option<&mut Vec>`. 
- trace!("MappedPages::unmap(): dropping additional non-contiguous frames {:?}", curr_frames); - // curr_frames is dropped here + } + } + UnmapResult::NonExclusive(_frames) => { + // trace!("Note: FYI: page {:X?} -> frames {:X?} was just unmapped but not mapped as EXCLUSIVE.", page, _frames); + } + } + } + + #[cfg(not(bm_map))] + { + if let Some(func) = BROADCAST_TLB_SHOOTDOWN_FUNC.get() { + func(self.pages.range().clone()); + } + } + } + MemChunkSize::Huge2M => { + // Temporarily define a custom step over huge page ranges until correct behaiour is implemented + for page in self.pages.range_2mb().clone().into_iter() { + let p2 = active_table_mapper.p4_mut() + .next_table_mut(page.p4_index()) + .and_then(|p3| p3.next_table_mut(page.p3_index())) + .ok_or("BUG: could not get p2 entry for 2mb page in unmap()")?; + let pte = &mut p2[page.p2_index()]; + if pte.is_unused() { + return Err("unmap(): page not mapped"); + } + let unmapped_frames = pte.set_unmapped_2mb(); + tlb_flush_virt_addr(page.start_address()); + + match unmapped_frames { + UnmapResult::Exclusive(newly_unmapped_frames) => { + let newly_unmapped_frames = INTO_UNMAPPED_FRAMES_FUNC.get() + .ok_or("BUG: Mapper::unmap(): the `INTO_UNMAPPED_FRAMES_FUNC` callback was not initialized") + .map(|into_func| into_func(newly_unmapped_frames.deref().clone()))?; + if let Some(mut curr_frames) = current_frame_range.take() { + match curr_frames.merge(newly_unmapped_frames) { + Ok(()) => { + current_frame_range = Some(curr_frames); + } + Err(newly_unmapped_frames) => { + current_frame_range = Some(newly_unmapped_frames); + + if first_frame_range.is_none() { + first_frame_range = Some(curr_frames); + } else { + // TODO: here in the future, we could add it to the optional input list (see this function's doc comments) + // of AllocatedFrames to return, i.e., `Option<&mut Vec>`. + trace!("MappedPages::unmap(): dropping additional non-contiguous frames {:?}", curr_frames); + // curr_frames is dropped here + } + } } + } else { + current_frame_range = Some(newly_unmapped_frames); } } - } else { - // This was the first frames we unmapped, so start a new current_frame_range. 
- current_frame_range = Some(newly_unmapped_frames); + UnmapResult::NonExclusive(_frames) => { + //trace!("Note: FYI: page {:X?} -> frames {:X?} was just unmapped but not mapped as EXCLUSIVE.", page, _frames); + } } } - UnmapResult::NonExclusive(_frames) => { - // trace!("Note: FYI: page {:X?} -> frames {:X?} was just unmapped but not mapped as EXCLUSIVE.", page, _frames); + + #[cfg(not(bm_map))] + { + if let Some(func) = BROADCAST_TLB_SHOOTDOWN_FUNC.get() { + func(PageRange::::from(self.pages.range_2mb())); // convert to 4kb range for the TLB shootdown + } } } - } - - #[cfg(not(bm_map))] - { - if let Some(func) = BROADCAST_TLB_SHOOTDOWN_FUNC.get() { - func(self.pages.range().clone()); + MemChunkSize::Huge1G => { + // Temporarily define a custom step over huge page ranges until correct behaiour is implemented + for page in self.pages.range_1gb().clone().into_iter() { + let p3 = active_table_mapper.p4_mut() + .next_table_mut(page.p4_index()) + .ok_or("BUG: could not get p2 entry for 2gb page in unmap()")?; + let pte = &mut p3[page.p3_index()]; + if pte.is_unused() { + return Err("unmap(): page not mapped"); + } + + let unmapped_frames = pte.set_unmapped_1gb(); + tlb_flush_virt_addr(page.start_address()); + + match unmapped_frames { + UnmapResult::Exclusive(newly_unmapped_frames) => { + let newly_unmapped_frames = INTO_UNMAPPED_FRAMES_FUNC.get() + .ok_or("BUG: Mapper::unmap(): the `INTO_UNMAPPED_FRAMES_FUNC` callback was not initialized") + .map(|into_func| into_func(newly_unmapped_frames.deref().clone()))?; + + if let Some(mut curr_frames) = current_frame_range.take() { + match curr_frames.merge(newly_unmapped_frames) { + Ok(()) => { + current_frame_range = Some(curr_frames); + } + Err(newly_unmapped_frames) => { + current_frame_range = Some(newly_unmapped_frames); + + if first_frame_range.is_none() { + first_frame_range = Some(curr_frames); + } else { + // TODO: here in the future, we could add it to the optional input list (see this function's doc comments) + // of AllocatedFrames to return, i.e., `Option<&mut Vec>`. + trace!("MappedPages::unmap(): dropping additional non-contiguous frames {:?}", curr_frames); + // curr_frames is dropped here + } + } + } + } else { + current_frame_range = Some(newly_unmapped_frames); + } + } + UnmapResult::NonExclusive(_frames) => { + // trace!("Note: FYI: page {:X?} -> frames {:X?} was just unmapped but not mapped as EXCLUSIVE.", page, _frames); + } + } + } + + #[cfg(not(bm_map))] + { + if let Some(func) = BROADCAST_TLB_SHOOTDOWN_FUNC.get() { + func(PageRange::::from(self.pages.range_1gb())); // convert to 4kb range for the TLB shootdown + } + } } } @@ -685,7 +887,6 @@ impl MappedPages { .or(current_frame_range.map(|f| f.into_allocated_frames()))) } - /// Reinterprets this `MappedPages`'s underlying memory region as a struct of the given type `T`, /// i.e., overlays a struct on top of this mapped memory region. 
/// @@ -1324,4 +1525,4 @@ impl Mutability for Mutable { } mod private { pub trait Sealed { } -} +} \ No newline at end of file diff --git a/kernel/nano_core/linker_higher_half-x86_64.ld b/kernel/nano_core/linker_higher_half-x86_64.ld index 378bae89ec..505519244f 100644 --- a/kernel/nano_core/linker_higher_half-x86_64.ld +++ b/kernel/nano_core/linker_higher_half-x86_64.ld @@ -8,8 +8,6 @@ OUTPUT_FORMAT(elf64-x86-64) */ KERNEL_OFFSET = 0xFFFFFFFF80000000; -__THESEUS_CLS_SIZE = SIZEOF(.cls); -__THESEUS_TLS_SIZE = SIZEOF(.tdata) + SIZEOF(.tbss); SECTIONS { @@ -115,3 +113,6 @@ SECTIONS { *(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*) } } + +__THESEUS_CLS_SIZE = SIZEOF(.cls); +__THESEUS_TLS_SIZE = SIZEOF(.tdata) + SIZEOF(.tbss); \ No newline at end of file diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index cdb476c768..ed52196933 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -32,10 +32,12 @@ mod static_array_rb_tree; use core::{borrow::Borrow, cmp::{Ordering, max, min}, fmt, ops::{Deref, DerefMut}}; use kernel_config::memory::*; -use memory_structs::{VirtualAddress, Page, PageRange}; +use memory_structs::{VirtualAddress, Page, PageRange, Page1G, Page2M, Page4K, MemChunkSize}; use spin::{Mutex, Once}; use static_array_rb_tree::*; +use core::convert::TryFrom; + /// Certain regions are pre-designated for special usage, specifically the kernel's initial identity mapping. /// They will be allocated from if an address within them is specifically @@ -139,6 +141,189 @@ pub fn init(end_vaddr_of_low_designated_region: VirtualAddress) -> Result<(), &' Ok(()) } +/// An enum used to wrap the generic PageRange variants corresponding to different page sizes. +/// Additional methods corresponding to PageRange methods are provided in order to destructure the enum variants. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum PageRangeSized { + Normal4KiB(PageRange), + Huge2MiB(PageRange), + Huge1GiB(PageRange), +} + +// These methods mostly destructure the enum in order to call internal methods +impl PageRangeSized { + /// Get the size of the pages for the contained PageRange + pub fn page_size(&self) -> MemChunkSize { + match self { + PageRangeSized::Normal4KiB(pr) => { + pr.start().page_size() + } + PageRangeSized::Huge2MiB(pr) => { + pr.start().page_size() + } + PageRangeSized::Huge1GiB(pr) => { + pr.start().page_size() + } + } + } + + pub fn range(&self) -> Option<&PageRange> { + match self { + PageRangeSized::Normal4KiB(pr) => { + Some(pr) + } + _ => { + None + } + } + } + + /// range() equivalent for 2MiB page ranges + pub fn range_2mb(&self) -> Result, &'static str> { + match self { + PageRangeSized::Huge2MiB(pr) => { + Ok(pr.clone()) + } + // PageRangeSized::Normal4KiB(pr) => { + // Ok(PageRange::::try_from(*pr)?) + // } + _ => { + Err("Called range_2mb on a PageRange with a size other than 2mb") + } + } + } + + /// range() equivalent for 1GiB page ranges + pub fn range_1gb(&self) -> Result, &'static str> { + match self { + PageRangeSized::Huge1GiB(pr) => { + Ok(pr.clone()) + } + // PageRangeSized::Normal4KiB(pr) => { + // Ok(PageRange::::try_from(*pr)?) 
+ // } + _ => { + Err("Called range_1gb on a PageRange with a size other than 1gb") + } + } + } + + pub fn contains(&self, page: &Page) -> bool { + match self { + PageRangeSized::Normal4KiB(pr) => { + pr.contains(page) + } + // PageRangeSized::Huge2MiB(pr) => { + // pr.contains(page) + // } + // PageRangeSized::Huge1GiB(pr) => { + // pr.contains(page) + // } + _ => { + false // TODO: change placeholder value + } + } + } + + pub const fn offset_of_address(&self, addr: VirtualAddress) -> Option { + match self { + PageRangeSized::Normal4KiB(pr) => { + pr.offset_of_address(addr) + } + PageRangeSized::Huge2MiB(pr) => { + pr.offset_of_address(addr) + } + PageRangeSized::Huge1GiB(pr) => { + pr.offset_of_address(addr) + } + } + } + + pub const fn address_at_offset(&self, offset: usize) -> Option { + match self { + PageRangeSized::Normal4KiB(pr) => { + pr.address_at_offset(offset) + } + PageRangeSized::Huge2MiB(pr) => { + pr.address_at_offset(offset) + } + PageRangeSized::Huge1GiB(pr) => { + pr.address_at_offset(offset) + } + } + } + + /// Returns the starting `VirtualAddress` in this range of pages. + pub fn start_address(&self) -> VirtualAddress { + match self { + PageRangeSized::Normal4KiB(pr) => { + pr.start_address() + } + PageRangeSized::Huge2MiB(pr) => { + pr.start_address() + } + PageRangeSized::Huge1GiB(pr) => { + pr.start_address() + } + } + } + + /// Returns the size in bytes of this range of pages. + pub fn size_in_bytes(&self) -> usize { + match self { + PageRangeSized::Normal4KiB(pr) => { + pr.size_in_bytes() + } + PageRangeSized::Huge2MiB(pr) => { + pr.size_in_bytes() + } + PageRangeSized::Huge1GiB(pr) => { + pr.size_in_bytes() + } + } + } + + /// Returns the size in number of pages of this range of pages. + pub fn size_in_pages(&self) -> usize { + match self { + PageRangeSized::Normal4KiB(pr) => { + pr.size_in_pages() + } + PageRangeSized::Huge2MiB(pr) => { + pr.size_in_pages() + } + PageRangeSized::Huge1GiB(pr) => { + pr.size_in_pages() + } + } + } + + /// Returns the starting `Page` in this range of pages. + /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. + pub fn start(&self) -> &Page { + match self { + PageRangeSized::Normal4KiB(pr) => { + pr.start() + } + _ => { + panic!("Attempt to get the start of a huge page range as a 4KiB page."); + } + } + } + + /// Returns the ending `Page` (inclusive) in this range of pages. + /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. + pub fn end(&self) -> &Page { + match self { + PageRangeSized::Normal4KiB(pr) => { + pr.end() + } + _ => { + panic!("Attempt to get the end of a huge page range as a 4KiB page."); + } + } + } +} /// A range of contiguous pages. /// @@ -159,7 +344,7 @@ struct Chunk { impl Chunk { fn as_allocated_pages(&self) -> AllocatedPages { AllocatedPages { - pages: self.pages.clone(), + pages: PageRangeSized::Normal4KiB(self.pages.clone()), } } @@ -206,7 +391,7 @@ impl Borrow for &'_ Chunk { /// This object represents ownership of the allocated virtual pages; /// if this object falls out of scope, its allocated pages will be auto-deallocated upon drop. pub struct AllocatedPages { - pages: PageRange, + pages: PageRangeSized, } // AllocatedPages must not be Cloneable, and it must not expose its inner pages as mutable. @@ -223,7 +408,7 @@ impl AllocatedPages { /// Can be used as a placeholder, but will not permit any real usage. 
pub const fn empty() -> AllocatedPages { AllocatedPages { - pages: PageRange::empty() + pages: PageRangeSized::Normal4KiB(PageRange::empty()) } } @@ -254,7 +439,17 @@ impl AllocatedPages { /// Returns a reference to the inner `PageRange`, which is cloneable/iterable. pub fn range(&self) -> &PageRange { - &self.pages + &self.pages.range().expect("Called range() on a PageRange with a size other than 4kb") + } + + /// Returns the inner `PageRange`, which is cloneable/iterable. + pub fn range_2mb(&self) -> PageRange { + self.pages.range_2mb().expect("Called range_2mb() on a PageRange with a size other than 2mb") + } + + /// Returns the inner `PageRange`, which is cloneable/iterable. + pub fn range_1gb(&self) -> PageRange { + self.pages.range_1gb().expect("Called range_1gb() on a PageRange with a size other than 1gb") } /// Returns the offset of the given `VirtualAddress` within this range of pages, @@ -294,7 +489,7 @@ impl AllocatedPages { if *ap.start() != (*self.end() + 1) { return Err(ap); } - self.pages = PageRange::new(*self.start(), *ap.end()); + self.pages = PageRangeSized::Normal4KiB(PageRange::new(*self.start(), *ap.end())); // ensure the now-merged AllocatedPages doesn't run its drop handler and free its pages. core::mem::forget(ap); Ok(()) @@ -337,10 +532,33 @@ impl AllocatedPages { // ensure the original AllocatedPages doesn't run its drop handler and free its pages. core::mem::forget(self); Ok(( - AllocatedPages { pages: first }, - AllocatedPages { pages: second }, + AllocatedPages { pages: PageRangeSized::Normal4KiB(first) }, + AllocatedPages { pages: PageRangeSized::Normal4KiB(second) }, )) } + + /// Returns the size of the pages in this page range. + pub fn page_size(&self) -> MemChunkSize { + self.pages.page_size() + } + + pub fn to_2mb_allocated_pages(&mut self) { + self.pages = PageRangeSized::Huge2MiB( + PageRange::::try_from( + self.pages + .range() + .unwrap() + .clone()).unwrap()) + } + + pub fn to_1gb_allocated_pages(&mut self) { + self.pages = PageRangeSized::Huge1GiB( + PageRange::::try_from( + self.pages + .range() + .unwrap() + .clone()).unwrap()) + } } impl Drop for AllocatedPages { @@ -348,8 +566,19 @@ impl Drop for AllocatedPages { if self.size_in_pages() == 0 { return; } // trace!("page_allocator: deallocating {:?}", self); + // Convert huge pages back to default size if needed. + let pages = match self.page_size() { + MemChunkSize::Normal4K => self.pages.range().unwrap().clone(), + MemChunkSize::Huge2M => { + PageRange::::from(self.pages.range_2mb().unwrap()) + }, + MemChunkSize::Huge1G => { + PageRange::::from(self.pages.range_1gb().unwrap()) + } + }; + let chunk = Chunk { - pages: self.pages.clone(), + pages: pages, }; let mut list = FREE_PAGE_LIST.lock(); match &mut list.0 { @@ -925,4 +1154,4 @@ pub fn dump_page_allocator_state() { debug!("{:X?}", c); } debug!("---------------------------------------------------"); -} +} \ No newline at end of file diff --git a/kernel/page_table_entry/src/lib.rs b/kernel/page_table_entry/src/lib.rs index 2606ed3039..d73a22d08e 100644 --- a/kernel/page_table_entry/src/lib.rs +++ b/kernel/page_table_entry/src/lib.rs @@ -63,6 +63,42 @@ impl PageTableEntry { } } + /// Since only 4kb frames are used right now, we can't use the type parameter to get the correct size. + /// This separate function may not be necessary in the future. 
+ pub fn set_unmapped_2mb(&mut self) -> UnmapResult { + let frame = self.frame_value(); + let flags = self.flags(); + self.zero(); + + let frame_range = FrameRange::new( + frame, + Frame::containing_address(frame.start_address() + (4096 * 512))); + + if flags.is_exclusive() { + UnmapResult::Exclusive(UnmappedFrameRange(frame_range)) + } else { + UnmapResult::NonExclusive(frame_range) + } + } + /// Since only 4kb frames are used right now, we can't use the type parameter to get the correct size. + /// This separate function may not be necessary in the future. + pub fn set_unmapped_1gb(&mut self) -> UnmapResult { + let frame = self.frame_value(); + let flags = self.flags(); + self.zero(); + + let frame_range = FrameRange::new( + frame, + Frame::containing_address(frame.start_address() + (4096 * (512 * 512)))); + + if flags.is_exclusive() { + UnmapResult::Exclusive(UnmappedFrameRange(frame_range)) + } else { + UnmapResult::NonExclusive(frame_range) + } + } + + /// Returns this `PageTableEntry`'s flags. pub fn flags(&self) -> PteFlagsArch { PteFlagsArch::from_bits_truncate(self.0 & !PTE_FRAME_MASK) diff --git a/kernel/pte_flags/src/pte_flags_x86_64.rs b/kernel/pte_flags/src/pte_flags_x86_64.rs index d8c0ffc568..5938f689d4 100644 --- a/kernel/pte_flags/src/pte_flags_x86_64.rs +++ b/kernel/pte_flags/src/pte_flags_x86_64.rs @@ -224,6 +224,16 @@ impl PteFlagsX86_64 { self } + /// Returns a copy of this `PteFlagsX86_64` with the `HUGE` bit set or cleared. + /// + /// * If `enable` is `true`, this page will be treated as a huge page. + /// * If `enable` is `false`, this page will be treated as a page of standard 4KiB size. + #[must_use] + pub fn huge(mut self, enable: bool) -> Self { + self.set(Self::HUGE_PAGE, enable); + self + } + #[doc(alias("present"))] pub const fn is_valid(&self) -> bool { self.contains(Self::VALID) @@ -271,10 +281,10 @@ impl PteFlagsX86_64 { /// * P4, P3, and P2 entries should never set `NOT_EXECUTABLE`, /// only the lowest-level P1 entry should. /// * Clears the `EXCLUSIVE` bit. - /// * Currently, we do not use the `EXCLUSIVE` bit for P4, P3, or P2 entries, + /// * Currently, we only use the `EXCLUSIVE` bit for leaf nodes that directly map a frame in the page table, /// because another page table frame may re-use it (create another alias to it) /// without our page table implementation knowing about it. - /// * Only P1-level PTEs can map a frame exclusively. + /// * Only P1-level PTEs, and in the case of huge pages, P2 and P3-level PTEs, can map a frame exclusively. /// * Clears the PAT index value, as we only support PAT on P1-level PTEs. /// * Sets the `VALID` bit, as every P4, P3, and P2 entry must be valid. 
#[must_use] From dc91766fff926e1d42f379699faff5733a31e15d Mon Sep 17 00:00:00 2001 From: NIMogen Date: Wed, 13 Sep 2023 11:55:23 -0700 Subject: [PATCH 2/9] Suggest 5g of QEMU_MEMORY for 1gb page tests; contains works for other page sizes --- applications/test_1gb_huge_pages/src/lib.rs | 2 +- kernel/memory/src/paging/mapper.rs | 2 +- kernel/page_allocator/src/lib.rs | 32 ++++++++++----------- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/applications/test_1gb_huge_pages/src/lib.rs b/applications/test_1gb_huge_pages/src/lib.rs index b3b51e97ac..728031a709 100644 --- a/applications/test_1gb_huge_pages/src/lib.rs +++ b/applications/test_1gb_huge_pages/src/lib.rs @@ -13,7 +13,7 @@ use memory::{ }; pub fn main(_args: Vec) -> isize { - println!("NOTE: Before running, make sure you Theseus has been run with enough memory (make orun QEMU_MEMORY=3G)."); + println!("NOTE: Before running, make sure you Theseus has been run with enough memory (make orun QEMU_MEMORY=5G)."); let kernel_mmi_ref = memory::get_kernel_mmi_ref() .ok_or("KERNEL_MMI was not yet initialized!") .unwrap(); diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index 930d4a04db..e314ef6175 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -1525,4 +1525,4 @@ impl Mutability for Mutable { } mod private { pub trait Sealed { } -} \ No newline at end of file +} diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index ed52196933..13e3b48e90 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -167,6 +167,7 @@ impl PageRangeSized { } } + /// Returns a reference to the contained PageRange holding 4kb pages. Returns None if called on a PageRange holding huge pages. pub fn range(&self) -> Option<&PageRange> { match self { PageRangeSized::Normal4KiB(pr) => { @@ -184,9 +185,6 @@ impl PageRangeSized { PageRangeSized::Huge2MiB(pr) => { Ok(pr.clone()) } - // PageRangeSized::Normal4KiB(pr) => { - // Ok(PageRange::::try_from(*pr)?) - // } _ => { Err("Called range_2mb on a PageRange with a size other than 2mb") } @@ -199,9 +197,6 @@ impl PageRangeSized { PageRangeSized::Huge1GiB(pr) => { Ok(pr.clone()) } - // PageRangeSized::Normal4KiB(pr) => { - // Ok(PageRange::::try_from(*pr)?) - // } _ => { Err("Called range_1gb on a PageRange with a size other than 1gb") } @@ -213,14 +208,14 @@ impl PageRangeSized { PageRangeSized::Normal4KiB(pr) => { pr.contains(page) } - // PageRangeSized::Huge2MiB(pr) => { - // pr.contains(page) - // } - // PageRangeSized::Huge1GiB(pr) => { - // pr.contains(page) - // } - _ => { - false // TODO: change placeholder value + // page is a Page, so we need to perform a temporary conversion for other sizes + PageRangeSized::Huge2MiB(pr) => { + let pr_4k = PageRange::::from(pr.clone()); + pr_4k.contains(page) + } + PageRangeSized::Huge1GiB(pr) => { + let pr_4k = PageRange::::from(pr.clone()); + pr_4k.contains(page) } } } @@ -428,16 +423,19 @@ impl AllocatedPages { } /// Returns the starting `Page` in this range of pages. + /// This should ONLY be called on 4kb pages, and panics if called on huge pages. TODO: Change the panic behaviour pub fn start(&self) -> &Page { self.pages.start() } - /// Returns the ending `Page` (inclusive) in this range of pages. + /// Returns the ending `Page` (inclusive) in this range of + /// This should ONLY be called on 4kb pages, and panics if called on huge pages. 
TODO: Change the panic behaviour pub fn end(&self) -> &Page { self.pages.end() } /// Returns a reference to the inner `PageRange`, which is cloneable/iterable. + /// Use alternative range() methods for `PageRange` and `PageRange`. pub fn range(&self) -> &PageRange { &self.pages.range().expect("Called range() on a PageRange with a size other than 4kb") } @@ -542,6 +540,7 @@ impl AllocatedPages { self.pages.page_size() } + /// Converts a range of 4kb `AllocatedPages` into a range of 2mb `AllocatedPages`. pub fn to_2mb_allocated_pages(&mut self) { self.pages = PageRangeSized::Huge2MiB( PageRange::::try_from( @@ -551,6 +550,7 @@ impl AllocatedPages { .clone()).unwrap()) } + /// Converts a range of 4kb `AllocatedPages` into a range of 1gb `AllocatedPages`. pub fn to_1gb_allocated_pages(&mut self) { self.pages = PageRangeSized::Huge1GiB( PageRange::::try_from( @@ -1154,4 +1154,4 @@ pub fn dump_page_allocator_state() { debug!("{:X?}", c); } debug!("---------------------------------------------------"); -} \ No newline at end of file +} From 891969755e4397491a3f4382f52b0b4202faf527 Mon Sep 17 00:00:00 2001 From: NIMogen Date: Thu, 21 Sep 2023 13:15:14 -0700 Subject: [PATCH 3/9] added size-awareness to frames; moved sized enums into macro definitions in memory_structs --- applications/test_huge_pages/src/lib.rs | 5 +- kernel/frame_allocator/src/lib.rs | 389 ++++++++++-------- .../src/static_array_rb_tree.rs | 6 +- kernel/memory/src/lib.rs | 1 + kernel/memory/src/paging/mapper.rs | 9 +- kernel/memory_structs/src/lib.rs | 283 ++++++++++++- kernel/page_allocator/src/lib.rs | 389 ++++++++++-------- libs/range_inclusive/src/lib.rs | 6 + 8 files changed, 723 insertions(+), 365 deletions(-) diff --git a/applications/test_huge_pages/src/lib.rs b/applications/test_huge_pages/src/lib.rs index ef5b6b244f..2bceba3f32 100644 --- a/applications/test_huge_pages/src/lib.rs +++ b/applications/test_huge_pages/src/lib.rs @@ -59,9 +59,11 @@ pub fn main(_args: Vec) -> isize { PhysicalAddress::new_canonical(0x60000000), 512).expect("Could not allocate frames. Make sure you have enough memory for the kernel (compile with make orun QEMU_MEMORY=3G)."); + let allocated_2mb_frames = aligned_4k_frames.into_2mb_allocated_frames().expect("Could not convert range of allocated frames into huge allocated frames"); + let mut mapped_2mb_page = kernel_mmi_ref.lock().page_table.map_allocated_pages_to( aligned_2mb_page, - aligned_4k_frames, + allocated_2mb_frames, PteFlags::new().valid(true).writable(true), ).expect("test_huge_pages: call to map_allocated_pages failed"); @@ -90,6 +92,5 @@ pub fn main(_args: Vec) -> isize { // Dump entries to see if dropping has unmapped everything properly drop(mapped_2mb_page); kernel_mmi_ref.lock().page_table.dump_pte(VirtualAddress::new_canonical(0x20000000)); - 0 } diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 2bc3a5de4a..299c91d77a 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -1,17 +1,17 @@ //! Provides an allocator for physical memory frames. -//! The minimum unit of allocation is a single frame. +//! The minimum unit of allocation is a single frame. //! //! This is currently a modified and more complex version of the `page_allocator` crate. //! TODO: extract the common code and create a generic allocator that can be specialized to allocate pages or frames. -//! -//! This also supports early allocation of frames before heap allocation is available, -//! 
and does so behind the scenes using the same single interface. +//! +//! This also supports early allocation of frames before heap allocation is available, +//! and does so behind the scenes using the same single interface. //! Early pre-heap allocations are limited to tracking a small number of available chunks (currently 32). -//! +//! //! Once heap allocation is available, it uses a dynamically-allocated list of frame chunks to track allocations. -//! -//! The core allocation function is [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html), -//! but there are several convenience functions that offer simpler interfaces for general usage. +//! +//! The core allocation function is [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html), +//! but there are several convenience functions that offer simpler interfaces for general usage. //! //! # Notes and Missing Features //! This allocator only makes one attempt to merge deallocated frames into existing @@ -23,6 +23,7 @@ #![allow(clippy::blocks_in_if_conditions)] #![allow(incomplete_features)] #![feature(adt_const_params)] +#![feature(step_trait)] extern crate alloc; #[cfg(test)] @@ -31,11 +32,11 @@ mod test; mod static_array_rb_tree; // mod static_array_linked_list; -use core::{borrow::Borrow, cmp::{Ordering, min, max}, ops::{Deref, DerefMut}, fmt}; +use core::{iter::Step, borrow::Borrow, cmp::{Ordering, min, max}, ops::{Deref, DerefMut}, fmt}; use intrusive_collections::Bound; use kernel_config::memory::*; use log::{error, warn, debug, trace}; -use memory_structs::{PhysicalAddress, Frame, FrameRange, MemoryState}; +use memory_structs::{PhysicalAddress, Frame, FrameRange, MemoryState, MemChunkSize, Page4K, Page2M, Page1G, Page, PageSize, FrameRangeSized}; use spin::Mutex; use static_array_rb_tree::*; use static_assertions::assert_not_impl_any; @@ -44,40 +45,39 @@ const FRAME_SIZE: usize = PAGE_SIZE; const MIN_FRAME: Frame = Frame::containing_address(PhysicalAddress::zero()); const MAX_FRAME: Frame = Frame::containing_address(PhysicalAddress::new_canonical(usize::MAX)); -// Note: we keep separate lists for "free, general-purpose" areas and "reserved" areas, as it's much faster. +// Note: we keep separate lists for "free, general-purpose" areas and "reserved" areas, as it's much faster. -/// The single, system-wide list of free physical memory frames available for general usage. -static FREE_GENERAL_FRAMES_LIST: Mutex> = Mutex::new(StaticArrayRBTree::empty()); -/// The single, system-wide list of free physical memory frames reserved for specific usage. -static FREE_RESERVED_FRAMES_LIST: Mutex> = Mutex::new(StaticArrayRBTree::empty()); +/// The single, system-wide list of free physical memory frames available for general usage. +static FREE_GENERAL_FRAMES_LIST: Mutex> = Mutex::new(StaticArrayRBTree::empty()); +/// The single, system-wide list of free physical memory frames reserved for specific usage. +static FREE_RESERVED_FRAMES_LIST: Mutex> = Mutex::new(StaticArrayRBTree::empty()); /// The fixed list of all known regions that are available for general use. -/// This does not indicate whether these regions are currently allocated, +/// This does not indicate whether these regions are currently allocated, /// rather just where they exist and which regions are known to this allocator. static GENERAL_REGIONS: Mutex> = Mutex::new(StaticArrayRBTree::empty()); -/// The fixed list of all known regions that are reserved for specific purposes. 
-/// This does not indicate whether these regions are currently allocated, +/// The fixed list of all known regions that are reserved for specific purposes. +/// This does not indicate whether these regions are currently allocated, /// rather just where they exist and which regions are known to this allocator. static RESERVED_REGIONS: Mutex> = Mutex::new(StaticArrayRBTree::empty()); - /// Initialize the frame allocator with the given list of available and reserved physical memory regions. /// /// Any regions in either of the lists may overlap, this is checked for and handled properly. /// Reserved regions take priority -- if a reserved region partially or fully overlaps any part of a free region, -/// that portion will be considered reserved, not free. -/// -/// The iterator (`R`) over reserved physical memory regions must be cloneable, -/// as this runs before heap allocation is available, and we may need to iterate over it multiple times. -/// +/// that portion will be considered reserved, not free. +/// +/// The iterator (`R`) over reserved physical memory regions must be cloneable, +/// as this runs before heap allocation is available, and we may need to iterate over it multiple times. +/// /// ## Return /// Upon success, this function returns a callback function that allows the caller -/// (the memory subsystem init function) to convert a range of unmapped frames +/// (the memory subsystem init function) to convert a range of unmapped frames /// back into an [`UnmappedFrames`] object. pub fn init( free_physical_memory_areas: F, reserved_physical_memory_areas: R, -) -> Result UnmappedFrames, &'static str> +) -> Result UnmappedFrames, &'static str> where P: Borrow, F: IntoIterator, R: IntoIterator + Clone, @@ -85,7 +85,7 @@ pub fn init( if FREE_GENERAL_FRAMES_LIST .lock().len() != 0 || FREE_RESERVED_FRAMES_LIST.lock().len() != 0 || GENERAL_REGIONS .lock().len() != 0 || - RESERVED_REGIONS .lock().len() != 0 + RESERVED_REGIONS .lock().len() != 0 { return Err("BUG: Frame allocator was already initialized, cannot be initialized twice."); } @@ -144,7 +144,7 @@ pub fn init( } - // Finally, one last sanity check -- ensure no two regions overlap. + // Finally, one last sanity check -- ensure no two regions overlap. let all_areas = free_list[..free_list_idx].iter().flatten() .chain(reserved_list.iter().flatten()); for (i, elem) in all_areas.clone().enumerate() { @@ -183,7 +183,7 @@ pub fn init( } -/// The main logic of the initialization routine +/// The main logic of the initialization routine /// used to populate the list of free frame chunks. /// /// This function recursively iterates over the given `area` of frames @@ -198,9 +198,9 @@ fn check_and_add_free_region( where P: Borrow, R: IntoIterator + Clone, { - // This will be set to the frame that is the start of the current free region. + // This will be set to the frame that is the start of the current free region. let mut current_start = *area.start(); - // This will be set to the frame that is the end of the current free region. + // This will be set to the frame that is the end of the current free region. let mut current_end = *area.end(); // trace!("looking at sub-area {:X?} to {:X?}", current_start, current_end); @@ -260,7 +260,7 @@ fn check_and_add_free_region( /// since it ignores their actual range of frames. #[derive(Clone, Debug, Eq)] pub struct PhysicalMemoryRegion { - /// The Frames covered by this region, an inclusive range. + /// The Frames covered by this region, an inclusive range. 
pub frames: FrameRange, /// The type of this memory region, e.g., whether it's in a free or reserved region. pub typ: MemoryRegionType, @@ -270,7 +270,7 @@ impl PhysicalMemoryRegion { PhysicalMemoryRegion { frames, typ } } - /// Returns a new `PhysicalMemoryRegion` with an empty range of frames. + /// Returns a new `PhysicalMemoryRegion` with an empty range of frames. #[allow(unused)] const fn empty() -> PhysicalMemoryRegion { PhysicalMemoryRegion { @@ -311,8 +311,8 @@ impl Borrow for &'_ PhysicalMemoryRegion { pub enum MemoryRegionType { /// Memory that is available for any general purpose. Free, - /// Memory that is reserved for special use and is only ever allocated from if specifically requested. - /// This includes custom memory regions added by third parties, e.g., + /// Memory that is reserved for special use and is only ever allocated from if specifically requested. + /// This includes custom memory regions added by third parties, e.g., /// device memory discovered and added by device drivers later during runtime. Reserved, /// Memory of an unknown type. @@ -325,7 +325,7 @@ pub enum MemoryRegionType { /// /// Each `Frames` object is globally unique, meaning that the owner of a `Frames` object /// has globally-exclusive access to the range of frames it contains. -/// +/// /// A `Frames` object can be in one of four states: /// * `Free`: frames are owned by the frame allocator and have not been allocated for any use. /// * `Allocated`: frames have been removed from the allocator's free list and are owned elsewhere; @@ -345,7 +345,7 @@ pub enum MemoryRegionType { /// ``` /// (Free) <---> (Allocated) --> (Mapped) --> (Unmapped) --> (Allocated) <---> (Free) /// ``` -/// +/// /// # Ordering and Equality /// /// `Frames` implements the `Ord` trait, and its total ordering is ONLY based on @@ -355,7 +355,7 @@ pub enum MemoryRegionType { /// both of which are also based ONLY on the **starting** `Frame` of the `Frames`. /// Thus, comparing two `Frames` with the `==` or `!=` operators may not work as expected. /// since it ignores their actual range of frames. -/// +/// /// Similarly, `Frames` implements the `Borrow` trait to return a `Frame`, /// not a `FrameRange`. This is required so we can search for `Frames` in a sorted collection /// using a `Frame` value. @@ -365,7 +365,7 @@ pub struct Frames { /// The type of this memory chunk, e.g., whether it's in a free or reserved region. typ: MemoryRegionType, /// The Frames covered by this chunk, an inclusive range. - frames: FrameRange + frames: FrameRangeSized } /// A type alias for `Frames` in the `Free` state. @@ -390,14 +390,14 @@ impl FreeFrames { /// The frame allocator logic is responsible for ensuring that no two `Frames` objects overlap. pub(crate) fn new(typ: MemoryRegionType, frames: FrameRange) -> Self { Frames { - typ, - frames, + typ: typ, + frames: FrameRangeSized::Normal4KiB(frames), } } /// Consumes this `Frames` in the `Free` state and converts them into the `Allocated` state. - pub fn into_allocated_frames(mut self) -> AllocatedFrames { - let frames = core::mem::replace(&mut self.frames, FrameRange::empty()); + pub fn into_allocated_frames(mut self) -> AllocatedFrames { + let frames = core::mem::replace(&mut self.frames, FrameRangeSized::Normal4KiB(FrameRange::empty())); let af = Frames { typ: self.typ, frames, @@ -410,8 +410,8 @@ impl FreeFrames { impl AllocatedFrames { /// Consumes this `Frames` in the `Allocated` state and converts them into the `Mapped` state. 
/// This should only be called once a `MappedPages` has been created from the `Frames`. - pub fn into_mapped_frames(mut self) -> MappedFrames { - let frames = core::mem::replace(&mut self.frames, FrameRange::empty()); + pub fn into_mapped_frames(mut self) -> MappedFrames { + let frames = core::mem::replace(&mut self.frames, FrameRangeSized::Normal4KiB(FrameRange::empty())); let mf = Frames { typ: self.typ, frames, @@ -431,12 +431,34 @@ impl AllocatedFrames { _phantom: core::marker::PhantomData, } } + + pub fn into_2mb_allocated_frames(self) -> Result { + Ok(AllocatedFrames { + typ: self.typ, + frames: FrameRangeSized::Huge2MiB( + FrameRange::::try_from(self.frames + .range() + .unwrap() + .clone())?) + }) + } + + pub fn into_1gb_allocated_frames(self) -> Result { + Ok(AllocatedFrames { + typ: self.typ, + frames: FrameRangeSized::Huge1GiB( + FrameRange::::try_from(self.frames + .range() + .unwrap() + .clone())?) + }) + } } impl UnmappedFrames { /// Consumes this `Frames` in the `Unmapped` state and converts them into the `Allocated` state. - pub fn into_allocated_frames(mut self) -> AllocatedFrames { - let frames = core::mem::replace(&mut self.frames, FrameRange::empty()); + pub fn into_allocated_frames(mut self) -> AllocatedFrames { + let frames = core::mem::replace(&mut self.frames, FrameRangeSized::Normal4KiB(FrameRange::empty())); let af = Frames { typ: self.typ, frames @@ -450,7 +472,7 @@ impl UnmappedFrames { /// This function is a callback used to convert `UnmappedFrameRange` into `UnmappedFrames`. /// /// `UnmappedFrames` represents frames that have been unmapped by a page that had -/// previously exclusively mapped them, indicating that no others pages have been mapped +/// previously exclusively mapped them, indicating that no others pages have been mapped /// to those same frames, and thus, those frames can be safely deallocated. /// /// This exists to break the cyclic dependency chain between this crate and @@ -462,7 +484,7 @@ pub(crate) fn into_unmapped_frames(frames: FrameRange) -> UnmappedFrames { } else { MemoryRegionType::Free }; - Frames{ typ, frames } + Frames{ typ: typ, frames: FrameRangeSized::Normal4KiB(frames) } } @@ -471,16 +493,17 @@ impl Drop for Frames { match S { MemoryState::Free => { if self.size_in_frames() == 0 { return; } - - let frames = core::mem::replace(&mut self.frames, FrameRange::empty()); + + let frames = core::mem::replace(&mut self.frames, FrameRangeSized::Normal4KiB(FrameRange::empty())); + // Now, you can match against FrameRangeSized and convert the specific P to Page4K if necessary let free_frames: FreeFrames = Frames { typ: self.typ, frames }; - + let mut list = if free_frames.typ == MemoryRegionType::Reserved { FREE_RESERVED_FRAMES_LIST.lock() } else { FREE_GENERAL_FRAMES_LIST.lock() - }; - + }; + match &mut list.0 { // For early allocations, just add the deallocated chunk to the free pages list. Inner::Array(_) => { @@ -490,8 +513,8 @@ impl Drop for Frames { error!("Failed to insert deallocated frames into the list (array). The initial static array should be created with a larger size."); } } - - // For full-fledged deallocations, determine if we can merge the deallocated frames + + // For full-fledged deallocations, determine if we can merge the deallocated frames // with an existing contiguously-adjacent chunk or if we need to insert a new chunk. 
Inner::RBTree(ref mut tree) => { let mut cursor_mut = tree.lower_bound_mut(Bound::Included(free_frames.start())); @@ -544,14 +567,14 @@ impl Drop for Frames { } log::error!("BUG: couldn't insert deallocated {:?} into free frames list", self.frames); } - MemoryState::Allocated => { + MemoryState::Allocated => { // trace!("Converting AllocatedFrames to FreeFrames. Drop handler will be called again {:?}", self.frames); - let frames = core::mem::replace(&mut self.frames, FrameRange::empty()); - let _to_drop = FreeFrames { typ: self.typ, frames }; + let frames = core::mem::replace(&mut self.frames, FrameRangeSized::Normal4KiB(FrameRange::empty())); + let _to_drop = FreeFrames { typ: self.typ, frames }; } MemoryState::Mapped => panic!("We should never drop a mapped frame! It should be forgotten instead."), MemoryState::Unmapped => { - let frames = core::mem::replace(&mut self.frames, FrameRange::empty()); + let frames = core::mem::replace(&mut self.frames, FrameRangeSized::Normal4KiB(FrameRange::empty())); let _to_drop = AllocatedFrames { typ: self.typ, frames }; } } @@ -562,10 +585,38 @@ impl<'f> IntoIterator for &'f AllocatedFrames { type IntoIter = AllocatedFramesIter<'f>; type Item = AllocatedFrame<'f>; fn into_iter(self) -> Self::IntoIter { - AllocatedFramesIter { - _owner: self, - range: self.frames.iter(), + match self.frames.page_size() { + MemChunkSize::Normal4K => { + AllocatedFramesIter { + _owner: self, + range: self.range().unwrap().iter() + } + } + MemChunkSize::Huge2M => { + AllocatedFramesIter { + _owner: self, + range: FrameRange::::from(self.range_2mb().unwrap()).iter() + } + // AllocatedFramesIter { + // _owner: self, + // range: range_inclusive::RangeInclusive::new(FrameSized::Huge2M(*self.start_2m()), FrameSized::Huge2M(*self.end_2m())).iter() + // } + } + MemChunkSize::Huge1G => { + AllocatedFramesIter { + _owner: self, + range: FrameRange::::from(self.range_1gb().unwrap()).iter() + } + // AllocatedFramesIter { + // _owner: self, + // range: range_inclusive::RangeInclusive::new(FrameSized::Huge1G(*self.start_1g()), FrameSized::Huge1G(*self.end_1g())).iter() + // } + } } + // AllocatedFramesIter { + // _owner: self, + // range: self.range().unwrap().iter(), + // } } } @@ -579,13 +630,18 @@ impl<'f> IntoIterator for &'f AllocatedFrames { /// [`RangeInclusive`] instances rather than borrowing a reference to it. /// /// [`RangeInclusive`]: range_inclusive::RangeInclusive -pub struct AllocatedFramesIter<'f> { +pub struct AllocatedFramesIter<'f, P: PageSize = Page4K> { _owner: &'f AllocatedFrames, - range: range_inclusive::RangeInclusiveIterator, + range: range_inclusive::RangeInclusiveIterator>, } -impl<'f> Iterator for AllocatedFramesIter<'f> { - type Item = AllocatedFrame<'f>; +impl<'f, P: 'static + PageSize> Iterator for AllocatedFramesIter<'f, P> { + type Item = AllocatedFrame<'f, P>; fn next(&mut self) -> Option { + // match self._owner.page_size() { + // MemChunkSize::Normal4K => {} + // MemChunkSize::Huge2M => {} + // MemChunkSize::Huge1G => {} + // } self.range.next().map(|frame| AllocatedFrame { frame, _phantom: core::marker::PhantomData, @@ -594,16 +650,17 @@ impl<'f> Iterator for AllocatedFramesIter<'f> { } } + /// A reference to a single frame within a range of `AllocatedFrames`. -/// +/// /// The lifetime of this type is tied to the lifetime of its owning `AllocatedFrames`. #[derive(Debug)] -pub struct AllocatedFrame<'f> { - frame: Frame, - _phantom: core::marker::PhantomData<&'f Frame>, +pub struct AllocatedFrame<'f, P: 'static + PageSize = Page4K> { + frame: Frame

<P>, + _phantom: core::marker::PhantomData<&'f Frame<P>
>, } -impl<'f> Deref for AllocatedFrame<'f> { - type Target = Frame; +impl<'f, P: PageSize> Deref for AllocatedFrame<'f, P> { + type Target = Frame<P>
; fn deref(&self) -> &Self::Target { &self.frame } @@ -622,12 +679,12 @@ impl Frames { self.typ } - /// Returns a new `Frames` with an empty range of frames. + /// Returns a new `Frames` with an empty range of frames. /// Can be used as a placeholder, but will not permit any real usage. pub const fn empty() -> Frames { Frames { typ: MemoryRegionType::Unknown, - frames: FrameRange::empty(), + frames: FrameRangeSized::Normal4KiB(FrameRange::empty()), } } @@ -636,19 +693,19 @@ impl Frames { /// This function performs no allocation or re-mapping, it exists for convenience and usability purposes. /// /// The given `other` must be physically contiguous with `self`, i.e., come immediately before or after `self`. - /// That is, either `self.start == other.end + 1` or `self.end + 1 == other.start` must be true. + /// That is, either `self.start == other.end + 1` or `self.end + 1 == other.start` must be true. /// /// If either of those conditions are met, `self` is modified and `Ok(())` is returned, /// otherwise `Err(other)` is returned. pub fn merge(&mut self, other: Self) -> Result<(), Self> { - if self.is_empty() || other.is_empty() { + if self.range().unwrap().is_empty() || other.range().unwrap().is_empty() { return Err(other); } let frames = if *self.start() == *other.end() + 1 { // `other` comes contiguously before `self` FrameRange::new(*other.start(), *self.end()) - } + } else if *self.end() + 1 == *other.start() { // `self` comes contiguously before `other` FrameRange::new(*self.start(), *other.end()) @@ -659,31 +716,31 @@ impl Frames { }; // ensure the now-merged Frames doesn't run its drop handler - core::mem::forget(other); - self.frames = frames; + core::mem::forget(other); + self.frames = FrameRangeSized::Normal4KiB(frames); Ok(()) } /// Splits up the given `Frames` into multiple smaller `Frames`. - /// + /// /// Returns a `SplitFrames` instance containing three `Frames`: /// 1. The range of frames in `self` that are before the beginning of `frames_to_extract`. /// 2. The `Frames` containing the requested range of frames, `frames_to_extract`. /// 3. The range of frames in `self` that are after the end of `frames_to_extract`. - /// + /// /// If `frames_to_extract` is not contained within `self`, then `self` is returned unchanged within an `Err`. pub fn split_range( self, frames_to_extract: FrameRange ) -> Result, Self> { - - if !self.contains_range(&frames_to_extract) { + + if !self.range().unwrap().contains_range(&frames_to_extract) { return Err(self); } - + let start_frame = *frames_to_extract.start(); let start_to_end = frames_to_extract; - + let before_start = if start_frame == MIN_FRAME || start_frame == *self.start() { None } else { @@ -699,27 +756,27 @@ impl Frames { let typ = self.typ; // ensure the original Frames doesn't run its drop handler and free its frames. 
core::mem::forget(self); - Ok(SplitFrames { - before_start: before_start.map(|frames| Frames { typ, frames }), - start_to_end: Frames { typ, frames: start_to_end }, - after_end: after_end.map(|frames| Frames { typ, frames }), + Ok(SplitFrames { + before_start: before_start.map(|frames| Frames { typ: typ, frames: FrameRangeSized::Normal4KiB(frames) }), + start_to_end: Frames { typ, frames: FrameRangeSized::Normal4KiB(start_to_end) }, + after_end: after_end.map(|frames| Frames { typ, frames: FrameRangeSized::Normal4KiB(frames) }), }) } /// Splits this `Frames` into two separate `Frames` objects: /// * `[beginning : at_frame - 1]` /// * `[at_frame : end]` - /// + /// /// This function follows the behavior of [`core::slice::split_at()`], - /// thus, either one of the returned `Frames` objects may be empty. + /// thus, either one of the returned `Frames` objects may be empty. /// * If `at_frame == self.start`, the first returned `Frames` object will be empty. /// * If `at_frame == self.end + 1`, the second returned `Frames` object will be empty. - /// + /// /// Returns an `Err` containing this `Frames` if `at_frame` is otherwise out of bounds, or if `self` was empty. - /// + /// /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at pub fn split_at(self, at_frame: Frame) -> Result<(Self, Self), Self> { - if self.is_empty() { return Err(self); } + if self.range().unwrap().is_empty() { return Err(self); } let end_of_first = at_frame - 1; @@ -727,9 +784,9 @@ impl Frames { let first = FrameRange::empty(); let second = FrameRange::new(at_frame, *self.end()); (first, second) - } + } else if at_frame == (*self.end() + 1) && end_of_first >= *self.start() { - let first = FrameRange::new(*self.start(), *self.end()); + let first = FrameRange::new(*self.start(), *self.end()); let second = FrameRange::empty(); (first, second) } @@ -744,17 +801,17 @@ impl Frames { let typ = self.typ; // ensure the original Frames doesn't run its drop handler and free its frames. - core::mem::forget(self); + core::mem::forget(self); Ok(( - Frames { typ, frames: first }, - Frames { typ, frames: second }, + Frames { typ, frames: FrameRangeSized::Normal4KiB(first) }, + Frames { typ, frames: FrameRangeSized::Normal4KiB(second) }, )) } } impl Deref for Frames { - type Target = FrameRange; - fn deref(&self) -> &FrameRange { + type Target = FrameRangeSized; + fn deref(&self) -> &FrameRangeSized { &self.frames } } @@ -787,17 +844,17 @@ impl fmt::Debug for Frames { /// A series of pending actions related to frame allocator bookkeeping, -/// which may result in heap allocation. -/// -/// The actions are triggered upon dropping this struct. -/// This struct can be returned from the `allocate_frames()` family of functions -/// in order to allow the caller to precisely control when those actions -/// that may result in heap allocation should occur. -/// Such actions include adding chunks to lists of free frames or frames in use. -/// -/// The vast majority of use cases don't care about such precise control, +/// which may result in heap allocation. +/// +/// The actions are triggered upon dropping this struct. +/// This struct can be returned from the `allocate_frames()` family of functions +/// in order to allow the caller to precisely control when those actions +/// that may result in heap allocation should occur. +/// Such actions include adding chunks to lists of free frames or frames in use. 
+/// +/// The vast majority of use cases don't care about such precise control, /// so you can simply drop this struct at any time or ignore it -/// with a `let _ = ...` binding to instantly drop it. +/// with a `let _ = ...` binding to instantly drop it. pub struct DeferredAllocAction<'list> { /// A reference to the list into which we will insert the free general-purpose `Chunk`s. free_list: &'list Mutex>, @@ -809,7 +866,7 @@ pub struct DeferredAllocAction<'list> { free2: FreeFrames, } impl<'list> DeferredAllocAction<'list> { - fn new(free1: F1, free2: F2) -> DeferredAllocAction<'list> + fn new(free1: F1, free2: F2) -> DeferredAllocAction<'list> where F1: Into>, F2: Into>, { @@ -827,8 +884,8 @@ impl<'list> Drop for DeferredAllocAction<'list> { fn drop(&mut self) { let frames1 = core::mem::replace(&mut self.free1, Frames::empty()); let frames2 = core::mem::replace(&mut self.free2, Frames::empty()); - - // Insert all of the chunks, both allocated and free ones, into the list. + + // Insert all of the chunks, both allocated and free ones, into the list. if frames1.size_in_frames() > 0 { match frames1.typ() { MemoryRegionType::Free => { self.free_list.lock().insert(frames1).unwrap(); } @@ -854,7 +911,7 @@ enum AllocationError { AddressNotFree(Frame, usize), /// The requested address was outside the range of this allocator. AddressNotFound(Frame, usize), - /// The address space was full, or there was not a large-enough chunk + /// The address space was full, or there was not a large-enough chunk /// or enough remaining chunks that could satisfy the requested allocation size. OutOfAddressSpace(usize), /// The starting address was found, but not all successive contiguous frames were available. @@ -902,7 +959,7 @@ fn find_specific_chunk( return allocate_from_chosen_chunk(FrameRange::new(requested_frame, requested_frame + num_frames - 1), ValueRefMut::RBTree(cursor_mut), None); } else { // We found the chunk containing the requested address, but it was too small to cover all of the requested frames. - // Let's try to merge the next-highest contiguous chunk to see if those two chunks together + // Let's try to merge the next-highest contiguous chunk to see if those two chunks together // cover enough frames to fulfill the allocation request. // // trace!("Frame allocator: found chunk containing requested address, but it was too small. \ @@ -914,7 +971,7 @@ fn find_specific_chunk( cursor_mut.move_next();// cursor now points to the next chunk if let Some(next_chunk) = cursor_mut.get().map(|w| w.deref()) { if *chunk.end() + 1 == *next_chunk.start() { - // Here: next chunk was contiguous with the original chunk. + // Here: next chunk was contiguous with the original chunk. if requested_end_frame <= *next_chunk.end() { // trace!("Frame allocator: found suitably-large contiguous next {:?} after initial too-small {:?}", next_chunk, chunk); let next = cursor_mut.remove().map(|f| f.into_inner()); @@ -940,10 +997,10 @@ fn find_specific_chunk( } }; if let Some(next_chunk) = next_contiguous_chunk { - // We found a suitable chunk that came contiguously after the initial too-small chunk. + // We found a suitable chunk that came contiguously after the initial too-small chunk. // We would like to merge it into the initial chunk with just the reference (since we have a cursor pointing to it already), // but we can't get a mutable reference to the element the cursor is pointing to. - // So both chunks will be removed and then merged. + // So both chunks will be removed and then merged. 
return allocate_from_chosen_chunk(FrameRange::new(requested_frame, requested_frame + num_frames - 1), ValueRefMut::RBTree(cursor_mut), Some(next_chunk)); } } @@ -969,7 +1026,7 @@ fn find_any_chunk( // Skip chunks that are too-small or in the designated regions. if chunk.size_in_frames() < num_frames || chunk.typ() != MemoryRegionType::Free { continue; - } + } else { return allocate_from_chosen_chunk(FrameRange::new(*chunk.start(), *chunk.start() + num_frames - 1), ValueRefMut::Array(elem), None); } @@ -977,8 +1034,8 @@ fn find_any_chunk( } } Inner::RBTree(ref mut tree) => { - // Because we allocate new frames by peeling them off from the beginning part of a chunk, - // it's MUCH faster to start the search for free frames from higher addresses moving down. + // Because we allocate new frames by peeling them off from the beginning part of a chunk, + // it's MUCH faster to start the search for free frames from higher addresses moving down. // This results in an O(1) allocation time in the general case, until all address ranges are already in use. let mut cursor = tree.upper_bound_mut(Bound::<&FreeFrames>::Unbounded); while let Some(chunk) = cursor.get().map(|w| w.deref()) { @@ -1002,12 +1059,12 @@ fn find_any_chunk( } -/// Removes a `Frames` object from the RBTree. +/// Removes a `Frames` object from the RBTree. /// `frames_ref` is basically a wrapper over the cursor which stores the position of the frames. fn retrieve_frames_from_ref(mut frames_ref: ValueRefMut) -> Option { // Remove the chosen chunk from the free frame list. let removed_val = frames_ref.remove(); - + match removed_val { RemovedValue::Array(c) => c, RemovedValue::RBTree(option_frames) => { @@ -1016,10 +1073,10 @@ fn retrieve_frames_from_ref(mut frames_ref: ValueRefMut) -> Option, @@ -1107,7 +1164,7 @@ fn add_reserved_region_to_lists( match &mut frames_list.0 { Inner::Array(ref mut arr) => { for chunk in arr.iter().flatten() { - if let Some(_overlap) = chunk.overlap(&frames) { + if let Some(_overlap) = chunk.range().unwrap().overlap(&frames) { // trace!("Failed to add reserved region {:?} due to overlap {:?} with existing chunk {:?}", // frames, _overlap, chunk // ); @@ -1123,7 +1180,7 @@ fn add_reserved_region_to_lists( // so we can stop looking for overlapping regions once we pass the end of the new frames to add. break; } - if let Some(_overlap) = chunk.overlap(&frames) { + if let Some(_overlap) = chunk.range().unwrap().overlap(&frames) { // trace!("Failed to add reserved region {:?} due to overlap {:?} with existing chunk {:?}", // frames, _overlap, chunk // ); @@ -1150,25 +1207,25 @@ fn add_reserved_region_to_lists( /// The core frame allocation routine that allocates the given number of physical frames, /// optionally at the requested starting `PhysicalAddress`. -/// -/// This simply reserves a range of frames; it does not perform any memory mapping. +/// +/// This simply reserves a range of frames; it does not perform any memory mapping. /// Thus, the memory represented by the returned `AllocatedFrames` isn't directly accessible /// until you map virtual pages to them. -/// +/// /// Allocation is based on a red-black tree and is thus `O(log(n))`. /// Fragmentation isn't cleaned up until we're out of address space, but that's not really a big deal. -/// +/// /// # Arguments /// * `requested_paddr`: if `Some`, the returned `AllocatedFrames` will start at the `Frame` -/// containing this `PhysicalAddress`. +/// containing this `PhysicalAddress`. 
/// If `None`, the first available `Frame` range will be used, starting at any random physical address. -/// * `num_frames`: the number of `Frame`s to be allocated. -/// +/// * `num_frames`: the number of `Frame`s to be allocated. +/// /// # Return /// If successful, returns a tuple of two items: /// * the frames that were allocated, and -/// * an opaque struct representing details of bookkeeping-related actions that may cause heap allocation. -/// Those actions are deferred until this returned `DeferredAllocAction` struct object is dropped, +/// * an opaque struct representing details of bookkeeping-related actions that may cause heap allocation. +/// Those actions are deferred until this returned `DeferredAllocAction` struct object is dropped, /// allowing the caller (such as the heap implementation itself) to control when heap allocation may occur. pub fn allocate_frames_deferred( requested_paddr: Option, @@ -1178,7 +1235,7 @@ pub fn allocate_frames_deferred( warn!("frame_allocator: requested an allocation of 0 frames... stupid!"); return Err("cannot allocate zero frames"); } - + if let Some(paddr) = requested_paddr { let start_frame = Frame::containing_address(paddr); let mut free_reserved_frames_list = FREE_RESERVED_FRAMES_LIST.lock(); @@ -1224,9 +1281,9 @@ pub fn allocate_frames_deferred( /// Similar to [`allocated_frames_deferred()`](fn.allocate_frames_deferred.html), -/// but accepts a size value for the allocated frames in number of bytes instead of number of frames. -/// -/// This function still allocates whole frames by rounding up the number of bytes. +/// but accepts a size value for the allocated frames in number of bytes instead of number of frames. +/// +/// This function still allocates whole frames by rounding up the number of bytes. pub fn allocate_frames_by_bytes_deferred( requested_paddr: Option, num_bytes: usize, @@ -1242,8 +1299,8 @@ pub fn allocate_frames_by_bytes_deferred( /// Allocates the given number of frames with no constraints on the starting physical address. -/// -/// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. +/// +/// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. pub fn allocate_frames(num_frames: usize) -> Option { allocate_frames_deferred(None, num_frames) .map(|(af, _action)| af) @@ -1251,11 +1308,11 @@ pub fn allocate_frames(num_frames: usize) -> Option { } -/// Allocates frames with no constraints on the starting physical address, -/// with a size given by the number of bytes. -/// -/// This function still allocates whole frames by rounding up the number of bytes. -/// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. +/// Allocates frames with no constraints on the starting physical address, +/// with a size given by the number of bytes. +/// +/// This function still allocates whole frames by rounding up the number of bytes. +/// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. pub fn allocate_frames_by_bytes(num_bytes: usize) -> Option { allocate_frames_by_bytes_deferred(None, num_bytes) .map(|(af, _action)| af) @@ -1263,10 +1320,10 @@ pub fn allocate_frames_by_bytes(num_bytes: usize) -> Option { } -/// Allocates frames starting at the given `PhysicalAddress` with a size given in number of bytes. -/// -/// This function still allocates whole frames by rounding up the number of bytes. -/// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. 
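A quick usage sketch for the `allocate_frames*` family documented above (illustrative; assumes a caller returning `Result<(), &'static str>`, and the specific address below is only an example of a region that must actually be free or reserved-but-unused):

use frame_allocator::{allocate_frames, allocate_frames_by_bytes, allocate_frames_at};
use memory_structs::PhysicalAddress;

// Any 8 free frames, anywhere in physical memory.
let _any = allocate_frames(8).ok_or("out of physical frames")?;
// At least 10_000 bytes, rounded up to whole 4 KiB frames (three of them).
let _rounded = allocate_frames_by_bytes(10_000).ok_or("out of physical frames")?;
// Frames starting at a specific physical address, e.g., device memory discovered at runtime.
let _at = allocate_frames_at(PhysicalAddress::new_canonical(0xFE00_0000), 2)?;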
+/// Allocates frames starting at the given `PhysicalAddress` with a size given in number of bytes. +/// +/// This function still allocates whole frames by rounding up the number of bytes. +/// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. pub fn allocate_frames_by_bytes_at(paddr: PhysicalAddress, num_bytes: usize) -> Result { allocate_frames_by_bytes_deferred(Some(paddr), num_bytes) .map(|(af, _action)| af) @@ -1274,8 +1331,8 @@ pub fn allocate_frames_by_bytes_at(paddr: PhysicalAddress, num_bytes: usize) -> /// Allocates the given number of frames starting at (inclusive of) the frame containing the given `PhysicalAddress`. -/// -/// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. +/// +/// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. pub fn allocate_frames_at(paddr: PhysicalAddress, num_frames: usize) -> Result { allocate_frames_deferred(Some(paddr), num_frames) .map(|(af, _action)| af) @@ -1319,7 +1376,7 @@ where FramesIteratorRequest::AllocateAt { requested_frame, num_frames } => { frame_alloc_request = Some((requested_frame, num_frames)); break; - } + } } } @@ -1337,10 +1394,10 @@ where /// Converts the frame allocator from using static memory (a primitive array) to dynamically-allocated memory. -/// -/// Call this function once heap allocation is available. +/// +/// Call this function once heap allocation is available. /// Calling this multiple times is unnecessary but harmless, as it will do nothing after the first invocation. -#[doc(hidden)] +#[doc(hidden)] pub fn convert_frame_allocator_to_heap_based() { FREE_GENERAL_FRAMES_LIST.lock().convert_to_heap_allocated(); FREE_RESERVED_FRAMES_LIST.lock().convert_to_heap_allocated(); @@ -1348,8 +1405,8 @@ pub fn convert_frame_allocator_to_heap_based() { RESERVED_REGIONS.lock().convert_to_heap_allocated(); } -/// A debugging function used to dump the full internal state of the frame allocator. -#[doc(hidden)] +/// A debugging function used to dump the full internal state of the frame allocator. +#[doc(hidden)] pub fn dump_frame_allocator_state() { debug!("----------------- FREE GENERAL FRAMES ---------------"); FREE_GENERAL_FRAMES_LIST.lock().iter().for_each(|e| debug!("\t {:?}", e) ); diff --git a/kernel/frame_allocator/src/static_array_rb_tree.rs b/kernel/frame_allocator/src/static_array_rb_tree.rs index 2d9d0299dd..da4d0479ee 100644 --- a/kernel/frame_allocator/src/static_array_rb_tree.rs +++ b/kernel/frame_allocator/src/static_array_rb_tree.rs @@ -89,7 +89,7 @@ impl StaticArrayRBTree { } -impl StaticArrayRBTree { +impl StaticArrayRBTree { /// Push the given `value` into this collection. /// /// If the inner collection is an array, it is pushed onto the back of the array. @@ -105,7 +105,7 @@ impl StaticArrayRBTree { return Ok(ValueRefMut::Array(elem)); } } - log::error!("Out of space in StaticArrayRBTree's inner array, failed to insert value."); + log::error!("Out of space in StaticArrayRBTree's inner array, failed to insert value. 
{:?}", value); Err(value) } Inner::RBTree(tree) => { @@ -176,7 +176,7 @@ impl StaticArrayRBTree { // } } - +#[derive(Debug)] pub enum RemovedValue { Array(Option), RBTree(Option>>), diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs index a3161bd58e..6dbd3a4145 100644 --- a/kernel/memory/src/lib.rs +++ b/kernel/memory/src/lib.rs @@ -39,6 +39,7 @@ pub use page_allocator::{ pub use frame_allocator::{ AllocatedFrames, UnmappedFrames, + AllocatedFrame, allocate_frames_deferred, allocate_frames_by_bytes_deferred, allocate_frames, diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index e314ef6175..36b6369911 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -19,7 +19,7 @@ use core::{ }; use log::{error, warn, debug, trace}; use memory_structs::{Page4K, Page2M, Page1G, MemChunkSize}; -use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, Frame, PageRange, FrameRange, AllocatedPages, AllocatedFrames, UnmappedFrames}; +use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, Frame, PageRange, FrameRange, AllocatedPages, AllocatedFrames, UnmappedFrames, AllocatedFrame}; use crate::paging::{ get_current_p4, table::{P4, UPCOMING_P4, Table, Level4}, @@ -241,14 +241,14 @@ impl Mapper { } } MemChunkSize::Huge2M => { - if pages_count * 512 != frames_count { + if pages_count != frames_count { error!("map_allocated_pages_to(): pages {:?} count {} must equal frames {:?} count {}!", pages, pages_count, frames.borrow(), frames_count ); return Err("map_allocated_pages_to(): page count must equal frame count"); } // Temporarily define a custom step over the page range until correct behaviour is implemented for huge pages - for (page, frame) in pages.range_2mb().clone().into_iter().zip(frames.borrow().into_iter().step_by(512)) { + for (page, frame) in pages.range_2mb().clone().into_iter().zip(frames.borrow().into_iter() /*into_iter().step_by(512)*/) { actual_flags = actual_flags.huge(true); let p3 = self.p4_mut().next_table_create(page.p4_index(), higher_level_flags); let p2 = p3.next_table_create(page.p3_index(), higher_level_flags); @@ -258,7 +258,8 @@ impl Mapper { return Err("map_allocated_pages_to(): page was already in use"); } - p2[page.p2_index()].set_entry(frame, actual_flags); + // let af = Frame::::from(*frame.start()); + p2[page.p2_index()].set_entry(frame ,actual_flags); } } MemChunkSize::Huge1G => { diff --git a/kernel/memory_structs/src/lib.rs b/kernel/memory_structs/src/lib.rs index 20181c16eb..aa265b983c 100644 --- a/kernel/memory_structs/src/lib.rs +++ b/kernel/memory_structs/src/lib.rs @@ -3,7 +3,7 @@ //! The types of interest are divided into three categories: //! 1. addresses: `VirtualAddress` and `PhysicalAddress`. //! 2. "chunk" types: `Page` and `Frame`. -//! 3. ranges of chunks: `PageRange` and `FrameRange`. +//! 3. ranges of chunks: `PageRange` and `FrameRange`. #![no_std] #![feature(step_trait)] @@ -100,9 +100,9 @@ macro_rules! implement_address { #[doc = "A " $desc " memory address, which is a `usize` under the hood."] #[derive( - Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, - Binary, Octal, LowerHex, UpperHex, - BitAnd, BitOr, BitXor, BitAndAssign, BitOrAssign, BitXorAssign, + Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, + Binary, Octal, LowerHex, UpperHex, + BitAnd, BitOr, BitXor, BitAndAssign, BitOrAssign, BitXorAssign, Add, Sub, AddAssign, SubAssign, FromBytes, )] @@ -111,7 +111,7 @@ macro_rules! 
implement_address { impl $TypeName { #[doc = "Creates a new `" $TypeName "`, returning an error if the address is not canonical.\n\n \ - This is useful for checking whether an address is valid before using it. + This is useful for checking whether an address is valid before using it. For example, on x86_64, virtual addresses are canonical if their upper bits `(64:48]` are sign-extended from bit 47, and physical addresses are canonical if their upper bits `(64:52]` are 0."] @@ -471,21 +471,30 @@ macro_rules! implement_page_frame { } } impl From<$TypeName> for $TypeName { - fn from(p: $TypeName) -> Self { - Self { + fn from(p: $TypeName) -> Self { + Self { number: p.number, size: PhantomData } } } impl From<$TypeName> for $TypeName { - fn from(p: $TypeName) -> Self { - Self { + fn from(p: $TypeName) -> Self { + Self { number: p.number, size: PhantomData } } } + + impl $TypeName
<P>
{ + pub fn convert_to_4k(&self) -> $TypeName { + $TypeName { + number: self.number, + size: PhantomData + } + } + } } }; } @@ -526,16 +535,16 @@ impl Page

{ macro_rules! implement_page_frame_range { ($TypeName:ident, $desc:literal, $short:ident, $chunk:ident, $address:ident) => { paste! { // using the paste crate's macro for easy concatenation - + #[doc = "A range of [`" $chunk "`]s that are contiguous in " $desc " memory."] #[derive(Clone, PartialEq, Eq)] pub struct $TypeName(RangeInclusive<$chunk::
<P>
>); impl $TypeName { - #[doc = "Creates a `" $TypeName "` that will always yield `None` when iterated."] - pub const fn empty() -> Self { - Self::new($chunk { number: 1, size: PhantomData }, $chunk { number: 0, size: PhantomData }) - } + // #[doc = "Creates a `" $TypeName "` that will always yield `None` when iterated."] + // pub const fn empty() -> Self { + // Self::new($chunk { number: 1, size: PhantomData }, $chunk { number: 0, size: PhantomData }) + // } #[doc = "A convenience method for creating a new `" $TypeName "` that spans \ all [`" $chunk "`]s from the given [`" $address "`] to an end bound based on the given size."] @@ -552,7 +561,24 @@ macro_rules! implement_page_frame_range { } } } + impl $TypeName { + #[doc = "Creates a `" $TypeName "` that will always yield `None` when iterated."] + pub const fn empty_2mb() -> Self { + Self::new($chunk { number: 1, size: PhantomData }, $chunk { number: 0, size: PhantomData }) + } + } + impl $TypeName { + #[doc = "Creates a `" $TypeName "` that will always yield `None` when iterated."] + pub const fn empty_1gb() -> Self { + Self::new($chunk { number: 1, size: PhantomData }, $chunk { number: 0, size: PhantomData }) + } + } impl $TypeName
<P>
{ + #[doc = "Creates a `" $TypeName "` that will always yield `None` when iterated."] + pub const fn empty() -> Self { + Self::new($chunk { number: 1, size: PhantomData }, $chunk { number: 0, size: PhantomData }) + } + #[doc = "Creates a new range of [`" $chunk "`]s that spans from `start` to `end`, both inclusive bounds."] pub const fn new(start: $chunk

<P>, end: $chunk<P>) -> $TypeName<P>
{ $TypeName(RangeInclusive::new(start, end)) @@ -669,7 +695,7 @@ macro_rules! implement_page_frame_range { } } - + #[doc = "A `" $TypeName "` that implements `Copy`."] #[derive(Clone, Copy)] pub struct [] { @@ -722,6 +748,233 @@ macro_rules! implement_page_frame_range { } } } + + /// An enum used to wrap the generic $TypeName variants corresponding to different page sizes. + /// Additional methods corresponding to $TypeName methods are provided in order to destructure the enum variants. + #[derive(Debug, Clone, PartialEq, Eq)] + pub enum [<$TypeName Sized>] { + Normal4KiB($TypeName), + Huge2MiB($TypeName), + Huge1GiB($TypeName), + } + + // These methods mostly destructure the enum in order to call internal methods + impl [<$TypeName Sized>] { + /// Get the size of the pages for the contained $TypeName + pub fn page_size(&self) -> MemChunkSize { + match self { + Self::Normal4KiB(pr) => { + pr.start().page_size() + } + Self::Huge2MiB(pr) => { + pr.start().page_size() + } + Self::Huge1GiB(pr) => { + pr.start().page_size() + } + } + } + + /// Returns a reference to the contained $TypeName holding 4kb pages. Returns None if called on a $TypeName holding huge pages. + pub fn range(&self) -> Option<&$TypeName> { + match self { + Self::Normal4KiB(pr) => { + Some(pr) + } + _ => { + None + } + } + } + + /// range() equivalent for 2MiB page ranges + pub fn range_2mb(&self) -> Result<$TypeName, &'static str> { + match self { + Self::Huge2MiB(pr) => { + Ok(pr.clone()) + } + _ => { + Err("Called range_2mb on a $TypeName with a size other than 2mb") + } + } + } + + /// range() equivalent for 1GiB page ranges + pub fn range_1gb(&self) -> Result<$TypeName, &'static str> { + match self { + Self::Huge1GiB(pr) => { + Ok(pr.clone()) + } + _ => { + Err("Called range_1gb on a $TypeName with a size other than 1gb") + } + } + } + + pub fn contains(&self, page: &$chunk) -> bool { + match self { + Self::Normal4KiB(pr) => { + pr.contains(page) + } + // 'page' is a 4kb chunk, so we need to perform a temporary conversion for other sizes + Self::Huge2MiB(pr) => { + let pr_4k = $TypeName::::from(pr.clone()); + pr_4k.contains(page) + } + Self::Huge1GiB(pr) => { + let pr_4k = $TypeName::::from(pr.clone()); + pr_4k.contains(page) + } + } + } + + pub const fn offset_of_address(&self, addr: $address) -> Option { + match self { + Self::Normal4KiB(pr) => { + pr.offset_of_address(addr) + } + Self::Huge2MiB(pr) => { + pr.offset_of_address(addr) + } + Self::Huge1GiB(pr) => { + pr.offset_of_address(addr) + } + } + } + + pub const fn address_at_offset(&self, offset: usize) -> Option<$address> { + match self { + Self::Normal4KiB(pr) => { + pr.address_at_offset(offset) + } + Self::Huge2MiB(pr) => { + pr.address_at_offset(offset) + } + Self::Huge1GiB(pr) => { + pr.address_at_offset(offset) + } + } + } + + /// Returns the starting `VirtualAddress` in this range of pages. + pub fn start_address(&self) -> $address { + match self { + Self::Normal4KiB(pr) => { + pr.start_address() + } + Self::Huge2MiB(pr) => { + pr.start_address() + } + Self::Huge1GiB(pr) => { + pr.start_address() + } + } + } + + /// Returns the size in bytes of this range of pages. + pub fn size_in_bytes(&self) -> usize { + match self { + Self::Normal4KiB(pr) => { + pr.size_in_bytes() + } + Self::Huge2MiB(pr) => { + pr.size_in_bytes() + } + Self::Huge1GiB(pr) => { + pr.size_in_bytes() + } + } + } + + /// Returns the size in number of pages of this range of pages. 
+ pub fn [](&self) -> usize { + match self { + Self::Normal4KiB(pr) => { + pr.[]() + } + Self::Huge2MiB(pr) => { + pr.[]() + } + Self::Huge1GiB(pr) => { + pr.[]() + } + } + } + + /// Returns the starting `$chunk` in this range of pages. + /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. + pub fn start(&self) -> &$chunk { + match self { + Self::Normal4KiB(pr) => { + pr.start() + } + _ => { + panic!("Attempt to get the start of a huge page range as a 4KiB page."); + } + } + } + + /// Returns the ending `$chunk` (inclusive) in this range of pages. + /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. + pub fn end(&self) -> &$chunk { + match self { + Self::Normal4KiB(pr) => { + pr.end() + } + _ => { + panic!("Attempt to get the end of a huge page range as a 4KiB page."); + } + } + } + + pub fn start_2m(&self) -> &$chunk { + match self { + Self::Huge2MiB(pr) => { + pr.start() + } + _ => { + panic!("Attempt to get the start of a huge page range as a 4KiB page."); + } + } + } + + /// Returns the ending `$chunk` (inclusive) in this range of pages. + /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. + pub fn end_2m(&self) -> &$chunk { + match self { + Self::Huge2MiB(pr) => { + pr.end() + } + _ => { + panic!("Attempt to get the end of a huge page range as a 4KiB page."); + } + } + } + + pub fn start_1g(&self) -> &$chunk { + match self { + Self::Huge1GiB(pr) => { + pr.start() + } + _ => { + panic!("Attempt to get the start of a huge page range as a 4KiB page."); + } + } + } + + /// Returns the ending `$chunk` (inclusive) in this range of pages. + /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. + pub fn end_1g(&self) -> &$chunk { + match self { + Self::Huge1GiB(pr) => { + pr.end() + } + _ => { + panic!("Attempt to get the end of a huge page range as a 4KiB page."); + } + } + } + } } }; } diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index 13e3b48e90..1900e73633 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -32,7 +32,7 @@ mod static_array_rb_tree; use core::{borrow::Borrow, cmp::{Ordering, max, min}, fmt, ops::{Deref, DerefMut}}; use kernel_config::memory::*; -use memory_structs::{VirtualAddress, Page, PageRange, Page1G, Page2M, Page4K, MemChunkSize}; +use memory_structs::{VirtualAddress, Page, PageRange, Page1G, Page2M, Page4K, MemChunkSize, PageRangeSized}; use spin::{Mutex, Once}; use static_array_rb_tree::*; @@ -143,182 +143,182 @@ pub fn init(end_vaddr_of_low_designated_region: VirtualAddress) -> Result<(), &' /// An enum used to wrap the generic PageRange variants corresponding to different page sizes. /// Additional methods corresponding to PageRange methods are provided in order to destructure the enum variants. 
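The enum being commented out below is the same shape that the `implement_page_frame_range!` macro above now generates per range type (e.g., `PageRangeSized`, `FrameRangeSized`); a short destructuring sketch (illustrative only):

use memory_structs::{MemChunkSize, PageRangeSized, VirtualAddress};

// Recover a concretely-typed starting address from the size-erased wrapper.
fn first_address(range: &PageRangeSized) -> VirtualAddress {
    match range.page_size() {
        // 4 KiB ranges expose the inner PageRange via range(); the huge variants
        // return None there and must go through their size-specific getters.
        MemChunkSize::Normal4K => range.range().unwrap().start_address(),
        MemChunkSize::Huge2M => range.range_2mb().unwrap().start_address(),
        MemChunkSize::Huge1G => range.range_1gb().unwrap().start_address(),
    }
}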
-#[derive(Debug, Clone, PartialEq, Eq)] -pub enum PageRangeSized { - Normal4KiB(PageRange), - Huge2MiB(PageRange), - Huge1GiB(PageRange), -} - -// These methods mostly destructure the enum in order to call internal methods -impl PageRangeSized { - /// Get the size of the pages for the contained PageRange - pub fn page_size(&self) -> MemChunkSize { - match self { - PageRangeSized::Normal4KiB(pr) => { - pr.start().page_size() - } - PageRangeSized::Huge2MiB(pr) => { - pr.start().page_size() - } - PageRangeSized::Huge1GiB(pr) => { - pr.start().page_size() - } - } - } - - /// Returns a reference to the contained PageRange holding 4kb pages. Returns None if called on a PageRange holding huge pages. - pub fn range(&self) -> Option<&PageRange> { - match self { - PageRangeSized::Normal4KiB(pr) => { - Some(pr) - } - _ => { - None - } - } - } +// #[derive(Debug, Clone, PartialEq, Eq)] +// pub enum PageRangeSized { +// Normal4KiB(PageRange), +// Huge2MiB(PageRange), +// Huge1GiB(PageRange), +// } + +// // These methods mostly destructure the enum in order to call internal methods +// impl PageRangeSized { +// /// Get the size of the pages for the contained PageRange +// pub fn page_size(&self) -> MemChunkSize { +// match self { +// PageRangeSized::Normal4KiB(pr) => { +// pr.start().page_size() +// } +// PageRangeSized::Huge2MiB(pr) => { +// pr.start().page_size() +// } +// PageRangeSized::Huge1GiB(pr) => { +// pr.start().page_size() +// } +// } +// } + +// /// Returns a reference to the contained PageRange holding 4kb pages. Returns None if called on a PageRange holding huge pages. +// pub fn range(&self) -> Option<&PageRange> { +// match self { +// PageRangeSized::Normal4KiB(pr) => { +// Some(pr) +// } +// _ => { +// None +// } +// } +// } - /// range() equivalent for 2MiB page ranges - pub fn range_2mb(&self) -> Result, &'static str> { - match self { - PageRangeSized::Huge2MiB(pr) => { - Ok(pr.clone()) - } - _ => { - Err("Called range_2mb on a PageRange with a size other than 2mb") - } - } - } - - /// range() equivalent for 1GiB page ranges - pub fn range_1gb(&self) -> Result, &'static str> { - match self { - PageRangeSized::Huge1GiB(pr) => { - Ok(pr.clone()) - } - _ => { - Err("Called range_1gb on a PageRange with a size other than 1gb") - } - } - } - - pub fn contains(&self, page: &Page) -> bool { - match self { - PageRangeSized::Normal4KiB(pr) => { - pr.contains(page) - } - // page is a Page, so we need to perform a temporary conversion for other sizes - PageRangeSized::Huge2MiB(pr) => { - let pr_4k = PageRange::::from(pr.clone()); - pr_4k.contains(page) - } - PageRangeSized::Huge1GiB(pr) => { - let pr_4k = PageRange::::from(pr.clone()); - pr_4k.contains(page) - } - } - } - - pub const fn offset_of_address(&self, addr: VirtualAddress) -> Option { - match self { - PageRangeSized::Normal4KiB(pr) => { - pr.offset_of_address(addr) - } - PageRangeSized::Huge2MiB(pr) => { - pr.offset_of_address(addr) - } - PageRangeSized::Huge1GiB(pr) => { - pr.offset_of_address(addr) - } - } - } - - pub const fn address_at_offset(&self, offset: usize) -> Option { - match self { - PageRangeSized::Normal4KiB(pr) => { - pr.address_at_offset(offset) - } - PageRangeSized::Huge2MiB(pr) => { - pr.address_at_offset(offset) - } - PageRangeSized::Huge1GiB(pr) => { - pr.address_at_offset(offset) - } - } - } +// /// range() equivalent for 2MiB page ranges +// pub fn range_2mb(&self) -> Result, &'static str> { +// match self { +// PageRangeSized::Huge2MiB(pr) => { +// Ok(pr.clone()) +// } +// _ => { +// Err("Called range_2mb on a 
PageRange with a size other than 2mb") +// } +// } +// } + +// /// range() equivalent for 1GiB page ranges +// pub fn range_1gb(&self) -> Result, &'static str> { +// match self { +// PageRangeSized::Huge1GiB(pr) => { +// Ok(pr.clone()) +// } +// _ => { +// Err("Called range_1gb on a PageRange with a size other than 1gb") +// } +// } +// } + +// pub fn contains(&self, page: &Page) -> bool { +// match self { +// PageRangeSized::Normal4KiB(pr) => { +// pr.contains(page) +// } +// // page is a Page, so we need to perform a temporary conversion for other sizes +// PageRangeSized::Huge2MiB(pr) => { +// let pr_4k = PageRange::::from(pr.clone()); +// pr_4k.contains(page) +// } +// PageRangeSized::Huge1GiB(pr) => { +// let pr_4k = PageRange::::from(pr.clone()); +// pr_4k.contains(page) +// } +// } +// } + +// pub const fn offset_of_address(&self, addr: VirtualAddress) -> Option { +// match self { +// PageRangeSized::Normal4KiB(pr) => { +// pr.offset_of_address(addr) +// } +// PageRangeSized::Huge2MiB(pr) => { +// pr.offset_of_address(addr) +// } +// PageRangeSized::Huge1GiB(pr) => { +// pr.offset_of_address(addr) +// } +// } +// } + +// pub const fn address_at_offset(&self, offset: usize) -> Option { +// match self { +// PageRangeSized::Normal4KiB(pr) => { +// pr.address_at_offset(offset) +// } +// PageRangeSized::Huge2MiB(pr) => { +// pr.address_at_offset(offset) +// } +// PageRangeSized::Huge1GiB(pr) => { +// pr.address_at_offset(offset) +// } +// } +// } - /// Returns the starting `VirtualAddress` in this range of pages. - pub fn start_address(&self) -> VirtualAddress { - match self { - PageRangeSized::Normal4KiB(pr) => { - pr.start_address() - } - PageRangeSized::Huge2MiB(pr) => { - pr.start_address() - } - PageRangeSized::Huge1GiB(pr) => { - pr.start_address() - } - } - } - - /// Returns the size in bytes of this range of pages. - pub fn size_in_bytes(&self) -> usize { - match self { - PageRangeSized::Normal4KiB(pr) => { - pr.size_in_bytes() - } - PageRangeSized::Huge2MiB(pr) => { - pr.size_in_bytes() - } - PageRangeSized::Huge1GiB(pr) => { - pr.size_in_bytes() - } - } - } - - /// Returns the size in number of pages of this range of pages. - pub fn size_in_pages(&self) -> usize { - match self { - PageRangeSized::Normal4KiB(pr) => { - pr.size_in_pages() - } - PageRangeSized::Huge2MiB(pr) => { - pr.size_in_pages() - } - PageRangeSized::Huge1GiB(pr) => { - pr.size_in_pages() - } - } - } - - /// Returns the starting `Page` in this range of pages. - /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. - pub fn start(&self) -> &Page { - match self { - PageRangeSized::Normal4KiB(pr) => { - pr.start() - } - _ => { - panic!("Attempt to get the start of a huge page range as a 4KiB page."); - } - } - } - - /// Returns the ending `Page` (inclusive) in this range of pages. - /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. - pub fn end(&self) -> &Page { - match self { - PageRangeSized::Normal4KiB(pr) => { - pr.end() - } - _ => { - panic!("Attempt to get the end of a huge page range as a 4KiB page."); - } - } - } -} +// /// Returns the starting `VirtualAddress` in this range of pages. 
+// pub fn start_address(&self) -> VirtualAddress { +// match self { +// PageRangeSized::Normal4KiB(pr) => { +// pr.start_address() +// } +// PageRangeSized::Huge2MiB(pr) => { +// pr.start_address() +// } +// PageRangeSized::Huge1GiB(pr) => { +// pr.start_address() +// } +// } +// } + +// /// Returns the size in bytes of this range of pages. +// pub fn size_in_bytes(&self) -> usize { +// match self { +// PageRangeSized::Normal4KiB(pr) => { +// pr.size_in_bytes() +// } +// PageRangeSized::Huge2MiB(pr) => { +// pr.size_in_bytes() +// } +// PageRangeSized::Huge1GiB(pr) => { +// pr.size_in_bytes() +// } +// } +// } + +// /// Returns the size in number of pages of this range of pages. +// pub fn size_in_pages(&self) -> usize { +// match self { +// PageRangeSized::Normal4KiB(pr) => { +// pr.size_in_pages() +// } +// PageRangeSized::Huge2MiB(pr) => { +// pr.size_in_pages() +// } +// PageRangeSized::Huge1GiB(pr) => { +// pr.size_in_pages() +// } +// } +// } + +// /// Returns the starting `Page` in this range of pages. +// /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. +// pub fn start(&self) -> &Page { +// match self { +// PageRangeSized::Normal4KiB(pr) => { +// pr.start() +// } +// _ => { +// panic!("Attempt to get the start of a huge page range as a 4KiB page."); +// } +// } +// } + +// /// Returns the ending `Page` (inclusive) in this range of pages. +// /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. +// pub fn end(&self) -> &Page { +// match self { +// PageRangeSized::Normal4KiB(pr) => { +// pr.end() +// } +// _ => { +// panic!("Attempt to get the end of a huge page range as a 4KiB page."); +// } +// } +// } +// } /// A range of contiguous pages. /// @@ -1136,6 +1136,45 @@ pub fn allocate_pages_by_bytes_in_range( .map(|(ap, _action)| ap) } +/// Allocates the given number of 2MB huge pages with no constraints on the starting virtual address. +/// +/// See [`allocate_pages_deferred()`](fn.allocate_pages_deferred.html) for more details. +pub fn allocate_2mb_pages(num_pages: usize) -> Option { + let huge_num_pages = num_pages * 512; + let ap = allocate_pages_deferred(AllocationRequest::AlignedTo { alignment_4k_pages: 512 }, huge_num_pages) + .map(|(ap, _action)| ap) + .ok(); + match ap { + None => { + None + } + Some(mut p) => { // Since this function converts *this* AllocatedPages, it needs to be + // mutable + p.to_2mb_allocated_pages(); + Some(p) + } + } +} + +/// Allocates the given number of 1GB huge pages with no constraints on the starting virtual address. +/// +/// See [`allocate_pages_deferred()`](fn.allocate_pages_deferred.html) for more details. +pub fn allocate_1gb_pages(num_pages: usize) -> Option { + let huge_num_pages = num_pages * 512 * 512; + let ap = allocate_pages_deferred(AllocationRequest::AlignedTo { alignment_4k_pages: 512 * 512 }, huge_num_pages) + .map(|(ap, _action)| ap) + .ok(); + match ap { + None => { + None + } + Some(mut p) => { // Since this function converts *this* AllocatedPages, it needs to be + // mutable + p.to_1gb_allocated_pages(); + Some(p) + } + } +} /// Converts the page allocator from using static memory (a primitive array) to dynamically-allocated memory. 
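A brief usage sketch for the two new huge-page allocation helpers (illustrative; only the function names and the 512 / 512*512 page math come from the code above, the calling context is assumed):

use page_allocator::{allocate_2mb_pages, allocate_1gb_pages};

// `num_pages` counts huge pages: one 2 MiB page reserves 512 aligned 4 KiB pages,
// one 1 GiB page reserves 512 * 512 of them, and the returned AllocatedPages has
// already been converted via to_2mb_allocated_pages() / to_1gb_allocated_pages().
let _two_mb = allocate_2mb_pages(1).ok_or("no free 2 MiB-aligned virtual region")?;
let _one_gb = allocate_1gb_pages(1).ok_or("no free 1 GiB-aligned virtual region")?;
// Either result can then be passed to map_allocated_pages_to() together with a
// contiguous, suitably aligned AllocatedFrames range, as the test applications do.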
/// diff --git a/libs/range_inclusive/src/lib.rs b/libs/range_inclusive/src/lib.rs index 9b5a83cd73..77f56cb6e4 100644 --- a/libs/range_inclusive/src/lib.rs +++ b/libs/range_inclusive/src/lib.rs @@ -112,6 +112,12 @@ pub struct RangeInclusiveIterator { end: Idx } +impl RangeInclusiveIterator { + pub fn current(&self) -> &A { + &self.current + } +} + impl Iterator for RangeInclusiveIterator { type Item = A; From 4bb17e96bddc4d0fd3297fb67ff13f0ca4400c40 Mon Sep 17 00:00:00 2001 From: NIMogen Date: Sun, 24 Sep 2023 14:33:56 -0700 Subject: [PATCH 4/9] clean up incidental changes --- kernel/nano_core/linker_higher_half-x86_64.ld | 1 - kernel/page_allocator/src/lib.rs | 183 +----------------- libs/range_inclusive/src/lib.rs | 6 - 3 files changed, 1 insertion(+), 189 deletions(-) diff --git a/kernel/nano_core/linker_higher_half-x86_64.ld b/kernel/nano_core/linker_higher_half-x86_64.ld index ffc40db86c..c0635f63bb 100644 --- a/kernel/nano_core/linker_higher_half-x86_64.ld +++ b/kernel/nano_core/linker_higher_half-x86_64.ld @@ -113,7 +113,6 @@ SECTIONS { } } - /* These definitions must be placed at the end of the file so that the .cls, .tdata, and .tbss sections are placed in the correct order. */ __THESEUS_CLS_SIZE = SIZEOF(.cls); diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index 1900e73633..df5fd54223 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -30,14 +30,12 @@ use intrusive_collections::Bound; mod static_array_rb_tree; // mod static_array_linked_list; -use core::{borrow::Borrow, cmp::{Ordering, max, min}, fmt, ops::{Deref, DerefMut}}; +use core::{borrow::Borrow, cmp::{Ordering, max, min}, fmt, ops::{Deref, DerefMut}, convert::TryFrom}; use kernel_config::memory::*; use memory_structs::{VirtualAddress, Page, PageRange, Page1G, Page2M, Page4K, MemChunkSize, PageRangeSized}; use spin::{Mutex, Once}; use static_array_rb_tree::*; -use core::convert::TryFrom; - /// Certain regions are pre-designated for special usage, specifically the kernel's initial identity mapping. /// They will be allocated from if an address within them is specifically @@ -141,185 +139,6 @@ pub fn init(end_vaddr_of_low_designated_region: VirtualAddress) -> Result<(), &' Ok(()) } -/// An enum used to wrap the generic PageRange variants corresponding to different page sizes. -/// Additional methods corresponding to PageRange methods are provided in order to destructure the enum variants. -// #[derive(Debug, Clone, PartialEq, Eq)] -// pub enum PageRangeSized { -// Normal4KiB(PageRange), -// Huge2MiB(PageRange), -// Huge1GiB(PageRange), -// } - -// // These methods mostly destructure the enum in order to call internal methods -// impl PageRangeSized { -// /// Get the size of the pages for the contained PageRange -// pub fn page_size(&self) -> MemChunkSize { -// match self { -// PageRangeSized::Normal4KiB(pr) => { -// pr.start().page_size() -// } -// PageRangeSized::Huge2MiB(pr) => { -// pr.start().page_size() -// } -// PageRangeSized::Huge1GiB(pr) => { -// pr.start().page_size() -// } -// } -// } - -// /// Returns a reference to the contained PageRange holding 4kb pages. Returns None if called on a PageRange holding huge pages. 
-// pub fn range(&self) -> Option<&PageRange> { -// match self { -// PageRangeSized::Normal4KiB(pr) => { -// Some(pr) -// } -// _ => { -// None -// } -// } -// } - -// /// range() equivalent for 2MiB page ranges -// pub fn range_2mb(&self) -> Result, &'static str> { -// match self { -// PageRangeSized::Huge2MiB(pr) => { -// Ok(pr.clone()) -// } -// _ => { -// Err("Called range_2mb on a PageRange with a size other than 2mb") -// } -// } -// } - -// /// range() equivalent for 1GiB page ranges -// pub fn range_1gb(&self) -> Result, &'static str> { -// match self { -// PageRangeSized::Huge1GiB(pr) => { -// Ok(pr.clone()) -// } -// _ => { -// Err("Called range_1gb on a PageRange with a size other than 1gb") -// } -// } -// } - -// pub fn contains(&self, page: &Page) -> bool { -// match self { -// PageRangeSized::Normal4KiB(pr) => { -// pr.contains(page) -// } -// // page is a Page, so we need to perform a temporary conversion for other sizes -// PageRangeSized::Huge2MiB(pr) => { -// let pr_4k = PageRange::::from(pr.clone()); -// pr_4k.contains(page) -// } -// PageRangeSized::Huge1GiB(pr) => { -// let pr_4k = PageRange::::from(pr.clone()); -// pr_4k.contains(page) -// } -// } -// } - -// pub const fn offset_of_address(&self, addr: VirtualAddress) -> Option { -// match self { -// PageRangeSized::Normal4KiB(pr) => { -// pr.offset_of_address(addr) -// } -// PageRangeSized::Huge2MiB(pr) => { -// pr.offset_of_address(addr) -// } -// PageRangeSized::Huge1GiB(pr) => { -// pr.offset_of_address(addr) -// } -// } -// } - -// pub const fn address_at_offset(&self, offset: usize) -> Option { -// match self { -// PageRangeSized::Normal4KiB(pr) => { -// pr.address_at_offset(offset) -// } -// PageRangeSized::Huge2MiB(pr) => { -// pr.address_at_offset(offset) -// } -// PageRangeSized::Huge1GiB(pr) => { -// pr.address_at_offset(offset) -// } -// } -// } - -// /// Returns the starting `VirtualAddress` in this range of pages. -// pub fn start_address(&self) -> VirtualAddress { -// match self { -// PageRangeSized::Normal4KiB(pr) => { -// pr.start_address() -// } -// PageRangeSized::Huge2MiB(pr) => { -// pr.start_address() -// } -// PageRangeSized::Huge1GiB(pr) => { -// pr.start_address() -// } -// } -// } - -// /// Returns the size in bytes of this range of pages. -// pub fn size_in_bytes(&self) -> usize { -// match self { -// PageRangeSized::Normal4KiB(pr) => { -// pr.size_in_bytes() -// } -// PageRangeSized::Huge2MiB(pr) => { -// pr.size_in_bytes() -// } -// PageRangeSized::Huge1GiB(pr) => { -// pr.size_in_bytes() -// } -// } -// } - -// /// Returns the size in number of pages of this range of pages. -// pub fn size_in_pages(&self) -> usize { -// match self { -// PageRangeSized::Normal4KiB(pr) => { -// pr.size_in_pages() -// } -// PageRangeSized::Huge2MiB(pr) => { -// pr.size_in_pages() -// } -// PageRangeSized::Huge1GiB(pr) => { -// pr.size_in_pages() -// } -// } -// } - -// /// Returns the starting `Page` in this range of pages. -// /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. -// pub fn start(&self) -> &Page { -// match self { -// PageRangeSized::Normal4KiB(pr) => { -// pr.start() -// } -// _ => { -// panic!("Attempt to get the start of a huge page range as a 4KiB page."); -// } -// } -// } - -// /// Returns the ending `Page` (inclusive) in this range of pages. -// /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. 
-// pub fn end(&self) -> &Page { -// match self { -// PageRangeSized::Normal4KiB(pr) => { -// pr.end() -// } -// _ => { -// panic!("Attempt to get the end of a huge page range as a 4KiB page."); -// } -// } -// } -// } - /// A range of contiguous pages. /// /// # Ordering and Equality diff --git a/libs/range_inclusive/src/lib.rs b/libs/range_inclusive/src/lib.rs index 77f56cb6e4..9b5a83cd73 100644 --- a/libs/range_inclusive/src/lib.rs +++ b/libs/range_inclusive/src/lib.rs @@ -112,12 +112,6 @@ pub struct RangeInclusiveIterator { end: Idx } -impl RangeInclusiveIterator { - pub fn current(&self) -> &A { - &self.current - } -} - impl Iterator for RangeInclusiveIterator { type Item = A; From 51501b1c6135c4bf5eb864aa1dc4f6d52a34dc35 Mon Sep 17 00:00:00 2001 From: NIMogen Date: Sun, 24 Sep 2023 14:49:23 -0700 Subject: [PATCH 5/9] added docstrings --- kernel/memory_structs/src/lib.rs | 36 +++++++++++++++----------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/kernel/memory_structs/src/lib.rs b/kernel/memory_structs/src/lib.rs index aa265b983c..ca65807ffd 100644 --- a/kernel/memory_structs/src/lib.rs +++ b/kernel/memory_structs/src/lib.rs @@ -535,7 +535,6 @@ impl Page
{ macro_rules! implement_page_frame_range { ($TypeName:ident, $desc:literal, $short:ident, $chunk:ident, $address:ident) => { paste! { // using the paste crate's macro for easy concatenation - #[doc = "A range of [`" $chunk "`]s that are contiguous in " $desc " memory."] #[derive(Clone, PartialEq, Eq)] pub struct $TypeName(RangeInclusive<$chunk::
>); @@ -749,8 +748,8 @@ macro_rules! implement_page_frame_range { } } - /// An enum used to wrap the generic $TypeName variants corresponding to different page sizes. - /// Additional methods corresponding to $TypeName methods are provided in order to destructure the enum variants. + #[doc = "An enum used to wrap the generic `" $TypeName "` variants corresponding to different `" $chunk "` sizes. \ + Additional methods are provided in order to destructure the enum variants."] #[derive(Debug, Clone, PartialEq, Eq)] pub enum [<$TypeName Sized>] { Normal4KiB($TypeName), @@ -758,9 +757,8 @@ macro_rules! implement_page_frame_range { Huge1GiB($TypeName), } - // These methods mostly destructure the enum in order to call internal methods impl [<$TypeName Sized>] { - /// Get the size of the pages for the contained $TypeName + #[doc = "Get the size of the pages for the contained `" $TypeName"` ." pub fn page_size(&self) -> MemChunkSize { match self { Self::Normal4KiB(pr) => { @@ -775,7 +773,7 @@ macro_rules! implement_page_frame_range { } } - /// Returns a reference to the contained $TypeName holding 4kb pages. Returns None if called on a $TypeName holding huge pages. + #[doc = "Returns a reference to the contained `" $TypeName "` holding 4kb `" $chunk "`s. Returns None if called on a `" $TypeName "` holding huge pages."] pub fn range(&self) -> Option<&$TypeName> { match self { Self::Normal4KiB(pr) => { @@ -787,7 +785,7 @@ macro_rules! implement_page_frame_range { } } - /// range() equivalent for 2MiB page ranges + #[doc = "range() equivalent for 2MiB memory ranges"] pub fn range_2mb(&self) -> Result<$TypeName, &'static str> { match self { Self::Huge2MiB(pr) => { @@ -799,7 +797,7 @@ macro_rules! implement_page_frame_range { } } - /// range() equivalent for 1GiB page ranges + #[doc = "range() equivalent for 1GiB memory ranges"] pub fn range_1gb(&self) -> Result<$TypeName, &'static str> { match self { Self::Huge1GiB(pr) => { @@ -856,7 +854,7 @@ macro_rules! implement_page_frame_range { } } - /// Returns the starting `VirtualAddress` in this range of pages. + #[doc = "Returns the starting `" $address "` in this range."] pub fn start_address(&self) -> $address { match self { Self::Normal4KiB(pr) => { @@ -871,7 +869,7 @@ macro_rules! implement_page_frame_range { } } - /// Returns the size in bytes of this range of pages. + #[doc = "Returns the size in bytes of this range."] pub fn size_in_bytes(&self) -> usize { match self { Self::Normal4KiB(pr) => { @@ -886,7 +884,7 @@ macro_rules! implement_page_frame_range { } } - /// Returns the size in number of pages of this range of pages. + #[doc = "Returns the size, in number of `" $chunk "`s, of this range."] pub fn [](&self) -> usize { match self { Self::Normal4KiB(pr) => { @@ -901,8 +899,8 @@ macro_rules! implement_page_frame_range { } } - /// Returns the starting `$chunk` in this range of pages. - /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. + #[doc = "Returns the starting `" $chunk" ` in this range. TODO: Find an alternative to panic when called on wrong size." + // TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. pub fn start(&self) -> &$chunk { match self { Self::Normal4KiB(pr) => { @@ -914,8 +912,8 @@ macro_rules! implement_page_frame_range { } } - /// Returns the ending `$chunk` (inclusive) in this range of pages. 
- /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. + #[doc = "Returns the ending `" $chunk "` (inclusive) in this range. TODO: Find an alternative to panic when called on wrong size."] + // TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. pub fn end(&self) -> &$chunk { match self { Self::Normal4KiB(pr) => { @@ -927,6 +925,7 @@ macro_rules! implement_page_frame_range { } } + #[doc = "start() equivalent for 2mb `" $TypeName "`s. TODO: Find an alternative to panic when called on wrong size."] pub fn start_2m(&self) -> &$chunk { match self { Self::Huge2MiB(pr) => { @@ -938,8 +937,7 @@ macro_rules! implement_page_frame_range { } } - /// Returns the ending `$chunk` (inclusive) in this range of pages. - /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. + #[doc = "start() equivalent for 2mb `" $TypeName "`s. TODO: Find an alternative to panic when called on wrong size."] pub fn end_2m(&self) -> &$chunk { match self { Self::Huge2MiB(pr) => { @@ -951,6 +949,7 @@ macro_rules! implement_page_frame_range { } } + #[doc = "start() equivalent for 1gb `" $TypeName "`s. TODO: Find an alternative to panic when called on wrong size."] pub fn start_1g(&self) -> &$chunk { match self { Self::Huge1GiB(pr) => { @@ -962,8 +961,7 @@ macro_rules! implement_page_frame_range { } } - /// Returns the ending `$chunk` (inclusive) in this range of pages. - /// TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. + #[doc = "start() equivalent for 1gb `" $TypeName "`s. TODO: Find an alternative to panic when called on wrong size."] pub fn end_1g(&self) -> &$chunk { match self { Self::Huge1GiB(pr) => { From 608a09922863939fce76a970191c17e1a128c6ee Mon Sep 17 00:00:00 2001 From: NIMogen Date: Sun, 24 Sep 2023 14:49:56 -0700 Subject: [PATCH 6/9] comment deletions --- kernel/memory_structs/src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/memory_structs/src/lib.rs b/kernel/memory_structs/src/lib.rs index ca65807ffd..cdac01f92b 100644 --- a/kernel/memory_structs/src/lib.rs +++ b/kernel/memory_structs/src/lib.rs @@ -900,7 +900,6 @@ macro_rules! implement_page_frame_range { } #[doc = "Returns the starting `" $chunk" ` in this range. TODO: Find an alternative to panic when called on wrong size." - // TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. pub fn start(&self) -> &$chunk { match self { Self::Normal4KiB(pr) => { @@ -913,7 +912,6 @@ macro_rules! implement_page_frame_range { } #[doc = "Returns the ending `" $chunk "` (inclusive) in this range. TODO: Find an alternative to panic when called on wrong size."] - // TODO: This function panics if called on a huge page of any size, and it really shouldn't. Use an alternative method for handling this case. 
pub fn end(&self) -> &$chunk { match self { Self::Normal4KiB(pr) => { From af52fd35a135f889c05f6ce756c1c11ad242bbe7 Mon Sep 17 00:00:00 2001 From: NIMogen Date: Sun, 24 Sep 2023 14:53:43 -0700 Subject: [PATCH 7/9] removed leftover debugging changes --- .../src/static_array_rb_tree.rs | 31 +++++++++---------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/kernel/frame_allocator/src/static_array_rb_tree.rs b/kernel/frame_allocator/src/static_array_rb_tree.rs index da4d0479ee..70c5c8c077 100644 --- a/kernel/frame_allocator/src/static_array_rb_tree.rs +++ b/kernel/frame_allocator/src/static_array_rb_tree.rs @@ -51,10 +51,10 @@ impl Wrapper { /// A convenience wrapper that abstracts either an intrustive `RBTree` or a primitive array `[T; N]`. -/// -/// This allows the caller to create an array statically in a const context, -/// and then abstract over both that and the inner `RBTree` when using it. -/// +/// +/// This allows the caller to create an array statically in a const context, +/// and then abstract over both that and the inner `RBTree` when using it. +/// /// TODO: use const generics to allow this to be of any arbitrary size beyond 32 elements. pub struct StaticArrayRBTree(pub(crate) Inner); @@ -89,7 +89,7 @@ impl StaticArrayRBTree { } -impl StaticArrayRBTree { +impl StaticArrayRBTree { /// Push the given `value` into this collection. /// /// If the inner collection is an array, it is pushed onto the back of the array. @@ -105,7 +105,7 @@ impl StaticArrayRBTree { return Ok(ValueRefMut::Array(elem)); } } - log::error!("Out of space in StaticArrayRBTree's inner array, failed to insert value. {:?}", value); + log::error!("Out of space in StaticArrayRBTree's inner array, failed to insert value."); Err(value) } Inner::RBTree(tree) => { @@ -118,8 +118,8 @@ impl StaticArrayRBTree { /// Converts the contained collection from a primitive array into a RBTree. /// If the contained collection is already using heap allocation, this is a no-op. - /// - /// Call this function once heap allocation is available. + /// + /// Call this function once heap allocation is available. pub fn convert_to_heap_allocated(&mut self) { let new_tree = match &mut self.0 { Inner::Array(arr) => { @@ -136,9 +136,9 @@ impl StaticArrayRBTree { *self = StaticArrayRBTree(Inner::RBTree(new_tree)); } - /// Returns the number of elements in this collection. + /// Returns the number of elements in this collection. /// - /// Note that this an O(N) linear-time operation, not an O(1) constant-time operation. + /// Note that this an O(N) linear-time operation, not an O(1) constant-time operation. /// This is because the internal collection types do not separately maintain their current length. pub fn len(&self) -> usize { match &self.0 { @@ -176,7 +176,6 @@ impl StaticArrayRBTree { // } } -#[derive(Debug)] pub enum RemovedValue { Array(Option), RBTree(Option>>), @@ -191,13 +190,13 @@ impl RemovedValue { } } -/// A mutable reference to a value in the `StaticArrayRBTree`. +/// A mutable reference to a value in the `StaticArrayRBTree`. pub enum ValueRefMut<'list, T: Ord> { Array(&'list mut Option), RBTree(CursorMut<'list, WrapperAdapter>), } impl <'list, T: Ord> ValueRefMut<'list, T> { - /// Removes this value from the collection and returns the removed value, if one existed. + /// Removes this value from the collection and returns the removed value, if one existed. 
pub fn remove(&mut self) -> RemovedValue { match self { Self::Array(ref mut arr_ref) => { @@ -210,9 +209,9 @@ impl <'list, T: Ord> ValueRefMut<'list, T> { } - /// Removes this value from the collection and replaces it with the given `new_value`. + /// Removes this value from the collection and replaces it with the given `new_value`. /// - /// Returns the removed value, if one existed. If not, the + /// Returns the removed value, if one existed. If not, the #[allow(dead_code)] pub fn replace_with(&mut self, new_value: T) -> Result, T> { match self { @@ -234,4 +233,4 @@ impl <'list, T: Ord> ValueRefMut<'list, T> { Self::RBTree(ref cursor_mut) => cursor_mut.get().map(|w| w.deref()), } } -} \ No newline at end of file +} From 813b44689587454fa9ee8d656b73aa1d92d32dbc Mon Sep 17 00:00:00 2001 From: NIMogen Date: Sun, 24 Sep 2023 14:58:29 -0700 Subject: [PATCH 8/9] restore whitespace --- .../src/static_array_rb_tree.rs | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/kernel/frame_allocator/src/static_array_rb_tree.rs b/kernel/frame_allocator/src/static_array_rb_tree.rs index 70c5c8c077..2afadf0a08 100644 --- a/kernel/frame_allocator/src/static_array_rb_tree.rs +++ b/kernel/frame_allocator/src/static_array_rb_tree.rs @@ -51,10 +51,10 @@ impl Wrapper { /// A convenience wrapper that abstracts either an intrustive `RBTree` or a primitive array `[T; N]`. -/// -/// This allows the caller to create an array statically in a const context, -/// and then abstract over both that and the inner `RBTree` when using it. -/// +/// +/// This allows the caller to create an array statically in a const context, +/// and then abstract over both that and the inner `RBTree` when using it. +/// /// TODO: use const generics to allow this to be of any arbitrary size beyond 32 elements. pub struct StaticArrayRBTree(pub(crate) Inner); @@ -118,8 +118,8 @@ impl StaticArrayRBTree { /// Converts the contained collection from a primitive array into a RBTree. /// If the contained collection is already using heap allocation, this is a no-op. - /// - /// Call this function once heap allocation is available. + /// + /// Call this function once heap allocation is available. pub fn convert_to_heap_allocated(&mut self) { let new_tree = match &mut self.0 { Inner::Array(arr) => { @@ -136,9 +136,9 @@ impl StaticArrayRBTree { *self = StaticArrayRBTree(Inner::RBTree(new_tree)); } - /// Returns the number of elements in this collection. + /// Returns the number of elements in this collection. /// - /// Note that this an O(N) linear-time operation, not an O(1) constant-time operation. + /// Note that this an O(N) linear-time operation, not an O(1) constant-time operation. /// This is because the internal collection types do not separately maintain their current length. pub fn len(&self) -> usize { match &self.0 { @@ -176,6 +176,7 @@ impl StaticArrayRBTree { // } } + pub enum RemovedValue { Array(Option), RBTree(Option>>), @@ -190,7 +191,7 @@ impl RemovedValue { } } -/// A mutable reference to a value in the `StaticArrayRBTree`. +/// A mutable reference to a value in the `StaticArrayRBTree`. pub enum ValueRefMut<'list, T: Ord> { Array(&'list mut Option), RBTree(CursorMut<'list, WrapperAdapter>), @@ -209,9 +210,9 @@ impl <'list, T: Ord> ValueRefMut<'list, T> { } - /// Removes this value from the collection and replaces it with the given `new_value`. + /// Removes this value from the collection and replaces it with the given `new_value`. /// - /// Returns the removed value, if one existed. 
If not, the + /// Returns the removed value, if one existed. If not, the #[allow(dead_code)] pub fn replace_with(&mut self, new_value: T) -> Result, T> { match self { From a11623bedae30d1f0d4108e2e6138c35df2a8bf3 Mon Sep 17 00:00:00 2001 From: NIMogen Date: Sun, 24 Sep 2023 15:00:42 -0700 Subject: [PATCH 9/9] more whitespace --- kernel/frame_allocator/src/static_array_rb_tree.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/frame_allocator/src/static_array_rb_tree.rs b/kernel/frame_allocator/src/static_array_rb_tree.rs index 2afadf0a08..2d9d0299dd 100644 --- a/kernel/frame_allocator/src/static_array_rb_tree.rs +++ b/kernel/frame_allocator/src/static_array_rb_tree.rs @@ -197,7 +197,7 @@ pub enum ValueRefMut<'list, T: Ord> { RBTree(CursorMut<'list, WrapperAdapter>), } impl <'list, T: Ord> ValueRefMut<'list, T> { - /// Removes this value from the collection and returns the removed value, if one existed. + /// Removes this value from the collection and returns the removed value, if one existed. pub fn remove(&mut self) -> RemovedValue { match self { Self::Array(ref mut arr_ref) => { @@ -234,4 +234,4 @@ impl <'list, T: Ord> ValueRefMut<'list, T> { Self::RBTree(ref cursor_mut) => cursor_mut.get().map(|w| w.deref()), } } -} +} \ No newline at end of file
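
Reviewer note (not part of the patches above): the `[<$TypeName Sized>]` enum documented in patch 5 (e.g. `PageRangeSized`) lets callers carry a range of 4 KiB, 2 MiB, or 1 GiB chunks behind a single type and then destructure it via `page_size()`, `range()`, `range_2mb()`, and `range_1gb()`. The following is a deliberately simplified, self-contained Rust model of that pattern, not the real `memory_structs` types: the `PageRange` fields, the `MemChunkSize` variant names, and the hard-coded size per variant are assumptions made only so the sketch compiles on its own with nothing but std.

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum MemChunkSize { Normal4K, Huge2M, Huge1G }

/// Stand-in for the real `PageRange`; it only records a start page number and a length.
#[derive(Debug, Clone)]
struct PageRange { start: usize, num_pages: usize }

/// Simplified model of the generated `PageRangeSized` wrapper enum.
#[derive(Debug, Clone)]
enum PageRangeSized {
    Normal4KiB(PageRange),
    Huge2MiB(PageRange),
    Huge1GiB(PageRange),
}

impl PageRangeSized {
    /// Mirrors `page_size()`: report the chunk size of the wrapped range.
    fn page_size(&self) -> MemChunkSize {
        match self {
            Self::Normal4KiB(_) => MemChunkSize::Normal4K,
            Self::Huge2MiB(_)   => MemChunkSize::Huge2M,
            Self::Huge1GiB(_)   => MemChunkSize::Huge1G,
        }
    }

    /// Mirrors `range()`: only a 4 KiB range is returned by reference; huge ranges yield `None`.
    fn range(&self) -> Option<&PageRange> {
        match self {
            Self::Normal4KiB(pr) => Some(pr),
            _ => None,
        }
    }

    /// Mirrors `range_2mb()`: a 2 MiB range is returned by clone, anything else is an error.
    fn range_2mb(&self) -> Result<PageRange, &'static str> {
        match self {
            Self::Huge2MiB(pr) => Ok(pr.clone()),
            _ => Err("called range_2mb on a range with a size other than 2 MiB"),
        }
    }
}

fn main() {
    let small = PageRangeSized::Normal4KiB(PageRange { start: 0x10, num_pages: 4 });
    assert!(small.range().is_some());        // 4 KiB ranges are handed back directly

    let huge = PageRangeSized::Huge2MiB(PageRange { start: 0x200, num_pages: 512 });
    assert_eq!(huge.page_size(), MemChunkSize::Huge2M);
    assert!(huge.range().is_none());         // not a 4 KiB range
    let pr = huge.range_2mb().unwrap();      // destructure into the wrapped 2 MiB range
    println!("2 MiB range starts at page {:#x} and spans {} pages", pr.start, pr.num_pages);

    let giant = PageRangeSized::Huge1GiB(PageRange { start: 0, num_pages: 512 * 512 });
    assert_eq!(giant.page_size(), MemChunkSize::Huge1G);
}

In the patch itself, `page_size()` delegates to `pr.start().page_size()` instead of hard-coding the answer per variant, and the same enum shape is stamped out for frame ranges as well, since the macro is generic over `$TypeName` and `$chunk`.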
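
Patches 5 and 6 also replace plain `///` comments with `#[doc = ...]` attributes inside `paste!` so that `$TypeName`, `$chunk`, and `$address` are spliced into the rendered documentation. Below is a minimal, standalone illustration of that `paste` doc-concatenation feature (requires the `paste` crate, which the macro in the patch already uses); `make_range_type!`, `DemoPage`, and `DemoPageRange` are invented names for this sketch only. One thing worth double-checking in the actual hunks: a doc attribute must still be closed with `]`, and a couple of the new `page_size()`/`start()` attributes above appear to end at the closing quote without one (possibly just lost in this copy of the patch), which would not compile as shown.

use paste::paste;

macro_rules! make_range_type {
    ($TypeName:ident, $chunk:ident) => {
        paste! {
            #[doc = "A contiguous, inclusive range of [`" $chunk "`]s."]
            pub struct $TypeName {
                start: $chunk,
                end: $chunk,
            }

            impl $TypeName {
                #[doc = "Returns the starting `" $chunk "` in this range."]
                pub fn start(&self) -> &$chunk { &self.start }

                #[doc = "Returns the ending `" $chunk "` (inclusive) in this range."]
                pub fn end(&self) -> &$chunk { &self.end }
            }
        }
    };
}

/// A toy page type used only to make the macro expansion concrete.
pub struct DemoPage(pub usize);

make_range_type!(DemoPageRange, DemoPage);

fn main() {
    let r = DemoPageRange { start: DemoPage(0), end: DemoPage(511) };
    println!("range covers pages {}..={}", r.start().0, r.end().0);
}

The generated rustdoc for `DemoPageRange::start()` reads "Returns the starting `DemoPage` in this range.", which is exactly the effect the patch is after for `Page`, `Frame`, and their range types.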