From 7a8d64aa3aecdae5ed1231ebf46adcceede62713 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Wed, 20 Sep 2023 03:47:21 +1000 Subject: [PATCH 01/25] Add automated QEMU-based testing to CI (#1041) * Added `qemu_test` application that automatically runs all tests and returns a specific exit code from QEMU based on test output. * Added a `test` Make target that enables and runs `qemu_test`. * Added a CI workflow that runs the `test` target * Changed `test_mlx5`, `test_ixgbe`, and `test_block_io` to return an exit code of 0 if the required devices aren't connected. * Changed `test_identity_mapping`, `test_aligned_page_allocation`, and `test_filerw` to fail rather than print if they encounter an error. * Renamed `tls_test` to `test_tls` so it is detected by `qemu_test`. * Changed `test_channel` and `test_tls` to run all tests rather than specifying specific tests in the arguments. * Renamed `test_serial_echo` to `serial_echo` because it isn't really a test with defined success and failure conditions. * Changed `test_task_cancel` to always return 0, because task cancellation is not yet implemented in the mainline. * Skip `test_channel`, as it currently does not work. Signed-off-by: Klimenty Tsoutsman --- .github/workflows/check-clippy.yaml | 6 +- .github/workflows/docs.yaml | 4 +- .github/workflows/test.yaml | 20 +++ Cargo.lock | 70 ++++++---- Makefile | 11 ++ applications/qemu_test/Cargo.toml | 13 ++ applications/qemu_test/src/lib.rs | 120 ++++++++++++++++++ .../Cargo.toml | 2 +- .../src/lib.rs | 0 .../test_aligned_page_allocation/src/lib.rs | 18 ++- applications/test_block_io/Cargo.toml | 1 + applications/test_block_io/src/lib.rs | 19 ++- applications/test_channel/src/lib.rs | 51 ++------ applications/test_filerw/src/lib.rs | 10 +- applications/test_identity_mapping/src/lib.rs | 10 +- applications/test_ixgbe/src/lib.rs | 5 +- applications/test_mlx5/src/lib.rs | 9 +- applications/test_task_cancel/src/lib.rs | 6 + .../{tls_test => test_tls}/Cargo.toml | 2 +- .../{tls_test => test_tls}/src/lib.rs | 12 +- kernel/first_application/Cargo.toml | 1 + kernel/first_application/src/lib.rs | 3 +- theseus_features/Cargo.toml | 10 +- 23 files changed, 283 insertions(+), 120 deletions(-) create mode 100644 .github/workflows/test.yaml create mode 100644 applications/qemu_test/Cargo.toml create mode 100644 applications/qemu_test/src/lib.rs rename applications/{test_serial_echo => serial_echo}/Cargo.toml (95%) rename applications/{test_serial_echo => serial_echo}/src/lib.rs (100%) rename applications/{tls_test => test_tls}/Cargo.toml (94%) rename applications/{tls_test => test_tls}/src/lib.rs (86%) diff --git a/.github/workflows/check-clippy.yaml b/.github/workflows/check-clippy.yaml index a4eb6ef4ed..6456b4ca93 100644 --- a/.github/workflows/check-clippy.yaml +++ b/.github/workflows/check-clippy.yaml @@ -11,10 +11,10 @@ jobs: run: | git submodule update --init --recursive - name: "Install nasm" - run: sudo apt install nasm + run: | + sudo apt update + sudo apt install nasm - name: "Run Clippy" - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} working-directory: . run: | make clippy ARCH=x86_64 diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index d8f4764c90..edf5b21a5a 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -13,7 +13,9 @@ jobs: submodules: recursive - name: "Install nasm" - run: sudo apt install nasm + run: | + sudo apt update + sudo apt install nasm - name: Cache build artifacts uses: actions/cache@v3 diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 0000000000..b2e2f638f1 --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,20 @@ +name: QEMU Test +on: + pull_request: + types: [synchronize, opened, reopened] +jobs: + run-tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: "Initialize git submodules" + run: | + git submodule update --init --recursive + - name: "Install dependencies" + run: | + sudo apt update + sudo apt install make gcc nasm pkg-config grub-pc-bin mtools xorriso qemu qemu-kvm wget + - name: "Run tests" + working-directory: . + run: make test + timeout-minutes: 10 diff --git a/Cargo.lock b/Cargo.lock index 182e3e0153..65cc0e49c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1181,6 +1181,7 @@ dependencies = [ "log", "mod_mgmt", "path", + "qemu_test", "shell", "spawn", ] @@ -2837,6 +2838,23 @@ dependencies = [ "task", ] +[[package]] +name = "qemu-exit" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bb0fd6580eeed0103c054e3fba2c2618ff476943762f28a645b63b8692b21c9" + +[[package]] +name = "qemu_test" +version = "0.1.0" +dependencies = [ + "app_io", + "path", + "qemu-exit", + "spawn", + "task", +] + [[package]] name = "qp-trie" version = "0.8.1" @@ -3297,6 +3315,19 @@ dependencies = [ "syn 1.0.98", ] +[[package]] +name = "serial_echo" +version = "0.1.0" +dependencies = [ + "app_io", + "core2", + "io", + "log", + "serial_port", + "sync_irq", + "task", +] + [[package]] name = "serial_port" version = "0.1.0" @@ -3894,19 +3925,6 @@ dependencies = [ "task", ] -[[package]] -name = "test_serial_echo" -version = "0.1.0" -dependencies = [ - "app_io", - "core2", - "io", - "log", - "serial_port", - "sync_irq", - "task", -] - [[package]] name = "test_std_fs" version = "0.1.0" @@ -3948,6 +3966,16 @@ dependencies = [ "task", ] +[[package]] +name = "test_tls" +version = "0.1.0" +dependencies = [ + "app_io", + "log", + "test_thread_local", + "thread_local_macro", +] + [[package]] name = "test_wait_queue" version = "0.1.0" @@ -4012,6 +4040,7 @@ dependencies = [ "date", "deps", "example", + "first_application", "heap_eval", "hello", "hull", @@ -4027,6 +4056,7 @@ dependencies = [ "print_fault_log", "ps", "pwd", + "qemu_test", "raw_mode", "rm", "rq", @@ -4034,6 +4064,7 @@ dependencies = [ "rq_eval", "scheduler_eval", "seconds_counter", + "serial_echo", "shell", "swap", "test_aligned_page_allocation", @@ -4050,15 +4081,14 @@ dependencies = [ "test_preemption_counter", "test_restartable", "test_scheduler", - "test_serial_echo", "test_std_fs", "test_sync_block", "test_task_cancel", "test_thread_local", + "test_tls", "test_wait_queue", "test_wasmtime", "theseus_std", - "tls_test", "unified_channel", "unwind_test", "upd", @@ -4127,16 +4157,6 @@ dependencies = [ "sync_irq", ] -[[package]] -name = "tls_test" -version = "0.1.0" -dependencies = [ - "app_io", - "log", - "test_thread_local", - "thread_local_macro", -] - [[package]] name = "tock-registers" version = "0.7.0" diff --git a/Makefile b/Makefile index 3dc34a2ff5..79ba925199 100644 --- a/Makefile +++ b/Makefile @@ -1079,3 +1079,14 @@ endif @sudo cp -vf $(iso) /var/lib/tftpboot/theseus/ @sudo systemctl restart isc-dhcp-server @sudo systemctl restart tftpd-hpa + +test: export override QEMU_FLAGS += -device isa-debug-exit,iobase=0xf4,iosize=0x04 +test: export override QEMU_FLAGS += -nographic +test: export override FEATURES =--features theseus_tests --features first_application/qemu_test +test: $(iso) + # We exit with an exit code of 0 if QEMU's exit code is 17, and 2 otherwise. + # This is because `qemu_test` uses a value of 0x11 to indicate success. + $(QEMU_BIN) $(QEMU_FLAGS); \ + EXIT_CODE=$$?; \ + test $$EXIT_CODE -eq 17 && exit 0; \ + exit 2 diff --git a/applications/qemu_test/Cargo.toml b/applications/qemu_test/Cargo.toml new file mode 100644 index 0000000000..a279a2eab5 --- /dev/null +++ b/applications/qemu_test/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "qemu_test" +version = "0.1.0" +authors = ["Klim Tsoutsman "] +description = "Automated test runner" +edition = "2021" + +[dependencies] +app_io = { path = "../../kernel/app_io" } +path = { path = "../../kernel/path" } +qemu-exit = "3.0.2" +spawn = { path = "../../kernel/spawn" } +task = { path = "../../kernel/task" } diff --git a/applications/qemu_test/src/lib.rs b/applications/qemu_test/src/lib.rs new file mode 100644 index 0000000000..e6a46f726f --- /dev/null +++ b/applications/qemu_test/src/lib.rs @@ -0,0 +1,120 @@ +//! An automated test runner. +//! +//! The application assumes it is running in a QEMU virtual machine and exits +//! from QEMU with different exit codes depending on whether the tests passed or +//! failed. + +#![no_std] + +use alloc::{boxed::Box, string::String, vec::Vec}; + +use app_io::{print, println}; +use path::Path; +use qemu_exit::{QEMUExit, X86}; +use task::{ExitValue, KillReason}; + +extern crate alloc; + +static QEMU_EXIT_HANDLE: X86 = X86::new(0xf4, 0x11); + +pub fn main(_: Vec) -> isize { + task::set_kill_handler(Box::new(|_| { + QEMU_EXIT_HANDLE.exit_failure(); + })) + .unwrap(); + + let dir = task::get_my_current_task() + .map(|t| t.get_namespace().dir().clone()) + .expect("couldn't get namespace dir"); + + let object_files = dir.lock().list(); + + let test_paths = object_files + .into_iter() + .filter_map(|file_name| { + if file_name.starts_with("test_") { + // We must release the lock prior to calling `get_absolute_path` to avoid + // deadlock. + let file = dir.lock().get_file(file_name.as_ref()).unwrap(); + let path = file.lock().get_absolute_path(); + Some((file_name, Path::new(path))) + } else { + None + } + }) + .collect::>(); + + let total = test_paths.len(); + println!("running {} tests", total); + + let mut num_ignored = 0; + let mut num_failed = 0; + + for (file_name, path) in test_paths.into_iter() { + print!("test {} ... ", path); + if ignore(&file_name) { + num_ignored += 1; + println!("ignored"); + } else { + match run_test(path) { + Ok(_) => println!("ok"), + Err(_) => { + num_failed += 1; + println!("failed"); + } + } + } + } + + let result_str = if num_failed > 0 { "failed" } else { "ok" }; + let num_passed = total - num_failed; + println!( + "test result: {result_str}. {num_passed} passed; {num_failed} failed; {num_ignored} \ + ignored", + ); + + if num_failed == 0 { + QEMU_EXIT_HANDLE.exit_success(); + } else { + QEMU_EXIT_HANDLE.exit_failure(); + } +} + +#[allow(clippy::result_unit_err)] +pub fn run_test(path: Path) -> Result<(), ()> { + match spawn::new_application_task_builder(path, None) + .unwrap() + .argument(Vec::new()) + .spawn() + .unwrap() + .join() + .unwrap() + { + ExitValue::Completed(status) => match status.downcast_ref::() { + Some(0) => Ok(()), + _ => Err(()), + }, + ExitValue::Killed(KillReason::Requested) => unreachable!(), + ExitValue::Killed(KillReason::Panic(_)) => Err(()), + ExitValue::Killed(KillReason::Exception(_)) => Err(()), + } +} + +fn ignore(name: &str) -> bool { + const IGNORED_TESTS: [&str; 3] = [ + // `test_libc` requires extra Make commands to run. + "test_libc", + // `test_panic` panics on success, which isn't easily translatable to + // `ExitValue::Completed(0)`. + "test_panic", + // TODO: Remove + // `test_channel` has a bug that causes deadlock. + "test_channel", + ]; + for test in IGNORED_TESTS { + if name.starts_with(test) { + return true; + } + } + false +} diff --git a/applications/test_serial_echo/Cargo.toml b/applications/serial_echo/Cargo.toml similarity index 95% rename from applications/test_serial_echo/Cargo.toml rename to applications/serial_echo/Cargo.toml index 0cd47b8944..90a169434e 100644 --- a/applications/test_serial_echo/Cargo.toml +++ b/applications/serial_echo/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "test_serial_echo" +name = "serial_echo" version = "0.1.0" authors = ["Kevin Boos "] description = "a simple app for testing serial port I/O using higher-level I/O traits" diff --git a/applications/test_serial_echo/src/lib.rs b/applications/serial_echo/src/lib.rs similarity index 100% rename from applications/test_serial_echo/src/lib.rs rename to applications/serial_echo/src/lib.rs diff --git a/applications/test_aligned_page_allocation/src/lib.rs b/applications/test_aligned_page_allocation/src/lib.rs index ebe99e7c5f..7bf4e09f8d 100644 --- a/applications/test_aligned_page_allocation/src/lib.rs +++ b/applications/test_aligned_page_allocation/src/lib.rs @@ -27,17 +27,15 @@ fn rmain() -> Result<(), &'static str> { for num_pages in TEST_SET.into_iter() { for alignment in TEST_SET.into_iter() { println!("Attempting to allocate {num_pages} pages with alignment of {alignment} 4K pages..."); - match memory::allocate_pages_deferred( - AllocationRequest::AlignedTo { alignment_4k_pages: alignment }, + let (ap, _action) = memory::allocate_pages_deferred( + AllocationRequest::AlignedTo { + alignment_4k_pages: alignment, + }, num_pages, - ) { - Ok((ap, _action)) => { - assert_eq!(ap.start().number() % alignment, 0); - assert_eq!(ap.size_in_pages(), num_pages); - println!(" Success: {ap:?}"); - } - Err(e) => println!(" !! FAILURE: {e:?}"), - } + )?; + assert_eq!(ap.start().number() % alignment, 0); + assert_eq!(ap.size_in_pages(), num_pages); + println!(" Success: {ap:?}"); } } diff --git a/applications/test_block_io/Cargo.toml b/applications/test_block_io/Cargo.toml index 16332bdd81..ec361c3528 100644 --- a/applications/test_block_io/Cargo.toml +++ b/applications/test_block_io/Cargo.toml @@ -3,6 +3,7 @@ name = "test_block_io" version = "0.1.0" authors = ["Kevin Boos "] description = "a simple app for testing IO transfers for block devices" +edition = "2021" [dependencies] core2 = { version = "0.4.0", default-features = false, features = ["alloc", "nightly"] } diff --git a/applications/test_block_io/src/lib.rs b/applications/test_block_io/src/lib.rs index 5c7891dacd..0d5a977b3d 100644 --- a/applications/test_block_io/src/lib.rs +++ b/applications/test_block_io/src/lib.rs @@ -5,28 +5,25 @@ #![no_std] extern crate alloc; -#[macro_use] extern crate log; -// #[macro_use] extern crate app_io; -extern crate task; -extern crate io; -extern crate core2; -extern crate storage_manager; -extern crate ata; - use core::ops::{DerefMut}; use alloc::boxed::Box; use alloc::vec::Vec; use alloc::string::String; +use app_io::println; use ata::AtaDrive; use io::{ByteReader, ByteReaderWrapper, ByteReaderWriterWrapper, ByteWriter, ByteWriterWrapper, Reader, ReaderWriter}; +use log::{debug, error, info, trace}; pub fn main(_args: Vec) -> isize { - - let dev = storage_manager::storage_devices().next() - .expect("no storage devices exist"); + let dev = if let Some(dev) = storage_manager::storage_devices().next() { + dev + } else { + println!("no storage devices connected"); + return 0; + }; { // Call `StorageDevice` trait methods directly diff --git a/applications/test_channel/src/lib.rs b/applications/test_channel/src/lib.rs index de3b61f92a..b93cd9a261 100644 --- a/applications/test_channel/src/lib.rs +++ b/applications/test_channel/src/lib.rs @@ -65,11 +65,6 @@ pub fn main(args: Vec) -> isize { opts.optopt("x", "panic_in_send", "Injects a panic at specified message in sender in multiple tests (default no panic)", "SEND_PANIC"); opts.optopt("y", "panic_in_receive", "Injects a panic at specified message in receiver in multiple tests (default no panic)", "RECEIVE_PANIC"); - opts.optflag("r", "rendezvous", "run the test on the rendezvous-based synchronous channel"); - opts.optflag("a", "asynchronous", "run the test on the asynchronous buffered channel"); - opts.optflag("o", "oneshot", "run the 'oneshot' test variant, in which {ITER} tasks are spawned to send/receive one message each."); - opts.optflag("m", "multiple", "run the 'multiple' test, in which one sender and one receiver task are spawned to send/receive {ITER} messages."); - let matches = match opts.parse(args) { Ok(m) => m, Err(_f) => { @@ -110,49 +105,27 @@ pub fn main(args: Vec) -> isize { } fn rmain(matches: Matches) -> Result<(), &'static str> { - let mut did_something = false; - // If the user has specified panic instances as 'val', 'send_panic_pont' will be 'Some(val)'. // Similarly for 'receive_panic_point' as well. let send_panic_point = matches.opt_str("x").and_then(|i| i.parse::().ok()); let receive_panic_point = matches.opt_str("y").and_then(|i| i.parse::().ok()); - if matches.opt_present("r") { - if matches.opt_present("o") { - did_something = true; - println!("Running rendezvous channel test in oneshot mode."); - for _i in 0 .. iterations!() { - rendezvous_test_oneshot()?; - } - } - if matches.opt_present("m") { - did_something = true; - println!("Running rendezvous channel test in multiple mode."); - rendezvous_test_multiple(send_count!(), receive_count!(), send_panic_point, receive_panic_point)?; - } + println!("Running rendezvous channel test in oneshot mode."); + for _i in 0 .. iterations!() { + rendezvous_test_oneshot()?; } + println!("Running rendezvous channel test in multiple mode."); + rendezvous_test_multiple(send_count!(), receive_count!(), send_panic_point, receive_panic_point)?; - if matches.opt_present("a") { - if matches.opt_present("o") { - did_something = true; - println!("Running asynchronous channel test in oneshot mode."); - for _i in 0 .. iterations!() { - asynchronous_test_oneshot()?; - } - } - if matches.opt_present("m") { - did_something = true; - println!("Running asynchronous channel test in multiple mode."); - asynchronous_test_multiple(send_count!(), receive_count!(), send_panic_point, receive_panic_point)?; - } + println!("Running asynchronous channel test in oneshot mode."); + for _i in 0 .. iterations!() { + asynchronous_test_oneshot()?; } - if did_something { - println!("Test complete."); - Ok(()) - } else { - Err("no action performed, please select a test") - } + println!("Running asynchronous channel test in multiple mode."); + asynchronous_test_multiple(send_count!(), receive_count!(), send_panic_point, receive_panic_point)?; + + Ok(()) } diff --git a/applications/test_filerw/src/lib.rs b/applications/test_filerw/src/lib.rs index 3321c87839..8e144ade4a 100644 --- a/applications/test_filerw/src/lib.rs +++ b/applications/test_filerw/src/lib.rs @@ -97,11 +97,11 @@ fn test_filerw() -> Result<(), &'static str> { } pub fn main(_args: Vec) -> isize { - match test_filerw() { - Ok(()) => { }, - Err(err) => println!("{}", err) + Ok(()) => 0, + Err(err) => { + println!("error {}", err); + -1 + } } - - 0 } diff --git a/applications/test_identity_mapping/src/lib.rs b/applications/test_identity_mapping/src/lib.rs index abb927b278..3e66243459 100644 --- a/applications/test_identity_mapping/src/lib.rs +++ b/applications/test_identity_mapping/src/lib.rs @@ -26,13 +26,9 @@ fn rmain() -> Result<(), &'static str> { let flags = memory::PteFlags::new().valid(true); for num_pages in TEST_SET.into_iter() { println!("Attempting to create identity mapping of {num_pages} pages..."); - match memory::create_identity_mapping(num_pages, flags) { - Ok(mp) => { - assert_eq!(mp.size_in_pages(), num_pages); - println!(" Success: {mp:?}"); - } - Err(e) => println!(" !! FAILURE: {e:?}"), - } + let mp = memory::create_identity_mapping(num_pages, flags)?; + assert_eq!(mp.size_in_pages(), num_pages); + println!(" Success: {mp:?}"); } Ok(()) diff --git a/applications/test_ixgbe/src/lib.rs b/applications/test_ixgbe/src/lib.rs index f46105ad42..898f2c52ad 100644 --- a/applications/test_ixgbe/src/lib.rs +++ b/applications/test_ixgbe/src/lib.rs @@ -73,7 +73,10 @@ fn rmain(matches: &Matches, _opts: &Options) -> Result<(), &'static str> { let (dev_id, mac_address) = { let ixgbe_devs = get_ixgbe_nics_list().ok_or("Ixgbe NICs list not initialized")?; - if ixgbe_devs.is_empty() { return Err("No ixgbe device available"); } + if ixgbe_devs.is_empty() { + println!("no IXGBE devices available"); + return Ok(()); + } let nic = ixgbe_devs[0].lock(); (nic.device_id(), nic.mac_address()) }; diff --git a/applications/test_mlx5/src/lib.rs b/applications/test_mlx5/src/lib.rs index 7bdd84c492..fee04caa91 100644 --- a/applications/test_mlx5/src/lib.rs +++ b/applications/test_mlx5/src/lib.rs @@ -31,8 +31,13 @@ pub fn main(_args: Vec) -> isize { } fn rmain() -> Result<(), &'static str> { - - let mut nic = mlx5::get_mlx5_nic().ok_or("mlx5 nic isn't initialized")?.lock(); + let mut nic = match mlx5::get_mlx5_nic() { + Some(nic) => nic.lock(), + None => { + println!("MLX5 NIC isn't initialized"); + return Ok(()); + } + }; let mac_address = nic.mac_address(); let num_packets = 8192; diff --git a/applications/test_task_cancel/src/lib.rs b/applications/test_task_cancel/src/lib.rs index 64b6fce4ce..57ba5a993a 100644 --- a/applications/test_task_cancel/src/lib.rs +++ b/applications/test_task_cancel/src/lib.rs @@ -9,6 +9,9 @@ use core::sync::atomic::{AtomicBool, Ordering::Relaxed}; use spin::Mutex; pub fn main(_: Vec) -> isize { + 0 + // FIXME + /* let lock = Arc::new(Mutex::new(())); let task = spawn::new_task_builder(guard_hog, lock.clone()) .spawn() @@ -26,8 +29,10 @@ pub fn main(_: Vec) -> isize { let _ = lock.lock(); 0 + */ } +#[allow(dead_code)] #[inline(never)] fn guard_hog(lock: Arc>) { let _guard = lock.lock(); @@ -40,6 +45,7 @@ fn guard_hog(lock: Arc>) { } } +#[allow(dead_code)] #[inline(never)] fn lsda_generator() { static FALSE: AtomicBool = AtomicBool::new(false); diff --git a/applications/tls_test/Cargo.toml b/applications/test_tls/Cargo.toml similarity index 94% rename from applications/tls_test/Cargo.toml rename to applications/test_tls/Cargo.toml index 3cf7491fb9..fa65ed7a30 100644 --- a/applications/tls_test/Cargo.toml +++ b/applications/test_tls/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "tls_test" +name = "test_tls" version = "0.1.0" authors = ["Kevin Boos "] diff --git a/applications/tls_test/src/lib.rs b/applications/test_tls/src/lib.rs similarity index 86% rename from applications/tls_test/src/lib.rs rename to applications/test_tls/src/lib.rs index fa8157be54..584e311124 100644 --- a/applications/tls_test/src/lib.rs +++ b/applications/test_tls/src/lib.rs @@ -14,15 +14,9 @@ use alloc::vec::Vec; use alloc::string::String; -pub fn main(args: Vec) -> isize { - - match args.first() { - Some(first) if first == "macro" => { - test_macro(); - return 0; - } - _ => { } - } +pub fn main(_: Vec) -> isize { + println!("Testing TLS macro"); + test_macro(); println!("Invoking test_thread_local::test_tls()..."); test_thread_local::test_tls(10); diff --git a/kernel/first_application/Cargo.toml b/kernel/first_application/Cargo.toml index 5824ca42a7..a8a98b7e69 100644 --- a/kernel/first_application/Cargo.toml +++ b/kernel/first_application/Cargo.toml @@ -25,6 +25,7 @@ spawn = { path = "../spawn" } ## Note: if another application crate is used, make sure to change ## both this dependency and the invocation string in `lib.rs`. [target.'cfg(target_arch = "x86_64")'.dependencies] +qemu_test = { path = "../../applications/qemu_test", optional = true } shell = { path = "../../applications/shell" } ## Note: aarch64 doesn't yet support the full graphical `shell` application, diff --git a/kernel/first_application/src/lib.rs b/kernel/first_application/src/lib.rs index ddf75d59ed..817e7f0352 100644 --- a/kernel/first_application/src/lib.rs +++ b/kernel/first_application/src/lib.rs @@ -28,7 +28,8 @@ use path::Path; /// See the crate-level docs and this crate's `Cargo.toml` for more. const FIRST_APPLICATION_CRATE_NAME: &str = { - #[cfg(target_arch = "x86_64")] { "shell-" } + #[cfg(all(target_arch = "x86_64", feature = "qemu_test"))] { "qemu_test-" } + #[cfg(all(target_arch = "x86_64", not(feature = "qemu_test")))] { "shell-" } #[cfg(target_arch = "aarch64")] { "hello-" } }; diff --git a/theseus_features/Cargo.toml b/theseus_features/Cargo.toml index b223c9fea8..5eb9e46b0f 100644 --- a/theseus_features/Cargo.toml +++ b/theseus_features/Cargo.toml @@ -10,6 +10,7 @@ edition = "2021" [dependencies] theseus_std = { path = "../ports/theseus_std", optional = true } +first_application = { path = "../kernel/first_application", optional = true } ## Regular applications. cat = { path = "../applications/cat", optional = true } @@ -29,6 +30,7 @@ ps = { path = "../applications/ps", optional = true } pwd = { path = "../applications/pwd", optional = true } rm = { path = "../applications/rm", optional = true } rq = { path = "../applications/rq", optional = true } +serial_echo = { path = "../applications/serial_echo", optional = true } shell = { path = "../applications/shell", optional = true } swap = { path = "../applications/swap", optional = true } upd = { path = "../applications/upd", optional = true } @@ -47,6 +49,7 @@ hello = { path = "../applications/hello", optional = true } raw_mode = { path = "../applications/raw_mode", optional = true } print_fault_log = { path = "../applications/print_fault_log", optional = true } seconds_counter = { path = "../applications/seconds_counter", optional = true } +qemu_test = { path = "../applications/qemu_test", optional = true } test_aligned_page_allocation = { path = "../applications/test_aligned_page_allocation", optional = true } test_async = { path = "../applications/test_async", optional = true } test_backtrace = { path = "../applications/test_backtrace", optional = true } @@ -61,13 +64,12 @@ test_panic = { path = "../applications/test_panic", optional = true } test_preemption_counter = { path = "../applications/test_preemption_counter", optional = true } test_restartable = { path = "../applications/test_restartable", optional = true } test_scheduler = { path = "../applications/test_scheduler", optional = true } -test_serial_echo = { path = "../applications/test_serial_echo", optional = true } test_std_fs = { path = "../applications/test_std_fs", optional = true } test_sync_block = { path = "../applications/test_sync_block", optional = true } test_task_cancel = { path = "../applications/test_task_cancel", optional = true } +test_tls = { path = "../applications/test_tls", optional = true } test_wait_queue = { path = "../applications/test_wait_queue", optional = true } test_wasmtime = { path = "../applications/test_wasmtime", optional = true } -tls_test = { path = "../applications/tls_test", optional = true } ## Benchmark crates. @@ -124,6 +126,7 @@ theseus_apps = [ "pwd", "rm", "rq", + "serial_echo", "shell", "swap", "upd", @@ -160,12 +163,11 @@ theseus_tests = [ "test_preemption_counter", "test_restartable", "test_scheduler", - "test_serial_echo", "test_std_fs", "test_sync_block", "test_task_cancel", + "test_tls", "test_wait_queue", "test_wasmtime", - "tls_test", "unwind_test", ] From cae8ca8f5cdd380973824ecfbd0b5ceb4d62b588 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Wed, 20 Sep 2023 05:17:57 +1000 Subject: [PATCH 02/25] Set `TaskInner.pinned_cpu` when spawning pinned tasks (#1044) * When spawning a pinned task, `spawn` didn't previously set `inner.pinned_cpu` for the newly-created `Task`. * This is not currently a problem because the scheduler doesn't perform task migration across CPUs, but when that gets enabled (in #1042), it would cause the pinning choice to be ignore by the scheduler. Signed-off-by: Klimenty Tsoutsman --- Cargo.lock | 1 + kernel/spawn/Cargo.toml | 1 + kernel/spawn/src/lib.rs | 7 ++++++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 65cc0e49c7..3260daba2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3518,6 +3518,7 @@ dependencies = [ "spin 0.9.4", "stack", "task", + "task_struct", "thread_local_macro", ] diff --git a/kernel/spawn/Cargo.toml b/kernel/spawn/Cargo.toml index ff1cb93dd4..c3ce259cfa 100644 --- a/kernel/spawn/Cargo.toml +++ b/kernel/spawn/Cargo.toml @@ -18,6 +18,7 @@ stack = { path = "../stack" } cpu = { path = "../cpu" } preemption = { path = "../preemption" } task = { path = "../task" } +task_struct = { path = "../task_struct" } runqueue = { path = "../runqueue" } scheduler = { path = "../scheduler" } mod_mgmt = { path = "../mod_mgmt" } diff --git a/kernel/spawn/src/lib.rs b/kernel/spawn/src/lib.rs index dbfc138bc1..043e6855e9 100755 --- a/kernel/spawn/src/lib.rs +++ b/kernel/spawn/src/lib.rs @@ -31,6 +31,7 @@ use spin::Mutex; use memory::{get_kernel_mmi_ref, MmiRef}; use stack::Stack; use task::{Task, TaskRef, RestartInfo, RunState, JoinableTaskRef, ExitableTaskRef, FailureCleanupFunction}; +use task_struct::ExposedTask; use mod_mgmt::{CrateNamespace, SectionType, SECTION_HASH_DELIMITER}; use path::Path; use fs_node::FileOrDir; @@ -381,7 +382,11 @@ impl TaskBuilder )?; // If a Task name wasn't provided, then just use the function's name. new_task.name = self.name.unwrap_or_else(|| String::from(core::any::type_name::())); - + + let exposed = ExposedTask { task: new_task }; + exposed.inner().lock().pinned_cpu = self.pin_on_cpu; + let ExposedTask { task: mut new_task } = exposed; + #[cfg(simd_personality)] { new_task.simd = self.simd; } From 1d9120178250e0744500bdffca8102e037299271 Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Wed, 20 Sep 2023 22:48:20 -0700 Subject: [PATCH 03/25] Add Discord invite badge to README (#1046) Meant to do this a while ago... --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index c1a364ddea..d0cd763e73 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@ [![Documentation](https://img.shields.io/badge/view-docs-blue)](https://theseus-os.github.io/Theseus/doc/___Theseus_Crates___/index.html) [![Book](https://img.shields.io/badge/view-book-blueviolet)](https://theseus-os.github.io/Theseus/book/index.html) [![Blog](https://img.shields.io/badge/view-blog-orange)](https://theseus-os.com) +[![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?style=flat&logo=discord&logoColor=white)](https://discord.gg/NuUnqeYT8R) Theseus is a new OS written from scratch in [Rust](https://www.rust-lang.org/) to experiment with novel OS structure, better state management, and how to leverage **intralingual design** principles to shift OS responsibilities like resource management into the compiler. From ac51fc175126cab7d4fd2723a8fc3bb222f546b0 Mon Sep 17 00:00:00 2001 From: Nathan Royer <61582713+NathanRoyer@users.noreply.github.com> Date: Sun, 24 Sep 2023 09:39:54 +0200 Subject: [PATCH 04/25] aarch64: support FIQs and use them for TLB shootdown IPIs (#1039) * Add support for fast interrupts on aarch64, aka FIQs. FIQs are designed to be fast and can thus interrupt regular interrupts (IRQs) that are in the process of being handled. They are similar to NMIs on x86_64 in this regard, but can also be explicitly enabled/disabled. * Updated the GIC driver to support both Group 0 (FIQs) and Group 1 (IRQs). * `nano_core`, `captain`, and `ap_start` now enable/disable FIQs. * Broadcasting TLB shootdown IPIs now uses FIQs to ensure that TLB shootdowns occur instantly even if regular interrupts are disabled on one or more other CPUs. * Add a separate trait `Aarch64LocalInterruptController` for arch-specific features, which keeps the `LocalInterruptController` arch-agnostic. * This trait is primarily for configuring/handling fast interrupts (FIQs), but also for acknowledging interrupts, which x86_64 does not require. * The interrupt controller now allows enabling/disabling SPIs too. --------- Co-authored-by: Kevin Boos --- Cargo.lock | 2 +- kernel/ap_start/src/lib.rs | 3 + kernel/captain/src/lib.rs | 1 + kernel/context_switch_regular/src/aarch64.rs | 32 ++--- kernel/gic/src/gic/cpu_interface_gicv2.rs | 13 +- kernel/gic/src/gic/cpu_interface_gicv3.rs | 34 +++-- kernel/gic/src/gic/dist_interface.rs | 43 +++--- kernel/gic/src/gic/mod.rs | 56 +++++--- kernel/gic/src/gic/redist_interface.rs | 36 +++-- kernel/interrupt_controller/src/aarch64.rs | 143 ++++++++++++++----- kernel/interrupt_controller/src/lib.rs | 49 ++++--- kernel/interrupt_controller/src/x86_64.rs | 26 +--- kernel/interrupts/src/aarch64/mod.rs | 135 ++++++++++++----- kernel/interrupts/src/aarch64/table.s | 4 +- kernel/memory_aarch64/src/lib.rs | 45 ++++-- kernel/nano_core/src/lib.rs | 5 +- kernel/tlb_shootdown/src/lib.rs | 10 +- 17 files changed, 435 insertions(+), 202 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3260daba2a..37c05c2764 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1661,7 +1661,7 @@ dependencies = [ [[package]] name = "irq_safety" version = "0.1.1" -source = "git+https://github.com/theseus-os/irq_safety#0e32fe775fdb93be1fc6f85b306d109aea5d920e" +source = "git+https://github.com/theseus-os/irq_safety#11bfab9f410a898df1e42ad6213488612e20c926" dependencies = [ "spin 0.9.4", ] diff --git a/kernel/ap_start/src/lib.rs b/kernel/ap_start/src/lib.rs index 6dadd5e0bd..08cb049fa4 100644 --- a/kernel/ap_start/src/lib.rs +++ b/kernel/ap_start/src/lib.rs @@ -52,6 +52,8 @@ pub fn kstart_ap( nmi_flags: u16, ) -> ! { irq_safety::disable_interrupts(); + #[cfg(target_arch = "aarch64")] + irq_safety::disable_fast_interrupts(); info!("Booted CPU {}, proc: {}, stack: {:#X} to {:#X}, nmi_lint: {}, nmi_flags: {:#X}", cpu_id, processor_id, _stack_start, _stack_end, nmi_lint, nmi_flags @@ -100,6 +102,7 @@ pub fn kstart_ap( #[cfg(target_arch = "aarch64")] { interrupts::init_ap(); + irq_safety::enable_fast_interrupts(); // Register this CPU as online in the system // This is the equivalent of `LocalApic::init` on aarch64 diff --git a/kernel/captain/src/lib.rs b/kernel/captain/src/lib.rs index 05a73df2e3..ced3b62b5b 100644 --- a/kernel/captain/src/lib.rs +++ b/kernel/captain/src/lib.rs @@ -123,6 +123,7 @@ pub fn init( interrupt_controller::init()?; interrupts::init()?; + irq_safety::enable_fast_interrupts(); // register BSP CpuId cpu::register_cpu(true)?; diff --git a/kernel/context_switch_regular/src/aarch64.rs b/kernel/context_switch_regular/src/aarch64.rs index 8e8bb1b6a3..71653e80dc 100644 --- a/kernel/context_switch_regular/src/aarch64.rs +++ b/kernel/context_switch_regular/src/aarch64.rs @@ -85,21 +85,21 @@ macro_rules! save_registers_regular { // Save all general purpose registers into the previous task. r#" // Make room on the stack for the registers. - sub sp, sp, #8 * 2 * 6 + sub sp, sp, #8 * 13 // Push registers on the stack, two at a time. - stp x19, x20, [sp, #8 * 2 * 0] - stp x21, x22, [sp, #8 * 2 * 1] - stp x23, x24, [sp, #8 * 2 * 2] - stp x25, x26, [sp, #8 * 2 * 3] - stp x27, x28, [sp, #8 * 2 * 4] - stp x29, x30, [sp, #8 * 2 * 5] + stp x19, x20, [sp, #8 * 0] + stp x21, x22, [sp, #8 * 2] + stp x23, x24, [sp, #8 * 4] + stp x25, x26, [sp, #8 * 6] + stp x27, x28, [sp, #8 * 8] + stp x29, x30, [sp, #8 * 10] // Push an OR of DAIF and NZCV flags of PSTATE mrs x29, DAIF mrs x30, NZCV orr x29, x29, x30 - str x29, [sp, #8 * 2 * 6] + str x29, [sp, #8 * 12] "# ); } @@ -133,20 +133,20 @@ macro_rules! restore_registers_regular { r#" // Pop DAIF and NZCV flags of PSTATE // These MSRs discard irrelevant bits; no AND is required. - ldr x29, [sp, #8 * 2 * 6] + ldr x29, [sp, #8 * 12] msr DAIF, x29 msr NZCV, x29 // Pop registers from the stack, two at a time. - ldp x29, x30, [sp, #8 * 2 * 5] - ldp x27, x28, [sp, #8 * 2 * 4] - ldp x25, x26, [sp, #8 * 2 * 3] - ldp x23, x24, [sp, #8 * 2 * 2] - ldp x21, x22, [sp, #8 * 2 * 1] - ldp x19, x20, [sp, #8 * 2 * 0] + ldp x29, x30, [sp, #8 * 10] + ldp x27, x28, [sp, #8 * 8] + ldp x25, x26, [sp, #8 * 6] + ldp x23, x24, [sp, #8 * 4] + ldp x21, x22, [sp, #8 * 2] + ldp x19, x20, [sp, #8 * 0] // Move the stack pointer back up. - add sp, sp, #8 * 2 * 6 + add sp, sp, #8 * 13 "# ); } diff --git a/kernel/gic/src/gic/cpu_interface_gicv2.rs b/kernel/gic/src/gic/cpu_interface_gicv2.rs index 58d8c81bca..698cbbc0fa 100644 --- a/kernel/gic/src/gic/cpu_interface_gicv2.rs +++ b/kernel/gic/src/gic/cpu_interface_gicv2.rs @@ -8,6 +8,7 @@ use super::Priority; use super::InterruptNumber; +use super::SPURIOUS_INTERRUPT_NUM; use volatile::{Volatile, ReadOnly, WriteOnly}; use zerocopy::FromBytes; @@ -42,7 +43,7 @@ pub struct CpuRegsP1 { // base offset } // enable group 0 -// const CTLR_ENGRP0: u32 = 0b01; +const CTLR_ENGRP0: u32 = 0b01; // enable group 1 const CTLR_ENGRP1: u32 = 0b10; @@ -51,6 +52,7 @@ impl CpuRegsP1 { /// Enables routing of group 1 interrupts for the current CPU. pub fn init(&mut self) { let mut reg = self.ctlr.read(); + reg |= CTLR_ENGRP0; reg |= CTLR_ENGRP1; self.ctlr.write(reg); } @@ -87,12 +89,17 @@ impl CpuRegsP1 { /// /// This tells the GIC that the requested interrupt is being /// handled by this CPU. - pub fn acknowledge_interrupt(&mut self) -> (InterruptNumber, Priority) { + /// + /// Returns None if a spurious interrupt is detected. + pub fn acknowledge_interrupt(&mut self) -> Option<(InterruptNumber, Priority)> { // Reading the interrupt number has the side effect // of acknowledging the interrupt. let int_num = self.acknowledge.read() as InterruptNumber; let priority = self.running_prio.read() as u8; - (int_num, priority) + match int_num { + SPURIOUS_INTERRUPT_NUM => None, + _ => Some((int_num, priority)) + } } } diff --git a/kernel/gic/src/gic/cpu_interface_gicv3.rs b/kernel/gic/src/gic/cpu_interface_gicv3.rs index 63072864b2..4a68d3db77 100644 --- a/kernel/gic/src/gic/cpu_interface_gicv3.rs +++ b/kernel/gic/src/gic/cpu_interface_gicv3.rs @@ -11,6 +11,8 @@ use core::arch::asm; use super::IpiTargetCpu; use super::Priority; use super::InterruptNumber; +use super::InterruptGroup; +use super::SPURIOUS_INTERRUPT_NUM; const SGIR_TARGET_ALL_OTHER_PE: u64 = 1 << 40; const IGRPEN_ENABLED: u64 = 1; @@ -28,7 +30,7 @@ pub fn init() { // Enable Group 0 // bit 0 = group 0 enable - // unsafe { asm!("msr ICC_IGRPEN0_EL1, {}", in(reg) IGRPEN_ENABLED) }; + unsafe { asm!("msr ICC_IGRPEN0_EL1, {}", in(reg) IGRPEN_ENABLED) }; // Enable Groupe 1 (non-secure) // bit 0 = group 1 (non-secure) enable @@ -61,9 +63,13 @@ pub fn set_minimum_priority(priority: Priority) { /// the current CPU. /// /// This implies that the CPU is ready to process interrupts again. -pub fn end_of_interrupt(int: InterruptNumber) { +pub fn end_of_interrupt(int: InterruptNumber, group: InterruptGroup) { let reg_value = int as u64; - unsafe { asm!("msr ICC_EOIR1_EL1, {}", in(reg) reg_value) }; + + match group { + InterruptGroup::Group0 => unsafe { asm!("msr ICC_EOIR0_EL1, {}", in(reg) reg_value) }, + InterruptGroup::Group1 => unsafe { asm!("msr ICC_EOIR1_EL1, {}", in(reg) reg_value) }, + } } /// Acknowledge the currently serviced interrupt and fetches its @@ -71,24 +77,33 @@ pub fn end_of_interrupt(int: InterruptNumber) { /// /// This tells the GIC that the requested interrupt is being /// handled by this CPU. -pub fn acknowledge_interrupt() -> (InterruptNumber, Priority) { +/// +/// Returns None if a spurious interrupt is detected. +pub fn acknowledge_interrupt(group: InterruptGroup) -> Option<(InterruptNumber, Priority)> { let int_num: u64; let priority: u64; // Reading the interrupt number has the side effect // of acknowledging the interrupt. + match group { + InterruptGroup::Group0 => unsafe { asm!("mrs {}, ICC_IAR0_EL1", out(reg) int_num) }, + InterruptGroup::Group1 => unsafe { asm!("mrs {}, ICC_IAR1_EL1", out(reg) int_num) }, + } + unsafe { - asm!("mrs {}, ICC_IAR1_EL1", out(reg) int_num); asm!("mrs {}, ICC_RPR_EL1", out(reg) priority); } let int_num = int_num & 0xffffff; let priority = priority & 0xff; - (int_num as InterruptNumber, priority as u8) + match int_num as InterruptNumber { + SPURIOUS_INTERRUPT_NUM => None, + n => Some((n, priority as u8)), + } } /// Generates an interrupt in CPU interfaces of the system -pub fn send_ipi(int_num: InterruptNumber, target: IpiTargetCpu) { +pub fn send_ipi(int_num: InterruptNumber, target: IpiTargetCpu, group: InterruptGroup) { let mut value = match target { IpiTargetCpu::Specific(cpu) => { let mpidr: cpu::MpidrValue = cpu.into(); @@ -118,5 +133,8 @@ pub fn send_ipi(int_num: InterruptNumber, target: IpiTargetCpu) { }; value |= (int_num as u64) << 24; - unsafe { asm!("msr ICC_SGI1R_EL1, {}", in(reg) value) }; + match group { + InterruptGroup::Group0 => unsafe { asm!("msr ICC_SGI0R_EL1, {}", in(reg) value) }, + InterruptGroup::Group1 => unsafe { asm!("msr ICC_SGI1R_EL1, {}", in(reg) value) }, + } } \ No newline at end of file diff --git a/kernel/gic/src/gic/dist_interface.rs b/kernel/gic/src/gic/dist_interface.rs index baa7fb7488..d271edd8fd 100644 --- a/kernel/gic/src/gic/dist_interface.rs +++ b/kernel/gic/src/gic/dist_interface.rs @@ -16,6 +16,7 @@ use super::IpiTargetCpu; use super::SpiDestination; use super::InterruptNumber; +use super::InterruptGroup; use super::Enabled; use super::Priority; use super::TargetList; @@ -75,7 +76,7 @@ pub struct DistRegsP6 { // base offset } // enable group 0 -// const CTLR_ENGRP0: u32 = 0b01; +const CTLR_ENGRP0: u32 = 0b01; // enable group 1 const CTLR_ENGRP1: u32 = 0b10; @@ -96,9 +97,6 @@ const SGIR_TARGET_ALL_OTHER_PE: u32 = 1 << 24; // 0 = route to specific PE const P6IROUTER_ANY_AVAILABLE_PE: u64 = 1 << 31; -// const GROUP_0: u32 = 0; -const GROUP_1: u32 = 1; - // bit 15: which interrupt group to target const SGIR_NSATT_GRP1: u32 = 1 << 15; @@ -112,6 +110,7 @@ impl DistRegsP1 { /// states. pub fn init(&mut self) -> Enabled { let mut reg = self.ctlr.read(); + reg |= CTLR_ENGRP0; reg |= CTLR_ENGRP1; reg |= CTLR_E1NWF; self.ctlr.write(reg); @@ -123,26 +122,26 @@ impl DistRegsP1 { } /// Returns whether the given SPI (shared peripheral interrupt) will be - /// forwarded by the distributor - pub fn is_spi_enabled(&self, int: InterruptNumber) -> Enabled { - // enabled? - read_array_volatile::<32>(&self.set_enable, int) > 0 - && - // part of group 1? - read_array_volatile::<32>(&self.group, int) == GROUP_1 + /// forwarded by the distributor. + pub fn get_spi_state(&self, int: InterruptNumber) -> Option { + if read_array_volatile::<32>(&self.set_enable, int) == 1 { + match read_array_volatile::<32>(&self.group, int) { + 0 => return Some(InterruptGroup::Group0), + 1 => return Some(InterruptGroup::Group1), + _ => { } + } + } + None } - /// Enables or disables the forwarding of a particular SPI (shared peripheral interrupt) - pub fn enable_spi(&mut self, int: InterruptNumber, enabled: Enabled) { - let reg_base = match enabled { - true => &mut self.set_enable, - false => &mut self.clear_enable, - }; - write_array_volatile::<32>(reg_base, int, 1); - - // whether we're enabling or disabling, - // set as part of group 1 - write_array_volatile::<32>(&mut self.group, int, GROUP_1); + /// Enables or disables the forwarding of a particular SPI (shared peripheral interrupt). + pub fn set_spi_state(&mut self, int: InterruptNumber, state: Option) { + if let Some(group) = state { + write_array_volatile::<32>(&mut self.group, int, group as u32); + write_array_volatile::<32>(&mut self.set_enable, int, 1); + } else { + write_array_volatile::<32>(&mut self.clear_enable, int, 1); + } } /// Returns the priority of an SPI. diff --git a/kernel/gic/src/gic/mod.rs b/kernel/gic/src/gic/mod.rs index 66f8247ac9..be09740ba2 100644 --- a/kernel/gic/src/gic/mod.rs +++ b/kernel/gic/src/gic/mod.rs @@ -19,6 +19,15 @@ use dist_interface::{DistRegsP1, DistRegsP6}; use cpu_interface_gicv2::CpuRegsP1; use redist_interface::{RedistRegsP1, RedistRegsSgiPpi}; +/// Whether an interrupt is put in Group 0 (FIQs) or Group 1 (IRQs) +#[repr(u32)] +pub enum InterruptGroup { + /// Group 0 is used for FIQs (fast interrupts) + Group0 = 0, + /// Group 1 is used for IRQs (regular interrupts) + Group1 = 1, +} + /// Boolean pub type Enabled = bool; @@ -122,6 +131,7 @@ impl SpiDestination { } const U32BITS: usize = u32::BITS as usize; +const SPURIOUS_INTERRUPT_NUM: InterruptNumber = 1023; // Reads one item of an array spanning across // multiple u32s. @@ -240,19 +250,29 @@ impl ArmGicDistributor { /// Returns whether the given interrupt is forwarded by the distributor. /// + /// # Return + /// * `None` if the interrupt is disabled. + /// * `Some(`[`InterruptGroup::Group0`]`)` if the interrupt is enabled and configured as a Group 0 interrupt. + /// * `Some(`[`InterruptGroup::Group1`]`)` if the interrupt is enabled and configured as a Group 1 interrupt. + /// /// Panics if `int` is not in the SPI range (>= 32). - pub fn get_spi_state(&self, int: InterruptNumber) -> Enabled { + pub fn get_spi_state(&self, int: InterruptNumber) -> Option { assert!(int >= 32, "get_spi_state: `int` must be >= 32"); - self.distributor().is_spi_enabled(int) + self.distributor().get_spi_state(int) } /// Enables or disables the forwarding of the given interrupt /// by the distributor. /// + /// # Arguments + /// * `int`: the interrupt number whose state we are modifying. + /// * `state`: whether the interrupt will be disabled (`None`), + /// enabled as `Group0` (regular IRQs), or enabled as `Group1` (fast interrupts, FIQs). + /// /// Panics if `int` is not in the SPI range (>= 32). - pub fn set_spi_state(&mut self, int: InterruptNumber, enabled: Enabled) { + pub fn set_spi_state(&mut self, int: InterruptNumber, state: Option) { assert!(int >= 32, "set_spi_state: `int` must be >= 32"); - self.distributor_mut().enable_spi(int, enabled) + self.distributor_mut().set_spi_state(int, state) } /// Returns the priority of the given interrupt. @@ -372,11 +392,11 @@ impl ArmGicCpuComponents { /// /// Panics if `int` is greater than or equal to 16; /// on aarch64, IPIs much be sent to an interrupt number less than 16. - pub fn send_ipi(&mut self, int: InterruptNumber, target: IpiTargetCpu) { + pub fn send_ipi(&mut self, int: InterruptNumber, target: IpiTargetCpu, group: InterruptGroup) { assert!(int < 16, "IPIs must have a number below 16 on ARMv8"); if let Self::V3 { .. } = self { - cpu_interface_gicv3::send_ipi(int, target) + cpu_interface_gicv3::send_ipi(int, target, group) } else { // we don't have access to the distributor... code would be: // dist_interface::send_ipi_gicv2(&mut dist_regs, int, target) @@ -394,10 +414,12 @@ impl ArmGicCpuComponents { /// being handled by this CPU. /// /// Returns a tuple of the interrupt's number and priority. - pub fn acknowledge_interrupt(&mut self) -> (InterruptNumber, Priority) { + /// + /// Returns None if a spurious interrupt is detected. + pub fn acknowledge_interrupt(&mut self, group: InterruptGroup) -> Option<(InterruptNumber, Priority)> { match self { - Self::V2 { registers, .. } => registers.acknowledge_interrupt(), - Self::V3 { .. } => cpu_interface_gicv3::acknowledge_interrupt(), + Self::V2 { registers, .. } => registers.acknowledge_interrupt(/* no way to specify a group in GICv2 */), + Self::V3 { .. } => cpu_interface_gicv3::acknowledge_interrupt(group), } } @@ -406,10 +428,10 @@ impl ArmGicCpuComponents { /// the current CPU. /// /// This implies that the CPU is ready to process interrupts again. - pub fn end_of_interrupt(&mut self, int: InterruptNumber) { + pub fn end_of_interrupt(&mut self, int: InterruptNumber, group: InterruptGroup) { match self { - Self::V2 { registers, .. } => registers.end_of_interrupt(int), - Self::V3 { .. } => cpu_interface_gicv3::end_of_interrupt(int), + Self::V2 { registers, .. } => registers.end_of_interrupt(int, /* no way to specify a group in GICv2 */), + Self::V3 { .. } => cpu_interface_gicv3::end_of_interrupt(int, group), } } @@ -417,17 +439,17 @@ impl ArmGicCpuComponents { /// /// Panics if `int` is greater than or equal to 32, which is beyond the range /// of local interrupt numbers. - pub fn get_interrupt_state(&self, int: InterruptNumber) -> Enabled { + pub fn get_interrupt_state(&self, int: InterruptNumber) -> Option { assert!(int < 32, "get_interrupt_state: `int` doesn't lie in the SGI/PPI (local interrupt) range"); if let Self::V3 { redist_regs } = self { - redist_regs.redist_sgippi.is_sgippi_enabled(int) + redist_regs.redist_sgippi.get_sgippi_state(int) } else { // there is no redistributor and we don't have access to the distributor log::error!("GICv2 doesn't support enabling/disabling local interrupt"); // should we panic? - true + Some(InterruptGroup::Group1) } } @@ -435,11 +457,11 @@ impl ArmGicCpuComponents { /// /// Panics if `int` is greater than or equal to 32, which is beyond the range /// of local interrupt numbers. - pub fn set_interrupt_state(&mut self, int: InterruptNumber, enabled: Enabled) { + pub fn set_interrupt_state(&mut self, int: InterruptNumber, state: Option) { assert!(int < 32, "set_interrupt_state: `int` doesn't lie in the SGI/PPI (local interrupt) range"); if let Self::V3 { redist_regs } = self { - redist_regs.redist_sgippi.enable_sgippi(int, enabled); + redist_regs.redist_sgippi.set_sgippi_state(int, state); } else { // there is no redistributor and we don't have access to the distributor log::error!("GICv2 doesn't support enabling/disabling local interrupt"); diff --git a/kernel/gic/src/gic/redist_interface.rs b/kernel/gic/src/gic/redist_interface.rs index 02461b343a..fb30b3f9f2 100644 --- a/kernel/gic/src/gic/redist_interface.rs +++ b/kernel/gic/src/gic/redist_interface.rs @@ -10,7 +10,7 @@ //! - Getting or setting the priority of PPIs & SGIs based on their numbers use super::InterruptNumber; -use super::Enabled; +use super::InterruptGroup; use super::Priority; use super::read_array_volatile; use super::write_array_volatile; @@ -78,9 +78,6 @@ const CTLR_DPG1NS: u32 = 1 << 25; /// If bit is set, the PE cannot be selected for group 0 "1 of N" interrupts. const CTLR_DPG0: u32 = 1 << 24; -/// const GROUP_0: u32 = 0; -const GROUP_1: u32 = 1; - /// This timeout value works on some ARM SoCs: /// - qemu's virt virtual machine /// @@ -150,26 +147,27 @@ impl RedistRegsP1 { impl RedistRegsSgiPpi { /// Returns whether the given SGI (software generated interrupts) or /// PPI (private peripheral interrupts) will be forwarded by the redistributor - pub fn is_sgippi_enabled(&self, int: InterruptNumber) -> Enabled { - read_array_volatile::<32>(&self.set_enable, int) > 0 - && - // part of group 1? - read_array_volatile::<32>(&self.group, int) == GROUP_1 + pub fn get_sgippi_state(&self, int: InterruptNumber) -> Option { + if read_array_volatile::<32>(&self.set_enable, int) == 1 { + match read_array_volatile::<32>(&self.group, int) { + 0 => return Some(InterruptGroup::Group0), + 1 => return Some(InterruptGroup::Group1), + _ => { } + } + } + None } /// Enables or disables the forwarding of a particular /// SGI (software generated interrupts) or PPI (private /// peripheral interrupts) - pub fn enable_sgippi(&mut self, int: InterruptNumber, enabled: Enabled) { - let reg = match enabled { - true => &mut self.set_enable, - false => &mut self.clear_enable, - }; - write_array_volatile::<32>(reg, int, 1); - - // whether we're enabling or disabling, - // set as part of group 1 - write_array_volatile::<32>(&mut self.group, int, GROUP_1); + pub fn set_sgippi_state(&mut self, int: InterruptNumber, state: Option) { + if let Some(group) = state { + write_array_volatile::<32>(&mut self.group, int, group as u32); + write_array_volatile::<32>(&mut self.set_enable, int, 1); + } else { + write_array_volatile::<32>(&mut self.clear_enable, int, 1); + } } /// Returns the priority of an SGI/PPI. diff --git a/kernel/interrupt_controller/src/aarch64.rs b/kernel/interrupt_controller/src/aarch64.rs index 6da4f16714..f2b3d7f63a 100644 --- a/kernel/interrupt_controller/src/aarch64.rs +++ b/kernel/interrupt_controller/src/aarch64.rs @@ -1,7 +1,10 @@ use { - gic::{ArmGicDistributor, ArmGicCpuComponents, SpiDestination, IpiTargetCpu, Version as GicVersion}, + gic::{ + ArmGicDistributor, ArmGicCpuComponents, SpiDestination, IpiTargetCpu, + Version as GicVersion, InterruptGroup, + }, arm_boards::{NUM_CPUS, BOARD_CONFIG, InterruptControllerConfig}, - core::array::try_from_fn, + core::{array::try_from_fn, cell::UnsafeCell}, sync_irq::IrqSafeMutex, cpu::current_cpu, spin::Once, @@ -49,9 +52,9 @@ pub fn init() -> Result<(), &'static str> { ArmGicCpuComponents::init(cpu_id, &version) })?; - Ok(cpu_ctlrs.map(|ctlr| { + Ok(cpu_ctlrs.map(|mut ctlr| { let mutex = IrqSafeMutex::new(ctlr); - LocalInterruptController(mutex) + LocalInterruptController(UnsafeCell::new(mutex)) })) })?; }, @@ -69,7 +72,10 @@ pub struct SystemInterruptController(IrqSafeMutex); /// Struct representing per-cpu-core interrupt controller chips. /// /// On aarch64 w/ GIC, this corresponds to a Redistributor & CPU interface. -pub struct LocalInterruptController(IrqSafeMutex); +pub struct LocalInterruptController(UnsafeCell>); + +unsafe impl Send for LocalInterruptController {} +unsafe impl Sync for LocalInterruptController {} impl SystemInterruptControllerApi for SystemInterruptController { fn get() -> &'static Self { @@ -112,19 +118,33 @@ impl SystemInterruptControllerApi for SystemInterruptController { fn set_destination( &self, sys_int_num: InterruptNumber, - destination: CpuId, + destination: Option, priority: Priority, ) -> Result<(), &'static str> { assert!(sys_int_num >= 32, "shared peripheral interrupts have a number >= 32"); + + let state = match destination.is_some() { + true => Some(InterruptGroup::Group1), + false => None, + }; + let mut dist = self.0.lock(); - dist.set_spi_target(sys_int_num as _, SpiDestination::Specific(destination)); - dist.set_spi_priority(sys_int_num as _, priority); + if let Some(destination) = destination { + dist.set_spi_target(sys_int_num as _, SpiDestination::Specific(destination)); + dist.set_spi_priority(sys_int_num as _, priority); + } + + dist.set_spi_state(sys_int_num as _, state); Ok(()) } } +macro_rules! lock { + ($this:ident) => (unsafe { $this.0.get().as_ref().unwrap().lock() }) +} + impl LocalInterruptControllerApi for LocalInterruptController { fn get() -> &'static Self { // how this function works: @@ -145,69 +165,128 @@ impl LocalInterruptControllerApi for LocalInterruptController { &ctrls[index] } - fn init_secondary_cpu_interface(&self) { - let mut cpu_ctrl = self.0.lock(); - cpu_ctrl.init_secondary_cpu_interface(); - } - fn id(&self) -> LocalInterruptControllerId { - let cpu_ctrl = self.0.lock(); + let cpu_ctrl = lock!(self); LocalInterruptControllerId(cpu_ctrl.get_cpu_interface_id()) } fn get_local_interrupt_priority(&self, num: InterruptNumber) -> Priority { assert!(num < 32, "local interrupts have a number < 32"); - let cpu_ctrl = self.0.lock(); + let cpu_ctrl = lock!(self); cpu_ctrl.get_interrupt_priority(num as _) } fn set_local_interrupt_priority(&self, num: InterruptNumber, priority: Priority) { assert!(num < 32, "local interrupts have a number < 32"); - let mut cpu_ctrl = self.0.lock(); + let mut cpu_ctrl = lock!(self); cpu_ctrl.set_interrupt_priority(num as _, priority); } fn is_local_interrupt_enabled(&self, num: InterruptNumber) -> bool { assert!(num < 32, "local interrupts have a number < 32"); - let cpu_ctrl = self.0.lock(); - cpu_ctrl.get_interrupt_state(num as _) + let cpu_ctrl = lock!(self); + match cpu_ctrl.get_interrupt_state(num as _) { + None => false, + Some(InterruptGroup::Group1) => true, + Some(InterruptGroup::Group0) => { + log::error!("Warning: found misconfigured local interrupt ({})", num); + true + }, + } } fn enable_local_interrupt(&self, num: InterruptNumber, enabled: bool) { assert!(num < 32, "local interrupts have a number < 32"); - let mut cpu_ctrl = self.0.lock(); - cpu_ctrl.set_interrupt_state(num as _, enabled); + let state = match enabled { + true => Some(InterruptGroup::Group1), + false => None, + }; + let mut cpu_ctrl = lock!(self); + cpu_ctrl.set_interrupt_state(num as _, state); } fn send_ipi(&self, num: InterruptNumber, dest: InterruptDestination) { use InterruptDestination::*; assert!(num < 16, "IPIs have a number < 16"); - let mut cpu_ctrl = self.0.lock(); - cpu_ctrl.send_ipi(num as _, match dest { + let dest = match dest { + SpecificCpu(cpu) => IpiTargetCpu::Specific(cpu), + AllOtherCpus => IpiTargetCpu::AllOtherCpus, + }; + + let mut cpu_ctrl = lock!(self); + + cpu_ctrl.send_ipi(num as _, dest, InterruptGroup::Group1); + } + + fn end_of_interrupt(&self, number: InterruptNumber) { + let mut cpu_ctrl = lock!(self); + cpu_ctrl.end_of_interrupt(number as _, InterruptGroup::Group1) + } +} + +impl AArch64LocalInterruptControllerApi for LocalInterruptController { + fn enable_fast_local_interrupt(&self, num: InterruptNumber, enabled: bool) { + assert!(num < 32, "local interrupts have a number < 32"); + let state = match enabled { + true => Some(InterruptGroup::Group0), + false => None, + }; + let mut cpu_ctrl = lock!(self); + cpu_ctrl.set_interrupt_state(num as _, state); + } + + fn send_fast_ipi(&self, num: InterruptNumber, dest: InterruptDestination) { + use InterruptDestination::*; + assert!(num < 16, "IPIs have a number < 16"); + + let dest = match dest { SpecificCpu(cpu) => IpiTargetCpu::Specific(cpu), AllOtherCpus => IpiTargetCpu::AllOtherCpus, - }); + }; + + let mut cpu_ctrl = lock!(self); + + cpu_ctrl.send_ipi(num as _, dest, InterruptGroup::Group0); } fn get_minimum_priority(&self) -> Priority { - let cpu_ctrl = self.0.lock(); + let cpu_ctrl = lock!(self); cpu_ctrl.get_minimum_priority() } fn set_minimum_priority(&self, priority: Priority) { - let mut cpu_ctrl = self.0.lock(); + let mut cpu_ctrl = lock!(self); cpu_ctrl.set_minimum_priority(priority) } - fn acknowledge_interrupt(&self) -> (InterruptNumber, Priority) { - let mut cpu_ctrl = self.0.lock(); - let (num, prio) = cpu_ctrl.acknowledge_interrupt(); - (num as _, prio) + fn acknowledge_interrupt(&self) -> Option<(InterruptNumber, Priority)> { + let mut cpu_ctrl = lock!(self); + let opt = cpu_ctrl.acknowledge_interrupt(InterruptGroup::Group1); + opt.map(|(num, prio)| (num as _, prio)) } - fn end_of_interrupt(&self, number: InterruptNumber) { - let mut cpu_ctrl = self.0.lock(); - cpu_ctrl.end_of_interrupt(number as _) + fn init_secondary_cpu_interface(&self) { + let mut cpu_ctrl = lock!(self); + cpu_ctrl.init_secondary_cpu_interface(); + } + + unsafe fn acknowledge_fast_interrupt(&self) -> Option<(InterruptNumber, Priority)> { + // we cannot lock here + // this has to be unsafe + let mut_mutex = self.0.get().as_mut().unwrap(); + let mut cpu_ctrl = mut_mutex.get_mut(); + + let opt = cpu_ctrl.acknowledge_interrupt(InterruptGroup::Group0); + opt.map(|(num, prio)| (num as _, prio)) + } + + unsafe fn end_of_fast_interrupt(&self, number: InterruptNumber) { + // we cannot lock here + // this has to be unsafe + let mut_mutex = self.0.get().as_mut().unwrap(); + let mut cpu_ctrl = mut_mutex.get_mut(); + + cpu_ctrl.end_of_interrupt(number as _, InterruptGroup::Group0) } } diff --git a/kernel/interrupt_controller/src/lib.rs b/kernel/interrupt_controller/src/lib.rs index 55c08fe697..92d7f10e24 100644 --- a/kernel/interrupt_controller/src/lib.rs +++ b/kernel/interrupt_controller/src/lib.rs @@ -52,7 +52,7 @@ pub trait SystemInterruptControllerApi { fn set_destination( &self, sys_int_num: InterruptNumber, - destination: CpuId, + destination: Option, priority: Priority, ) -> Result<(), &'static str>; } @@ -60,13 +60,6 @@ pub trait SystemInterruptControllerApi { pub trait LocalInterruptControllerApi { fn get() -> &'static Self; - /// Aarch64-specific way to initialize the secondary CPU interfaces. - /// - /// Must be called once from every secondary CPU. - /// - /// Always panics on x86_64. - fn init_secondary_cpu_interface(&self); - fn id(&self) -> LocalInterruptControllerId; fn get_local_interrupt_priority(&self, num: InterruptNumber) -> Priority; fn set_local_interrupt_priority(&self, num: InterruptNumber, priority: Priority); @@ -79,21 +72,45 @@ pub trait LocalInterruptControllerApi { /// If it's None, all CPUs except the sender receive the interrupt. fn send_ipi(&self, num: InterruptNumber, dest: InterruptDestination); + /// Tell the interrupt controller that the current interrupt has been handled. + fn end_of_interrupt(&self, number: InterruptNumber); +} + +/// AArch64-specific methods of a local interrupt controller +pub trait AArch64LocalInterruptControllerApi { + /// Same as [`LocalInterruptControllerApi::enable_local_interrupt`] but for fast interrupts (FIQs). + fn enable_fast_local_interrupt(&self, num: InterruptNumber, enabled: bool); + + /// Same as [`LocalInterruptControllerApi::send_ipi`] but for fast interrupts (FIQs). + fn send_fast_ipi(&self, num: InterruptNumber, dest: InterruptDestination); + /// Reads the minimum priority for an interrupt to reach this CPU. - /// - /// Note: aarch64-only, at the moment. fn get_minimum_priority(&self) -> Priority; /// Changes the minimum priority for an interrupt to reach this CPU. - /// - /// Note: aarch64-only, at the moment. fn set_minimum_priority(&self, priority: Priority); /// Aarch64-specific way to read the current pending interrupt number & priority. + fn acknowledge_interrupt(&self) -> Option<(InterruptNumber, Priority)>; + + /// Aarch64-specific way to initialize the secondary CPU interfaces. /// - /// Always panics on x86_64. - fn acknowledge_interrupt(&self) -> (InterruptNumber, Priority); + /// Must be called once from every secondary CPU. + fn init_secondary_cpu_interface(&self); - /// Tell the interrupt controller that the current interrupt has been handled. - fn end_of_interrupt(&self, number: InterruptNumber); + /// Same as [`Self::acknowledge_interrupt`] but for fast interrupts (FIQs) + /// + /// # Safety + /// + /// This is unsafe because it circumvents the internal Mutex. + /// It must only be used by the `interrupts` crate when handling an FIQ. + unsafe fn acknowledge_fast_interrupt(&self) -> Option<(InterruptNumber, Priority)>; + + /// Same as [`LocalInterruptControllerApi::end_of_interrupt`] but for fast interrupts (FIQs) + /// + /// # Safety + /// + /// This is unsafe because it circumvents the internal Mutex. + /// It must only be used by the `interrupts` crate when handling an FIQ. + unsafe fn end_of_fast_interrupt(&self, number: InterruptNumber); } diff --git a/kernel/interrupt_controller/src/x86_64.rs b/kernel/interrupt_controller/src/x86_64.rs index 917062e3f4..adbee306eb 100644 --- a/kernel/interrupt_controller/src/x86_64.rs +++ b/kernel/interrupt_controller/src/x86_64.rs @@ -55,7 +55,7 @@ impl SystemInterruptControllerApi for SystemInterruptController { fn set_destination( &self, sys_int_num: InterruptNumber, - destination: CpuId, + destination: Option, priority: Priority, ) -> Result<(), &'static str> { let mut int_ctlr = get_ioapic(self.id).expect("BUG: set_destination(): get_ioapic() returned None"); @@ -63,7 +63,11 @@ impl SystemInterruptControllerApi for SystemInterruptController { // no support for priority on x86_64 let _ = priority; - int_ctlr.set_irq(sys_int_num, destination.into(), sys_int_num /* <- is this correct? */) + if let Some(destination) = destination { + int_ctlr.set_irq(sys_int_num, destination.into(), sys_int_num) + } else { + Err("SystemInterruptController::set_destination: todo on x86: set the IOREDTBL MASK bit") + } } } @@ -73,10 +77,6 @@ impl LocalInterruptControllerApi for LocalInterruptController { unimplemented!() } - fn init_secondary_cpu_interface(&self) { - panic!("This must not be used on x86_64") - } - fn id(&self) -> LocalInterruptControllerId { let int_ctlr = get_my_apic().expect("BUG: id(): get_my_apic() returned None"); let int_ctlr = int_ctlr.read(); @@ -112,20 +112,6 @@ impl LocalInterruptControllerApi for LocalInterruptController { }); } - fn get_minimum_priority(&self) -> Priority { - // No priority support on x86_64 - Priority - } - - fn set_minimum_priority(&self, priority: Priority) { - // No priority support on x86_64 - let _ = priority; - } - - fn acknowledge_interrupt(&self) -> (InterruptNumber, Priority) { - panic!("This must not be used on x86_64") - } - fn end_of_interrupt(&self, _number: InterruptNumber) { let mut int_ctlr = get_my_apic().expect("BUG: end_of_interrupt(): get_my_apic() returned None"); let mut int_ctlr = int_ctlr.write(); diff --git a/kernel/interrupts/src/aarch64/mod.rs b/kernel/interrupts/src/aarch64/mod.rs index 716dd42506..3cae5f9fc4 100644 --- a/kernel/interrupts/src/aarch64/mod.rs +++ b/kernel/interrupts/src/aarch64/mod.rs @@ -11,7 +11,7 @@ use tock_registers::registers::InMemoryRegister; use interrupt_controller::{ LocalInterruptController, SystemInterruptController, InterruptDestination, - LocalInterruptControllerApi, SystemInterruptControllerApi, + LocalInterruptControllerApi, AArch64LocalInterruptControllerApi, SystemInterruptControllerApi, }; use kernel_config::time::CONFIG_TIMESLICE_PERIOD_MICROSECONDS; use arm_boards::BOARD_CONFIG; @@ -48,7 +48,7 @@ const MAX_IRQ_NUM: usize = 256; // it's an array of function pointers which are meant to handle IRQs. // Synchronous Exceptions (including syscalls) are not IRQs on aarch64; // this crate doesn't expose any way to handle them at the moment. -static IRQ_HANDLERS: IrqSafeRwLock<[InterruptHandler; MAX_IRQ_NUM]> = IrqSafeRwLock::new([default_irq_handler; MAX_IRQ_NUM]); +static IRQ_HANDLERS: IrqSafeRwLock<[Option; MAX_IRQ_NUM]> = IrqSafeRwLock::new([None; MAX_IRQ_NUM]); /// The Saved Program Status Register at the time of the exception. #[repr(transparent)] @@ -98,12 +98,6 @@ fn default_exception_handler(exc: &ExceptionContext, origin: &'static str) { loop { core::hint::spin_loop() } } -// called for all unhandled interrupt requests -extern "C" fn default_irq_handler(exc: &ExceptionContext) -> EoiBehaviour { - log::error!("Unhandled IRQ:\r\n{:?}\r\n[looping forever now]", exc); - loop { core::hint::spin_loop() } -} - fn read_timer_period_femtoseconds() -> u64 { let counter_freq_hz = CNTFRQ_EL0.get(); let fs_in_one_sec = 1_000_000_000_000_000; @@ -144,7 +138,10 @@ pub fn init_ap() { int_ctrl.init_secondary_cpu_interface(); int_ctrl.set_minimum_priority(0); - int_ctrl.enable_local_interrupt(TLB_SHOOTDOWN_IPI, true); + // on the bootstrap CPU, this is done in setup_tlb_shootdown_handler + int_ctrl.enable_fast_local_interrupt(TLB_SHOOTDOWN_IPI, true); + + // on the bootstrap CPU, this is done in init_timer int_ctrl.enable_local_interrupt(CPU_LOCAL_TIMER_IRQ, true); enable_timer(true); @@ -189,6 +186,8 @@ pub fn init_timer(timer_tick_handler: InterruptHandler) -> Result<(), &'static s /// This function registers an interrupt handler for an inter-processor interrupt /// and handles interrupt controller configuration for that interrupt. +/// +/// Returns an error if the specified interrupt number already has a registered handler. pub fn setup_ipi_handler(handler: InterruptHandler, local_num: InterruptNumber) -> Result<(), &'static str> { // register the handler if let Err(existing_handler) = register_interrupt(local_num, handler) { @@ -207,10 +206,30 @@ pub fn setup_ipi_handler(handler: InterruptHandler, local_num: InterruptNumber) Ok(()) } +/// This function registers an interrupt handler for the TLB Shootdown IPI +/// and handles interrupt controller configuration for that interrupt. +/// +/// Returns an error if the TLB Shootdown interrupt number already has a registered handler. +pub fn setup_tlb_shootdown_handler(handler: InterruptHandler) -> Result<(), &'static str> { + if let Err(existing_handler) = register_interrupt(TLB_SHOOTDOWN_IPI, handler) { + if handler as *const InterruptHandler != existing_handler { + return Err("A different interrupt handler has already been setup for that IPI"); + } + } + + { + // enable this interrupt as a Fast interrupt (FIQ / Group 0 interrupt) + let int_ctrl = LocalInterruptController::get(); + int_ctrl.enable_fast_local_interrupt(TLB_SHOOTDOWN_IPI, true); + } + + Ok(()) +} + /// Enables the PL011 "RX" SPI and routes it to the current CPU. pub fn init_pl011_rx_interrupt() -> Result<(), &'static str> { let int_ctrl = SystemInterruptController::get(); - int_ctrl.set_destination(PL011_RX_SPI, current_cpu(), u8::MAX) + int_ctrl.set_destination(PL011_RX_SPI, Some(current_cpu()), u8::MAX) } /// Disables the timer, schedules its next tick, and re-enables it @@ -255,15 +274,12 @@ pub fn register_interrupt(int_num: InterruptNumber, func: InterruptHandler) -> R let mut handlers = IRQ_HANDLERS.write(); let index = int_num as usize; - let value = handlers[index] as *const InterruptHandler; - let default = default_irq_handler as *const InterruptHandler; - - if value == default { - handlers[index] = func; - Ok(()) - } else { + if let Some(handler) = handlers[index] { error!("register_interrupt: the requested interrupt IRQ {} was already in use", index); - Err(value) + Err(handler as *const _) + } else { + handlers[index] = Some(func); + Ok(()) } } @@ -276,27 +292,35 @@ pub fn register_interrupt(int_num: InterruptNumber, func: InterruptHandler) -> R /// # Arguments /// * `int_num`: the interrupt number that needs to be deregistered /// * `func`: the handler that should currently be stored for 'interrupt_num' -pub fn deregister_interrupt(int_num: InterruptNumber, func: InterruptHandler) -> Result<(), *const InterruptHandler> { +pub fn deregister_interrupt(int_num: InterruptNumber, func: InterruptHandler) -> Result<(), Option<*const InterruptHandler>> { let mut handlers = IRQ_HANDLERS.write(); let index = int_num as usize; - let value = handlers[index] as *const InterruptHandler; let func = func as *const InterruptHandler; + let handler = handlers[index].map(|h| h as *const InterruptHandler); - if value == func { - handlers[index] = default_irq_handler; - Ok(()) - } else { + if handler != Some(func) { error!("deregister_interrupt: Cannot free interrupt due to incorrect handler function"); - Err(value) + Err(handler) + } else { + handlers[index] = None; + Ok(()) } } -/// Broadcast an Inter-Processor Interrupt to all other -/// cores in the system -pub fn send_ipi_to_all_other_cpus(irq_num: InterruptNumber) { +/// Broadcast an Inter-Processor Interrupt to all other CPU cores in the system +pub fn broadcast_ipi(ipi_num: InterruptNumber) { + let int_ctrl = LocalInterruptController::get(); + int_ctrl.send_ipi(ipi_num, InterruptDestination::AllOtherCpus); +} + +/// Broadcast the TLB Shootdown Inter-Processor Interrupt to all other +/// CPU cores in the system +/// +/// This IPI uses fast interrupts (FIQs) as an NMI alternative. +pub fn broadcast_tlb_shootdown_ipi() { let int_ctrl = LocalInterruptController::get(); - int_ctrl.send_ipi(irq_num, InterruptDestination::AllOtherCpus); + int_ctrl.send_fast_ipi(TLB_SHOOTDOWN_IPI, InterruptDestination::AllOtherCpus); } /// Send an "end of interrupt" signal, notifying the interrupt chip that @@ -414,21 +438,62 @@ extern "C" fn current_elx_synchronous(e: &mut ExceptionContext) { default_exception_handler(e, "current_elx_synchronous"); } +// When this is entered, FIQs are enabled / unmasked, because we use +// them as an NMI alternative, so they must be allowed at all times. +// +// Spurious interrupts are often the result of an FIQ being handled +// after we started handling an IRQ but before we acknowledged it. #[no_mangle] extern "C" fn current_elx_irq(exc: &mut ExceptionContext) { let (irq_num, _priority) = { let int_ctrl = LocalInterruptController::get(); - int_ctrl.acknowledge_interrupt() + match int_ctrl.acknowledge_interrupt() { + Some(irq_prio_tuple) => irq_prio_tuple, + None /* spurious interrupt */ => return, + } }; let index = irq_num as usize; - let handler = match index < MAX_IRQ_NUM { - true => IRQ_HANDLERS.read()[index], - false => default_irq_handler, + let handler = IRQ_HANDLERS.read().get(index).copied().flatten(); + let result = handler.map(|handler| handler(exc)); + + if let Some(result) = result { + if result == EoiBehaviour::HandlerDidNotSendEoi { + // will use LocalInterruptController + eoi(irq_num); + } + } else { + log::error!("Unhandled IRQ: {}\r\n{:?}\r\n[looping forever now]", irq_num, exc); + loop { core::hint::spin_loop() } + } +} + +// When this is entered, FIQs are disabled / masked: there must be +// only one FIQ (that we use as an NMI alternative) at a time. +// +// Currently, FIQs are only used for TLB shootdown. +#[no_mangle] +extern "C" fn current_elx_fiq(exc: &mut ExceptionContext) { + let (irq_num, _priority) = { + let int_ctrl = LocalInterruptController::get(); + let ack = unsafe { int_ctrl.acknowledge_fast_interrupt() }; + match ack { + Some(irq_prio_tuple) => irq_prio_tuple, + None /* spurious interrupt */ => return, + } }; - if handler(exc) == EoiBehaviour::HandlerDidNotSendEoi { - eoi(irq_num); + let handler = IRQ_HANDLERS.read().get(irq_num as usize).copied().flatten(); + let result = handler.map(|handler| handler(exc)); + + if let Some(result) = result { + if result == EoiBehaviour::HandlerDidNotSendEoi { + let int_ctrl = LocalInterruptController::get(); + unsafe { int_ctrl.end_of_fast_interrupt(irq_num) }; + } + } else { + log::error!("Unhandled FIQ: {}\r\n{:?}\r\n[looping forever now]", irq_num, exc); + loop { core::hint::spin_loop() } } } diff --git a/kernel/interrupts/src/aarch64/table.s b/kernel/interrupts/src/aarch64/table.s index c66a2f4045..c480eb7143 100644 --- a/kernel/interrupts/src/aarch64/table.s +++ b/kernel/interrupts/src/aarch64/table.s @@ -77,9 +77,11 @@ __exception_vector_start: .org 0x200 CALL_WITH_CONTEXT current_elx_synchronous .org 0x280 + // this first instruction re-enables (unmasks) fast interrupts so that IRQs can be interrupted by FIQs. + msr daifclr, #1 CALL_WITH_CONTEXT current_elx_irq .org 0x300 - FIQ_SUSPEND + CALL_WITH_CONTEXT current_elx_fiq .org 0x380 CALL_WITH_CONTEXT current_elx_serror diff --git a/kernel/memory_aarch64/src/lib.rs b/kernel/memory_aarch64/src/lib.rs index e7cd6a560f..04cd8c451f 100644 --- a/kernel/memory_aarch64/src/lib.rs +++ b/kernel/memory_aarch64/src/lib.rs @@ -16,35 +16,62 @@ use pte_flags::PteFlags; use boot_info::{BootInformation, ElfSection}; use kernel_config::memory::KERNEL_OFFSET; -#[cfg(any(target_arch = "aarch64", doc))] +#[cfg(any(doc, target_arch = "aarch64"))] use core::arch::asm; const THESEUS_ASID: u16 = 0; -#[cfg(any(target_arch = "aarch64", doc))] /// Flushes the specific virtual address in TLB. /// /// TLBI => tlb invalidate instruction -/// "va" => all translations at execution level -/// using the supplied address +/// "va" => all translations at execution level using the supplied address /// "e1" => execution level +#[cfg(any(doc, target_arch = "aarch64"))] pub fn tlb_flush_virt_addr(vaddr: VirtualAddress) { - #[cfg(target_arch = "aarch64")] - unsafe { asm!("tlbi vae1, {}", in(reg) vaddr.value()) }; + // unsure here: where should the original address ASID go? + // it's zero in theseus so it's not important for us + + // about the 48 bit shift: + // If the implementation supports 16 bits of ASID, then the + // upper 8 bits of the ASID must be written to 0 by software + // when the context being invalidated only uses 8 bits + let value = ((THESEUS_ASID as usize) << 48) | (vaddr.value() >> 12); + + unsafe { + asm!("tlbi vae1, {}", in(reg) value) + }; } -#[cfg(any(target_arch = "aarch64", doc))] +/* NO SUPPORT IN QEMU + +/// Flushes the specific virtual address in the TLB of any CPU +/// in the outer shareable domain. +/// +/// TLBI => tlb invalidate instruction +/// "va" => all translations at execution level using the supplied address +/// "e1" => execution level +/// "os" => outer shareable domain +#[cfg(any(doc, target_arch = "aarch64"))] +pub fn tlb_flush_virt_addr_all_cpus(vaddr: VirtualAddress) { + unsafe { + let value = ((THESEUS_ASID as usize) << 48) | (vaddr.value() >> 12); + asm!(".arch armv8.4-a\ntlbi vae1os, {}", in(reg) value) + }; +} + +*/ + /// Flushes all TLB entries with Theseus' ASID (=0). /// /// TLBI => tlb invalidate instruction /// "asid" => all entries with specific ASID /// "e1" => execution level +#[cfg(any(doc, target_arch = "aarch64"))] pub fn tlb_flush_by_theseus_asid() { - #[cfg(target_arch = "aarch64")] unsafe { asm!("tlbi aside1, {:x}", in(reg) THESEUS_ASID) }; } -#[cfg(any(target_arch = "aarch64", doc))] +#[cfg(any(doc, target_arch = "aarch64"))] pub use tlb_flush_by_theseus_asid as tlb_flush_all; /// Returns the current top-level page table address. diff --git a/kernel/nano_core/src/lib.rs b/kernel/nano_core/src/lib.rs index 87b6c2409c..7329a71ae4 100644 --- a/kernel/nano_core/src/lib.rs +++ b/kernel/nano_core/src/lib.rs @@ -88,6 +88,9 @@ where B: boot_info::BootInformation { irq_safety::disable_interrupts(); + #[cfg(target_arch = "aarch64")] + irq_safety::disable_fast_interrupts(); + println!("nano_core(): Entered early setup. Interrupts disabled."); #[cfg(target_arch = "x86_64")] @@ -131,7 +134,7 @@ where let logger_ports = [take_serial_port(SerialPortAddress::COM1)]; logger::early_init(None, IntoIterator::into_iter(logger_ports).flatten()); log::info!("initialized early logger with aarch64 serial ports."); - println!("nano_core(): initialized early logger with aarch64 serial ports."); + println!("nano_core(): initialized early logger with aarch64 serial ports."); } println!("nano_core(): initialized memory subsystem."); diff --git a/kernel/tlb_shootdown/src/lib.rs b/kernel/tlb_shootdown/src/lib.rs index 98f333b5dc..a9ad79e31e 100644 --- a/kernel/tlb_shootdown/src/lib.rs +++ b/kernel/tlb_shootdown/src/lib.rs @@ -28,7 +28,7 @@ pub fn init() { memory::set_broadcast_tlb_shootdown_cb(broadcast_tlb_shootdown); #[cfg(target_arch = "aarch64")] - interrupts::setup_ipi_handler(tlb_shootdown_ipi_handler, interrupts::TLB_SHOOTDOWN_IPI).unwrap(); + interrupts::setup_tlb_shootdown_handler(tlb_shootdown_ipi_handler).unwrap(); } /// Handles a TLB shootdown IPI requested by another CPU. @@ -40,6 +40,8 @@ pub fn init() { pub fn handle_tlb_shootdown_ipi() -> bool { let pages_to_invalidate = TLB_SHOOTDOWN_IPI_PAGES.read().clone(); if let Some(pages) = pages_to_invalidate { + // Note: logging in a NMI (x86_64) or FIQ (aarch64) context can cause deadlock, + // so this should only be used sparingly to help debug problems with TLB shootdowns. // log::trace!("handle_tlb_shootdown_ipi(): CPU {}, pages: {:?}", apic::current_cpu(), pages); for page in pages { tlb_flush_virt_addr(page.start_address()); @@ -97,7 +99,7 @@ fn broadcast_tlb_shootdown(pages_to_invalidate: PageRange) { } #[cfg(target_arch = "aarch64")] - interrupts::send_ipi_to_all_other_cpus(interrupts::TLB_SHOOTDOWN_IPI); + interrupts::broadcast_tlb_shootdown_ipi(); // wait for all other cores to handle this IPI // it must be a blocking, synchronous operation to ensure stale TLB entries don't cause problems @@ -111,6 +113,10 @@ fn broadcast_tlb_shootdown(pages_to_invalidate: PageRange) { // release lock TLB_SHOOTDOWN_IPI_LOCK.store(false, Ordering::Release); + + if false { + log::warn!("send_tlb_shootdown_ipi(): from CPU {:?}, complete", cpu::current_cpu()); + } } /// Interrupt Handler for TLB Shootdowns on aarch64 From 0bd4447ea024803c13dab7bd7151ea746c42895b Mon Sep 17 00:00:00 2001 From: Nathan Royer <61582713+NathanRoyer@users.noreply.github.com> Date: Tue, 3 Oct 2023 19:04:32 +0200 Subject: [PATCH 05/25] Move PL011 driver into the main workspace (#1040) We now use that driver to implement the serial port on aarch64. --- Cargo.lock | 22 +-- kernel/serial_port/src/lib.rs | 14 +- kernel/serial_port_basic/Cargo.toml | 3 +- kernel/serial_port_basic/src/aarch64.rs | 29 ++-- kernel/serial_port_basic/src/x86_64.rs | 5 + kernel/uart_pl011/Cargo.toml | 12 ++ kernel/uart_pl011/src/lib.rs | 192 ++++++++++++++++++++++++ 7 files changed, 243 insertions(+), 34 deletions(-) create mode 100644 kernel/uart_pl011/Cargo.toml create mode 100644 kernel/uart_pl011/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 37c05c2764..71971a438e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2669,15 +2669,6 @@ dependencies = [ "spin 0.9.4", ] -[[package]] -name = "pl011" -version = "1.0.0" -source = "git+https://github.com/theseus-os/pl011/?rev=464dbf22#464dbf2288a5d9e7445b0b1f404ddd41b1fd1c1e" -dependencies = [ - "log", - "volatile-register", -] - [[package]] name = "plain" version = "0.2.3" @@ -3347,11 +3338,10 @@ name = "serial_port_basic" version = "0.1.0" dependencies = [ "arm_boards", - "memory", - "pl011", "port_io", "spin 0.9.4", "sync_irq", + "uart_pl011", ] [[package]] @@ -4200,6 +4190,16 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +[[package]] +name = "uart_pl011" +version = "0.1.0" +dependencies = [ + "log", + "memory", + "volatile 0.2.7", + "zerocopy", +] + [[package]] name = "uefi-bootloader-api" version = "0.1.0" diff --git a/kernel/serial_port/src/lib.rs b/kernel/serial_port/src/lib.rs index 66126f112b..cfa0f2b2f6 100644 --- a/kernel/serial_port/src/lib.rs +++ b/kernel/serial_port/src/lib.rs @@ -82,6 +82,8 @@ pub fn init_serial_port( serial_port_address: SerialPortAddress, serial_port: SerialPortBasic, ) -> Option<&'static Arc>> { + // Note: if we're called by device_manager, we cannot log (as we're modifying the logger config) + #[cfg(target_arch = "aarch64")] if serial_port_address != SerialPortAddress::COM1 { return None; @@ -309,7 +311,7 @@ fn serial_port_receive_deferred( let mut buf = DataChunk::empty(); let bytes_read; let base_port; - + let mut input_was_ignored = false; let mut send_result = Ok(()); @@ -331,9 +333,6 @@ fn serial_port_receive_deferred( // other than data being received, which is the only one we currently care about. return Ok(()); } - - #[cfg(target_arch = "aarch64")] - sp.enable_interrupt(SerialPortInterruptEvent::DataReceived, true); } if let Err(e) = send_result { @@ -387,15 +386,18 @@ static INTERRUPT_ACTION_COM2_COM4: Once> = Once::new // Cross-platform interrupt handler for COM1 and COM3 (IRQ 0x24 on x86_64). interrupt_handler!(com1_com3_interrupt_handler, Some(interrupts::IRQ_BASE_OFFSET + 0x4), _stack_frame, { - // trace!("COM1/COM3 serial handler"); + // log::trace!("COM1/COM3 serial handler"); + #[cfg(target_arch = "aarch64")] { let mut sp = COM1_SERIAL_PORT.get().unwrap().as_ref().lock(); - sp.enable_interrupt(SerialPortInterruptEvent::DataReceived, false); + sp.acknowledge_interrupt(SerialPortInterruptEvent::DataReceived); } if let Some(func) = INTERRUPT_ACTION_COM1_COM3.get() { func() } + + // log::trace!("COM1/COM3 serial handler done"); EoiBehaviour::HandlerDidNotSendEoi }); diff --git a/kernel/serial_port_basic/Cargo.toml b/kernel/serial_port_basic/Cargo.toml index 20b53639df..9b4e1a5421 100644 --- a/kernel/serial_port_basic/Cargo.toml +++ b/kernel/serial_port_basic/Cargo.toml @@ -13,9 +13,8 @@ spin = "0.9.4" port_io = { path = "../../libs/port_io" } [target.'cfg(target_arch = "aarch64")'.dependencies] -pl011 = { git = "https://github.com/theseus-os/pl011/", rev = "464dbf22" } +uart_pl011 = { path = "../uart_pl011" } arm_boards = { path = "../arm_boards" } -memory = { path = "../memory" } [lib] crate-type = ["rlib"] diff --git a/kernel/serial_port_basic/src/aarch64.rs b/kernel/serial_port_basic/src/aarch64.rs index 7ce000da37..67209e82f7 100644 --- a/kernel/serial_port_basic/src/aarch64.rs +++ b/kernel/serial_port_basic/src/aarch64.rs @@ -1,7 +1,6 @@ -use memory::{MappedPages, PAGE_SIZE, map_frame_range, MMIO_FLAGS}; use super::{TriState, SerialPortInterruptEvent}; use arm_boards::BOARD_CONFIG; -use pl011::PL011; +use uart_pl011::Pl011; use core::fmt; /// The base port I/O addresses for COM serial ports. @@ -19,11 +18,10 @@ pub enum SerialPortAddress { } /// A serial port and its various data and control registers. +#[derive(Debug)] pub struct SerialPort { port_address: SerialPortAddress, - inner: Option, - // Owner of the MMIO frames for the PL011 registers - _mapped_pages: Option, + inner: Option, } impl Drop for SerialPort { @@ -33,7 +31,6 @@ impl Drop for SerialPort { if let TriState::Taken = &*sp_locked { let dummy = SerialPort { inner: None, - _mapped_pages: None, port_address: self.port_address, }; let dropped = core::mem::replace(self, dummy); @@ -57,19 +54,12 @@ impl SerialPort { None => panic!("Board doesn't have {:?}", serial_port_address), }; - let mapped_pages = map_frame_range(*mmio_base, PAGE_SIZE, MMIO_FLAGS) - .expect("serial_port_basic: couldn't map the UART interface"); - let addr = mapped_pages.start_address().value(); - let mut pl011 = PL011::new(addr as *mut _); - - pl011.enable_rx_interrupt(true); - pl011.set_fifo_mode(false); - // pl011.log_status(); + let pl011 = Pl011::new(*mmio_base) + .expect("SerialPort::new: Couldn't initialize PL011 UART"); SerialPort { port_address: serial_port_address, inner: Some(pl011), - _mapped_pages: Some(mapped_pages), } } @@ -84,6 +74,15 @@ impl SerialPort { } } + /// Clears an interrupt in the serial port controller + pub fn acknowledge_interrupt(&mut self, event: SerialPortInterruptEvent) { + if matches!(event, SerialPortInterruptEvent::DataReceived) { + self.inner.as_mut().unwrap().acknowledge_rx_interrupt(); + } else { + unimplemented!() + } + } + /// Write the given string to the serial port, blocking until data can be transmitted. /// /// # Special characters diff --git a/kernel/serial_port_basic/src/x86_64.rs b/kernel/serial_port_basic/src/x86_64.rs index 59fac34e95..6c83e5aa72 100644 --- a/kernel/serial_port_basic/src/x86_64.rs +++ b/kernel/serial_port_basic/src/x86_64.rs @@ -187,6 +187,11 @@ impl SerialPort { } } + /// Clears an interrupt in the serial port controller + pub fn acknowledge_interrupt(&mut self, _event: SerialPortInterruptEvent) { + // no-op on x86_64 + } + /// Write the given string to the serial port, blocking until data can be transmitted. /// /// # Special characters diff --git a/kernel/uart_pl011/Cargo.toml b/kernel/uart_pl011/Cargo.toml new file mode 100644 index 0000000000..6f734ad396 --- /dev/null +++ b/kernel/uart_pl011/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "uart_pl011" +authors = ["Nathan Royer "] +description = "Simple Driver for PL011 UARTs" +version = "0.1.0" +edition = "2021" + +[dependencies] +log = "0.4.8" +volatile = "0.2.7" +zerocopy = "0.5.0" +memory = { path = "../memory" } \ No newline at end of file diff --git a/kernel/uart_pl011/src/lib.rs b/kernel/uart_pl011/src/lib.rs new file mode 100644 index 0000000000..2587d3d4a9 --- /dev/null +++ b/kernel/uart_pl011/src/lib.rs @@ -0,0 +1,192 @@ +//! Driver for pl011 UARTs + +#![no_std] +use core::fmt; +use zerocopy::FromBytes; +use volatile::{Volatile, ReadOnly, WriteOnly}; +use memory::{BorrowedMappedPages, Mutable, PhysicalAddress, PAGE_SIZE, map_frame_range, MMIO_FLAGS}; + +/// Struct representing Pl011 registers. Not intended to be directly used +#[derive(Debug, FromBytes)] +#[repr(C)] +pub struct Pl011_Regs { + /// Data Register + pub uartdr: Volatile, + /// receive status / error clear + pub uartrsr: Volatile, + reserved0: [u32; 4], + /// flag register + pub uartfr: ReadOnly, + reserved1: u32, + /// IrDA Low power counter register + pub uartilpr: Volatile, + /// integer baud rate + pub uartibrd: Volatile, + /// fractional baud rate + pub uartfbrd: Volatile, + /// line control + pub uartlcr_h: Volatile, + /// control + pub uartcr: Volatile, + /// interrupt fifo level select + pub uartifls: Volatile, + /// interrupt mask set/clear + pub uartimsc: Volatile, + /// raw interrupt status + pub uartris: ReadOnly, + /// masked interrupt status + pub uartmis: ReadOnly, + /// interrupt clear + pub uarticr: WriteOnly, + /// dma control + pub uartdmacr: Volatile, + reserved2: [u32; 997], + /// UART Periph ID0 + pub uartperiphid0: ReadOnly, + /// UART Periph ID1 + pub uartperiphid1: ReadOnly, + /// UART Periph ID2 + pub uartperiphid2: ReadOnly, + /// UART Periph ID3 + pub uartperiphid3: ReadOnly, + /// UART PCell ID0 + pub uartpcellid0: ReadOnly, + /// UART PCell ID1 + pub uartpcellid1: ReadOnly, + /// UART PCell ID2 + pub uartpcellid2: ReadOnly, + /// UART PCell ID3 + pub uartpcellid3: ReadOnly, +} + +const UARTIMSC_RXIM: u32 = 1 << 4; +const UARTUCR_RXIC: u32 = 1 << 4; + +const UARTLCR_FEN: u32 = 1 << 4; + +const UARTCR_RX_ENABLED: u32 = 1 << 9; +const UARTCR_TX_ENABLED: u32 = 1 << 8; +const UARTCR_UART_ENABLED: u32 = 1 << 0; + +const UARTFR_RX_BUF_EMPTY: u32 = 1 << 4; +const UARTFR_TX_BUF_FULL: u32 = 1 << 5; + +/// A Pl011 Single-Serial-Port Controller. +pub struct Pl011 { + regs: BorrowedMappedPages +} + +impl core::fmt::Debug for Pl011 { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + self.regs.fmt(f) + } +} + +/// Generic methods +impl Pl011 { + /// Initialize a UART driver. + pub fn new(base: PhysicalAddress) -> Result { + let mapped_pages = map_frame_range(base, PAGE_SIZE, MMIO_FLAGS)?; + + let mut this = Self { + regs: mapped_pages.into_borrowed_mut(0).map_err(|(_, e)| e)?, + }; + + this.enable_rx_interrupt(true); + this.set_fifo_mode(false); + + Ok(this) + } + + /// Enable on-receive interrupt + pub fn enable_rx_interrupt(&mut self, enable: bool) { + let mut reg = self.regs.uartimsc.read(); + + match enable { + true => reg |= UARTIMSC_RXIM, + false => reg &= !UARTIMSC_RXIM, + }; + + self.regs.uartimsc.write(reg); + } + + pub fn acknowledge_rx_interrupt(&mut self) { + self.regs.uarticr.write(UARTUCR_RXIC); + } + + /// Set FIFO mode + pub fn set_fifo_mode(&mut self, enable: bool) { + let mut reg = self.regs.uartlcr_h.read(); + + match enable { + true => reg |= UARTLCR_FEN, + false => reg &= !UARTLCR_FEN, + }; + + self.regs.uartlcr_h.write(reg); + } + + /// Outputs a summary of the state of the controller using `log::info!()` + pub fn log_status(&self) { + let reg = self.regs.uartcr.read(); + log::info!("RX enabled: {}", (reg & UARTCR_RX_ENABLED) > 0); + log::info!("TX enabled: {}", (reg & UARTCR_TX_ENABLED) > 0); + log::info!("UART enabled: {}", (reg & UARTCR_UART_ENABLED) > 0); + } + + /// Returns true if the receive-buffer-empty flag is clear. + pub fn has_incoming_data(&self) -> bool { + let uartfr = self.regs.uartfr.read(); + uartfr & UARTFR_RX_BUF_EMPTY == 0 + } + + /// Reads a single byte out the uart + /// + /// Spins until a byte is available in the fifo. + pub fn read_byte(&self) -> u8 { + while !self.has_incoming_data() {} + self.regs.uartdr.read() as u8 + } + + /// Reads bytes into a slice until there is none available. + pub fn read_bytes(&self, bytes: &mut [u8]) -> usize { + let mut read = 0; + + while read < bytes.len() && self.has_incoming_data() { + bytes[read] = self.read_byte(); + read += 1; + } + + read + } + + /// Returns true if the transmit-buffer-full flag is clear. + pub fn is_writeable(&self) -> bool { + let uartfr = self.regs.uartfr.read(); + uartfr & UARTFR_TX_BUF_FULL == 0 + } + + /// Writes a single byte out the uart. + /// + /// Spins until space is available in the fifo. + pub fn write_byte(&mut self, data: u8) { + while !self.is_writeable() {} + self.regs.uartdr.write(data as u32); + } + + /// Writes a byte slice out the uart. + /// + /// Spins until space is available in the fifo. + pub fn write_bytes(&mut self, bytes: &[u8]) { + for b in bytes { + self.write_byte(*b); + } + } +} + +impl fmt::Write for Pl011 { + fn write_str(&mut self, s: &str) -> fmt::Result { + self.write_bytes(s.as_bytes()); + Ok(()) + } +} From 810e12fcdda45acc7015bfbc0eca5fd8ce09ad19 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Wed, 4 Oct 2023 06:20:57 +1100 Subject: [PATCH 06/25] Move scheduling functionality into `task`; add `Scheduler` policy trait (#1035) * Add the `Scheduler` trait, which represents a scheduler policy. * By default, there is one instance of a `Scheduler` policy per CPU. * Support dynamically registering and switching between different scheduler policies on a per-CPU basis. * Because this is now defined in the `task` crate instead of in the `scheduler` crate, the `task` crate can now access all functionality provided by a scheduler policy, which allows for custom actions like setting priority or removing/adding blocked/unblocked tasks to/from a runqueue. * Combine `runqueue_*` crates with their respective and `scheduler_*` crates, which is an improved design because external crates should not be able to view or modify a scheduler policy's internal runqueue contents. * This design makes sense, and also prevents issues like #1000. * Modify most applications that access runqueues via the old `runqueue_*` crate APIs to go through the new `Scheduler` API instead. ---------- Signed-off-by: Klimenty Tsoutsman Co-authored-by: Kevin Boos --- Cargo.lock | 88 +------ applications/bm/Cargo.toml | 3 - applications/bm/src/lib.rs | 1 - applications/heap_eval/Cargo.toml | 3 - applications/heap_eval/src/lib.rs | 1 - applications/kill/Cargo.toml | 3 - applications/kill/src/lib.rs | 1 - applications/ps/src/lib.rs | 2 +- applications/rq/Cargo.toml | 8 +- applications/rq/src/lib.rs | 79 +++--- applications/rq_access_eval/Cargo.toml | 14 - applications/rq_access_eval/src/lib.rs | 79 ------ applications/rq_eval/Cargo.toml | 3 - applications/rq_eval/src/lib.rs | 5 +- applications/shell/Cargo.toml | 3 - applications/shell/src/lib.rs | 5 +- applications/test_scheduler/src/lib.rs | 18 +- kernel/exceptions_full/Cargo.toml | 3 - kernel/ixgbe/Cargo.toml | 3 - kernel/ixgbe/src/lib.rs | 1 - kernel/libtest/Cargo.toml | 3 - kernel/libtest/src/lib.rs | 6 +- kernel/panic_wrapper/Cargo.toml | 1 - kernel/runqueue/Cargo.toml | 45 ---- kernel/runqueue/src/lib.rs | 59 ----- kernel/runqueue_epoch/Cargo.toml | 28 -- kernel/runqueue_epoch/src/lib.rs | 334 ------------------------ kernel/runqueue_priority/Cargo.toml | 16 -- kernel/runqueue_priority/src/lib.rs | 321 ----------------------- kernel/runqueue_round_robin/Cargo.toml | 27 -- kernel/runqueue_round_robin/src/lib.rs | 247 ------------------ kernel/scheduler/Cargo.toml | 4 - kernel/scheduler/src/lib.rs | 48 +--- kernel/scheduler_epoch/Cargo.toml | 6 - kernel/scheduler_epoch/src/lib.rs | 324 +++++++++++++---------- kernel/scheduler_priority/Cargo.toml | 1 - kernel/scheduler_priority/src/lib.rs | 175 ++++++++++--- kernel/scheduler_round_robin/Cargo.toml | 14 +- kernel/scheduler_round_robin/src/lib.rs | 90 +++++-- kernel/spawn/Cargo.toml | 6 +- kernel/spawn/src/lib.rs | 38 ++- kernel/task/Cargo.toml | 1 + kernel/task/src/lib.rs | 74 +----- kernel/task/src/scheduler.rs | 300 +++++++++++++++++++++ theseus_features/Cargo.toml | 2 - 45 files changed, 776 insertions(+), 1717 deletions(-) delete mode 100644 applications/rq_access_eval/Cargo.toml delete mode 100644 applications/rq_access_eval/src/lib.rs delete mode 100644 kernel/runqueue/Cargo.toml delete mode 100644 kernel/runqueue/src/lib.rs delete mode 100644 kernel/runqueue_epoch/Cargo.toml delete mode 100644 kernel/runqueue_epoch/src/lib.rs delete mode 100644 kernel/runqueue_priority/Cargo.toml delete mode 100644 kernel/runqueue_priority/src/lib.rs delete mode 100644 kernel/runqueue_round_robin/Cargo.toml delete mode 100644 kernel/runqueue_round_robin/src/lib.rs create mode 100644 kernel/task/src/scheduler.rs diff --git a/Cargo.lock b/Cargo.lock index 71971a438e..2675ed3d05 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -325,7 +325,6 @@ dependencies = [ "path", "pmu_x86", "rendezvous", - "runqueue", "scheduler", "simple_ipc", "spawn", @@ -1102,7 +1101,6 @@ dependencies = [ "log", "memory", "pmu_x86", - "runqueue", "signal_handler", "stack_trace", "task", @@ -1450,7 +1448,6 @@ dependencies = [ "libtest", "log", "qp-trie", - "runqueue", "spawn", ] @@ -1699,7 +1696,6 @@ dependencies = [ "pic", "pit_clock_basic", "rand", - "runqueue", "spin 0.9.4", "sync_irq", "virtual_nic", @@ -1747,7 +1743,6 @@ dependencies = [ "app_io", "debugit", "getopts", - "runqueue", "task", ] @@ -1823,7 +1818,6 @@ dependencies = [ "log", "memory", "pmu_x86", - "runqueue", "spin 0.9.4", "task", ] @@ -2559,7 +2553,6 @@ dependencies = [ "log", "memory", "mod_mgmt", - "runqueue", "stack_trace", "stack_trace_frame_pointers", "task", @@ -3030,24 +3023,11 @@ dependencies = [ [[package]] name = "rq" version = "0.1.0" -dependencies = [ - "apic", - "app_io", - "getopts", - "runqueue", - "task", -] - -[[package]] -name = "rq_access_eval" -version = "0.1.0" dependencies = [ "app_io", "cpu", "getopts", - "irq_safety", - "runqueue", - "time", + "task", ] [[package]] @@ -3060,7 +3040,6 @@ dependencies = [ "hpet", "libtest", "log", - "runqueue", "spawn", "task", ] @@ -3096,55 +3075,6 @@ dependencies = [ "x86_64", ] -[[package]] -name = "runqueue" -version = "0.1.0" -dependencies = [ - "atomic_linked_list", - "cfg-if 1.0.0", - "lazy_static", - "log", - "runqueue_epoch", - "runqueue_priority", - "runqueue_round_robin", - "single_simd_task_optimization", - "sync_preemption", - "task", -] - -[[package]] -name = "runqueue_epoch" -version = "0.1.0" -dependencies = [ - "atomic_linked_list", - "log", - "single_simd_task_optimization", - "sync_preemption", - "task", -] - -[[package]] -name = "runqueue_priority" -version = "0.1.0" -dependencies = [ - "atomic_linked_list", - "log", - "sync_preemption", - "task", - "time", -] - -[[package]] -name = "runqueue_round_robin" -version = "0.1.0" -dependencies = [ - "atomic_linked_list", - "log", - "single_simd_task_optimization", - "sync_preemption", - "task", -] - [[package]] name = "rustc-demangle" version = "0.1.19" @@ -3183,9 +3113,6 @@ dependencies = [ "cpu", "interrupts", "log", - "scheduler_epoch", - "scheduler_priority", - "scheduler_round_robin", "sleep", "task", "x86_64", @@ -3196,8 +3123,6 @@ name = "scheduler_epoch" version = "0.1.0" dependencies = [ "log", - "runqueue", - "runqueue_epoch", "spin 0.9.4", "task", ] @@ -3219,7 +3144,6 @@ name = "scheduler_priority" version = "0.1.0" dependencies = [ "log", - "runqueue_priority", "task", "time", ] @@ -3229,8 +3153,6 @@ name = "scheduler_round_robin" version = "0.1.0" dependencies = [ "log", - "runqueue", - "runqueue_round_robin", "spin 0.9.4", "task", ] @@ -3374,7 +3296,6 @@ dependencies = [ "log", "path", "root", - "runqueue", "scheduler", "spawn", "spin 0.9.4", @@ -3489,6 +3410,7 @@ name = "spawn" version = "0.1.0" dependencies = [ "catch_unwind", + "cfg-if 1.0.0", "context_switch", "cpu", "debugit", @@ -3503,8 +3425,10 @@ dependencies = [ "no_drop", "path", "preemption", - "runqueue", "scheduler", + "scheduler_epoch", + "scheduler_priority", + "scheduler_round_robin", "spin 0.9.4", "stack", "task", @@ -3737,6 +3661,7 @@ dependencies = [ "stack", "static_assertions", "sync_irq", + "sync_preemption", "task_struct", "waker_generic", ] @@ -4051,7 +3976,6 @@ dependencies = [ "raw_mode", "rm", "rq", - "rq_access_eval", "rq_eval", "scheduler_eval", "seconds_counter", diff --git a/applications/bm/Cargo.toml b/applications/bm/Cargo.toml index ddce10d3df..b18fa0d227 100644 --- a/applications/bm/Cargo.toml +++ b/applications/bm/Cargo.toml @@ -33,9 +33,6 @@ path = "../../kernel/spawn" [dependencies.path] path = "../../kernel/path" -[dependencies.runqueue] -path = "../../kernel/runqueue" - [dependencies.heapfile] path = "../../kernel/heapfile" # [dependencies.application_main_fn] diff --git a/applications/bm/src/lib.rs b/applications/bm/src/lib.rs index 67efeae3a1..fab5fd45cf 100644 --- a/applications/bm/src/lib.rs +++ b/applications/bm/src/lib.rs @@ -18,7 +18,6 @@ extern crate apic; extern crate cpu; extern crate spawn; extern crate path; -extern crate runqueue; extern crate heapfile; extern crate scheduler; extern crate libtest; diff --git a/applications/heap_eval/Cargo.toml b/applications/heap_eval/Cargo.toml index 42162f7d42..5fa0114de9 100644 --- a/applications/heap_eval/Cargo.toml +++ b/applications/heap_eval/Cargo.toml @@ -23,9 +23,6 @@ path = "../../kernel/apic" [dependencies.cpu] path = "../../kernel/cpu" -[dependencies.runqueue] -path = "../../kernel/runqueue" - [dependencies.hashbrown] version = "0.11.2" features = ["nightly"] diff --git a/applications/heap_eval/src/lib.rs b/applications/heap_eval/src/lib.rs index 3f7eb9c753..435e08e91a 100644 --- a/applications/heap_eval/src/lib.rs +++ b/applications/heap_eval/src/lib.rs @@ -7,7 +7,6 @@ extern crate hpet; extern crate hashbrown; extern crate qp_trie; extern crate apic; -extern crate runqueue; extern crate libtest; extern crate spawn; extern crate cpu; diff --git a/applications/kill/Cargo.toml b/applications/kill/Cargo.toml index 337337aa8b..b3952482b1 100644 --- a/applications/kill/Cargo.toml +++ b/applications/kill/Cargo.toml @@ -15,8 +15,5 @@ path = "../../kernel/app_io" [dependencies.task] path = "../../kernel/task" -[dependencies.runqueue] -path = "../../kernel/runqueue" - # [dependencies.application_main_fn] # path = "../../compiler_plugins" diff --git a/applications/kill/src/lib.rs b/applications/kill/src/lib.rs index 9b51a27bb6..11cdee2271 100755 --- a/applications/kill/src/lib.rs +++ b/applications/kill/src/lib.rs @@ -4,7 +4,6 @@ extern crate alloc; // #[macro_use] extern crate debugit; extern crate task; -extern crate runqueue; extern crate getopts; use getopts::Options; diff --git a/applications/ps/src/lib.rs b/applications/ps/src/lib.rs index b4bd2f67fb..6bb2cb151b 100644 --- a/applications/ps/src/lib.rs +++ b/applications/ps/src/lib.rs @@ -60,7 +60,7 @@ pub fn main(args: Vec) -> isize { else {" "} ; #[cfg(any(epoch_scheduler, priority_scheduler))] { - let priority = scheduler::get_priority(&task).map(|priority| format!("{}", priority)).unwrap_or_else(|| String::from("-")); + let priority = scheduler::priority(&task).map(|priority| format!("{}", priority)).unwrap_or_else(|| String::from("-")); task_string.push_str( &format!("{0:<5} {1:<10} {2:<4} {3:<4} {4:<5} {5:<10} {6}\n", id, runstate, cpu, pinned, task_type, priority, task.name) diff --git a/applications/rq/Cargo.toml b/applications/rq/Cargo.toml index 961c896b13..948587e4b7 100644 --- a/applications/rq/Cargo.toml +++ b/applications/rq/Cargo.toml @@ -2,6 +2,7 @@ name = "rq" version = "0.1.0" authors = ["Christine Wang "] +edition = "2021" [dependencies] getopts = "0.2.21" @@ -9,14 +10,11 @@ getopts = "0.2.21" [dependencies.app_io] path = "../../kernel/app_io" -[dependencies.apic] -path = "../../kernel/apic" +[dependencies.cpu] +path = "../../kernel/cpu" [dependencies.task] path = "../../kernel/task" -[dependencies.runqueue] -path = "../../kernel/runqueue" - # [dependencies.application_main_fn] # path = "../../compiler_plugins" diff --git a/applications/rq/src/lib.rs b/applications/rq/src/lib.rs index 6027c032df..906bf591ed 100644 --- a/applications/rq/src/lib.rs +++ b/applications/rq/src/lib.rs @@ -1,66 +1,56 @@ #![no_std] -extern crate alloc; -#[macro_use] extern crate app_io; -extern crate apic; -extern crate getopts; -extern crate task; -extern crate runqueue; +extern crate alloc; -use getopts::Options; use alloc::{ fmt::Write, - string::{ - String, - ToString, - }, + string::{String, ToString}, vec::Vec, }; -use apic::get_lapics; + +use app_io::{print, println}; +use getopts::Options; pub fn main(args: Vec) -> isize { let mut opts = Options::new(); opts.optflag("h", "help", "print this help menu"); let matches = match opts.parse(args) { - Ok(m) => { m } - Err(_f) => { println!("{} \n", _f); - return -1; } + Ok(m) => m, + Err(_f) => { + println!("{} \n", _f); + return -1; + } }; if matches.opt_present("h") { - return print_usage(opts) + return print_usage(opts); } + let bootstrap_cpu = cpu::bootstrap_cpu(); - let all_lapics = get_lapics(); - for lapic in all_lapics.iter() { - let lapic = lapic.1; - let apic_id = lapic.read().apic_id(); - let processor = lapic.read().processor_id(); - let is_bootstrap_cpu = lapic.read().is_bootstrap_cpu(); - let core_type = if is_bootstrap_cpu { "Boot CPU" } else { "Secondary CPU" }; + for (cpu, task_list) in task::scheduler::tasks() { + let core_type = if Some(cpu) == bootstrap_cpu { + "Boot CPU" + } else { + "Secondary CPU" + }; - println!("\n{} (apic: {}, proc: {})", core_type, apic_id, processor); - - if let Some(runqueue) = runqueue::get_runqueue(apic_id.value() as u8).map(|rq| rq.read().clone()) { - let mut runqueue_contents = String::new(); - for task in runqueue.iter() { - writeln!(runqueue_contents, " {} ({}) {}", - task.name, - task.id, - if task.is_running() { "*" } else { "" } - ) - .expect("Failed to write to runqueue_contents"); - } - print!("{}", runqueue_contents); - } - - else { - println!("Can't retrieve runqueue for core {}", apic_id); - return -1; + println!("\n{} (CPU: {})", core_type, cpu); + + let mut runqueue_contents = String::new(); + for task in task_list.iter() { + writeln!( + runqueue_contents, + " {} ({}) {}", + task.name, + task.id, + if task.is_running() { "*" } else { "" } + ) + .expect("Failed to write to runqueue_contents"); } + print!("{}", runqueue_contents); } - + println!(""); 0 } @@ -68,7 +58,10 @@ pub fn main(args: Vec) -> isize { fn print_usage(opts: Options) -> isize { let mut brief = "Usage: rq \n \n".to_string(); - brief.push_str("Prints each CPU's ID, the tasks on its runqueue ('*' identifies the currently running task), and whether it is the boot CPU or not"); + brief.push_str( + "Prints each CPU's ID, the tasks on its runqueue ('*' identifies the currently running \ + task), and whether it is the boot CPU or not", + ); println!("{} \n", opts.usage(&brief)); diff --git a/applications/rq_access_eval/Cargo.toml b/applications/rq_access_eval/Cargo.toml deleted file mode 100644 index 16582f73bb..0000000000 --- a/applications/rq_access_eval/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "rq_access_eval" -version = "0.1.0" -authors = ["Klim Tsoutsman "] -description = "Run queue access benchmark" -edition = "2021" - -[dependencies] -app_io = { path = "../../kernel/app_io" } -cpu = { path = "../../kernel/cpu" } -getopts = "0.2" -irq_safety = { git = "https://github.com/theseus-os/irq_safety" } -runqueue = { path = "../../kernel/runqueue" } -time = { path = "../../kernel/time" } diff --git a/applications/rq_access_eval/src/lib.rs b/applications/rq_access_eval/src/lib.rs deleted file mode 100644 index d2fb2691fe..0000000000 --- a/applications/rq_access_eval/src/lib.rs +++ /dev/null @@ -1,79 +0,0 @@ -#![no_std] - -extern crate alloc; - -use alloc::{string::String, vec::Vec}; -use app_io::println; -use time::{now, Duration, Monotonic}; - -pub fn main(args: Vec) -> isize { - let guard = irq_safety::hold_interrupts(); - let mut options = getopts::Options::new(); - options - .optflag("h", "help", "Display this message") - .optflag("l", "least-busy", "Get the least busy core") - .optopt("c", "core", "Get 's runqueue", "") - .optopt("n", "num", "Perform iterations", ""); - - let matches = match options.parse(args) { - Ok(matches) => matches, - Err(e) => { - println!("{}", e); - print_usage(options); - return 1; - } - }; - - let least_busy = matches.opt_present("l"); - let core = matches.opt_get::("c").expect("failed to parse core"); - - if least_busy && core.is_some() { - panic!("both the least-busy and core flags can't be specified"); - } - - let num = matches - .opt_get_default("n", 1_000_000) - .expect("failed to parse num"); - - let duration = if least_busy { - run( - |_| { - runqueue::get_least_busy_core(); - }, - num, - ) - } else if let Some(core) = core { - run( - |_| { - runqueue::get_runqueue(core); - }, - num, - ) - } else { - let cpu_count = cpu::cpu_count(); - run( - |count| { - runqueue::get_runqueue((count % cpu_count) as u8); - }, - num, - ) - }; - drop(guard); - - println!("time: {:#?}", duration); - - 0 -} - -fn run(f: impl Fn(u32), num: u32) -> Duration { - let start = now::(); - for i in 0..num { - f(i); - } - now::().duration_since(start) -} - -fn print_usage(options: getopts::Options) { - let brief = alloc::format!("Usage: {} [OPTIONS]", env!("CARGO_CRATE_NAME")); - println!("{}", options.usage(&brief)); -} diff --git a/applications/rq_eval/Cargo.toml b/applications/rq_eval/Cargo.toml index df0e8284a0..2a2324239b 100644 --- a/applications/rq_eval/Cargo.toml +++ b/applications/rq_eval/Cargo.toml @@ -23,9 +23,6 @@ path = "../../kernel/task" [dependencies.cpu] path = "../../kernel/cpu" -[dependencies.runqueue] -path = "../../kernel/runqueue" - [dependencies.app_io] path = "../../kernel/app_io" diff --git a/applications/rq_eval/src/lib.rs b/applications/rq_eval/src/lib.rs index 0b5080e19a..0e960f2610 100644 --- a/applications/rq_eval/src/lib.rs +++ b/applications/rq_eval/src/lib.rs @@ -19,7 +19,6 @@ extern crate alloc; extern crate task; extern crate cpu; extern crate spawn; -extern crate runqueue; extern crate getopts; extern crate hpet; extern crate libtest; @@ -158,8 +157,8 @@ fn run_single(iterations: usize) -> Result<(), &'static str> { let start = hpet.get_counter(); for _ in 0..iterations { - runqueue::add_task_to_specific_runqueue(cpu::current_cpu().into_u8(), taskref.clone())?; - runqueue::remove_task_from_all(&taskref)?; + task::scheduler::add_task_to(cpu::current_cpu(), taskref.clone()); + assert!(task::scheduler::remove_task(&taskref)); } let end = hpet.get_counter(); diff --git a/applications/shell/Cargo.toml b/applications/shell/Cargo.toml index 8304f5ddc8..575a16a3bc 100644 --- a/applications/shell/Cargo.toml +++ b/applications/shell/Cargo.toml @@ -28,9 +28,6 @@ path = "../../kernel/spawn" [dependencies.task] path = "../../kernel/task" -[dependencies.runqueue] -path = "../../kernel/runqueue" - [dependencies.window_manager] path = "../../kernel/window_manager" diff --git a/applications/shell/src/lib.rs b/applications/shell/src/lib.rs index bbe489e012..748f7ec303 100644 --- a/applications/shell/src/lib.rs +++ b/applications/shell/src/lib.rs @@ -10,7 +10,6 @@ extern crate spin; extern crate dfqueue; extern crate spawn; extern crate task; -extern crate runqueue; extern crate event_types; extern crate window_manager; extern crate path; @@ -409,9 +408,7 @@ impl Shell { if task_ref.has_exited() { continue; } match task_ref.kill(KillReason::Requested) { Ok(_) => { - if let Err(e) = runqueue::remove_task_from_all(task_ref) { - error!("Killed task but could not remove it from runqueue: {}", e); - } + task::scheduler::remove_task(task_ref); } Err(e) => error!("Could not kill task, error: {}", e), } diff --git a/applications/test_scheduler/src/lib.rs b/applications/test_scheduler/src/lib.rs index 8bc0731d06..238034f249 100644 --- a/applications/test_scheduler/src/lib.rs +++ b/applications/test_scheduler/src/lib.rs @@ -22,8 +22,8 @@ pub fn main(_args: Vec) -> isize { .pin_on_cpu(cpu_1) .spawn().expect("failed to initiate task"); - if let Err(e) = scheduler::set_priority(&taskref1, 30) { - error!("scheduler_eval(): Could not set priority to taskref1: {}", e); + if !scheduler::set_priority(&taskref1, 30) { + error!("scheduler_eval(): Could not set priority to taskref1"); } debug!("Spawned Task 1"); @@ -33,8 +33,8 @@ pub fn main(_args: Vec) -> isize { .pin_on_cpu(cpu_1) .spawn().expect("failed to initiate task"); - if let Err(e) = scheduler::set_priority(&taskref2, 20) { - error!("scheduler_eval(): Could not set priority to taskref2: {}", e); + if !scheduler::set_priority(&taskref2, 20) { + error!("scheduler_eval(): Could not set priority to taskref2"); } debug!("Spawned Task 2"); @@ -44,17 +44,17 @@ pub fn main(_args: Vec) -> isize { .pin_on_cpu(cpu_1) .spawn().expect("failed to initiate task"); - if let Err(e) = scheduler::set_priority(&taskref3, 10) { - error!("scheduler_eval(): Could not set priority to taskref3: {}", e); + if !scheduler::set_priority(&taskref3, 10) { + error!("scheduler_eval(): Could not set priority to taskref3"); } debug!("Spawned Task 3"); debug!("Spawned all tasks"); - let _priority1 = scheduler::get_priority(&taskref1); - let _priority2 = scheduler::get_priority(&taskref2); - let _priority3 = scheduler::get_priority(&taskref3); + let _priority1 = scheduler::priority(&taskref1); + let _priority2 = scheduler::priority(&taskref2); + let _priority3 = scheduler::priority(&taskref3); #[cfg(epoch_scheduler)] { diff --git a/kernel/exceptions_full/Cargo.toml b/kernel/exceptions_full/Cargo.toml index dfa5114f4a..ff7864a5d9 100644 --- a/kernel/exceptions_full/Cargo.toml +++ b/kernel/exceptions_full/Cargo.toml @@ -25,9 +25,6 @@ path = "../tlb_shootdown" [dependencies.task] path = "../task" -[dependencies.runqueue] -path = "../runqueue" - [dependencies.fault_log] path = "../fault_log" diff --git a/kernel/ixgbe/Cargo.toml b/kernel/ixgbe/Cargo.toml index 27f8e670f7..ebd42c623c 100644 --- a/kernel/ixgbe/Cargo.toml +++ b/kernel/ixgbe/Cargo.toml @@ -55,9 +55,6 @@ features = [ "alloc", "small_rng" ] [dependencies.hpet] path = "../acpi/hpet" -[dependencies.runqueue] -path = "../runqueue" - [dependencies.nic_initialization] path = "../nic_initialization" diff --git a/kernel/ixgbe/src/lib.rs b/kernel/ixgbe/src/lib.rs index ee6f3d1656..1da6533a00 100644 --- a/kernel/ixgbe/src/lib.rs +++ b/kernel/ixgbe/src/lib.rs @@ -27,7 +27,6 @@ extern crate volatile; extern crate mpmc; extern crate rand; extern crate hpet; -extern crate runqueue; extern crate net; extern crate nic_initialization; extern crate intel_ethernet; diff --git a/kernel/libtest/Cargo.toml b/kernel/libtest/Cargo.toml index 58f10fc4fe..83c77941f4 100644 --- a/kernel/libtest/Cargo.toml +++ b/kernel/libtest/Cargo.toml @@ -24,9 +24,6 @@ path = "../memory" [dependencies.task] path = "../task" -[dependencies.runqueue] -path = "../runqueue" - [dependencies.hpet] path = "../acpi/hpet" diff --git a/kernel/libtest/src/lib.rs b/kernel/libtest/src/lib.rs index 6106e2b17b..43e4828991 100644 --- a/kernel/libtest/src/lib.rs +++ b/kernel/libtest/src/lib.rs @@ -6,7 +6,6 @@ extern crate memory; extern crate apic; extern crate cpu; extern crate hpet; -extern crate runqueue; extern crate pmu_x86; extern crate libm; #[macro_use] extern crate log; @@ -42,10 +41,7 @@ macro_rules! CPU_ID { /// Helper function return the tasks in a given `cpu`'s runqueue pub fn nr_tasks_in_rq(cpu: CpuId) -> Option { - match runqueue::get_runqueue(cpu.into_u8()).map(|rq| rq.read()) { - Some(rq) => { Some(rq.len()) } - _ => { None } - } + return task::scheduler::busyness(cpu); } diff --git a/kernel/panic_wrapper/Cargo.toml b/kernel/panic_wrapper/Cargo.toml index 55150c486b..dfbd4f4117 100644 --- a/kernel/panic_wrapper/Cargo.toml +++ b/kernel/panic_wrapper/Cargo.toml @@ -13,7 +13,6 @@ log = "0.4.8" fault_log = { path = "../fault_log" } memory = { path = "../memory" } mod_mgmt = { path = "../mod_mgmt" } -runqueue = { path = "../runqueue" } task = { path = "../task" } [target.'cfg(target_arch = "x86_64")'.dependencies] diff --git a/kernel/runqueue/Cargo.toml b/kernel/runqueue/Cargo.toml deleted file mode 100644 index 2bbca36f62..0000000000 --- a/kernel/runqueue/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -[package] -authors = ["Kevin Boos "] -name = "runqueue" -description = "Functions and types for handling runqueues, i.e., lists of tasks for scheduling purposes" -version = "0.1.0" - -[dependencies] - -[dependencies.cfg-if] -version = "1.0.0" - -[dependencies.log] -version = "0.4.8" - -[dependencies.lazy_static] -features = ["spin_no_std"] -version = "1.4.0" - -[dependencies.sync_preemption] -path = "../sync_preemption" - -[dependencies.atomic_linked_list] -path = "../../libs/atomic_linked_list" - -[dependencies.task] -path = "../task" - -[dependencies.runqueue_round_robin] -path = "../runqueue_round_robin" - -[dependencies.runqueue_epoch] -path = "../runqueue_epoch" - -[dependencies.runqueue_priority] -path = "../runqueue_priority" - -## This should be dependent upon 'cfg(single_simd_task_optimization)', -## but it cannot be because of https://github.com/rust-lang/cargo/issues/5499. -## Therefore, it has to be unconditionally included. -[dependencies.single_simd_task_optimization] -path = "../single_simd_task_optimization" - - -[lib] -crate-type = ["rlib"] diff --git a/kernel/runqueue/src/lib.rs b/kernel/runqueue/src/lib.rs deleted file mode 100644 index 9ef1da87b8..0000000000 --- a/kernel/runqueue/src/lib.rs +++ /dev/null @@ -1,59 +0,0 @@ -//! This crate contains the API of the `RunQueue` structure, Runqueue Structure should contain -//! list of tasks with additional scheduling information depending on the scheduler. -//! All crates except the scheduler should refer to this crate to access functions on `RunQueue`. - -#![no_std] - -extern crate alloc; -extern crate sync_preemption; -extern crate atomic_linked_list; -extern crate task; -#[macro_use] extern crate cfg_if; -cfg_if! { - if #[cfg(epoch_scheduler)] { - extern crate runqueue_epoch as runqueue; - } else if #[cfg(priority_scheduler)] { - extern crate runqueue_priority as runqueue; - } else { - extern crate runqueue_round_robin as runqueue; - } -} - -#[cfg(single_simd_task_optimization)] -extern crate single_simd_task_optimization; - -use sync_preemption::PreemptionSafeRwLock; -use task::TaskRef; -use runqueue::RunQueue; - - -/// Creates a new `RunQueue` for the given core, which is an `apic_id`. -pub fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> { - RunQueue::init(which_core, idle_task) -} - -/// Returns the `RunQueue` of the given core, which is an `apic_id`. -pub fn get_runqueue(which_core: u8) -> Option<&'static PreemptionSafeRwLock> { - RunQueue::get_runqueue(which_core) -} - -/// Returns the "least busy" core -pub fn get_least_busy_core() -> Option { - RunQueue::get_least_busy_core() -} - -/// Chooses the "least busy" core's runqueue -/// and adds the given `Task` reference to that core's runqueue. -pub fn add_task_to_any_runqueue(task: TaskRef) -> Result<(), &'static str> { - RunQueue::add_task_to_any_runqueue(task) -} - -/// Adds the given `Task` reference to given core's runqueue. -pub fn add_task_to_specific_runqueue(which_core: u8, task: TaskRef) -> Result<(), &'static str> { - RunQueue::add_task_to_specific_runqueue(which_core, task) -} - -/// Removes a `TaskRef` from all `RunQueue`s that exist on the entire system. -pub fn remove_task_from_all(task: &TaskRef) -> Result<(), &'static str> { - RunQueue::remove_task_from_all(task) -} diff --git a/kernel/runqueue_epoch/Cargo.toml b/kernel/runqueue_epoch/Cargo.toml deleted file mode 100644 index 91157107bf..0000000000 --- a/kernel/runqueue_epoch/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -authors = ["Namitha Liyanage "] -name = "runqueue_epoch" -description = "Run queue for the epoch scheduler" -version = "0.1.0" -edition = "2021" - -[dependencies] -log = "0.4.8" - -[dependencies.sync_preemption] -path = "../sync_preemption" - -[dependencies.atomic_linked_list] -path = "../../libs/atomic_linked_list" - -[dependencies.task] -path = "../task" - -## This should be dependent upon 'cfg(single_simd_task_optimization)', -## but it cannot be because of https://github.com/rust-lang/cargo/issues/5499. -## Therefore, it has to be unconditionally included. -[dependencies.single_simd_task_optimization] -path = "../single_simd_task_optimization" - - -[lib] -crate-type = ["rlib"] diff --git a/kernel/runqueue_epoch/src/lib.rs b/kernel/runqueue_epoch/src/lib.rs deleted file mode 100644 index 1868c027e1..0000000000 --- a/kernel/runqueue_epoch/src/lib.rs +++ /dev/null @@ -1,334 +0,0 @@ -//! This crate contains the `RunQueue` structure, for the epoch scheduler. -//! `RunQueue` structure is essentially a list of Tasks -//! that it used for scheduling purposes. - -#![no_std] -#![feature(let_chains)] - -extern crate alloc; - -use alloc::collections::VecDeque; -use atomic_linked_list::atomic_map::AtomicMap; -use core::ops::{Deref, DerefMut}; -use log::{debug, error, trace}; -use sync_preemption::PreemptionSafeRwLock; -use task::TaskRef; - -pub const MAX_PRIORITY: u8 = 40; -pub const DEFAULT_PRIORITY: u8 = 20; -pub const INITIAL_TOKENS: usize = 10; - -#[derive(Debug, Clone)] -pub struct EpochTaskRef { - task: TaskRef, - pub priority: u8, - /// Remaining tokens in this epoch. A task will be scheduled in an epoch - /// until tokens run out - pub tokens_remaining: usize, -} - -impl Deref for EpochTaskRef { - type Target = TaskRef; - - fn deref(&self) -> &TaskRef { - &self.task - } -} - -impl DerefMut for EpochTaskRef { - fn deref_mut(&mut self) -> &mut TaskRef { - &mut self.task - } -} - -impl EpochTaskRef { - /// Creates a new `EpochTaskRef` that wraps the given `TaskRef`. - /// We just give an initial number of tokens to run the task till - /// next scheduling epoch - pub fn new(task: TaskRef) -> EpochTaskRef { - EpochTaskRef { - task, - priority: DEFAULT_PRIORITY, - tokens_remaining: INITIAL_TOKENS, - } - } -} - -/// There is one runqueue per core, each core only accesses its own private -/// runqueue and allows the scheduler to select a task from that runqueue to -/// schedule in. -static RUNQUEUES: AtomicMap> = AtomicMap::new(); - -/// A list of references to `Task`s (`EpochTaskRef`s) -/// that is used to store the `Task`s (and associated scheduler related data) -/// that are runnable on a given core. -/// A queue is used for the token based epoch scheduler. -/// `Runqueue` implements `Deref` and `DerefMut` traits, which dereferences to -/// `VecDeque`. -#[derive(Debug)] -pub struct RunQueue { - core: u8, - queue: VecDeque, - idle_task: TaskRef, -} - -impl Deref for RunQueue { - type Target = VecDeque; - - fn deref(&self) -> &Self::Target { - &self.queue - } -} - -impl DerefMut for RunQueue { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.queue - } -} - -impl RunQueue { - /// Moves the `TaskRef` at the given index in this `RunQueue` to the end - /// (back) of this `RunQueue`, and returns a cloned reference to that - /// `TaskRef`. The number of tokens is reduced by one and number of context - /// switches is increased by one. This function is used when the task is - /// selected by the scheduler - pub fn update_and_move_to_end(&mut self, index: usize, tokens: usize) -> Option { - if let Some(mut priority_task_ref) = self.remove(index) { - priority_task_ref.tokens_remaining = tokens; - let task_ref = priority_task_ref.task.clone(); - self.push_back(priority_task_ref); - Some(task_ref) - } else { - None - } - } - - /// Creates a new `RunQueue` for the given core, which is an `apic_id` - pub fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> { - #[cfg(not(loscd_eval))] - trace!("Created runqueue (priority) for core {}", which_core); - let new_rq = PreemptionSafeRwLock::new(RunQueue { - core: which_core, - queue: VecDeque::new(), - idle_task, - }); - - if RUNQUEUES.insert(which_core, new_rq).is_some() { - error!( - "BUG: RunQueue::init(): runqueue already exists for core {}!", - which_core - ); - Err("runqueue already exists for this core") - } else { - // there shouldn't already be a RunQueue for this core - Ok(()) - } - } - - /// Returns `RunQueue` for the given core, which is an `apic_id`. - pub fn get_runqueue(which_core: u8) -> Option<&'static PreemptionSafeRwLock> { - RUNQUEUES.get(&which_core) - } - - /// Returns the "least busy" core, which is currently very simple, based on - /// runqueue size. - pub fn get_least_busy_core() -> Option { - Self::get_least_busy_runqueue().map(|rq| rq.read().core) - } - - /// Returns the `RunQueue` for the "least busy" core. - /// See [`get_least_busy_core()`](#method.get_least_busy_core) - fn get_least_busy_runqueue() -> Option<&'static PreemptionSafeRwLock> { - let mut min_rq: Option<(&'static PreemptionSafeRwLock, usize)> = None; - - for (_, rq) in RUNQUEUES.iter() { - let rq_size = rq.read().queue.len(); - - if let Some(min) = min_rq { - if rq_size < min.1 { - min_rq = Some((rq, rq_size)); - } - } else { - min_rq = Some((rq, rq_size)); - } - } - - min_rq.map(|m| m.0) - } - - /// Chooses the "least busy" core's runqueue (based on simple - /// runqueue-size-based load balancing) and adds the given `Task` - /// reference to that core's runqueue. - pub fn add_task_to_any_runqueue(task: TaskRef) -> Result<(), &'static str> { - let rq = RunQueue::get_least_busy_runqueue() - .or_else(|| RUNQUEUES.iter().next().map(|r| r.1)) - .ok_or("couldn't find any runqueues to add the task to!")?; - - rq.write().add_task(task) - } - - /// Convenience method that adds the given `Task` reference to given core's - /// runqueue. - pub fn add_task_to_specific_runqueue( - which_core: u8, - task: TaskRef, - ) -> Result<(), &'static str> { - RunQueue::get_runqueue(which_core) - .ok_or("Couldn't get RunQueue for the given core")? - .write() - .add_task(task) - } - - /// Adds a `TaskRef` to this RunQueue. - fn add_task(&mut self, task: TaskRef) -> Result<(), &'static str> { - #[cfg(not(loscd_eval))] - debug!("Adding task to runqueue_priority {}, {:?}", self.core, task); - let priority_task_ref = EpochTaskRef::new(task); - self.push_back(priority_task_ref); - - #[cfg(single_simd_task_optimization)] - { - warn!("USING SINGLE_SIMD_TASK_OPTIMIZATION VERSION OF RUNQUEUE::ADD_TASK"); - // notify simd_personality crate about runqueue change, but only for SIMD tasks - if task.simd { - single_simd_task_optimization::simd_tasks_added_to_core(self.iter(), self.core); - } - } - - Ok(()) - } - - /// Removes a `TaskRef` from this RunQueue. - pub fn remove_task(&mut self, task: &TaskRef) -> Result<(), &'static str> { - debug!( - "Removing task from runqueue_priority {}, {:?}", - self.core, task - ); - self.retain(|x| &x.task != task); - - #[cfg(single_simd_task_optimization)] - { - warn!("USING SINGLE_SIMD_TASK_OPTIMIZATION VERSION OF RUNQUEUE::REMOVE_TASK"); - // notify simd_personality crate about runqueue change, but only for SIMD tasks - if task.simd { - single_simd_task_optimization::simd_tasks_removed_from_core(self.iter(), self.core); - } - } - - Ok(()) - } - - /// Removes a `TaskRef` from all `RunQueue`s that exist on the entire - /// system. - /// - /// This is a brute force approach that iterates over all runqueues. - pub fn remove_task_from_all(task: &TaskRef) -> Result<(), &'static str> { - for (_core, rq) in RUNQUEUES.iter() { - rq.write().remove_task(task)?; - } - Ok(()) - } - - pub fn idle_task(&self) -> &TaskRef { - &self.idle_task - } - - fn get_priority(&self, task: &TaskRef) -> Option { - for epoch_task in self.iter() { - if &epoch_task.task == task { - return Some(epoch_task.priority); - } - } - None - } - - /// Sets the priority of the given task. - /// - /// Returns whether the task was found in the run queue. - fn set_priority(&mut self, task: &TaskRef, priority: u8) -> bool { - for epoch_task in self.iter_mut() { - if &epoch_task.task == task { - epoch_task.priority = priority; - return true; - } - } - false - } -} - -/// Returns the priority of the given task if it exists, otherwise none. -pub fn get_priority(task: &TaskRef) -> Option { - for (_, run_queue) in RUNQUEUES.iter() { - if let Some(priority) = run_queue.read().get_priority(task) { - return Some(priority); - } - } - - None -} - -pub fn set_priority(task: &TaskRef, priority: u8) { - for (_, run_queue) in RUNQUEUES.iter() { - if run_queue.write().set_priority(task, priority) { - break; - } - } -} - -/// Lowers the task's priority to its previous value when dropped. -pub struct PriorityInheritanceGuard<'a> { - inner: Option<(&'a TaskRef, u8)>, -} - -impl<'a> Drop for PriorityInheritanceGuard<'a> { - fn drop(&mut self) { - if let Some((task, priority)) = self.inner { - set_priority(task, priority) - } - } -} - -/// Modifies the given task's priority to be the maximum of its priority and the -/// current task's priority. -/// -/// Returns a guard which reverts the change when dropped. -pub fn inherit_priority(task: &TaskRef) -> PriorityInheritanceGuard<'_> { - let current_task = task::get_my_current_task().unwrap(); - - let mut current_priority = None; - let mut other_priority = None; - - 'outer: for (core, run_queue) in RUNQUEUES.iter() { - for epoch_task in run_queue.read().iter() { - if epoch_task.task == current_task { - current_priority = Some(epoch_task.priority); - if other_priority.is_some() { - break 'outer; - } - } else if &epoch_task.task == task { - other_priority = Some((core, epoch_task.priority)); - if current_priority.is_some() { - break 'outer; - } - } - } - } - - if let (Some(current_priority), Some((core, other_priority))) = - (current_priority, other_priority) && current_priority > other_priority - { - // NOTE: This assumes no task migration. - debug_assert!(RUNQUEUES.get(core).unwrap().write().set_priority(task, current_priority)); - } - - PriorityInheritanceGuard { - inner: if let (Some(current_priority), Some((_, other_priority))) = - (current_priority, other_priority) - && current_priority > other_priority - { - Some((task, other_priority)) - } else { - None - }, - } -} diff --git a/kernel/runqueue_priority/Cargo.toml b/kernel/runqueue_priority/Cargo.toml deleted file mode 100644 index 9dfb38397f..0000000000 --- a/kernel/runqueue_priority/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -authors = ["Jacob Earle "] -name = "runqueue_priority" -description = "Functions and types for handling runqueues in a priority scheduling context" -version = "0.1.0" -edition = "2021" - -[dependencies] -atomic_linked_list = { path = "../../libs/atomic_linked_list" } -log = "0.4.8" -sync_preemption = { path = "../sync_preemption" } -task = { path = "../task" } -time = { path = "../time" } - -[lib] -crate-type = ["rlib"] diff --git a/kernel/runqueue_priority/src/lib.rs b/kernel/runqueue_priority/src/lib.rs deleted file mode 100644 index 12d608f7d3..0000000000 --- a/kernel/runqueue_priority/src/lib.rs +++ /dev/null @@ -1,321 +0,0 @@ -//! Runqueue structures for a priority scheduler. -//! -//! The `RunQueue` structure is essentially a list of `Task`s used for -//! scheduling purposes. Each `PriorityTaskRef` element in the runqueue contains -//! a `TaskRef` representing an underlying task and as well as a `period` value. - -#![no_std] -#![feature(let_chains)] - -extern crate alloc; - -use alloc::collections::BinaryHeap; -use atomic_linked_list::atomic_map::AtomicMap; -use core::{ - cmp::Ordering, - ops::{Deref, DerefMut}, -}; -use log::{error, trace}; -use sync_preemption::PreemptionSafeRwLock; -use task::TaskRef; -use time::Instant; - -const DEFAULT_PRIORITY: u8 = 0; - -/// A reference to a task with its period for priority scheduling. -/// -/// `PriorityTaskRef` implements `Deref` and `DerefMut` traits, which -/// dereferences to `TaskRef`. -#[derive(Debug, Clone)] -pub struct PriorityTaskRef { - pub task: TaskRef, - pub last_ran: Instant, - priority: u8, -} - -impl PartialEq for PriorityTaskRef { - fn eq(&self, other: &Self) -> bool { - self.priority.eq(&other.priority) && self.last_ran.eq(&other.last_ran) - } -} - -// The equivalence relation is reflexive. -impl Eq for PriorityTaskRef {} - -impl PartialOrd for PriorityTaskRef { - fn partial_cmp(&self, other: &Self) -> Option { - match self.priority.cmp(&other.priority) { - // Tasks that were ran longer ago should be prioritised. - Ordering::Equal => Some(self.last_ran.cmp(&other.last_ran).reverse()), - ordering => Some(ordering), - } - } -} - -impl Ord for PriorityTaskRef { - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - self.priority.cmp(&other.priority) - } -} - -impl Deref for PriorityTaskRef { - type Target = TaskRef; - - fn deref(&self) -> &TaskRef { - &self.task - } -} - -impl DerefMut for PriorityTaskRef { - fn deref_mut(&mut self) -> &mut TaskRef { - &mut self.task - } -} - -/// There is one runqueue per core, each core only accesses its own private -/// runqueue and allows the scheduler to select a task from that runqueue to -/// schedule in -static RUNQUEUES: AtomicMap> = AtomicMap::new(); - -/// A list of `Task`s and their associated priority scheduler data that may be -/// run on a given CPU core. -/// -/// In rate monotonic scheduling, tasks are sorted in order of increasing -/// periods. Thus, the `period` value acts as a form of task "priority", -/// with higher priority (shorter period) tasks coming first. -#[derive(Debug)] -pub struct RunQueue { - core: u8, - queue: BinaryHeap, - idle_task: TaskRef, -} - -impl Deref for RunQueue { - type Target = BinaryHeap; - - fn deref(&self) -> &BinaryHeap { - &self.queue - } -} - -impl DerefMut for RunQueue { - fn deref_mut(&mut self) -> &mut BinaryHeap { - &mut self.queue - } -} - -impl RunQueue { - /// Creates a new `RunQueue` for the given core, which is an `apic_id` - pub fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> { - #[cfg(not(loscd_eval))] - trace!("Created runqueue (priority) for core {}", which_core); - - let new_rq = PreemptionSafeRwLock::new(RunQueue { - core: which_core, - queue: BinaryHeap::new(), - idle_task, - }); - - if RUNQUEUES.insert(which_core, new_rq).is_some() { - error!("BUG: RunQueue::init(): runqueue already exists for core {which_core}!"); - Err("runqueue already exists for this core") - } else { - // there shouldn't already be a RunQueue for this core - Ok(()) - } - } - - /// Returns `RunQueue` for the given core, which is an `apic_id`. - pub fn get_runqueue(which_core: u8) -> Option<&'static PreemptionSafeRwLock> { - RUNQUEUES.get(&which_core) - } - - /// Returns the "least busy" core, which is currently very simple, based on - /// runqueue size. - pub fn get_least_busy_core() -> Option { - Self::get_least_busy_runqueue().map(|rq| rq.read().core) - } - - /// Returns the `RunQueue` for the "least busy" core. - /// See [`get_least_busy_core()`](#method.get_least_busy_core) - fn get_least_busy_runqueue() -> Option<&'static PreemptionSafeRwLock> { - let mut min_rq: Option<(&'static PreemptionSafeRwLock, usize)> = None; - - for (_, rq) in RUNQUEUES.iter() { - let rq_size = rq.read().queue.len(); - - if let Some(min) = min_rq { - if rq_size < min.1 { - min_rq = Some((rq, rq_size)); - } - } else { - min_rq = Some((rq, rq_size)); - } - } - - min_rq.map(|m| m.0) - } - - /// Chooses the "least busy" core's runqueue (based on simple - /// runqueue-size-based load balancing) and adds the given `Task` - /// reference to that core's runqueue. - pub fn add_task_to_any_runqueue(task: TaskRef) -> Result<(), &'static str> { - let rq = RunQueue::get_least_busy_runqueue() - .or_else(|| RUNQUEUES.iter().next().map(|r| r.1)) - .ok_or("couldn't find any runqueues to add the task to!")?; - - rq.write().add_task(task, DEFAULT_PRIORITY) - } - - /// Convenience method that adds the given `Task` reference to given core's - /// runqueue. - pub fn add_task_to_specific_runqueue( - which_core: u8, - task: TaskRef, - ) -> Result<(), &'static str> { - RunQueue::get_runqueue(which_core) - .ok_or("Couldn't get RunQueue for the given core")? - .write() - .add_task(task, DEFAULT_PRIORITY) - } - - /// Adds a `TaskRef` to this runqueue with the given priority value. - fn add_task(&mut self, task: TaskRef, priority: u8) -> Result<(), &'static str> { - let priority_task = PriorityTaskRef { - task, - priority, - last_ran: Instant::ZERO, - }; - self.queue.push(priority_task); - Ok(()) - } - - /// The internal function that actually removes the task from the runqueue. - fn remove_internal(&mut self, task: &TaskRef) -> Result<(), &'static str> { - self.queue.retain(|x| &x.task != task); - Ok(()) - } - - /// Removes a `TaskRef` from this RunQueue. - pub fn remove_task(&mut self, task: &TaskRef) -> Result<(), &'static str> { - self.remove_internal(task) - } - - /// Removes a `TaskRef` from all `RunQueue`s that exist on the entire - /// system. - /// - /// This is a brute force approach that iterates over all runqueues. - pub fn remove_task_from_all(task: &TaskRef) -> Result<(), &'static str> { - for (_core, rq) in RUNQUEUES.iter() { - rq.write().remove_task(task)?; - } - Ok(()) - } - - fn get_priority(&self, task: &TaskRef) -> Option { - for t in self.queue.iter() { - if t.task == *task { - return Some(t.priority); - } - } - None - } - - pub fn idle_task(&self) -> &TaskRef { - &self.idle_task - } - - fn set_priority(&mut self, task: &TaskRef, priority: u8) -> bool { - let previous_len = self.queue.len(); - self.queue.retain(|t| t.task != *task); - - if previous_len != self.queue.len() { - debug_assert_eq!(self.queue.len() + 1, previous_len); - self.queue.push(PriorityTaskRef { - // TODO: Don't take reference? - task: task.clone(), - priority, - // Not technically correct, but this will be reset next time it is run. - last_ran: Instant::ZERO, - }); - true - } else { - false - } - } -} - -pub fn get_priority(task: &TaskRef) -> Option { - for (_, run_queue) in RUNQUEUES.iter() { - if let Some(priority) = run_queue.read().get_priority(task) { - return Some(priority); - } - } - None -} - -pub fn set_priority(task: &TaskRef, priority: u8) { - for (_, run_queue) in RUNQUEUES.iter() { - if run_queue.write().set_priority(task, priority) { - break; - } - } -} - -/// Lowers the task's priority to its previous value when dropped. -pub struct PriorityInheritanceGuard<'a> { - inner: Option<(&'a TaskRef, u8)>, -} - -impl<'a> Drop for PriorityInheritanceGuard<'a> { - fn drop(&mut self) { - if let Some((task, priority)) = self.inner { - set_priority(task, priority) - } - } -} - -/// Modifies the given task's priority to be the maximum of its priority and the -/// current task's priority. -/// -/// Returns a guard which reverts the change when dropped. -pub fn inherit_priority(task: &TaskRef) -> PriorityInheritanceGuard<'_> { - let current_task = task::get_my_current_task().unwrap(); - - let mut current_priority = None; - let mut other_priority = None; - - 'outer: for (core, run_queue) in RUNQUEUES.iter() { - for epoch_task in run_queue.read().iter() { - if epoch_task.task == current_task { - current_priority = Some(epoch_task.priority); - if other_priority.is_some() { - break 'outer; - } - } else if &epoch_task.task == task { - other_priority = Some((core, epoch_task.priority)); - if current_priority.is_some() { - break 'outer; - } - } - } - } - - if let (Some(current_priority), Some((core, other_priority))) = - (current_priority, other_priority) && current_priority > other_priority - { - // NOTE: This assumes no task migration. - debug_assert!(RUNQUEUES.get(core).unwrap().write().set_priority(task, current_priority)); - } - - PriorityInheritanceGuard { - inner: if let (Some(current_priority), Some((_, other_priority))) = - (current_priority, other_priority) - && current_priority > other_priority - { - Some((task, other_priority)) - } else { - None - }, - } -} diff --git a/kernel/runqueue_round_robin/Cargo.toml b/kernel/runqueue_round_robin/Cargo.toml deleted file mode 100644 index 65f6d9652b..0000000000 --- a/kernel/runqueue_round_robin/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -authors = ["Kevin Boos "] -name = "runqueue_round_robin" -description = "Functions and types for handling runqueues, i.e., lists of tasks for scheduling purposes" -version = "0.1.0" - -[dependencies] -log = "0.4.8" - -[dependencies.sync_preemption] -path = "../sync_preemption" - -[dependencies.atomic_linked_list] -path = "../../libs/atomic_linked_list" - -[dependencies.task] -path = "../task" - -## This should be dependent upon 'cfg(single_simd_task_optimization)', -## but it cannot be because of https://github.com/rust-lang/cargo/issues/5499. -## Therefore, it has to be unconditionally included. -[dependencies.single_simd_task_optimization] -path = "../single_simd_task_optimization" - - -[lib] -crate-type = ["rlib"] diff --git a/kernel/runqueue_round_robin/src/lib.rs b/kernel/runqueue_round_robin/src/lib.rs deleted file mode 100644 index e02e218535..0000000000 --- a/kernel/runqueue_round_robin/src/lib.rs +++ /dev/null @@ -1,247 +0,0 @@ -//! This crate contains the `RunQueue` structure, for round robin scheduler. -//! `RunQueue` structure is essentially a list of Tasks -//! that is used for scheduling purposes. -//! - -#![no_std] - -extern crate alloc; -#[macro_use] extern crate log; -extern crate sync_preemption; -extern crate atomic_linked_list; -extern crate task; - -#[cfg(single_simd_task_optimization)] -extern crate single_simd_task_optimization; - -use alloc::collections::VecDeque; -use sync_preemption::PreemptionSafeRwLock; -use atomic_linked_list::atomic_map::AtomicMap; -use task::TaskRef; -use core::ops::{Deref, DerefMut}; - -/// A cloneable reference to a `Taskref` that exposes more methods -/// related to task scheduling -/// -/// The `RoundRobinTaskRef` type is necessary since differnt scheduling algorithms -/// require different data associated with the task to be stored alongside. -/// This makes storing them alongside the task prohibitive. -/// Since round robin is the most primitive scheduling policy -/// no additional scheduling information is needed. -/// context_switches indicate the number of context switches -/// the task has undergone. -/// context_switches is not used in scheduling algorithm. -/// `RoundRobinTaskRef` implements `Deref` and `DerefMut` traits, which dereferences to `TaskRef`. -#[derive(Debug, Clone)] -pub struct RoundRobinTaskRef{ - /// `TaskRef` wrapped by `RoundRobinTaskRef` - taskref: TaskRef, - - /// Number of context switches the task has undergone. Not used in scheduling algorithm - context_switches: usize, -} - -// impl Drop for RoundRobinTaskRef { -// fn drop(&mut self) { -// warn!("DROPPING RoundRobinTaskRef with taskref {:?}", self.taskref); -// } -// } - -impl Deref for RoundRobinTaskRef { - type Target = TaskRef; - - fn deref(&self) -> &TaskRef { - &self.taskref - } -} - -impl DerefMut for RoundRobinTaskRef { - fn deref_mut(&mut self) -> &mut TaskRef { - &mut self.taskref - } -} - -impl RoundRobinTaskRef { - /// Creates a new `RoundRobinTaskRef` that wraps the given `TaskRef`. - pub fn new(taskref: TaskRef) -> RoundRobinTaskRef { - RoundRobinTaskRef { - taskref, - context_switches: 0, - } - } - - /// Increment the number of times the task is picked - pub fn increment_context_switches(&mut self) { - self.context_switches = self.context_switches.saturating_add(1); - } -} - -/// There is one runqueue per core, each core only accesses its own private runqueue -/// and allows the scheduler to select a task from that runqueue to schedule in. -pub static RUNQUEUES: AtomicMap> = AtomicMap::new(); - -/// A list of references to `Task`s (`RoundRobinTaskRef`s). -/// This is used to store the `Task`s (and associated scheduler related data) -/// that are runnable on a given core. -/// A queue is used for the round robin scheduler. -/// `Runqueue` implements `Deref` and `DerefMut` traits, which dereferences to `VecDeque`. -#[derive(Debug)] -pub struct RunQueue { - core: u8, - idle_task: TaskRef, - queue: VecDeque, -} -// impl Drop for RunQueue { -// fn drop(&mut self) { -// warn!("DROPPING Round Robing Runqueue for core {}", self.core); -// } -// } - -impl Deref for RunQueue { - type Target = VecDeque; - - fn deref(&self) -> &VecDeque { - &self.queue - } -} - -impl DerefMut for RunQueue { - fn deref_mut(&mut self) -> &mut VecDeque { - &mut self.queue - } -} - -impl RunQueue { - - /// Moves the `TaskRef` at the given index into this `RunQueue` to the end (back) of this `RunQueue`, - /// and returns a cloned reference to that `TaskRef`. - pub fn move_to_end(&mut self, index: usize) -> Option { - self.swap_remove_front(index).map(|rr_taskref| { - let taskref = rr_taskref.taskref.clone(); - self.push_back(rr_taskref); - taskref - }) - } - - /// Creates a new `RunQueue` for the given core, which is an `apic_id`. - pub fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> { - trace!("Created runqueue (round robin) for core {}", which_core); - let new_rq = PreemptionSafeRwLock::new(RunQueue { - core: which_core, - idle_task, - queue: VecDeque::new(), - }); - - if RUNQUEUES.insert(which_core, new_rq).is_some() { - error!("BUG: RunQueue::init(): runqueue already exists for core {}!", which_core); - Err("runqueue already exists for this core") - } - else { - // there shouldn't already be a RunQueue for this core - Ok(()) - } - } - - pub fn idle_task(&self) -> &TaskRef { - &self.idle_task - } - - /// Returns the `RunQueue` for the given core, which is an `apic_id`. - pub fn get_runqueue(which_core: u8) -> Option<&'static PreemptionSafeRwLock> { - RUNQUEUES.get(&which_core) - } - - - /// Returns the "least busy" core, which is currently very simple, based on runqueue size. - pub fn get_least_busy_core() -> Option { - Self::get_least_busy_runqueue().map(|rq| rq.read().core) - } - - - /// Returns the `RunQueue` for the "least busy" core. - /// See [`get_least_busy_core()`](#method.get_least_busy_core) - fn get_least_busy_runqueue() -> Option<&'static PreemptionSafeRwLock> { - let mut min_rq: Option<(&'static PreemptionSafeRwLock, usize)> = None; - - for (_, rq) in RUNQUEUES.iter() { - let rq_size = rq.read().queue.len(); - - if let Some(min) = min_rq { - if rq_size < min.1 { - min_rq = Some((rq, rq_size)); - } - } - else { - min_rq = Some((rq, rq_size)); - } - } - - min_rq.map(|m| m.0) - } - - /// Chooses the "least busy" core's runqueue (based on simple runqueue-size-based load balancing) - /// and adds the given `Task` reference to that core's runqueue. - pub fn add_task_to_any_runqueue(task: TaskRef) -> Result<(), &'static str> { - let rq = RunQueue::get_least_busy_runqueue() - .or_else(|| RUNQUEUES.iter().next().map(|r| r.1)) - .ok_or("couldn't find any runqueues to add the task to!")?; - - rq.write().add_task(task) - } - - /// Convenience method that adds the given `Task` reference to given core's runqueue. - pub fn add_task_to_specific_runqueue(which_core: u8, task: TaskRef) -> Result<(), &'static str> { - RunQueue::get_runqueue(which_core) - .ok_or("Couldn't get RunQueue for the given core")? - .write() - .add_task(task) - } - - /// Adds a `TaskRef` to this RunQueue. - fn add_task(&mut self, task: TaskRef) -> Result<(), &'static str> { - #[cfg(not(rq_eval))] - debug!("Adding task to runqueue_round_robin {}, {:?}", self.core, task); - - let round_robin_taskref = RoundRobinTaskRef::new(task); - self.push_back(round_robin_taskref); - - #[cfg(single_simd_task_optimization)] - { - warn!("USING SINGLE_SIMD_TASK_OPTIMIZATION VERSION OF RUNQUEUE::ADD_TASK"); - // notify simd_personality crate about runqueue change, but only for SIMD tasks - if task.simd { - single_simd_task_optimization::simd_tasks_added_to_core(self.iter(), self.core); - } - } - - Ok(()) - } - - /// Removes a `TaskRef` from this RunQueue. - pub fn remove_task(&mut self, task: &TaskRef) -> Result<(), &'static str> { - #[cfg(not(rq_eval))] - debug!("Removing task from runqueue_round_robin {}, {:?}", self.core, task); - self.retain(|x| &x.taskref != task); - - #[cfg(single_simd_task_optimization)] { - warn!("USING SINGLE_SIMD_TASK_OPTIMIZATION VERSION OF RUNQUEUE::REMOVE_TASK"); - // notify simd_personality crate about runqueue change, but only for SIMD tasks - if task.simd { - single_simd_task_optimization::simd_tasks_removed_from_core(self.iter(), self.core); - } - } - - Ok(()) - } - - - /// Removes a `TaskRef` from all `RunQueue`s that exist on the entire system. - /// - /// This is a brute force approach that iterates over all runqueues. - pub fn remove_task_from_all(task: &TaskRef) -> Result<(), &'static str> { - for (_core, rq) in RUNQUEUES.iter() { - rq.write().remove_task(task)?; - } - Ok(()) - } -} diff --git a/kernel/scheduler/Cargo.toml b/kernel/scheduler/Cargo.toml index 14f61e37fa..b12521db3a 100644 --- a/kernel/scheduler/Cargo.toml +++ b/kernel/scheduler/Cargo.toml @@ -14,9 +14,5 @@ interrupts = { path = "../interrupts" } sleep = { path = "../sleep" } task = { path = "../task" } -scheduler_round_robin = { path = "../scheduler_round_robin" } -scheduler_epoch = { path = "../scheduler_epoch" } -scheduler_priority = { path = "../scheduler_priority" } - [target.'cfg(target_arch = "x86_64")'.dependencies] x86_64 = "0.14.8" diff --git a/kernel/scheduler/src/lib.rs b/kernel/scheduler/src/lib.rs index af0d3bb9cd..dce7b756bb 100644 --- a/kernel/scheduler/src/lib.rs +++ b/kernel/scheduler/src/lib.rs @@ -14,21 +14,10 @@ #![no_std] #![cfg_attr(target_arch = "x86_64", feature(abi_x86_interrupt))] -cfg_if::cfg_if! { - if #[cfg(epoch_scheduler)] { - extern crate scheduler_epoch as scheduler; - } else if #[cfg(priority_scheduler)] { - extern crate scheduler_priority as scheduler; - } else { - extern crate scheduler_round_robin as scheduler; - } -} - use interrupts::{self, CPU_LOCAL_TIMER_IRQ, interrupt_handler, eoi, EoiBehaviour}; -use task::{self, TaskRef}; -/// A re-export of [`task::schedule()`] for convenience and legacy compatibility. -pub use task::schedule; +/// Re-exports for convenience and legacy compatibility. +pub use task::scheduler::{inherit_priority, priority, schedule, set_priority}; /// Initializes the scheduler on this system using the policy set at compiler time. @@ -41,8 +30,6 @@ pub use task::schedule; /// - `make THESEUS_CONFIG=epoch_scheduler`: epoch scheduler /// - `make THESEUS_CONFIG=priority_scheduler`: priority scheduler pub fn init() -> Result<(), &'static str> { - task::set_scheduler_policy(scheduler::select_next_task); - #[cfg(target_arch = "x86_64")] { interrupts::register_interrupt( CPU_LOCAL_TIMER_IRQ, @@ -91,34 +78,3 @@ interrupt_handler!(timer_tick_handler, None, _stack_frame, { EoiBehaviour::HandlerSentEoi }); - -/// Changes the priority of the given task with the given priority level. -/// Priority values must be between 40 (maximum priority) and 0 (minimum prriority). -/// This function returns an error when a scheduler without priority is loaded. -pub fn set_priority(_task: &TaskRef, _priority: u8) -> Result<(), &'static str> { - #[cfg(any(epoch_scheduler, priority_scheduler))] - { - Ok(scheduler::set_priority(_task, _priority)) - } - #[cfg(not(any(epoch_scheduler, priority_scheduler)))] - { - Err("called set priority on scheduler that doesn't support set priority") - } -} - -/// Returns the priority of a given task. -/// This function returns None when a scheduler without priority is loaded. -pub fn get_priority(_task: &TaskRef) -> Option { - #[cfg(any(epoch_scheduler, priority_scheduler))] - { - scheduler::get_priority(_task) - } - #[cfg(not(any(epoch_scheduler, priority_scheduler)))] - { - None - } -} - -pub fn inherit_priority(task: &TaskRef) -> scheduler::PriorityInheritanceGuard<'_> { - scheduler::inherit_priority(task) -} diff --git a/kernel/scheduler_epoch/Cargo.toml b/kernel/scheduler_epoch/Cargo.toml index 0427799eb6..e0b65c5a15 100644 --- a/kernel/scheduler_epoch/Cargo.toml +++ b/kernel/scheduler_epoch/Cargo.toml @@ -14,11 +14,5 @@ version = "0.4.8" [dependencies.task] path = "../task" -[dependencies.runqueue] -path = "../runqueue" - -[dependencies.runqueue_epoch] -path = "../runqueue_epoch" - [lib] crate-type = ["rlib"] diff --git a/kernel/scheduler_epoch/src/lib.rs b/kernel/scheduler_epoch/src/lib.rs index 298f8d88d3..22a6917466 100644 --- a/kernel/scheduler_epoch/src/lib.rs +++ b/kernel/scheduler_epoch/src/lib.rs @@ -1,181 +1,223 @@ -//! This crate picks the next task on token based scheduling policy. -//! At the begining of each scheduling epoch a set of tokens is distributed -//! among tasks depending on their priority. -//! [tokens assigned to each task = (prioirty of each task / prioirty of all -//! tasks) * length of epoch]. Each time a task is picked, the token count of -//! the task is decremented by 1. A task is executed only if it has tokens -//! remaining. When all tokens of all runnable task are exhausted a new -//! scheduling epoch is initiated. In addition this crate offers the interfaces -//! to set and get priorities of each task. +//! This crate implements a token-based epoch scheduling policy. +//! +//! At the begining of each scheduling epoch, a set of tokens is distributed +//! among all runnable tasks, based on their priority relative to all other +//! runnable tasks in the runqueue. The formula for this is: +//! ```ignore +//! tokens_assigned_to_task_i = (priority_task_i / sum_priority_all_tasks) * epoch_length; +//! ``` +//! * Each time a task is picked, its token count is decremented by 1. +//! * A task can only be selected for next execution if it has tokens remaining. +//! * When all tokens of all runnable task are exhausted, a new scheduling epoch begins. +//! +//! This epoch scheduler is also a priority-based scheduler, so it allows +//! getting and setting the priorities of each task. #![no_std] extern crate alloc; -use log::error; -use runqueue_epoch::{RunQueue, MAX_PRIORITY}; +use alloc::{boxed::Box, collections::VecDeque, vec::Vec}; +use core::ops::{Deref, DerefMut}; use task::TaskRef; -pub use runqueue_epoch::{inherit_priority, PriorityInheritanceGuard}; +const MAX_PRIORITY: u8 = 40; +const DEFAULT_PRIORITY: u8 = 20; +const INITIAL_TOKENS: usize = 10; -/// A data structure to transfer data from select_next_task_priority -/// to select_next_task -struct NextTaskResult { - taskref: Option, - idle_task: bool, +/// An instance of an epoch scheduler, typically one per CPU. +pub struct Scheduler { + idle_task: TaskRef, + queue: VecDeque, } -/// Changes the priority of the given task with the given priority level. -/// Priority values must be between 40 (maximum priority) and 0 (minimum -/// prriority). -pub fn set_priority(task: &TaskRef, priority: u8) { - let priority = core::cmp::min(priority, MAX_PRIORITY); - runqueue_epoch::set_priority(task, priority); -} +impl Scheduler { + /// Creates a new epoch scheduler instance with the given idle task. + pub const fn new(idle_task: TaskRef) -> Self { + Self { + idle_task, + queue: VecDeque::new(), + } + } -/// Returns the priority of the given task. -pub fn get_priority(task: &TaskRef) -> Option { - runqueue_epoch::get_priority(task) -} + /// Moves the `TaskRef` at the given `index` in this scheduler's runqueue + /// to the end (back) of the runqueue. + /// + /// Sets the number of tokens for that task to the given `tokens` + /// and increments that task's number of context switches. + /// + /// Returns a cloned reference to the `TaskRef` at the given `index`. + fn update_and_move_to_end(&mut self, index: usize, tokens: usize) -> Option { + if let Some(mut priority_task_ref) = self.queue.remove(index) { + priority_task_ref.tokens_remaining = tokens; + let task_ref = priority_task_ref.task.clone(); + self.queue.push_back(priority_task_ref); + Some(task_ref) + } else { + None + } + } -/// This defines the priority scheduler policy. -/// Returns None if there is no schedule-able task. -pub fn select_next_task(apic_id: u8) -> Option { - let next_task = select_next_task_priority(apic_id)?; - // If the selected task is idle task we begin a new scheduling epoch - if next_task.idle_task { - assign_tokens(apic_id); - select_next_task_priority(apic_id)?.taskref - } else { - next_task.taskref + fn try_next(&mut self) -> Option { + if let Some((task_index, _)) = self + .queue + .iter() + .enumerate() + .find(|(_, task)| task.is_runnable() && task.tokens_remaining > 0) + { + let chosen_task = self.queue.get(task_index).unwrap(); + let modified_tokens = chosen_task.tokens_remaining.saturating_sub(1); + self.update_and_move_to_end(task_index, modified_tokens) + } else { + None + } } -} -/// this defines the priority scheduler policy. -/// Returns None if there is no runqueue -/// Otherwise returns a task with a flag indicating whether its an idle task. -fn select_next_task_priority(apic_id: u8) -> Option { - let mut runqueue_locked = match RunQueue::get_runqueue(apic_id) { - Some(rq) => rq.write(), - _ => { - // #[cfg(not(loscd_eval))] - // error!("BUG: select_next_task_priority(): couldn't get runqueue for core {}", - // apic_id); - return None; + fn assign_tokens(&mut self) { + // We begin with total priorities = 1 to avoid division by zero + let mut total_priorities: usize = 1; + + // This loop calculates the total priorities of the runqueue + for (_i, t) in self.queue.iter().enumerate() { + // we assign tokens only to runnable tasks + if !t.is_runnable() { + continue; + } + + total_priorities = total_priorities + .saturating_add(1) + .saturating_add(t.priority as usize); } - }; - - if let Some((task_index, _)) = runqueue_locked - .iter() - .enumerate() - .find(|(_, task)| task.is_runnable()) - { - let modified_tokens = { - let chosen_task = runqueue_locked.get(task_index); - match chosen_task.map(|m| m.tokens_remaining) { - Some(x) => x.saturating_sub(1), - None => 0, + + // Each epoch lasts for a total of 100 tokens by default. + // However, as this granularity could skip over low priority tasks + // when many concurrent tasks are running, we increase the epoch in such cases. + let epoch: usize = core::cmp::max(total_priorities, 100); + + for (_i, t) in self.queue.iter_mut().enumerate() { + // we give zero tokens to the idle tasks + if t.is_an_idle_task { + continue; } - }; - let task = runqueue_locked.update_and_move_to_end(task_index, modified_tokens); + // we give zero tokens to non-runnable tasks + if !t.is_runnable() { + continue; + } - Some(NextTaskResult { - taskref: task, - idle_task: false, - }) - } else { - Some(NextTaskResult { - taskref: Some(runqueue_locked.idle_task().clone()), - idle_task: true, - }) + // task_tokens = epoch * (taskref + 1) / total_priorities; + let task_tokens = epoch + .saturating_mul((t.priority as usize).saturating_add(1)) + .wrapping_div(total_priorities); + + t.tokens_remaining = task_tokens; + // debug!("assign_tokens(): CPU {} chose Task {:?}", cpu_id, &*t); + } } } -/// This assigns tokens between tasks. -/// Returns true if successful. -/// Tokens are assigned based on (prioirty of each task / prioirty of all -/// tasks). -fn assign_tokens(apic_id: u8) -> bool { - let mut runqueue_locked = match RunQueue::get_runqueue(apic_id) { - Some(rq) => rq.write(), - _ => { - // #[cfg(not(loscd_eval))] - // error!("BUG: assign_tokens(): couldn't get runqueue for core {}", apic_id); - return false; - } - }; +impl task::scheduler::Scheduler for Scheduler { + fn next(&mut self) -> TaskRef { + self.try_next() + .or_else(|| { + self.assign_tokens(); + self.try_next() + }) + .unwrap_or_else(|| self.idle_task.clone()) + } - // We begin with total priorities = 1 to avoid division by zero - let mut total_priorities: usize = 1; + fn add(&mut self, task: TaskRef) { + let priority_task_ref = EpochTaskRef::new(task); + self.queue.push_back(priority_task_ref); + } - // This loop calculates the total priorities of the runqueue - for (_i, t) in runqueue_locked.iter().enumerate() { - // we skip the idle task, it contains zero tokens as it is picked last - if t.is_an_idle_task { - continue; - } + fn busyness(&self) -> usize { + self.queue.len() + } - // we assign tokens only to runnable tasks - if !t.is_runnable() { - continue; + fn remove(&mut self, task: &TaskRef) -> bool { + let mut task_index = None; + for (i, t) in self.queue.iter().enumerate() { + if **t == *task { + task_index = Some(i); + break; + } } - // if this task is pinned, it must not be pinned to a different core - if let Some(pinned) = t.pinned_cpu() { - if pinned.into_u8() != apic_id { - // with per-core runqueues, this should never happen! - error!( - "select_next_task() (AP {}) found a task pinned to a different core: {:?}", - apic_id, t - ); - return false; - } + if let Some(task_index) = task_index { + self.queue.remove(task_index); + true + } else { + false } + } - total_priorities = total_priorities - .saturating_add(1) - .saturating_add(t.priority as usize); + fn as_priority_scheduler(&mut self) -> Option<&mut dyn task::scheduler::PriorityScheduler> { + Some(self) } - // We keep each epoch for 100 tokens by default - // However since this granularity could miss low priority tasks when - // many concurrent tasks are running, we increase the epoch in such cases - let epoch: usize = core::cmp::max(total_priorities, 100); + fn drain(&mut self) -> Box + '_> { + Box::new(self.queue.drain(..).map(|epoch_task| epoch_task.task)) + } - // We iterate through each task in runqueue - // We dont use iterator as items are modified in the process - for (_i, t) in runqueue_locked.iter_mut().enumerate() { - // we give zero tokens to the idle tasks - if t.is_an_idle_task { - continue; - } + fn tasks(&self) -> Vec { + self.queue + .clone() + .into_iter() + .map(|epoch_task| epoch_task.task) + .collect() + } +} - // we give zero tokens to none runnable tasks - if !t.is_runnable() { - continue; +impl task::scheduler::PriorityScheduler for Scheduler { + fn set_priority(&mut self, task: &TaskRef, priority: u8) -> bool { + let priority = core::cmp::min(priority, MAX_PRIORITY); + for epoch_task in self.queue.iter_mut() { + if epoch_task.task == *task { + epoch_task.priority = priority; + return true; + } } + false + } - // if this task is pinned, it must not be pinned to a different core - if let Some(pinned) = t.pinned_cpu() { - if pinned.into_u8() != apic_id { - // with per-core runqueues, this should never happen! - error!( - "select_next_task() (AP {}) found a task pinned to a different core: {:?}", - apic_id, &*t - ); - return false; + fn priority(&mut self, task: &TaskRef) -> Option { + for epoch_task in self.queue.iter() { + if epoch_task.task == *task { + return Some(epoch_task.priority); } } - // task_tokens = epoch * (taskref + 1) / total_priorities; - let task_tokens = epoch - .saturating_mul((t.priority as usize).saturating_add(1)) - .wrapping_div(total_priorities); + None + } +} + +#[derive(Debug, Clone)] +struct EpochTaskRef { + task: TaskRef, + priority: u8, + tokens_remaining: usize, +} + +impl Deref for EpochTaskRef { + type Target = TaskRef; + + fn deref(&self) -> &TaskRef { + &self.task + } +} - t.tokens_remaining = task_tokens; - // debug!("assign_tokens(): AP {} chose Task {:?}", apic_id, &*t); - // break; +impl DerefMut for EpochTaskRef { + fn deref_mut(&mut self) -> &mut TaskRef { + &mut self.task } +} - true +impl EpochTaskRef { + fn new(task: TaskRef) -> EpochTaskRef { + EpochTaskRef { + task, + priority: DEFAULT_PRIORITY, + tokens_remaining: INITIAL_TOKENS, + } + } } diff --git a/kernel/scheduler_priority/Cargo.toml b/kernel/scheduler_priority/Cargo.toml index 5965429131..6f452d1c46 100644 --- a/kernel/scheduler_priority/Cargo.toml +++ b/kernel/scheduler_priority/Cargo.toml @@ -7,7 +7,6 @@ edition = "2021" [dependencies] log = "0.4.8" -runqueue_priority = { path = "../runqueue_priority" } task = { path = "../task" } time = { path = "../time" } diff --git a/kernel/scheduler_priority/src/lib.rs b/kernel/scheduler_priority/src/lib.rs index d22d042de7..1c11e6b6b8 100644 --- a/kernel/scheduler_priority/src/lib.rs +++ b/kernel/scheduler_priority/src/lib.rs @@ -4,43 +4,156 @@ extern crate alloc; -use alloc::vec::Vec; -use log::error; -use runqueue_priority::RunQueue; +use alloc::{boxed::Box, collections::BinaryHeap, vec::Vec}; +use core::cmp::Ordering; + use task::TaskRef; +use time::Instant; + +const DEFAULT_PRIORITY: u8 = 0; + +pub struct Scheduler { + idle_task: TaskRef, + queue: BinaryHeap, +} -pub use runqueue_priority::{ - get_priority, inherit_priority, set_priority, PriorityInheritanceGuard, -}; - -/// This defines the priority scheduler policy. -/// Returns None if there is no schedule-able task -pub fn select_next_task(apic_id: u8) -> Option { - let mut runqueue_locked = match RunQueue::get_runqueue(apic_id) { - Some(rq) => rq.write(), - _ => { - error!("BUG: select_next_task_priority(): couldn't get runqueue for core {apic_id}",); - return None; +impl Scheduler { + pub fn new(idle_task: TaskRef) -> Self { + Self { + idle_task, + queue: BinaryHeap::new(), } - }; - - // This is a temporary solution before the PR to only store runnable tasks in - // the run queue is merged. - let mut blocked_tasks = Vec::with_capacity(2); - while let Some(mut task) = runqueue_locked.pop() { - if task.is_runnable() { - for t in blocked_tasks { - runqueue_locked.push(t) + } +} + +impl task::scheduler::Scheduler for Scheduler { + fn next(&mut self) -> TaskRef { + // This is a temporary solution before the PR to only store runnable tasks in + // the run queue is merged. + let mut blocked_tasks = Vec::with_capacity(2); + while let Some(mut task) = self.queue.pop() { + if task.task.is_runnable() { + for t in blocked_tasks { + self.queue.push(t) + } + task.last_ran = time::now::(); + self.queue.push(task.clone()); + return task.task; + } else { + blocked_tasks.push(task); } - task.last_ran = time::now::(); - runqueue_locked.push(task.clone()); - return Some(task.task); + } + for task in blocked_tasks { + self.queue.push(task); + } + self.idle_task.clone() + } + + fn add(&mut self, task: TaskRef) { + self.queue + .push(PriorityTaskRef::new(task, DEFAULT_PRIORITY)); + } + + fn busyness(&self) -> usize { + self.queue.len() + } + + fn remove(&mut self, task: &TaskRef) -> bool { + let old_len = self.queue.len(); + self.queue + .retain(|priority_task| priority_task.task != *task); + let new_len = self.queue.len(); + // We should have removed at most one task from the run queue. + debug_assert!( + old_len - new_len < 2, + "difference between run queue lengths was: {}", + old_len - new_len + ); + new_len != old_len + } + + fn as_priority_scheduler(&mut self) -> Option<&mut dyn task::scheduler::PriorityScheduler> { + Some(self) + } + + fn drain(&mut self) -> alloc::boxed::Box + '_> { + Box::new(self.queue.drain().map(|priority_task| priority_task.task)) + } + + fn tasks(&self) -> Vec { + self.queue + .clone() + .into_iter() + .map(|priority_task| priority_task.task) + .collect() + } +} + +impl task::scheduler::PriorityScheduler for Scheduler { + fn set_priority(&mut self, task: &TaskRef, priority: u8) -> bool { + let previous_len = self.queue.len(); + self.queue.retain(|t| t.task != *task); + + if previous_len != self.queue.len() { + // We should have at most removed one task from the run queue. + debug_assert_eq!(self.queue.len() + 1, previous_len); + self.queue.push(PriorityTaskRef { + task: task.clone(), + priority, + // Not technically correct, but this will be reset next time it is run. + last_ran: Instant::ZERO, + }); + true } else { - blocked_tasks.push(task); + false + } + } + + fn priority(&mut self, task: &TaskRef) -> Option { + for priority_task in self.queue.iter() { + if priority_task.task == *task { + return Some(priority_task.priority); + } + } + None + } +} + +#[derive(Clone, Debug, Eq)] +struct PriorityTaskRef { + task: TaskRef, + priority: u8, + last_ran: Instant, +} + +impl PriorityTaskRef { + pub const fn new(task: TaskRef, priority: u8) -> Self { + Self { + task, + priority, + last_ran: Instant::ZERO, } } - for task in blocked_tasks { - runqueue_locked.push(task); +} + +impl PartialEq for PriorityTaskRef { + fn eq(&self, other: &Self) -> bool { + self.priority.eq(&other.priority) && self.last_ran.eq(&other.last_ran) + } +} + +impl PartialOrd for PriorityTaskRef { + fn partial_cmp(&self, other: &Self) -> Option { + match self.priority.cmp(&other.priority) { + // Tasks that were ran longer ago should be prioritised. + Ordering::Equal => Some(self.last_ran.cmp(&other.last_ran).reverse()), + ordering => Some(ordering), + } + } +} + +impl Ord for PriorityTaskRef { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.priority.cmp(&other.priority) } - Some(runqueue_locked.idle_task().clone()) } diff --git a/kernel/scheduler_round_robin/Cargo.toml b/kernel/scheduler_round_robin/Cargo.toml index 3563d77988..fe6e0b02a0 100644 --- a/kernel/scheduler_round_robin/Cargo.toml +++ b/kernel/scheduler_round_robin/Cargo.toml @@ -6,19 +6,9 @@ version = "0.1.0" edition = "2021" [dependencies] +log = "0.4.8" spin = "0.9.4" - -[dependencies.log] -version = "0.4.8" - -[dependencies.task] -path = "../task" - -[dependencies.runqueue] -path = "../runqueue" - -[dependencies.runqueue_round_robin] -path = "../runqueue_round_robin" +task = { path = "../task" } [lib] crate-type = ["rlib"] diff --git a/kernel/scheduler_round_robin/src/lib.rs b/kernel/scheduler_round_robin/src/lib.rs index cb85fee4dd..d022c7df6c 100644 --- a/kernel/scheduler_round_robin/src/lib.rs +++ b/kernel/scheduler_round_robin/src/lib.rs @@ -6,42 +6,74 @@ extern crate alloc; -use core::marker::PhantomData; +use alloc::{boxed::Box, collections::VecDeque, vec::Vec}; -use log::error; -use runqueue_round_robin::RunQueue; use task::TaskRef; -/// This defines the round robin scheduler policy. -/// Returns None if there is no schedule-able task -// TODO: Remove option? -// TODO: Return &'static TaskRef? -pub fn select_next_task(apic_id: u8) -> Option { - let mut runqueue_locked = match RunQueue::get_runqueue(apic_id) { - Some(rq) => rq.write(), - _ => { - error!("BUG: select_next_task_round_robin(): couldn't get runqueue for core {apic_id}",); - return None; - } - }; +pub struct Scheduler { + idle_task: TaskRef, + queue: VecDeque, +} - if let Some((task_index, _)) = runqueue_locked - .iter() - .enumerate() - .find(|(_, task)| task.is_runnable()) - { - runqueue_locked.move_to_end(task_index) - } else { - Some(runqueue_locked.idle_task().clone()) +impl Scheduler { + pub const fn new(idle_task: TaskRef) -> Self { + Self { + idle_task, + queue: VecDeque::new(), + } } } -pub struct PriorityInheritanceGuard<'a> { - phantom: PhantomData<&'a ()>, -} +impl task::scheduler::Scheduler for Scheduler { + fn next(&mut self) -> TaskRef { + if let Some((task_index, _)) = self + .queue + .iter() + .enumerate() + .find(|(_, task)| task.is_runnable()) + { + let task = self.queue.swap_remove_front(task_index).unwrap(); + self.queue.push_back(task.clone()); + task + } else { + self.idle_task.clone() + } + } + + fn busyness(&self) -> usize { + self.queue.len() + } + + fn add(&mut self, task: TaskRef) { + self.queue.push_back(task); + } + + fn remove(&mut self, task: &TaskRef) -> bool { + let mut task_index = None; + for (i, t) in self.queue.iter().enumerate() { + if t == task { + task_index = Some(i); + break; + } + } + + if let Some(task_index) = task_index { + self.queue.remove(task_index); + true + } else { + false + } + } + + fn as_priority_scheduler(&mut self) -> Option<&mut dyn task::scheduler::PriorityScheduler> { + None + } + + fn drain(&mut self) -> Box + '_> { + Box::new(self.queue.drain(..)) + } -pub fn inherit_priority(_: &TaskRef) -> PriorityInheritanceGuard<'_> { - PriorityInheritanceGuard { - phantom: PhantomData, + fn tasks(&self) -> Vec { + self.queue.clone().into() } } diff --git a/kernel/spawn/Cargo.toml b/kernel/spawn/Cargo.toml index c3ce259cfa..1145da653b 100644 --- a/kernel/spawn/Cargo.toml +++ b/kernel/spawn/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] +cfg-if = "1.0.0" log = "0.4.8" spin = "0.9.4" lazy_static = { features = ["spin_no_std"], version = "1.4.0" } @@ -19,7 +20,6 @@ cpu = { path = "../cpu" } preemption = { path = "../preemption" } task = { path = "../task" } task_struct = { path = "../task_struct" } -runqueue = { path = "../runqueue" } scheduler = { path = "../scheduler" } mod_mgmt = { path = "../mod_mgmt" } context_switch = { path = "../context_switch" } @@ -29,6 +29,10 @@ thread_local_macro = { path = "../thread_local_macro" } no_drop = { path = "../no_drop" } early_tls = { path = "../early_tls" } +scheduler_epoch = { path = "../scheduler_epoch" } +scheduler_priority = { path = "../scheduler_priority" } +scheduler_round_robin = { path = "../scheduler_round_robin" } + [target.'cfg(target_arch = "x86_64")'.dependencies] fault_crate_swap = { path = "../fault_crate_swap" } catch_unwind = { path = "../catch_unwind" } diff --git a/kernel/spawn/src/lib.rs b/kernel/spawn/src/lib.rs index 043e6855e9..d0ff408b87 100755 --- a/kernel/spawn/src/lib.rs +++ b/kernel/spawn/src/lib.rs @@ -59,11 +59,17 @@ pub fn init( .spawn_restartable(None)? .clone(); - runqueue::init(cpu_id.into_u8(), idle_task)?; - runqueue::add_task_to_specific_runqueue( - cpu_id.into_u8(), - exitable_bootstrap_task.clone(), - )?; + cfg_if::cfg_if! { + if #[cfg(epoch_scheduler)] { + let scheduler = scheduler_epoch::Scheduler::new(idle_task); + } else if #[cfg(priority_scheduler)] { + let scheduler = scheduler_priority::Scheduler::new(idle_task); + } else { + let scheduler = scheduler_round_robin::Scheduler::new(idle_task); + } + } + task::scheduler::set_policy(cpu_id, scheduler); + task::scheduler::add_task_to(cpu_id, exitable_bootstrap_task.clone()); Ok(BootstrapTaskRef { cpu_id, @@ -439,9 +445,9 @@ impl TaskBuilder // Idle tasks are not stored on the run queue. if !self.idle { if let Some(cpu) = self.pin_on_cpu { - runqueue::add_task_to_specific_runqueue(cpu.into_u8(), task_ref.clone())?; + task::scheduler::add_task_to(cpu, task_ref.clone()); } else { - runqueue::add_task_to_any_runqueue(task_ref.clone())?; + task::scheduler::add_task(task_ref.clone()); } } @@ -877,7 +883,7 @@ fn task_restartable_cleanup_failure(current_task: ExitableTaskRef, kill #[inline(always)] fn task_cleanup_final_internal(current_task: &ExitableTaskRef) { // First, remove the task from its runqueue(s). - remove_current_task_from_runqueue(current_task); + task::scheduler::remove_task_from_current(current_task); // Second, run TLS object destructors, which will drop any TLS objects // that were lazily initialized during this execution of this task. @@ -994,21 +1000,7 @@ where /// Helper function to remove a task from its runqueue and drop it. fn remove_current_task_from_runqueue(current_task: &ExitableTaskRef) { - // Special behavior when evaluating runqueues - #[cfg(rq_eval)] { - runqueue::remove_task_from_all(current_task).unwrap(); - } - - // In the regular case, we do not perform task migration between cores, - // so we can use the heuristic that the task is only on the current core's runqueue. - #[cfg(not(rq_eval))] { - if let Err(e) = runqueue::get_runqueue(cpu::current_cpu().into_u8()) - .ok_or("couldn't get this CPU's ID or runqueue to remove exited task from it") - .and_then(|rq| rq.write().remove_task(current_task)) - { - error!("BUG: couldn't remove exited task from runqueue: {}", e); - } - } + task::scheduler::remove_task(current_task); } /// A basic idle task that does nothing but loop endlessly. diff --git a/kernel/task/Cargo.toml b/kernel/task/Cargo.toml index 2e0361ef4f..e16a670a14 100644 --- a/kernel/task/Cargo.toml +++ b/kernel/task/Cargo.toml @@ -23,5 +23,6 @@ no_drop = { path = "../no_drop" } preemption = { path = "../preemption" } stack = { path = "../stack" } sync_irq = { path = "../../libs/sync_irq" } +sync_preemption = { path = "../sync_preemption" } task_struct = { path = "../task_struct" } waker_generic = { path = "../waker_generic" } diff --git a/kernel/task/src/lib.rs b/kernel/task/src/lib.rs index b64af49d3a..2efd3947f6 100755 --- a/kernel/task/src/lib.rs +++ b/kernel/task/src/lib.rs @@ -26,9 +26,12 @@ #![no_std] #![feature(negative_impls)] #![feature(thread_local)] +#![feature(let_chains)] extern crate alloc; +pub mod scheduler; + use alloc::{ boxed::Box, collections::BTreeMap, @@ -45,7 +48,6 @@ use core::{ task::Waker, }; use cpu::CpuId; -use crossbeam_utils::atomic::AtomicCell; use irq_safety::hold_interrupts; use log::error; use environment::Environment; @@ -65,6 +67,7 @@ pub use task_struct::{ }; #[cfg(simd_personality)] pub use task_struct::SimdExt; +pub use scheduler::schedule; /// The list of all Tasks in the system. @@ -584,75 +587,6 @@ pub fn take_kill_handler() -> Option { .flatten() } - -pub use scheduler::*; -mod scheduler { - use super::*; - - /// Yields the current CPU by selecting a new `Task` to run next, - /// and then switches to that new `Task`. - /// - /// The new "next" `Task` to run will be selected by the currently-active - /// scheduler policy. - /// - /// Preemption will be disabled while this function runs, - /// but interrupts are not disabled because it is not necessary. - /// - /// ## Return - /// * `true` if a new task was selected and switched to. - /// * `false` if no new task was selected, - /// meaning the current task will continue running. - #[doc(alias("yield"))] - pub fn schedule() -> bool { - let preemption_guard = preemption::hold_preemption(); - // If preemption was not previously enabled (before we disabled it above), - // then we shouldn't perform a task switch here. - if !preemption_guard.preemption_was_enabled() { - // trace!("Note: preemption was disabled on CPU {}, skipping scheduler.", cpu::current_cpu()); - return false; - } - - let cpu_id = preemption_guard.cpu_id(); - - let Some(next_task) = (SELECT_NEXT_TASK_FUNC.load())(cpu_id.into_u8()) else { - return false; // keep running the same current task - }; - - let (did_switch, recovered_preemption_guard) = task_switch( - next_task, - cpu_id, - preemption_guard, - ); - - // trace!("AFTER TASK_SWITCH CALL (CPU {}) new current: {:?}, interrupts are {}", cpu_id, task::get_my_current_task(), irq_safety::interrupts_enabled()); - - drop(recovered_preemption_guard); - did_switch - } - - /// The signature for the function that selects the next task for the given CPU. - /// - /// This is used when the [`schedule()`] function is invoked. - pub type SchedulerFunc = fn(u8) -> Option; - - /// The function currently registered as the system-wide scheduler policy. - /// - /// This is initialized to a dummy function that returns no "next" task, - /// meaning that no scheduling will occur until it is initialized. - /// Currently, this is initialized from within `scheduler::init()`. - static SELECT_NEXT_TASK_FUNC: AtomicCell = AtomicCell::new(|_| None); - - /// Sets the active scheduler policy used by [`schedule()`] to select the next task. - /// - /// Currently, we only support one scheduler policy for the whole system, - /// but supporting different policies on a per-CPU, per-namespace, or per-arbitrary domain basis - /// would be a relatively simple immprovement. - pub fn set_scheduler_policy(select_next_task_func: SchedulerFunc) { - SELECT_NEXT_TASK_FUNC.store(select_next_task_func); - } -} - - /// Switches from the current task to the given `next` task. /// /// ## Arguments diff --git a/kernel/task/src/scheduler.rs b/kernel/task/src/scheduler.rs new file mode 100644 index 0000000000..404e86e199 --- /dev/null +++ b/kernel/task/src/scheduler.rs @@ -0,0 +1,300 @@ +use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use core::ptr; + +use cpu::CpuId; +use spin::Mutex; +use sync_preemption::PreemptionSafeMutex; + +use crate::TaskRef; + +/// List of all the schedulers on the system. +/// +/// This is primarily used for spawning tasks, either to find the least busy CPU +/// or spawn a task pinned to a particular CPU. +/// +/// The outer mutex does not need to be preemption-safe, because it is never +/// accessed from `schedule`. In fact, ideally it would be a blocking mutex, but +/// that leads to circular dependencies. +static SCHEDULERS: Mutex)>> = Mutex::new(Vec::new()); + +/// A reference to the current CPUs scheduler. +/// +/// This isn't strictly necessary, but it greatly improves performance, as it +/// avoids having to lock the system-wide list of schedulers. +#[cls::cpu_local] +static SCHEDULER: Option> = None; + +type ConcurrentScheduler = PreemptionSafeMutex; + +/// Yields the current CPU by selecting a new `Task` to run next, +/// and then switches to that new `Task`. +/// +/// The new "next" `Task` to run will be selected by the currently-active +/// scheduler policy. +/// +/// Preemption will be disabled while this function runs, +/// but interrupts are not disabled because it is not necessary. +/// +/// ## Return +/// * `true` if a new task was selected and switched to. +/// * `false` if no new task was selected, meaning the current task will +/// continue running. +#[doc(alias("yield"))] +pub fn schedule() -> bool { + let preemption_guard = preemption::hold_preemption(); + // If preemption was not previously enabled (before we disabled it above), + // then we shouldn't perform a task switch here. + if !preemption_guard.preemption_was_enabled() { + // trace!("Note: preemption was disabled on CPU {}, skipping scheduler.", cpu::current_cpu()); + return false; + } + + let cpu_id = preemption_guard.cpu_id(); + + let next_task = SCHEDULER.update_guarded( + |scheduler| scheduler.as_ref().unwrap().lock().next(), + &preemption_guard, + ); + + let (did_switch, recovered_preemption_guard) = + super::task_switch(next_task, cpu_id, preemption_guard); + + // log::trace!("AFTER TASK_SWITCH CALL (CPU {}) new current: {:?}, interrupts are {}", cpu_id, super::get_my_current_task(), irq_safety::interrupts_enabled()); + + drop(recovered_preemption_guard); + did_switch +} + +/// Sets the scheduler policy for the given CPU. +pub fn set_policy(cpu_id: CpuId, scheduler: T) +where + T: Scheduler, +{ + let mutex = PreemptionSafeMutex::new(scheduler); + let scheduler = Arc::new(mutex); + + let mut locked = SCHEDULERS.lock(); + SCHEDULER.update(|current_scheduler| { + if let Some(old_scheduler) = current_scheduler { + let mut old_scheduler_index = None; + for (i, (cpu, scheduler)) in locked.iter().enumerate() { + if *cpu == cpu_id { + debug_assert!(ptr::eq(old_scheduler, scheduler)); + old_scheduler_index = Some(i); + break; + } + } + + if let Some(old_scheduler_index) = old_scheduler_index { + locked.swap_remove(old_scheduler_index); + } else { + log::error!("BUG: current scheduler not found in `SCHEDULERS`"); + } + + let mut new_scheduler = scheduler.lock(); + for task in old_scheduler.lock().drain() { + new_scheduler.add(task); + } + } + + locked.push((cpu_id, scheduler.clone() as _)); + *current_scheduler = Some(scheduler as _); + }); +} + +/// Adds the given task to the least busy run queue. +pub fn add_task(task: TaskRef) { + let locked = SCHEDULERS.lock(); + + let mut min_busyness = usize::MAX; + let mut least_busy_index = None; + + for (i, (_, scheduler)) in locked.iter().enumerate() { + let busyness = scheduler.lock().busyness(); + if busyness < min_busyness { + least_busy_index = Some(i); + min_busyness = busyness; + } + } + + locked[least_busy_index.unwrap()].1.lock().add(task); +} + +/// Adds the given task to the specified CPU's run queue. +pub fn add_task_to(cpu_id: CpuId, task: TaskRef) { + for (cpu, scheduler) in SCHEDULERS.lock().iter() { + if *cpu == cpu_id { + scheduler.lock().add(task); + return; + } + } +} + +/// Adds the given task to the current CPU's run queue. +pub fn add_task_to_current(task: TaskRef) { + SCHEDULER.update(|scheduler| scheduler.as_ref().unwrap().lock().add(task)) +} + +/// Removes the given task from all run queues. +pub fn remove_task(task: &TaskRef) -> bool { + for (_, scheduler) in SCHEDULERS.lock().iter() { + if scheduler.lock().remove(task) { + // A task will only be on one run queue. + return true; + } + } + false +} + +/// Removes the given task from the specified CPU's run queue. +pub fn remove_task_from(task: &TaskRef, cpu_id: CpuId) -> bool { + for (cpu, scheduler) in SCHEDULERS.lock().iter() { + if *cpu == cpu_id { + return scheduler.lock().remove(task); + } + } + false +} + +/// Removes the given task from the current CPU's run queue. +pub fn remove_task_from_current(task: &TaskRef) -> bool { + SCHEDULER.update(|scheduler| scheduler.as_ref().unwrap().lock().remove(task)) +} + +/// A task scheduler. +pub trait Scheduler: Send + Sync + 'static { + /// Returns the next task to run. + fn next(&mut self) -> TaskRef; + + /// Adds a task to the run queue. + fn add(&mut self, task: TaskRef); + + /// Returns a measure of how busy the scheduler is, with higher values + /// representing a busier scheduler. + fn busyness(&self) -> usize; + + /// Removes a task from the run queue. + fn remove(&mut self, task: &TaskRef) -> bool; + + /// Returns a reference to this scheduler as a priority scheduler, if it is one. + fn as_priority_scheduler(&mut self) -> Option<&mut dyn PriorityScheduler>; + + /// Clears the scheduler's runqueue, returning an iterator over all contained tasks. + fn drain(&mut self) -> Box + '_>; + + /// Returns a cloned list of contained tasks being scheduled by this scheduler. + /// + /// The list should be considered out-of-date as soon as it is called, + /// but can be useful as a heuristic or for debugging. + fn tasks(&self) -> Vec; +} + +/// A task scheduler that supports some notion of priority. +pub trait PriorityScheduler { + /// Sets the priority of the given task. + fn set_priority(&mut self, task: &TaskRef, priority: u8) -> bool; + + /// Gets the priority of the given task. + fn priority(&mut self, task: &TaskRef) -> Option; +} + +/// Returns the priority of the given task. +/// +/// Returns `None` if the task is not on a priority run queue. +pub fn priority(task: &TaskRef) -> Option { + for (_, scheduler) in SCHEDULERS.lock().iter() { + if let Some(priority) = scheduler + .lock() + .as_priority_scheduler() + .and_then(|priority_scheduler| priority_scheduler.priority(task)) + { + return Some(priority); + } + } + None +} + +/// Sets the priority of the given task. +/// +/// Returns `None` if the task is not on a priority run queue. +pub fn set_priority(task: &TaskRef, priority: u8) -> bool { + for (_, scheduler) in SCHEDULERS.lock().iter() { + if let Some(true) = scheduler + .lock() + .as_priority_scheduler() + .map(|priority_scheduler| priority_scheduler.set_priority(task, priority)) + { + return true; + } + } + false +} + +/// Returns the busyness of the scheduler on the given CPU, +/// in which higher values indicate a busier scheduler. +pub fn busyness(cpu_id: CpuId) -> Option { + for (cpu, scheduler) in SCHEDULERS.lock().iter() { + if *cpu == cpu_id { + return Some(scheduler.lock().busyness()); + } + } + None +} + +/// Modifies the given task's priority to be the maximum of its priority +/// and the current task's priority. +/// +/// Returns a guard which reverts the change when dropped. +pub fn inherit_priority(task: &TaskRef) -> PriorityInheritanceGuard<'_> { + let current_priority = super::with_current_task(priority).unwrap(); + let other_priority = priority(task); + + if let (Some(current_priority), Some(other_priority)) = + (current_priority, other_priority) && current_priority > other_priority + { + set_priority(task, current_priority); + } + + PriorityInheritanceGuard { + inner: if let (Some(current_priority), Some(other_priority)) = + (current_priority, other_priority) + && current_priority > other_priority + { + Some((task, other_priority)) + } else { + None + }, + } +} + +/// A guard that lowers a task's priority back to its previous value when dropped. +pub struct PriorityInheritanceGuard<'a> { + inner: Option<(&'a TaskRef, u8)>, +} +impl<'a> Drop for PriorityInheritanceGuard<'a> { + fn drop(&mut self) { + if let Some((task, priority)) = self.inner { + set_priority(task, priority); + } + } +} + +/// Returns the list of tasks running on each CPU. +/// +/// To avoid race conditions with migrating tasks, this function takes a lock +/// over all system schedulers. This is incredibly disruptive and should be +/// avoided at all costs. +pub fn tasks() -> Vec<(CpuId, Vec)> { + let schedulers = SCHEDULERS.lock().clone(); + let locked = schedulers + .iter() + .map(|(cpu, scheduler)| (cpu, scheduler.lock())) + // We eagerly evaluate so that all schedulers are actually locked. + .collect::>(); + let result = locked + .iter() + .map(|(cpu, locked_scheduler)| (**cpu, locked_scheduler.tasks())) + .collect(); + drop(locked); + result +} diff --git a/theseus_features/Cargo.toml b/theseus_features/Cargo.toml index 5eb9e46b0f..47bebb30aa 100644 --- a/theseus_features/Cargo.toml +++ b/theseus_features/Cargo.toml @@ -76,7 +76,6 @@ test_wasmtime = { path = "../applications/test_wasmtime", optional = true } bm = { path = "../applications/bm", optional = true } channel_eval = { path = "../applications/channel_eval", optional = true } heap_eval = { path = "../applications/heap_eval", optional = true } -rq_access_eval = { path = "../applications/rq_access_eval", optional = true } rq_eval = { path = "../applications/rq_eval", optional = true } scheduler_eval = { path = "../applications/scheduler_eval", optional = true } @@ -138,7 +137,6 @@ theseus_benchmarks = [ "bm", "channel_eval", "heap_eval", - "rq_access_eval", "rq_eval", "scheduler_eval", ] From 1be62d4a266f1a85520b61b43ac4b4834a49b7b8 Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Wed, 4 Oct 2023 07:15:01 +0800 Subject: [PATCH 07/25] Add support for the WAET ACPI table (#1050) * Currently not used, but fully working. * Can be used to optimize RTC and ACPI PM timer usage. --- Cargo.lock | 12 +++++ kernel/acpi/Cargo.toml | 3 ++ kernel/acpi/acpi_table_handler/Cargo.toml | 3 ++ kernel/acpi/acpi_table_handler/src/lib.rs | 1 + kernel/acpi/src/lib.rs | 13 ++++- kernel/acpi/waet/Cargo.toml | 19 +++++++ kernel/acpi/waet/src/lib.rs | 63 +++++++++++++++++++++++ 7 files changed, 113 insertions(+), 1 deletion(-) create mode 100644 kernel/acpi/waet/Cargo.toml create mode 100644 kernel/acpi/waet/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 2675ed3d05..d6baaea72b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,6 +19,7 @@ dependencies = [ "rsdt", "spin 0.9.4", "time", + "waet", ] [[package]] @@ -43,6 +44,7 @@ dependencies = [ "madt", "memory", "rsdt", + "waet", ] [[package]] @@ -4318,6 +4320,16 @@ dependencies = [ "quote", ] +[[package]] +name = "waet" +version = "0.1.0" +dependencies = [ + "acpi_table", + "memory", + "sdt", + "zerocopy", +] + [[package]] name = "wait_condition" version = "0.1.0" diff --git a/kernel/acpi/Cargo.toml b/kernel/acpi/Cargo.toml index 82c87197a3..c39a2505a7 100644 --- a/kernel/acpi/Cargo.toml +++ b/kernel/acpi/Cargo.toml @@ -27,6 +27,9 @@ path = "rsdt" [dependencies.fadt] path = "fadt" +[dependencies.waet] +path = "waet" + [dependencies.madt] path = "madt" diff --git a/kernel/acpi/acpi_table_handler/Cargo.toml b/kernel/acpi/acpi_table_handler/Cargo.toml index 4ab735e4c7..d089c1075b 100644 --- a/kernel/acpi/acpi_table_handler/Cargo.toml +++ b/kernel/acpi/acpi_table_handler/Cargo.toml @@ -20,6 +20,9 @@ path = "../rsdt" [dependencies.fadt] path = "../fadt" +[dependencies.waet] +path = "../waet" + [dependencies.hpet] path = "../hpet" diff --git a/kernel/acpi/acpi_table_handler/src/lib.rs b/kernel/acpi/acpi_table_handler/src/lib.rs index 4f212124c6..1b4c4e28ad 100644 --- a/kernel/acpi/acpi_table_handler/src/lib.rs +++ b/kernel/acpi/acpi_table_handler/src/lib.rs @@ -35,6 +35,7 @@ pub fn acpi_table_handler( rsdt::RSDT_SIGNATURE | rsdt::XSDT_SIGNATURE => rsdt::handle(acpi_tables, signature, length, phys_addr), fadt::FADT_SIGNATURE => fadt::handle(acpi_tables, signature, length, phys_addr), + waet::WAET_SIGNATURE => waet::handle(acpi_tables, signature, length, phys_addr), hpet::HPET_SIGNATURE => hpet::handle(acpi_tables, signature, length, phys_addr), madt::MADT_SIGNATURE => madt::handle(acpi_tables, signature, length, phys_addr), dmar::DMAR_SIGNATURE => dmar::handle(acpi_tables, signature, length, phys_addr), diff --git a/kernel/acpi/src/lib.rs b/kernel/acpi/src/lib.rs index ec62370b85..25f27f4d24 100644 --- a/kernel/acpi/src/lib.rs +++ b/kernel/acpi/src/lib.rs @@ -1,4 +1,4 @@ -//! Code to parse the ACPI tables, based off of Redox. +//! Code to parse the ACPI tables. #![no_std] extern crate alloc; @@ -63,6 +63,17 @@ pub fn init(rsdp_address: Option, page_table: &mut PageTable) - // here: do something with the DSDT here, when needed. // debug!("DSDT physical address: {:#X}", {_fadt.dsdt}); } + + // WAET is optional, and contains info about potentially optimizing timer-related actions. + { + let acpi_tables = ACPI_TABLES.lock(); + if let Some(waet) = waet::Waet::get(&acpi_tables) { + // here: do something with the WAET here, if desired. + debug!("WAET: RTC? {:?}. ACPI PM timer? {:?}", + waet.rtc_good(), waet.acpi_pm_timer_good(), + ); + } + } // HPET is optional, but usually present. { diff --git a/kernel/acpi/waet/Cargo.toml b/kernel/acpi/waet/Cargo.toml new file mode 100644 index 0000000000..e1c2172fd8 --- /dev/null +++ b/kernel/acpi/waet/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "waet" +version = "0.1.0" +authors = ["Kevin Boos "] +description = "Support for ACPI WAET" +edition = "2021" + +[dependencies] +zerocopy = "0.5.0" + +[dependencies.memory] +path = "../../memory" + +[dependencies.sdt] +path = "../sdt" + +[dependencies.acpi_table] +path = "../acpi_table" + diff --git a/kernel/acpi/waet/src/lib.rs b/kernel/acpi/waet/src/lib.rs new file mode 100644 index 0000000000..3a33ceb7f5 --- /dev/null +++ b/kernel/acpi/waet/src/lib.rs @@ -0,0 +1,63 @@ +//! Definitions for WAET, the Windows ACPI Emulated devices Table. + +#![no_std] + +use memory::PhysicalAddress; +use sdt::Sdt; +use acpi_table::{AcpiSignature, AcpiTables}; +use zerocopy::FromBytes; + + +pub const WAET_SIGNATURE: &[u8; 4] = b"WAET"; + + +/// The handler for parsing the WAET table and adding it to the ACPI tables list. +pub fn handle( + acpi_tables: &mut AcpiTables, + signature: AcpiSignature, + _length: usize, + phys_addr: PhysicalAddress +) -> Result<(), &'static str> { + acpi_tables.add_table_location(signature, phys_addr, None) +} + + +/// The Windows ACPI Emulated devices Table (WAET) allows virtualized OSes +/// to avoid workarounds for errata on physical devices. +/// +/// +#[repr(C, packed)] +#[derive(Clone, Copy, Debug, FromBytes)] +pub struct Waet { + pub header: Sdt, + pub emulated_device_flags: u32, +} +const _: () = assert!(core::mem::size_of::() == 40); +const _: () = assert!(core::mem::align_of::() == 1); + +impl Waet { + /// Finds the WAET in the given `AcpiTables` and returns a reference to it. + pub fn get(acpi_tables: &AcpiTables) -> Option<&Waet> { + acpi_tables.table(WAET_SIGNATURE).ok() + } + + /// Returns whether the RTC has been enhanced not to require + /// acknowledgment after it asserts an interrupt. + /// + /// If this returns `true`, an interrupt handler can bypass + /// reading the RTC register to unlatch the pending interrupt. + pub fn rtc_good(&self) -> bool { + const RTC_GOOD: u32 = 1 << 0; + self.emulated_device_flags & RTC_GOOD == RTC_GOOD + } + + /// Returns whether the ACPI PM timer has been enhanced not to require + /// multiple reads. + /// + /// If this returns `true`, only a single read of the ACPI PM timer is + /// necessary to obtain a reliable value from it. + pub fn acpi_pm_timer_good(&self) -> bool { + const ACPI_PM_TIMER_GOOD: u32 = 1 << 1; + self.emulated_device_flags & ACPI_PM_TIMER_GOOD == ACPI_PM_TIMER_GOOD + } +} From e9416d62bec3b68274a69f302102705894756def Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Thu, 5 Oct 2023 15:47:02 +1100 Subject: [PATCH 08/25] Add new, more robust scheduler test (#1045) * The new `test_scheduler` is significantly more robust than the old one. Currently, the test isn't particularly useful because we don't have task migration enabled, but #1042 will add implicit task migration when unblocking a task. * Hence, the test currently focuses on blocking/unblocking tasks. * Add a function to iterate over all initialized CPUs. Signed-off-by: Klimenty Tsoutsman --- Cargo.lock | 5 +- applications/test_scheduler/Cargo.toml | 29 ++-- applications/test_scheduler/src/lib.rs | 181 +++++++++++++++++-------- kernel/cpu/src/aarch64.rs | 5 + kernel/cpu/src/x86_64.rs | 4 + kernel/task_struct/src/lib.rs | 4 +- 6 files changed, 156 insertions(+), 72 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d6baaea72b..5998222a77 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3836,10 +3836,13 @@ dependencies = [ name = "test_scheduler" version = "0.1.0" dependencies = [ + "app_io", "cpu", "log", - "scheduler", + "rand", + "random", "spawn", + "sync_block", "task", ] diff --git a/applications/test_scheduler/Cargo.toml b/applications/test_scheduler/Cargo.toml index 35b8a30f17..74bfeb63ef 100644 --- a/applications/test_scheduler/Cargo.toml +++ b/applications/test_scheduler/Cargo.toml @@ -1,21 +1,20 @@ [package] name = "test_scheduler" version = "0.1.0" -authors = ["Namitha Liyanage "] +authors = ["Klim Tsoutsman "] +description = "An application to test the scheduler" +edition = "2021" [dependencies] +app_io = { path = "../../kernel/app_io" } +cpu = { path = "../../kernel/cpu" } +log = "0.4.8" +random = { path = "../../kernel/random" } +spawn = { path = "../../kernel/spawn" } +sync_block = { path = "../../kernel/sync_block" } +task = { path = "../../kernel/task" } -[dependencies.log] -version = "0.4.8" - -[dependencies.spawn] -path = "../../kernel/spawn" - -[dependencies.scheduler] -path = "../../kernel/scheduler" - -[dependencies.task] -path = "../../kernel/task" - -[dependencies.cpu] -path = "../../kernel/cpu" +[dependencies.rand] +version = "0.8.5" +default-features = false +features = ["small_rng"] diff --git a/applications/test_scheduler/src/lib.rs b/applications/test_scheduler/src/lib.rs index 238034f249..7b44c297dc 100644 --- a/applications/test_scheduler/src/lib.rs +++ b/applications/test_scheduler/src/lib.rs @@ -1,78 +1,151 @@ #![no_std] -#[macro_use] extern crate log; extern crate alloc; -extern crate spawn; -extern crate scheduler; -extern crate task; -extern crate cpu; -use core::convert::TryFrom; - -use alloc::string::String; -use alloc::vec::Vec; -use cpu::CpuId; +use alloc::{format, string::String, vec::Vec}; +use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use app_io::println; +use cpu::{cpus, CpuId}; +use rand::seq::SliceRandom; +use sync_block::RwLock; +use task::TaskRef; pub fn main(_args: Vec) -> isize { - let cpu_1 = CpuId::try_from(1).expect("CPU ID 1 did not exist"); - - let taskref1 = spawn::new_task_builder(worker, ()) - .name(String::from("test1")) - .pin_on_cpu(cpu_1) - .spawn().expect("failed to initiate task"); + println!("testing pinned"); + test_pinned(); + println!("testing unpinned"); + test_unpinned(); + 0 +} - if !scheduler::set_priority(&taskref1, 30) { - error!("scheduler_eval(): Could not set priority to taskref1"); +// Spawn a bunch of pinned tasks, and then each pinned task randomly blocks and +// unblocks other tasks than are pinned to the same CPU. +// +// The tasks must be pinned to the same CPU to avoid a deadlock where two tasks +// on different CPUs block each other at the same time and then yield. +pub fn test_pinned() { + static TASKS: RwLock)>> = RwLock::new(Vec::new()); + static READY: AtomicBool = AtomicBool::new(false); + + let tasks = cpus() + .map(|cpu| { + ( + #[allow(clippy::clone_on_copy)] + cpu.clone(), + (0..100) + .map(move |id| { + spawn::new_task_builder(pinned_worker, cpu) + .name(format!("test-scheduler-pinned-{cpu}-{id}")) + .pin_on_cpu(cpu) + .block() + .spawn() + .expect("failed to spawn task") + }) + .collect::>(), + ) + }) + .collect::>(); + + *TASKS.write() = tasks + .iter() + .map(|(cpu, task_iter)| (*cpu, task_iter.iter().map(|task| (*task).clone()).collect())) + .collect(); + + for (_, task_list) in tasks.iter() { + for task in task_list { + task.unblock().unwrap(); + } } - debug!("Spawned Task 1"); - - let taskref2 = spawn::new_task_builder(worker, ()) - .name(String::from("test2")) - .pin_on_cpu(cpu_1) - .spawn().expect("failed to initiate task"); + READY.store(true, Ordering::Release); - if !scheduler::set_priority(&taskref2, 20) { - error!("scheduler_eval(): Could not set priority to taskref2"); + for (_, task_list) in tasks { + for task in task_list { + task.join().unwrap(); + } } - debug!("Spawned Task 2"); - - let taskref3 = spawn::new_task_builder(worker, ()) - .name(String::from("test3")) - .pin_on_cpu(cpu_1) - .spawn().expect("failed to initiate task"); - - if !scheduler::set_priority(&taskref3, 10) { - error!("scheduler_eval(): Could not set priority to taskref3"); + // We have to drop the tasks so that the `test-scheduler` crate can be dropped. + *TASKS.write() = Vec::new(); + + fn pinned_worker(pinned_cpu: CpuId) { + let mut rng = random::init_rng::().unwrap(); + while !READY.load(Ordering::Acquire) { + core::hint::spin_loop(); + } + + let locked = TASKS.read(); + let tasks = &locked.iter().find(|(cpu, _)| *cpu == pinned_cpu).unwrap().1; + for _ in 0..100 { + assert_eq!( + cpu::current_cpu(), + pinned_cpu, + "pinned worker migrated cores" + ); + + let random_task = tasks.choose(&mut rng).unwrap(); + + let chose_self = + task::with_current_task(|current_task| random_task == current_task).unwrap(); + if chose_self { + continue; + } + + let _ = random_task.block(); + task::schedule(); + let _ = random_task.unblock(); + } } +} - debug!("Spawned Task 3"); +/// Spawn a bunch of unpinned tasks, and then block and unblock random tasks +/// from the main thread. +pub fn test_unpinned() { + const NUM_TASKS: usize = 500; + + static READY: AtomicBool = AtomicBool::new(false); + static NUM_RUNNING: AtomicUsize = AtomicUsize::new(NUM_TASKS); + + let tasks = (0..NUM_TASKS) + .map(move |id| { + spawn::new_task_builder(unpinned_worker, ()) + .name(format!("test-scheduler-unpinned-{id}")) + .block() + .spawn() + .expect("failed to spawn task") + }) + .collect::>(); + + for task in tasks.iter() { + task.unblock().unwrap(); + } - debug!("Spawned all tasks"); + READY.store(true, Ordering::Release); - let _priority1 = scheduler::priority(&taskref1); - let _priority2 = scheduler::priority(&taskref2); - let _priority3 = scheduler::priority(&taskref3); + // Cause some mayhem. + let mut rng = random::init_rng::().unwrap(); + while NUM_RUNNING.load(Ordering::Relaxed) != 0 { + let random_task = tasks.choose(&mut rng).unwrap(); + let _ = random_task.block(); + // Let the worker tasks on this core run. + task::schedule(); + let _ = random_task.unblock(); + } - #[cfg(epoch_scheduler)] - { - assert_eq!(_priority1,Some(30)); - assert_eq!(_priority2,Some(20)); - assert_eq!(_priority3,Some(10)); + for task in tasks { + task.join().unwrap(); } - taskref1.join().expect("Task 1 join failed"); - taskref2.join().expect("Task 2 join failed"); - taskref3.join().expect("Task 3 join failed"); + fn unpinned_worker(_: ()) { + while !READY.load(Ordering::Acquire) { + core::hint::spin_loop(); + } - 0 -} + for _ in 0..1000 { + task::schedule(); + } -fn worker(_: ()) { - for i in 0..1000 { - debug!("Task_ID : {} , Instance : {}", task::get_my_current_task_id(), i); - scheduler::schedule(); + NUM_RUNNING.fetch_sub(1, Ordering::Relaxed); } } diff --git a/kernel/cpu/src/aarch64.rs b/kernel/cpu/src/aarch64.rs index 84865dd0ac..31ea3b1dac 100644 --- a/kernel/cpu/src/aarch64.rs +++ b/kernel/cpu/src/aarch64.rs @@ -40,6 +40,11 @@ pub fn register_cpu(bootstrap: bool) -> Result<(), &'static str> { } } +// Returns an iterator over the available CPUs. +pub fn cpus() -> impl Iterator { + ONLINE_CPUS.read().clone().into_iter() +} + /// Returns the number of CPUs (SMP cores) that exist and /// are currently initialized on this system. pub fn cpu_count() -> u32 { diff --git a/kernel/cpu/src/x86_64.rs b/kernel/cpu/src/x86_64.rs index de158ef812..2f85d5ddd0 100644 --- a/kernel/cpu/src/x86_64.rs +++ b/kernel/cpu/src/x86_64.rs @@ -26,6 +26,10 @@ impl TryFrom for CpuId { } } +// Returns an iterator over the available CPUs. +pub fn cpus() -> impl Iterator { + apic::get_lapics().iter().map(|(apic_id, _)| (*apic_id).into()) +} /// Returns the number of CPUs (SMP cores) that exist and /// are currently initialized on this system. diff --git a/kernel/task_struct/src/lib.rs b/kernel/task_struct/src/lib.rs index 7f0e47b82c..273fc3deb7 100755 --- a/kernel/task_struct/src/lib.rs +++ b/kernel/task_struct/src/lib.rs @@ -478,7 +478,7 @@ impl Task { if self.runstate.compare_exchange(Runnable, Blocked).is_ok() { Ok(Runnable) } else if self.runstate.compare_exchange(Blocked, Blocked).is_ok() { - warn!("Blocked an already blocked task: {:?}", self); + // warn!("Blocked an already blocked task: {:?}", self); Ok(Blocked) } else { Err(self.runstate.load()) @@ -510,7 +510,7 @@ impl Task { if self.runstate.compare_exchange(Blocked, Runnable).is_ok() { Ok(Blocked) } else if self.runstate.compare_exchange(Runnable, Runnable).is_ok() { - warn!("Unblocked an already runnable task: {:?}", self); + // warn!("Unblocked an already runnable task: {:?}", self); Ok(Runnable) } else { Err(self.runstate.load()) From 493ac602e893b18ca233bd748bf79b2403ef894d Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Sun, 8 Oct 2023 05:29:05 +0800 Subject: [PATCH 09/25] Redesign the interrupt controller crate; support x86_64 (#1051) * The `interrupt_controller` crate still is not used on x86_64, but it is now nearly fully implemented. * Move initialization of x86-specific interrupt controllers (i.e., APIC) into the x86-specific module of `interrupt_controller`, which makes more sense than doing it from the `device_manager`. * As such, the `captain` can now unconditionally initialize the interrupt controller crate, which works on all architectures. Two remaining to-do items, which will come in a future PR: 1. Implement `enable_local_timer_interrupt()` for aarch64 * This requires moving some aarch64-specific functionality for setting up the system's per-CPU timer out of the `interrupts` crate and into another crate. 2. Implement `SystemInterruptController::get()` for x86_64. * This is a bit challenging because there can be _multiple_ system-wide interrupt controllers (I/O APICs) on x86_64, so we can't simply implement a basic getter function. * We may need to offer an iterator interface, or perhaps a getter that takes the current CPU or NUMA domain or some kind of other identifier into account. * In the future, we'll move each `LocalInterruptController` instance into CPU-local storage to allow for very fast access. --- Cargo.lock | 2 + kernel/acpi/madt/src/lib.rs | 4 +- kernel/acpi/src/lib.rs | 7 - kernel/captain/src/lib.rs | 15 +- kernel/device_manager/src/lib.rs | 7 +- kernel/interrupt_controller/Cargo.toml | 17 +- kernel/interrupt_controller/src/aarch64.rs | 228 ++++++++++++++------- kernel/interrupt_controller/src/lib.rs | 120 +++++------ kernel/interrupt_controller/src/x86_64.rs | 99 ++++----- kernel/interrupts/src/aarch64/mod.rs | 52 +++-- kernel/ioapic/src/lib.rs | 21 +- kernel/serial_port/src/lib.rs | 19 +- 12 files changed, 318 insertions(+), 273 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5998222a77..15d303f40d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1573,12 +1573,14 @@ dependencies = [ name = "interrupt_controller" version = "0.1.0" dependencies = [ + "acpi", "apic", "arm_boards", "cpu", "gic", "ioapic", "log", + "madt", "memory", "spin 0.9.4", "sync_irq", diff --git a/kernel/acpi/madt/src/lib.rs b/kernel/acpi/madt/src/lib.rs index 3a8437599e..eceb42b681 100644 --- a/kernel/acpi/madt/src/lib.rs +++ b/kernel/acpi/madt/src/lib.rs @@ -355,7 +355,7 @@ fn handle_bsp_lapic_entry(madt_iter: MadtIter, page_table: &mut PageTable) -> Re // Redirect every IoApic's interrupts to the one BSP. // TODO: long-term, we should distribute interrupts across CPUs more evenly. - for (_ioapic_id, ioapic) in ioapic::get_ioapics().iter() { + for (_ioapic_id, ioapic) in ioapic::get_ioapics() { let mut ioapic_ref = ioapic.lock(); // Set the BSP to receive regular PIC interrupts routed through the IoApic. @@ -380,7 +380,7 @@ fn handle_bsp_lapic_entry(madt_iter: MadtIter, page_table: &mut PageTable) -> Re let mut handled = false; // find the IoApic that should handle this interrupt source override entry - for (_id, ioapic) in ioapic::get_ioapics().iter() { + for (_id, ioapic) in ioapic::get_ioapics() { let mut ioapic_ref = ioapic.lock(); if ioapic_ref.handles_irq(int_src.gsi) { // using BSP for now, but later we could redirect the IRQ to more (or all) cores diff --git a/kernel/acpi/src/lib.rs b/kernel/acpi/src/lib.rs index 25f27f4d24..12934fae32 100644 --- a/kernel/acpi/src/lib.rs +++ b/kernel/acpi/src/lib.rs @@ -86,13 +86,6 @@ pub fn init(rsdp_address: Option, page_table: &mut PageTable) - warn!("This machine has no HPET."); } }; - - // MADT is mandatory - { - let acpi_tables = ACPI_TABLES.lock(); - let madt = madt::Madt::get(&acpi_tables).ok_or("The required MADT ACPI table wasn't found (signature 'APIC')")?; - madt.bsp_init(page_table)?; - } // If we have a DMAR table, use it to obtain IOMMU info. { diff --git a/kernel/captain/src/lib.rs b/kernel/captain/src/lib.rs index ced3b62b5b..56998a4bb2 100644 --- a/kernel/captain/src/lib.rs +++ b/kernel/captain/src/lib.rs @@ -93,19 +93,17 @@ pub fn init( log::warn!("Couldn't get TSC period"); } - // now we initialize early driver stuff, like APIC/ACPI - // arch-gate: device_manager currently detects PCI & PS2 devices, - // which are unsupported on aarch64 at this point + // Initialize early devices, which currently only includes ACPI (x86-specific). #[cfg(target_arch = "x86_64")] device_manager::early_init(rsdp_address, kernel_mmi_ref.lock().deref_mut())?; - // initialize the rest of the BSP's interrupt stuff, including TSS & GDT + // Initialize local and system-wide interrupt controllers. + interrupt_controller::init(&kernel_mmi_ref)?; + + // Initialize other arch-specific interrupt stuff, e.g., basic interrupt handlers. // arch-gate: the IDT & special stacks are x86_64 specific #[cfg(target_arch = "x86_64")] let idt = { - // does nothing at the moment on x86_64 - interrupt_controller::init()?; - let (double_fault_stack, privilege_stack) = { let mut kernel_mmi = kernel_mmi_ref.lock(); ( @@ -119,9 +117,6 @@ pub fn init( }; #[cfg(target_arch = "aarch64")] { - // Initialize the GIC - interrupt_controller::init()?; - interrupts::init()?; irq_safety::enable_fast_interrupts(); diff --git a/kernel/device_manager/src/lib.rs b/kernel/device_manager/src/lib.rs index b0a806a80f..bf9dc46754 100644 --- a/kernel/device_manager/src/lib.rs +++ b/kernel/device_manager/src/lib.rs @@ -21,18 +21,13 @@ use { /// Performs early-stage initialization for simple devices needed during early boot. /// /// This includes: -/// * local APICs ([`apic`]), /// * [`acpi`] tables for system configuration info, including the IOAPIC. #[cfg(target_arch = "x86_64")] pub fn early_init( rsdp_address: Option, kernel_mmi: &mut MemoryManagementInfo ) -> Result<(), &'static str> { - // First, initialize the local APIC hardware such that we can populate - // and initialize each LocalAPIC discovered in the ACPI table initialization routine below. - apic::init(); - - // Then, parse the ACPI tables to acquire system configuration info. + // Parse the ACPI tables to acquire system configuration info. acpi::init(rsdp_address, &mut kernel_mmi.page_table)?; Ok(()) diff --git a/kernel/interrupt_controller/Cargo.toml b/kernel/interrupt_controller/Cargo.toml index 8d237fea46..3f3e834dc8 100644 --- a/kernel/interrupt_controller/Cargo.toml +++ b/kernel/interrupt_controller/Cargo.toml @@ -1,21 +1,26 @@ [package] -authors = ["Nathan Royer "] -description = "Cross-platform abstraction over interrupt controllers" +name = "interrupt_controller" version = "0.1.0" +authors = [ + "Nathan Royer ", + "Kevin Boos ", +] +description = "Cross-platform abstraction over interrupt controllers" edition = "2021" -name = "interrupt_controller" [dependencies] log = "0.4.8" cpu = { path = "../cpu" } +memory = { path = "../memory" } +spin = "0.9.4" +sync_irq = { path = "../../libs/sync_irq" } [target.'cfg(target_arch = "aarch64")'.dependencies] -sync_irq = { path = "../../libs/sync_irq" } arm_boards = { path = "../arm_boards" } -memory = { path = "../memory" } gic = { path = "../gic" } -spin = "0.9.4" [target.'cfg(target_arch = "x86_64")'.dependencies] +acpi = { path = "../acpi" } apic = { path = "../apic" } ioapic = { path = "../ioapic" } +madt = { path = "../acpi/madt" } diff --git a/kernel/interrupt_controller/src/aarch64.rs b/kernel/interrupt_controller/src/aarch64.rs index f2b3d7f63a..d4cd775081 100644 --- a/kernel/interrupt_controller/src/aarch64.rs +++ b/kernel/interrupt_controller/src/aarch64.rs @@ -21,18 +21,19 @@ pub struct SystemInterruptControllerId(pub u8); #[derive(Debug, Copy, Clone)] pub struct LocalInterruptControllerId(pub u16); -/// Per-CPU local interrupt controller +/// The list of all per-CPU local interrupt controllers on this system. /// /// To get the controller for a specific CPU: -/// a. Find the position of its CpuId in `BOARD_CONFIG.cpu_ids` -/// b. Index into this array using that position +/// 1. Find the position of its CpuId in `BOARD_CONFIG.cpu_ids` +/// 2. Index into this array using that position static LOCAL_INT_CTRL: Once<[LocalInterruptController; NUM_CPUS]> = Once::new(); -/// System-wide interrupt controller +/// The singleton instance of a system-wide interrupt controller. static SYSTEM_WIDE_INT_CTRL: Once = Once::new(); + /// Initializes the interrupt controller, on aarch64 -pub fn init() -> Result<(), &'static str> { +pub fn init(_kernel_mmi: &memory::MmiRef) -> Result<(), &'static str> { match BOARD_CONFIG.interrupt_controller { InterruptControllerConfig::GicV3(gicv3_cfg) => { let version = GicVersion::InitV3 { @@ -63,33 +64,31 @@ pub fn init() -> Result<(), &'static str> { Ok(()) } + /// Structure representing a top-level/system-wide interrupt controller chip, /// responsible for routing interrupts between peripherals and CPU cores. /// /// On aarch64 w/ GIC, this corresponds to the Distributor. pub struct SystemInterruptController(IrqSafeMutex); - -/// Struct representing per-cpu-core interrupt controller chips. -/// -/// On aarch64 w/ GIC, this corresponds to a Redistributor & CPU interface. -pub struct LocalInterruptController(UnsafeCell>); - -unsafe impl Send for LocalInterruptController {} -unsafe impl Sync for LocalInterruptController {} - -impl SystemInterruptControllerApi for SystemInterruptController { - fn get() -> &'static Self { - SYSTEM_WIDE_INT_CTRL.get().expect("interrupt_controller wasn't initialized") +impl SystemInterruptController { + /// Returns a reference to the single system-wide interrupt controller, + /// if it has been initialized. + pub fn get() -> Option<&'static SystemInterruptController> { + SYSTEM_WIDE_INT_CTRL.get() } +} +impl SystemInterruptControllerApi for SystemInterruptController { fn id(&self) -> SystemInterruptControllerId { - let dist = self.0.lock(); - SystemInterruptControllerId(dist.implementer().product_id) + SystemInterruptControllerId( + self.0.lock().implementer().product_id + ) } fn version(&self) -> SystemInterruptControllerVersion { - let dist = self.0.lock(); - SystemInterruptControllerVersion(dist.implementer().version) + SystemInterruptControllerVersion( + self.0.lock().implementer().version + ) } fn get_destination( @@ -129,7 +128,6 @@ impl SystemInterruptControllerApi for SystemInterruptController { }; let mut dist = self.0.lock(); - if let Some(destination) = destination { dist.set_spi_target(sys_int_num as _, SpiDestination::Specific(destination)); dist.set_spi_priority(sys_int_num as _, priority); @@ -141,47 +139,128 @@ impl SystemInterruptControllerApi for SystemInterruptController { } } + +/// Struct representing a per-CPU interrupt controller chip. +/// +/// On aarch64 w/ GIC, this corresponds to a Redistributor & CPU interface. +/// +/// ## Implementation note +/// The inner `ArmGicCpuComponents` object is wrapped in an `UnsafeCell`, +/// which allows us to access it within the context of a fast interrupt (FIQ). +/// This is unfortunately mandatory as there is no way to obtain a lock safely +/// or correctly from within a FIQ context, since they can interrupt other +/// normal interrupts at any time, even when regular interrupts are disabled. +pub struct LocalInterruptController(UnsafeCell>); +unsafe impl Send for LocalInterruptController {} +unsafe impl Sync for LocalInterruptController {} + +/// A macro to safely lock a `LocalInterruptController` instance. macro_rules! lock { ($this:ident) => (unsafe { $this.0.get().as_ref().unwrap().lock() }) } -impl LocalInterruptControllerApi for LocalInterruptController { - fn get() -> &'static Self { +impl LocalInterruptController { + /// Returns a reference to the current CPU's local interrupt controller, + /// if it has been initialized. + pub fn get() -> Option<&'static LocalInterruptController> { // how this function works: - // a. get the current CpuId: this CpuId of the current CPU - // b. iterate on all valid CpuIds, find the index of the current CpuId. - // This is used as a current CPU index. - // c. get the global array of interrupt controllers - // d. index into this array based on the current CPU index - - let cpu_id = current_cpu(); - // While we're waiting for cpu-local-storage, this loop will work as fine as an AtomicMap - let index = BOARD_CONFIG.cpu_ids.iter().position(|mpidr| cpu_id == (*mpidr).into()); - let index = index.expect("Invalid CpuId returned by current_cpu()"); - - let ctrls = LOCAL_INT_CTRL.get(); - let ctrls = ctrls.expect("interrupt_controller wasn't initialized"); - - &ctrls[index] + // 1. Get the current CPU's CpuId + // 2. Iterate over the static list of all valid CpuIds (from the board config) + // to find the position of the current CpuId in that list. + // 3. Use that position as an index into the array of local interrupt controllers. + + // Since we don't yet have the ability to store Local interrupt controllers + // in CPU-local storage, we use this `LOCAL_INT_CTRL` array instead. + if let Some(locals) = LOCAL_INT_CTRL.get() { + let cpu_id = current_cpu(); + let index = BOARD_CONFIG.cpu_ids.iter() + .position(|mpidr| cpu_id == (*mpidr).into()); + let index = index.expect("BUG: current_cpu() returned invalid CpuId"); + locals.get(index) + } else { + None + } } +} +impl LocalInterruptControllerApi for LocalInterruptController { fn id(&self) -> LocalInterruptControllerId { let cpu_ctrl = lock!(self); LocalInterruptControllerId(cpu_ctrl.get_cpu_interface_id()) } - fn get_local_interrupt_priority(&self, num: InterruptNumber) -> Priority { - assert!(num < 32, "local interrupts have a number < 32"); - let cpu_ctrl = lock!(self); - cpu_ctrl.get_interrupt_priority(num as _) + fn enable_local_timer_interrupt(&self, enable: bool) { + todo!("invoke interrupts::enable_timer(enable)...") } - fn set_local_interrupt_priority(&self, num: InterruptNumber, priority: Priority) { - assert!(num < 32, "local interrupts have a number < 32"); + fn send_ipi(&self, num: InterruptNumber, dest: InterruptDestination) { + use InterruptDestination::*; + assert!(num < 16, "IPIs have a number < 16"); + + let dest = match dest { + SpecificCpu(cpu) => IpiTargetCpu::Specific(cpu), + AllOtherCpus => IpiTargetCpu::AllOtherCpus, + }; + let mut cpu_ctrl = lock!(self); - cpu_ctrl.set_interrupt_priority(num as _, priority); + + cpu_ctrl.send_ipi(num as _, dest, InterruptGroup::Group1); + } + + fn end_of_interrupt(&self, number: InterruptNumber) { + let mut cpu_ctrl = lock!(self); + cpu_ctrl.end_of_interrupt(number as _, InterruptGroup::Group1) } +} + + +/// Functionality for a local interrupt controller on aarch64 only. +pub trait AArch64LocalInterruptControllerApi { + fn is_local_interrupt_enabled(&self, num: InterruptNumber) -> bool; + fn enable_local_interrupt(&self, num: InterruptNumber, enabled: bool); + + fn get_local_interrupt_priority(&self, num: InterruptNumber) -> Priority; + fn set_local_interrupt_priority(&self, num: InterruptNumber, priority: Priority); + + /// Same as [`enable_local_interrupt`], but for fast interrupts (FIQs). + fn enable_fast_local_interrupt(&self, num: InterruptNumber, enabled: bool); + + /// Same as [`LocalInterruptControllerApi::send_ipi`], but for fast interrupts (FIQs). + fn send_fast_ipi(&self, num: InterruptNumber, dest: InterruptDestination); + + /// Returns the minimum priority for an interrupt to reach this CPU. + fn get_minimum_priority(&self) -> Priority; + + /// Changes the minimum priority for an interrupt to reach this CPU. + fn set_minimum_priority(&self, priority: Priority); + + /// Returns the currently-pending interrupt number and priority. + fn acknowledge_interrupt(&self) -> Option<(InterruptNumber, Priority)>; + /// Aarch64-specific way to initialize the secondary CPU interfaces. + /// + /// Must be called once from every secondary CPU. + fn init_secondary_cpu_interface(&self); + + /// Same as [`Self::acknowledge_interrupt`] but for fast interrupts (FIQs) + /// + /// # Safety + /// + /// This is unsafe because it circumvents the internal Mutex. + /// It must only be used by the `interrupts` crate when handling an FIQ. + unsafe fn acknowledge_fast_interrupt(&self) -> Option<(InterruptNumber, Priority)>; + + /// Same as [`LocalInterruptControllerApi::end_of_interrupt`] but for fast interrupts (FIQs) + /// + /// # Safety + /// + /// This is unsafe because it circumvents the internal Mutex. + /// It must only be used by the `interrupts` crate when handling an FIQ. + unsafe fn end_of_fast_interrupt(&self, number: InterruptNumber); +} + + +impl AArch64LocalInterruptControllerApi for LocalInterruptController { fn is_local_interrupt_enabled(&self, num: InterruptNumber) -> bool { assert!(num < 32, "local interrupts have a number < 32"); let cpu_ctrl = lock!(self); @@ -205,27 +284,39 @@ impl LocalInterruptControllerApi for LocalInterruptController { cpu_ctrl.set_interrupt_state(num as _, state); } - fn send_ipi(&self, num: InterruptNumber, dest: InterruptDestination) { - use InterruptDestination::*; - assert!(num < 16, "IPIs have a number < 16"); + fn get_local_interrupt_priority(&self, num: InterruptNumber) -> Priority { + assert!(num < 32, "local interrupts have a number < 32"); + let cpu_ctrl = lock!(self); + cpu_ctrl.get_interrupt_priority(num as _) + } - let dest = match dest { - SpecificCpu(cpu) => IpiTargetCpu::Specific(cpu), - AllOtherCpus => IpiTargetCpu::AllOtherCpus, - }; + fn set_local_interrupt_priority(&self, num: InterruptNumber, priority: Priority) { + assert!(num < 32, "local interrupts have a number < 32"); + let mut cpu_ctrl = lock!(self); + cpu_ctrl.set_interrupt_priority(num as _, priority); + } + + fn get_minimum_priority(&self) -> Priority { + let cpu_ctrl = lock!(self); + cpu_ctrl.get_minimum_priority() + } + fn set_minimum_priority(&self, priority: Priority) { let mut cpu_ctrl = lock!(self); + cpu_ctrl.set_minimum_priority(priority) + } - cpu_ctrl.send_ipi(num as _, dest, InterruptGroup::Group1); + fn acknowledge_interrupt(&self) -> Option<(InterruptNumber, Priority)> { + let mut cpu_ctrl = lock!(self); + let opt = cpu_ctrl.acknowledge_interrupt(InterruptGroup::Group1); + opt.map(|(num, prio)| (num as _, prio)) } - fn end_of_interrupt(&self, number: InterruptNumber) { + fn init_secondary_cpu_interface(&self) { let mut cpu_ctrl = lock!(self); - cpu_ctrl.end_of_interrupt(number as _, InterruptGroup::Group1) + cpu_ctrl.init_secondary_cpu_interface(); } -} -impl AArch64LocalInterruptControllerApi for LocalInterruptController { fn enable_fast_local_interrupt(&self, num: InterruptNumber, enabled: bool) { assert!(num < 32, "local interrupts have a number < 32"); let state = match enabled { @@ -250,27 +341,6 @@ impl AArch64LocalInterruptControllerApi for LocalInterruptController { cpu_ctrl.send_ipi(num as _, dest, InterruptGroup::Group0); } - fn get_minimum_priority(&self) -> Priority { - let cpu_ctrl = lock!(self); - cpu_ctrl.get_minimum_priority() - } - - fn set_minimum_priority(&self, priority: Priority) { - let mut cpu_ctrl = lock!(self); - cpu_ctrl.set_minimum_priority(priority) - } - - fn acknowledge_interrupt(&self) -> Option<(InterruptNumber, Priority)> { - let mut cpu_ctrl = lock!(self); - let opt = cpu_ctrl.acknowledge_interrupt(InterruptGroup::Group1); - opt.map(|(num, prio)| (num as _, prio)) - } - - fn init_secondary_cpu_interface(&self) { - let mut cpu_ctrl = lock!(self); - cpu_ctrl.init_secondary_cpu_interface(); - } - unsafe fn acknowledge_fast_interrupt(&self) -> Option<(InterruptNumber, Priority)> { // we cannot lock here // this has to be unsafe diff --git a/kernel/interrupt_controller/src/lib.rs b/kernel/interrupt_controller/src/lib.rs index 92d7f10e24..ffd6d28503 100644 --- a/kernel/interrupt_controller/src/lib.rs +++ b/kernel/interrupt_controller/src/lib.rs @@ -1,3 +1,5 @@ +//! Support for accessing interupt controllers across multiple architectures. + #![no_std] #![allow(unused_variables, unused_mut)] #![feature(array_try_from_fn)] @@ -6,14 +8,11 @@ extern crate alloc; use alloc::vec::Vec; use cpu::CpuId; +use memory::MmiRef; -#[cfg(target_arch = "aarch64")] -#[path = "aarch64.rs"] -pub mod arch; - -#[cfg(target_arch = "x86_64")] -#[path = "x86_64.rs"] -pub mod arch; +#[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")] +#[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")] +mod arch; pub use arch::{ SystemInterruptControllerVersion, @@ -22,12 +21,30 @@ pub use arch::{ Priority, SystemInterruptController, LocalInterruptController, - init, }; +#[cfg(target_arch = "aarch64")] +pub use arch::AArch64LocalInterruptControllerApi; + pub type InterruptNumber = u8; -/// The Cpu where this interrupt should be handled, as well as + +/// Initializes the interrupt controller(s) on this system. +/// +/// Depending on the architecture, this includes both the system-wide +/// interrupt controller(s) and one or more local (per-CPU) interrupt controllers. +/// * On x86_64 systems, this initializes the I/O APIC(s) (there may be more than one) +/// as the system-wide interrupt controller(s), and the Local APIC for the BSP +/// (bootstrap processor) only as the first local interrupt controller. +/// * Other Local APICs are initialized by those CPUs when they are brought online. +/// * On aarch64 systems with GIC, this initializes both the system-wide +/// interrupt controller (the GIC Distributor) as well as the local controllers +/// for all CPUs (their Redistributors and CPU interfaces). +pub fn init(kernel_mmi: &MmiRef) -> Result<(), &'static str> { + arch::init(kernel_mmi) +} + +/// The CPU where an interrupt should be handled, as well as /// the local interrupt number this gets translated to. /// /// On aarch64, there is no `local_number` field as the system interrupt @@ -38,79 +55,52 @@ pub enum InterruptDestination { AllOtherCpus, } +/// Functionality provided by system-wide interrupt controllers. +/// +/// Note that a system may actually have *multiple* system-wide interrupt controllers. +/// +/// * On x86_64, this corresponds to an I/O APIC (IOAPIC). +/// * On aarch64 (with GIC), this corresponds to the Distributor. pub trait SystemInterruptControllerApi { - fn get() -> &'static Self; - + /// Returns the unique ID of this system-wide interrupt controller. fn id(&self) -> SystemInterruptControllerId; + + /// Returns the version ID of this system-wide interrupt controller. fn version(&self) -> SystemInterruptControllerVersion; + /// Returns the destination(s) that the given `interrupt` is routed to + /// by this system-wide interrupt controller. fn get_destination( &self, - interrupt_num: InterruptNumber, + interrupt: InterruptNumber, ) -> Result<(Vec, Priority), &'static str>; - + + /// Routes the given `interrupt` to the given `destination` with the given `priority`. fn set_destination( &self, - sys_int_num: InterruptNumber, + interrupt: InterruptNumber, destination: Option, priority: Priority, ) -> Result<(), &'static str>; } +/// Functionality provided by local interrupt controllers, +/// which exist on a per-CPU basis. +/// +/// * On x86_64, this corresponds to a Local APIC. +/// * On aarch64 (with GIC), this corresponds to the GIC Redistributor + CPU interface. pub trait LocalInterruptControllerApi { - fn get() -> &'static Self; - + /// Returns the unique ID of this local interrupt controller. fn id(&self) -> LocalInterruptControllerId; - fn get_local_interrupt_priority(&self, num: InterruptNumber) -> Priority; - fn set_local_interrupt_priority(&self, num: InterruptNumber, priority: Priority); - fn is_local_interrupt_enabled(&self, num: InterruptNumber) -> bool; - fn enable_local_interrupt(&self, num: InterruptNumber, enabled: bool); - - /// Sends an inter-processor interrupt. - /// - /// If `dest` is Some, the interrupt is sent to a specific CPU. - /// If it's None, all CPUs except the sender receive the interrupt. + + /// Enables or disables the local timer interrupt for this local interrupt controller. + fn enable_local_timer_interrupt(&self, enable: bool); + + /// Sends an inter-processor interrupt from this local interrupt controller + /// to the given destination. fn send_ipi(&self, num: InterruptNumber, dest: InterruptDestination); - /// Tell the interrupt controller that the current interrupt has been handled. + /// Tells this local interrupt controller that the interrupt being currently serviced + /// has been completely handled. fn end_of_interrupt(&self, number: InterruptNumber); } - -/// AArch64-specific methods of a local interrupt controller -pub trait AArch64LocalInterruptControllerApi { - /// Same as [`LocalInterruptControllerApi::enable_local_interrupt`] but for fast interrupts (FIQs). - fn enable_fast_local_interrupt(&self, num: InterruptNumber, enabled: bool); - - /// Same as [`LocalInterruptControllerApi::send_ipi`] but for fast interrupts (FIQs). - fn send_fast_ipi(&self, num: InterruptNumber, dest: InterruptDestination); - - /// Reads the minimum priority for an interrupt to reach this CPU. - fn get_minimum_priority(&self) -> Priority; - - /// Changes the minimum priority for an interrupt to reach this CPU. - fn set_minimum_priority(&self, priority: Priority); - - /// Aarch64-specific way to read the current pending interrupt number & priority. - fn acknowledge_interrupt(&self) -> Option<(InterruptNumber, Priority)>; - - /// Aarch64-specific way to initialize the secondary CPU interfaces. - /// - /// Must be called once from every secondary CPU. - fn init_secondary_cpu_interface(&self); - - /// Same as [`Self::acknowledge_interrupt`] but for fast interrupts (FIQs) - /// - /// # Safety - /// - /// This is unsafe because it circumvents the internal Mutex. - /// It must only be used by the `interrupts` crate when handling an FIQ. - unsafe fn acknowledge_fast_interrupt(&self) -> Option<(InterruptNumber, Priority)>; - - /// Same as [`LocalInterruptControllerApi::end_of_interrupt`] but for fast interrupts (FIQs) - /// - /// # Safety - /// - /// This is unsafe because it circumvents the internal Mutex. - /// It must only be used by the `interrupts` crate when handling an FIQ. - unsafe fn end_of_fast_interrupt(&self, number: InterruptNumber); -} diff --git a/kernel/interrupt_controller/src/x86_64.rs b/kernel/interrupt_controller/src/x86_64.rs index adbee306eb..349fa8cb02 100644 --- a/kernel/interrupt_controller/src/x86_64.rs +++ b/kernel/interrupt_controller/src/x86_64.rs @@ -1,9 +1,10 @@ use super::*; -use { - apic::{get_my_apic, LapicIpiDestination}, - ioapic::get_ioapic, -}; +use apic::{LocalApic, LapicIpiDestination}; +use ioapic::IoApic; +use madt::Madt; +use spin::Mutex; +use sync_irq::IrqSafeRwLock; #[derive(Debug, Copy, Clone)] pub struct SystemInterruptControllerVersion(pub u32); @@ -14,35 +15,38 @@ pub struct LocalInterruptControllerId(pub u32); #[derive(Debug, Copy, Clone)] pub struct Priority; -/// Initializes the interrupt controller (not yet used on x86) -pub fn init() -> Result<(), &'static str> { Ok(()) } +/// Initializes the interrupt controller(s), including the Local APIC for the BSP +/// (bootstrap processor) and the system-wide IOAPIC(s). +pub fn init(kernel_mmi: &memory::MmiRef) -> Result<(), &'static str> { + apic::init(); + + // Use the MADT ACPI table to initialize more interrupt controller details. + { + let acpi_tables = acpi::get_acpi_tables().lock(); + let madt = Madt::get(&acpi_tables) + .ok_or("The required MADT ACPI table wasn't found (signature 'APIC')")?; + madt.bsp_init(&mut kernel_mmi.lock().page_table)?; + } + + Ok(()) +} /// Structure representing a top-level/system-wide interrupt controller chip, /// responsible for routing interrupts between peripherals and CPU cores. /// /// On x86_64, this corresponds to an IoApic. -pub struct SystemInterruptController { - id: u8, -} +pub struct SystemInterruptController(&'static Mutex); -/// Struct representing per-cpu-core interrupt controller chips. -/// -/// On x86_64, this corresponds to a LocalApic. -pub struct LocalInterruptController; +// TODO: implement `SystemInterruptController::get()` for IOAPIC, +// but it needs to be able to handle multiple IOAPICs. impl SystemInterruptControllerApi for SystemInterruptController { - fn get() -> &'static Self { - unimplemented!() - } - fn id(&self) -> SystemInterruptControllerId { - let mut int_ctlr = get_ioapic(self.id).expect("BUG: id(): get_ioapic() returned None"); - SystemInterruptControllerId(int_ctlr.id()) + SystemInterruptControllerId(self.0.lock().id()) } fn version(&self) -> SystemInterruptControllerVersion { - let mut int_ctlr = get_ioapic(self.id).expect("BUG: version(): get_ioapic() returned None"); - SystemInterruptControllerVersion(int_ctlr.version()) + SystemInterruptControllerVersion(self.0.lock().version()) } fn get_destination( @@ -58,65 +62,50 @@ impl SystemInterruptControllerApi for SystemInterruptController { destination: Option, priority: Priority, ) -> Result<(), &'static str> { - let mut int_ctlr = get_ioapic(self.id).expect("BUG: set_destination(): get_ioapic() returned None"); - // no support for priority on x86_64 let _ = priority; if let Some(destination) = destination { - int_ctlr.set_irq(sys_int_num, destination.into(), sys_int_num) + self.0.lock().set_irq(sys_int_num, destination.into(), sys_int_num) } else { - Err("SystemInterruptController::set_destination: todo on x86: set the IOREDTBL MASK bit") + todo!("SystemInterruptController::set_destination: todo on x86: set the IOREDTBL MASK bit") } } } -impl LocalInterruptControllerApi for LocalInterruptController { - fn get() -> &'static Self { - unimplemented!() +/// Struct representing a per-CPU interrupt controller chip. +/// +/// On x86_64, this corresponds to a LocalApic. +pub struct LocalInterruptController(&'static IrqSafeRwLock); +impl LocalInterruptController { + /// Returns a reference to the current CPU's local interrupt controller, + /// if it has been initialized. + pub fn get() -> Option { + apic::get_my_apic().map(Self) } +} +impl LocalInterruptControllerApi for LocalInterruptController { fn id(&self) -> LocalInterruptControllerId { - let int_ctlr = get_my_apic().expect("BUG: id(): get_my_apic() returned None"); - let int_ctlr = int_ctlr.read(); - LocalInterruptControllerId(int_ctlr.processor_id()) + LocalInterruptControllerId(self.0.read().apic_id().value()) } - fn get_local_interrupt_priority(&self, num: InterruptNumber) -> Priority { - // No priority support on x86_64 - Priority - } - - fn set_local_interrupt_priority(&self, num: InterruptNumber, priority: Priority) { - // No priority support on x86_64 - let _ = priority; - } - - fn is_local_interrupt_enabled(&self, num: InterruptNumber) -> bool { - todo!() - } - - fn enable_local_interrupt(&self, num: InterruptNumber, enabled: bool) { - todo!() + fn enable_local_timer_interrupt(&self, enable: bool) { + self.0.write().enable_lvt_timer(enable) } fn send_ipi(&self, num: InterruptNumber, dest: InterruptDestination) { use InterruptDestination::*; - let mut int_ctlr = get_my_apic().expect("BUG: send_ipi(): get_my_apic() returned None"); - let mut int_ctlr = int_ctlr.write(); - int_ctlr.send_ipi(num, match dest { + self.0.write().send_ipi(num, match dest { SpecificCpu(cpu) => LapicIpiDestination::One(cpu.into()), AllOtherCpus => LapicIpiDestination::AllButMe, }); } fn end_of_interrupt(&self, _number: InterruptNumber) { - let mut int_ctlr = get_my_apic().expect("BUG: end_of_interrupt(): get_my_apic() returned None"); - let mut int_ctlr = int_ctlr.write(); - - // On x86, passing the number isn't required. - int_ctlr.eoi(); + // When using APIC, we don't need to pass in an IRQ number. + self.0.write().eoi(); } } diff --git a/kernel/interrupts/src/aarch64/mod.rs b/kernel/interrupts/src/aarch64/mod.rs index 3cae5f9fc4..11551d219c 100644 --- a/kernel/interrupts/src/aarch64/mod.rs +++ b/kernel/interrupts/src/aarch64/mod.rs @@ -17,7 +17,7 @@ use kernel_config::time::CONFIG_TIMESLICE_PERIOD_MICROSECONDS; use arm_boards::BOARD_CONFIG; use sync_irq::IrqSafeRwLock; use cpu::current_cpu; -use log::error; +use log::*; use spin::Once; use time::{Monotonic, ClockSource, Instant, Period, register_clock_source}; @@ -134,7 +134,8 @@ pub fn init_ap() { set_vbar_el1(); // Enable the CPU-local timer - let int_ctrl = LocalInterruptController::get(); + let int_ctrl = LocalInterruptController::get() + .expect("LocalInterruptController was not yet initialized"); int_ctrl.init_secondary_cpu_interface(); int_ctrl.set_minimum_priority(0); @@ -157,7 +158,8 @@ pub fn init() -> Result<(), &'static str> { set_vbar_el1(); - let int_ctrl = LocalInterruptController::get(); + let int_ctrl = LocalInterruptController::get() + .expect("LocalInterruptController was not yet initialized"); int_ctrl.set_minimum_priority(0); Ok(()) @@ -175,7 +177,8 @@ pub fn init_timer(timer_tick_handler: InterruptHandler) -> Result<(), &'static s // Route the IRQ to this core (implicit as IRQ < 32) & Enable the interrupt. { - let int_ctrl = LocalInterruptController::get(); + let int_ctrl = LocalInterruptController::get() + .expect("LocalInterruptController was not yet initialized"); // enable routing of this interrupt int_ctrl.enable_local_interrupt(CPU_LOCAL_TIMER_IRQ, true); @@ -197,7 +200,8 @@ pub fn setup_ipi_handler(handler: InterruptHandler, local_num: InterruptNumber) } { - let int_ctrl = LocalInterruptController::get(); + let int_ctrl = LocalInterruptController::get() + .expect("LocalInterruptController was not yet initialized"); // enable routing of this interrupt int_ctrl.enable_local_interrupt(local_num, true); @@ -219,16 +223,18 @@ pub fn setup_tlb_shootdown_handler(handler: InterruptHandler) -> Result<(), &'st { // enable this interrupt as a Fast interrupt (FIQ / Group 0 interrupt) - let int_ctrl = LocalInterruptController::get(); + let int_ctrl = LocalInterruptController::get() + .expect("LocalInterruptController was not yet initialized"); int_ctrl.enable_fast_local_interrupt(TLB_SHOOTDOWN_IPI, true); } Ok(()) } -/// Enables the PL011 "RX" SPI and routes it to the current CPU. +/// Enables the PL011 receive interrupt ("RX" SPI) and routes it to the current CPU. pub fn init_pl011_rx_interrupt() -> Result<(), &'static str> { - let int_ctrl = SystemInterruptController::get(); + let int_ctrl = SystemInterruptController::get() + .expect("SystemInterruptController was not yet initialized"); int_ctrl.set_destination(PL011_RX_SPI, Some(current_cpu()), u8::MAX) } @@ -250,13 +256,11 @@ pub fn enable_timer(enable: bool) { }) ); - /* DEBUGGING CODE - - info!("timer enabled: {:?}", CNTP_CTL_EL0.read(CNTP_CTL_EL0::ENABLE)); - info!("timer IMASK: {:?}", CNTP_CTL_EL0.read(CNTP_CTL_EL0::IMASK)); - info!("timer status: {:?}", CNTP_CTL_EL0.read(CNTP_CTL_EL0::ISTATUS)); - - */ + if false { + info!("timer enabled: {:?}", CNTP_CTL_EL0.read(CNTP_CTL_EL0::ENABLE)); + info!("timer IMASK: {:?}", CNTP_CTL_EL0.read(CNTP_CTL_EL0::IMASK)); + info!("timer status: {:?}", CNTP_CTL_EL0.read(CNTP_CTL_EL0::ISTATUS)); + } } /// Registers an interrupt handler at the given IRQ interrupt number. @@ -310,7 +314,8 @@ pub fn deregister_interrupt(int_num: InterruptNumber, func: InterruptHandler) -> /// Broadcast an Inter-Processor Interrupt to all other CPU cores in the system pub fn broadcast_ipi(ipi_num: InterruptNumber) { - let int_ctrl = LocalInterruptController::get(); + let int_ctrl = LocalInterruptController::get() + .expect("LocalInterruptController was not yet initialized"); int_ctrl.send_ipi(ipi_num, InterruptDestination::AllOtherCpus); } @@ -319,14 +324,16 @@ pub fn broadcast_ipi(ipi_num: InterruptNumber) { /// /// This IPI uses fast interrupts (FIQs) as an NMI alternative. pub fn broadcast_tlb_shootdown_ipi() { - let int_ctrl = LocalInterruptController::get(); + let int_ctrl = LocalInterruptController::get() + .expect("LocalInterruptController was not yet initialized"); int_ctrl.send_fast_ipi(TLB_SHOOTDOWN_IPI, InterruptDestination::AllOtherCpus); } /// Send an "end of interrupt" signal, notifying the interrupt chip that /// the given interrupt request `irq` has been serviced. pub fn eoi(irq_num: InterruptNumber) { - let int_ctrl = LocalInterruptController::get(); + let int_ctrl = LocalInterruptController::get() + .expect("LocalInterruptController was not yet initialized"); int_ctrl.end_of_interrupt(irq_num); } @@ -446,7 +453,8 @@ extern "C" fn current_elx_synchronous(e: &mut ExceptionContext) { #[no_mangle] extern "C" fn current_elx_irq(exc: &mut ExceptionContext) { let (irq_num, _priority) = { - let int_ctrl = LocalInterruptController::get(); + let int_ctrl = LocalInterruptController::get() + .expect("LocalInterruptController was not yet initialized"); match int_ctrl.acknowledge_interrupt() { Some(irq_prio_tuple) => irq_prio_tuple, None /* spurious interrupt */ => return, @@ -475,7 +483,8 @@ extern "C" fn current_elx_irq(exc: &mut ExceptionContext) { #[no_mangle] extern "C" fn current_elx_fiq(exc: &mut ExceptionContext) { let (irq_num, _priority) = { - let int_ctrl = LocalInterruptController::get(); + let int_ctrl = LocalInterruptController::get() + .expect("LocalInterruptController was not yet initialized"); let ack = unsafe { int_ctrl.acknowledge_fast_interrupt() }; match ack { Some(irq_prio_tuple) => irq_prio_tuple, @@ -488,7 +497,8 @@ extern "C" fn current_elx_fiq(exc: &mut ExceptionContext) { if let Some(result) = result { if result == EoiBehaviour::HandlerDidNotSendEoi { - let int_ctrl = LocalInterruptController::get(); + let int_ctrl = LocalInterruptController::get() + .expect("LocalInterruptController was not yet initialized"); unsafe { int_ctrl.end_of_fast_interrupt(irq_num) }; } } else { diff --git a/kernel/ioapic/src/lib.rs b/kernel/ioapic/src/lib.rs index aba05eeb1e..5a787fd382 100644 --- a/kernel/ioapic/src/lib.rs +++ b/kernel/ioapic/src/lib.rs @@ -1,11 +1,11 @@ #![no_std] use log::debug; -use spin::{Mutex, MutexGuard}; +use spin::Mutex; use volatile::{Volatile, WriteOnly}; use zerocopy::FromBytes; use memory::{PageTable, PhysicalAddress, PteFlags, allocate_pages, allocate_frames_at, BorrowedMappedPages, Mutable}; -use atomic_linked_list::atomic_map::AtomicMap; +use atomic_linked_list::atomic_map::{AtomicMap, AtomicMapIter}; use apic::ApicId; @@ -14,24 +14,17 @@ use apic::ApicId; static IOAPICS: AtomicMap> = AtomicMap::new(); -/// Returns a reference to the list of IoApics. -pub fn get_ioapics() -> &'static AtomicMap> { - &IOAPICS +/// Returns an iterator over the list of `IoApic`s. +pub fn get_ioapics() -> AtomicMapIter<'static, u8, Mutex> { + IOAPICS.iter() } /// If an `IoApic` with the given `id` exists, then lock it (acquire its Mutex) /// and return the locked `IoApic`. -pub fn get_ioapic(ioapic_id: u8) -> Option> { - IOAPICS.get(&ioapic_id).map(|ioapic| ioapic.lock()) +pub fn get_ioapic(ioapic_id: u8) -> Option<&'static Mutex> { + IOAPICS.get(&ioapic_id) } -/// Returns the first `IoApic` that was created, if any, after locking it. -/// This is not necessarily the default one. -pub fn get_first_ioapic() -> Option> { - IOAPICS.iter().next().map(|(_id, ioapic)| ioapic.lock()) -} - - #[derive(FromBytes)] #[repr(C)] diff --git a/kernel/serial_port/src/lib.rs b/kernel/serial_port/src/lib.rs index cfa0f2b2f6..2c4eccc904 100644 --- a/kernel/serial_port/src/lib.rs +++ b/kernel/serial_port/src/lib.rs @@ -20,7 +20,7 @@ extern crate alloc; use log::{info, error, warn}; use alloc::format; -use deferred_interrupt_tasks::{InterruptRegistrationError}; +use deferred_interrupt_tasks::InterruptRegistrationError; pub use serial_port_basic::{ SerialPortAddress, SerialPortInterruptEvent, @@ -96,7 +96,7 @@ pub fn init_serial_port( let (int_num, int_handler) = interrupt_number_handler(&serial_port_address); #[cfg(target_arch = "aarch64")] - let (int_num, int_handler) = (PL011_RX_SPI, com1_com3_interrupt_handler); + let (int_num, int_handler) = (PL011_RX_SPI, primary_serial_port_interrupt_handler); SerialPort::register_interrupt_handler(sp.clone(), int_num, int_handler).unwrap(); @@ -127,8 +127,8 @@ fn interrupt_number_handler( ) -> (InterruptNumber, InterruptHandler) { use interrupts::IRQ_BASE_OFFSET; match serial_port_address { - SerialPortAddress::COM1 | SerialPortAddress::COM3 => (IRQ_BASE_OFFSET + 0x04, com1_com3_interrupt_handler), - SerialPortAddress::COM2 | SerialPortAddress::COM4 => (IRQ_BASE_OFFSET + 0x03, com2_com4_interrupt_handler), + SerialPortAddress::COM1 | SerialPortAddress::COM3 => (IRQ_BASE_OFFSET + 0x04, primary_serial_port_interrupt_handler), + SerialPortAddress::COM2 | SerialPortAddress::COM4 => (IRQ_BASE_OFFSET + 0x03, secondary_serial_port_interrupt_handler), } } @@ -384,8 +384,11 @@ static INTERRUPT_ACTION_COM1_COM3: Once> = Once::new static INTERRUPT_ACTION_COM2_COM4: Once> = Once::new(); -// Cross-platform interrupt handler for COM1 and COM3 (IRQ 0x24 on x86_64). -interrupt_handler!(com1_com3_interrupt_handler, Some(interrupts::IRQ_BASE_OFFSET + 0x4), _stack_frame, { +// Cross-platform interrupt handler for the primary serial port. +// +// * On x86_64, this is IRQ 0x24, used for COM1 and COM3 serial ports. +// * On aarch64, this is interrupt 0x21, used for the PL011 UART serial port. +interrupt_handler!(primary_serial_port_interrupt_handler, Some(interrupts::IRQ_BASE_OFFSET + 0x4), _stack_frame, { // log::trace!("COM1/COM3 serial handler"); #[cfg(target_arch = "aarch64")] { @@ -401,8 +404,8 @@ interrupt_handler!(com1_com3_interrupt_handler, Some(interrupts::IRQ_BASE_OFFSET EoiBehaviour::HandlerDidNotSendEoi }); -// Cross-platform interrupt handler for COM2 and COM4 (IRQ 0x24 on 0x23). -interrupt_handler!(com2_com4_interrupt_handler, Some(interrupts::IRQ_BASE_OFFSET + 0x3), _stack_frame, { +// Cross-platform interrupt handler, only used on x86_64 for COM2 and COM4 (IRQ 0x23). +interrupt_handler!(secondary_serial_port_interrupt_handler, Some(interrupts::IRQ_BASE_OFFSET + 0x3), _stack_frame, { // trace!("COM2/COM4 serial handler"); if let Some(func) = INTERRUPT_ACTION_COM2_COM4.get() { func() From dfa1e3677d49f0119978eeb86d368dfa25e2780a Mon Sep 17 00:00:00 2001 From: Niklas Date: Sat, 14 Oct 2023 20:39:48 +0200 Subject: [PATCH 10/25] Explicitly set locale in Makefile (#1055) Set environment variable `LANG="C.UTF-8"`, which ensures that building and running Theseus on non-English locales will succeed. Closes #1053. --- Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Makefile b/Makefile index 79ba925199..d6d1d219f1 100644 --- a/Makefile +++ b/Makefile @@ -8,6 +8,12 @@ SHELL := /bin/bash ## Cargo already handles build parallelism for us anyway. .NOTPARALLEL: +## Override the locale as building on non-English systems may fail. +## Or even worse: it might build, but not boot. +## Overriding LC_ALL instead throws bash warnings. +## C.UTF-8 should be available on all modern glibc systems. +export override LANG="C.UTF-8" + ## most of the variables used below are defined in Config.mk include cfg/Config.mk From 8d4007a8b169c098125cbcd56cfd0b3f00d93f43 Mon Sep 17 00:00:00 2001 From: Niklas Date: Mon, 16 Oct 2023 18:38:14 +0200 Subject: [PATCH 11/25] Fix README instructions for creating a bootable USB image --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d0cd763e73..755699159f 100644 --- a/README.md +++ b/README.md @@ -240,7 +240,7 @@ make view-book ## for the Theseus book We have tested Theseus on a variety of real machines, including Intel NUC devices, various Thinkpad laptops, and Supermicro servers. Currently, we have only tested booting Theseus via USB or PXE using a traditional BIOS bootloader rather than UEFI, but UEFI is fully supported so it should work. -To boot over USB, simply run `make boot usb=sdc`, in which `sdc` is the device node for the USB disk itself *(**not a partition** like sdc2)* to which you want to write the OS image. +To boot over USB, simply run `make usb drive=sdc`, in which `sdc` is the device node for the USB disk itself *(**not a partition** like sdc2)* to which you want to write the OS image. On WSL or other host environments where `/dev` device nodes don't exist, you can simply run `make iso` and burn the `.iso` file in the `build/` directory to a USB, e.g., using [Rufus](https://rufus.ie/) on Windows. To boot Theseus over PXE (network boot), see [this set of separate instructions](https://theseus-os.github.io/Theseus/book/running/pxe.html). From 38fbae04df455f4283c404b5af3f94d315c5e00f Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Tue, 17 Oct 2023 02:33:38 +0800 Subject: [PATCH 12/25] aarch64: move generic system timer to its own crate (#1054) * `LocalInterruptController::enable_timer_interrupt()` now uses this generic timer crate to manage the CPU-local timer interrupt. * Enable/disable the generic system timer and its interrupt together. * Refactor code that was originally in the `interrupts` crate by moving it into the most relevant crate, e.g., timeslice and preemptive scheduling interrupt stuff is now in `scheduler` where it belongs. --- Cargo.lock | 18 ++++- kernel/captain/src/lib.rs | 1 + kernel/generic_timer_aarch64/Cargo.toml | 17 ++++ kernel/generic_timer_aarch64/src/lib.rs | 89 ++++++++++++++++++++ kernel/interrupt_controller/Cargo.toml | 1 + kernel/interrupt_controller/src/aarch64.rs | 2 +- kernel/interrupts/Cargo.toml | 2 +- kernel/interrupts/src/aarch64/mod.rs | 94 ++++------------------ kernel/interrupts/src/x86_64/mod.rs | 6 +- kernel/scheduler/Cargo.toml | 7 +- kernel/scheduler/src/lib.rs | 37 ++++++--- kernel/sleep/src/lib.rs | 3 +- 12 files changed, 179 insertions(+), 98 deletions(-) create mode 100644 kernel/generic_timer_aarch64/Cargo.toml create mode 100644 kernel/generic_timer_aarch64/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 15d303f40d..c3b9f24bad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1357,6 +1357,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "generic_timer_aarch64" +version = "0.1.0" +dependencies = [ + "cortex-a", + "derive_more", + "log", + "memory_structs", + "time", + "tock-registers", +] + [[package]] name = "getopts" version = "0.2.21" @@ -1577,6 +1589,7 @@ dependencies = [ "apic", "arm_boards", "cpu", + "generic_timer_aarch64", "gic", "ioapic", "log", @@ -1597,6 +1610,7 @@ dependencies = [ "early_printer", "exceptions_early", "gdt", + "generic_timer_aarch64", "gic", "interrupt_controller", "kernel_config", @@ -1606,7 +1620,6 @@ dependencies = [ "pic", "spin 0.9.4", "sync_irq", - "time", "tock-registers", "tss", "x86_64", @@ -3115,9 +3128,12 @@ version = "0.1.0" dependencies = [ "cfg-if 1.0.0", "cpu", + "generic_timer_aarch64", "interrupts", + "kernel_config", "log", "sleep", + "spin 0.9.4", "task", "x86_64", ] diff --git a/kernel/captain/src/lib.rs b/kernel/captain/src/lib.rs index 56998a4bb2..e65ad2fd62 100644 --- a/kernel/captain/src/lib.rs +++ b/kernel/captain/src/lib.rs @@ -98,6 +98,7 @@ pub fn init( device_manager::early_init(rsdp_address, kernel_mmi_ref.lock().deref_mut())?; // Initialize local and system-wide interrupt controllers. + // TODO: move this into `interrupts::init()`. interrupt_controller::init(&kernel_mmi_ref)?; // Initialize other arch-specific interrupt stuff, e.g., basic interrupt handlers. diff --git a/kernel/generic_timer_aarch64/Cargo.toml b/kernel/generic_timer_aarch64/Cargo.toml new file mode 100644 index 0000000000..dbaccc7be8 --- /dev/null +++ b/kernel/generic_timer_aarch64/Cargo.toml @@ -0,0 +1,17 @@ +[package] +authors = [ + "Nathan Royer ", + "Kevin Boos + +#![no_std] +#![feature(negative_impls)] + +// This crate is only relevant on aarch64 systems, +// but we use a cfg gate here to allow it to be included in x86 builds +// because the build system currently builds _all_ crates for x86. +#[cfg(target_arch = "aarch64")] +pub use aarch64::*; + +#[cfg(target_arch = "aarch64")] +mod aarch64 { + +use cortex_a::registers::*; +use log::*; +use time::{Monotonic, ClockSource, Instant, Period, register_clock_source}; +use tock_registers::interfaces::Writeable; +use tock_registers::interfaces::Readable; + + +/// Initializes the aarch64 generic system timer +/// and registers it as a monotonic [`ClockSource`]. +/// +/// This only needs to be invoked once, system-wide. +/// However, each CPU will need to enable their own timer interrupt separately, +/// as this function itself does not enable the timer interrupt. +pub fn init() { + let period = Period::new(timer_period_femtoseconds()); + register_clock_source::(period); +} + +/// A ClockSource for the time crate, implemented using +/// the System Counter of the Generic Arm Timer. The +/// period of this timer is computed in `init` above. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[non_exhaustive] +pub struct PhysicalSystemCounter; +impl !Send for PhysicalSystemCounter { } +impl PhysicalSystemCounter { + /// An instant (no-op) function that returns the current CPU's system counter. + pub fn get() -> Self { + Self + } +} +impl ClockSource for PhysicalSystemCounter { + type ClockType = Monotonic; + + fn now() -> Instant { + Instant::new(CNTPCT_EL0.get()) + } +} + +/// Returns the period in femtoseconds of the generic system timer. +/// +/// This reads the `CNTFRQ_EL0` system register. +pub fn timer_period_femtoseconds() -> u64 { + let counter_freq_hz = CNTFRQ_EL0.get(); + let fs_in_one_sec = 1_000_000_000_000_000; + fs_in_one_sec / counter_freq_hz +} + +/// Sets the current CPU's system timer interrupt to fire after `ticks_to_elapse` from now. +pub fn set_next_timer_interrupt(ticks_to_elapse: u64) { + enable_timer_interrupt(false); + CNTP_TVAL_EL0.set(ticks_to_elapse); + enable_timer_interrupt(true); +} + +/// Enables/disables the generic system timer interrupt on the current CPU. +/// +/// This writes the `CNTP_CTL_EL0` system register. +pub fn enable_timer_interrupt(enable: bool) { + // Unmask the interrupt (to enable it), and enable the timer. + CNTP_CTL_EL0.write( + CNTP_CTL_EL0::IMASK.val(!enable as u64) + + CNTP_CTL_EL0::ENABLE.val(enable as u64) + ); + + if false { + info!("timer enabled: {:?}", CNTP_CTL_EL0.read(CNTP_CTL_EL0::ENABLE)); + info!("timer IMASK: {:?}", CNTP_CTL_EL0.read(CNTP_CTL_EL0::IMASK)); + info!("timer status: {:?}", CNTP_CTL_EL0.read(CNTP_CTL_EL0::ISTATUS)); + } +} + +} diff --git a/kernel/interrupt_controller/Cargo.toml b/kernel/interrupt_controller/Cargo.toml index 3f3e834dc8..7990ea6c22 100644 --- a/kernel/interrupt_controller/Cargo.toml +++ b/kernel/interrupt_controller/Cargo.toml @@ -17,6 +17,7 @@ sync_irq = { path = "../../libs/sync_irq" } [target.'cfg(target_arch = "aarch64")'.dependencies] arm_boards = { path = "../arm_boards" } +generic_timer_aarch64 = { path = "../generic_timer_aarch64" } gic = { path = "../gic" } [target.'cfg(target_arch = "x86_64")'.dependencies] diff --git a/kernel/interrupt_controller/src/aarch64.rs b/kernel/interrupt_controller/src/aarch64.rs index d4cd775081..5c129b9522 100644 --- a/kernel/interrupt_controller/src/aarch64.rs +++ b/kernel/interrupt_controller/src/aarch64.rs @@ -190,7 +190,7 @@ impl LocalInterruptControllerApi for LocalInterruptController { } fn enable_local_timer_interrupt(&self, enable: bool) { - todo!("invoke interrupts::enable_timer(enable)...") + generic_timer_aarch64::enable_timer_interrupt(enable) } fn send_ipi(&self, num: InterruptNumber, dest: InterruptDestination) { diff --git a/kernel/interrupts/Cargo.toml b/kernel/interrupts/Cargo.toml index b0acfca1ba..75cf620483 100644 --- a/kernel/interrupts/Cargo.toml +++ b/kernel/interrupts/Cargo.toml @@ -17,10 +17,10 @@ spin = "0.9.4" sync_irq = { path = "../../libs/sync_irq" } arm_boards = { path = "../arm_boards" } kernel_config = { path = "../kernel_config" } +generic_timer_aarch64 = { path = "../generic_timer_aarch64" } gic = { path = "../gic" } tock-registers = "0.7.0" cortex-a = "7.5.0" -time = { path = "../time" } [target.'cfg(target_arch = "x86_64")'.dependencies] exceptions_early = { path = "../exceptions_early" } diff --git a/kernel/interrupts/src/aarch64/mod.rs b/kernel/interrupts/src/aarch64/mod.rs index 11551d219c..abe9dd6ae2 100644 --- a/kernel/interrupts/src/aarch64/mod.rs +++ b/kernel/interrupts/src/aarch64/mod.rs @@ -13,14 +13,10 @@ use interrupt_controller::{ LocalInterruptController, SystemInterruptController, InterruptDestination, LocalInterruptControllerApi, AArch64LocalInterruptControllerApi, SystemInterruptControllerApi, }; -use kernel_config::time::CONFIG_TIMESLICE_PERIOD_MICROSECONDS; use arm_boards::BOARD_CONFIG; use sync_irq::IrqSafeRwLock; use cpu::current_cpu; use log::*; -use spin::Once; - -use time::{Monotonic, ClockSource, Instant, Period, register_clock_source}; pub use interrupt_controller::InterruptNumber; @@ -98,23 +94,6 @@ fn default_exception_handler(exc: &ExceptionContext, origin: &'static str) { loop { core::hint::spin_loop() } } -fn read_timer_period_femtoseconds() -> u64 { - let counter_freq_hz = CNTFRQ_EL0.get(); - let fs_in_one_sec = 1_000_000_000_000_000; - fs_in_one_sec / counter_freq_hz -} - -fn get_timeslice_ticks() -> u64 { - // The number of femtoseconds between each internal timer tick - static TIMESLICE_TICKS: Once = Once::new(); - - *TIMESLICE_TICKS.call_once(|| { - let timeslice_femtosecs = (CONFIG_TIMESLICE_PERIOD_MICROSECONDS as u64) * 1_000_000_000; - let tick_period_femtosecs = read_timer_period_femtoseconds(); - timeslice_femtosecs / tick_period_femtosecs - }) -} - /// Sets `VBAR_EL1` to the start of the exception vector fn set_vbar_el1() { extern "Rust" { @@ -133,41 +112,40 @@ fn set_vbar_el1() { pub fn init_ap() { set_vbar_el1(); - // Enable the CPU-local timer let int_ctrl = LocalInterruptController::get() .expect("LocalInterruptController was not yet initialized"); int_ctrl.init_secondary_cpu_interface(); int_ctrl.set_minimum_priority(0); - // on the bootstrap CPU, this is done in setup_tlb_shootdown_handler + // Enable the TLB shootdown IPI to be delivered to this CPU. + // On the bootstrap CPU, this is done in `setup_tlb_shootdown_handler()`. int_ctrl.enable_fast_local_interrupt(TLB_SHOOTDOWN_IPI, true); - // on the bootstrap CPU, this is done in init_timer + // Enable the CPU-local timer interrupt to be delivered to this CPU. + // On the bootstrap CPU, this is done in `setup_timer_interrupt()`. int_ctrl.enable_local_interrupt(CPU_LOCAL_TIMER_IRQ, true); - enable_timer(true); + generic_timer_aarch64::enable_timer_interrupt(true); } -/// Please call this (only once) before using this crate. +/// Initializes the generic system timer and the system-wide list of interrupt handlers. /// -/// This initializes the Generic Interrupt Controller -/// using the addresses which are valid on qemu's "virt" VM. +/// This only needs to be invoked once, system-wide. pub fn init() -> Result<(), &'static str> { - let period = Period::new(read_timer_period_femtoseconds()); - register_clock_source::(period); - + generic_timer_aarch64::init(); set_vbar_el1(); + // TODO: see note in captain::init(): just call interrupt_controller::init() here directly. + let int_ctrl = LocalInterruptController::get() .expect("LocalInterruptController was not yet initialized"); int_ctrl.set_minimum_priority(0); - Ok(()) } -/// This function registers an interrupt handler for the CPU-local -/// timer and handles interrupt controller configuration for the timer interrupt. -pub fn init_timer(timer_tick_handler: InterruptHandler) -> Result<(), &'static str> { +/// Registers an interrupt handler for the CPU-local timer +/// and handles interrupt controller configuration for that timer interrupt. +pub fn setup_timer_interrupt(timer_tick_handler: InterruptHandler) -> Result<(), &'static str> { // register/deregister the handler for the timer IRQ. if let Err(existing_handler) = register_interrupt(CPU_LOCAL_TIMER_IRQ, timer_tick_handler) { if timer_tick_handler as *const InterruptHandler != existing_handler { @@ -178,7 +156,7 @@ pub fn init_timer(timer_tick_handler: InterruptHandler) -> Result<(), &'static s // Route the IRQ to this core (implicit as IRQ < 32) & Enable the interrupt. { let int_ctrl = LocalInterruptController::get() - .expect("LocalInterruptController was not yet initialized"); + .ok_or("LocalInterruptController was not yet initialized")?; // enable routing of this interrupt int_ctrl.enable_local_interrupt(CPU_LOCAL_TIMER_IRQ, true); @@ -201,8 +179,7 @@ pub fn setup_ipi_handler(handler: InterruptHandler, local_num: InterruptNumber) { let int_ctrl = LocalInterruptController::get() - .expect("LocalInterruptController was not yet initialized"); - + .ok_or("LocalInterruptController was not yet initialized")?; // enable routing of this interrupt int_ctrl.enable_local_interrupt(local_num, true); } @@ -224,7 +201,7 @@ pub fn setup_tlb_shootdown_handler(handler: InterruptHandler) -> Result<(), &'st { // enable this interrupt as a Fast interrupt (FIQ / Group 0 interrupt) let int_ctrl = LocalInterruptController::get() - .expect("LocalInterruptController was not yet initialized"); + .ok_or("LocalInterruptController was not yet initialized")?; int_ctrl.enable_fast_local_interrupt(TLB_SHOOTDOWN_IPI, true); } @@ -234,34 +211,10 @@ pub fn setup_tlb_shootdown_handler(handler: InterruptHandler) -> Result<(), &'st /// Enables the PL011 receive interrupt ("RX" SPI) and routes it to the current CPU. pub fn init_pl011_rx_interrupt() -> Result<(), &'static str> { let int_ctrl = SystemInterruptController::get() - .expect("SystemInterruptController was not yet initialized"); + .ok_or("SystemInterruptController was not yet initialized")?; int_ctrl.set_destination(PL011_RX_SPI, Some(current_cpu()), u8::MAX) } -/// Disables the timer, schedules its next tick, and re-enables it -pub fn schedule_next_timer_tick() { - enable_timer(false); - CNTP_TVAL_EL0.set(get_timeslice_ticks()); - enable_timer(true); -} - -/// Enables/Disables the System Timer via the dedicated Arm System Registers -pub fn enable_timer(enable: bool) { - // unmask the interrupt & enable the timer - CNTP_CTL_EL0.write( - CNTP_CTL_EL0::IMASK.val(0) - + CNTP_CTL_EL0::ENABLE.val(match enable { - true => 1, - false => 0, - }) - ); - - if false { - info!("timer enabled: {:?}", CNTP_CTL_EL0.read(CNTP_CTL_EL0::ENABLE)); - info!("timer IMASK: {:?}", CNTP_CTL_EL0.read(CNTP_CTL_EL0::IMASK)); - info!("timer status: {:?}", CNTP_CTL_EL0.read(CNTP_CTL_EL0::ISTATUS)); - } -} /// Registers an interrupt handler at the given IRQ interrupt number. /// @@ -337,19 +290,6 @@ pub fn eoi(irq_num: InterruptNumber) { int_ctrl.end_of_interrupt(irq_num); } -// A ClockSource for the time crate, implemented using -// the System Counter of the Generic Arm Timer. The -// period of this timer is computed in `init` above. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -struct PhysicalSystemCounter; - -impl ClockSource for PhysicalSystemCounter { - type ClockType = Monotonic; - - fn now() -> Instant { - Instant::new(CNTPCT_EL0.get()) - } -} #[rustfmt::skip] impl fmt::Debug for SpsrEL1 { diff --git a/kernel/interrupts/src/x86_64/mod.rs b/kernel/interrupts/src/x86_64/mod.rs index b052f775d8..0744dde3b1 100644 --- a/kernel/interrupts/src/x86_64/mod.rs +++ b/kernel/interrupts/src/x86_64/mod.rs @@ -307,8 +307,7 @@ pub fn eoi(irq: Option) { extern "x86-interrupt" fn apic_spurious_interrupt_handler(_stack_frame: InterruptStackFrame) { warn!("APIC SPURIOUS INTERRUPT HANDLER!"); - - eoi(None); + eoi(Some(apic::APIC_SPURIOUS_INTERRUPT_IRQ)); } extern "x86-interrupt" fn unimplemented_interrupt_handler(_stack_frame: InterruptStackFrame) { @@ -335,8 +334,7 @@ extern "x86-interrupt" fn unimplemented_interrupt_handler(_stack_frame: Interrup } }; - // TODO: use const generics here to know which IRQ to send an EOI for (only needed for PIC). - eoi(None); + eoi(Some(0xFF)); } diff --git a/kernel/scheduler/Cargo.toml b/kernel/scheduler/Cargo.toml index b12521db3a..ec88901838 100644 --- a/kernel/scheduler/Cargo.toml +++ b/kernel/scheduler/Cargo.toml @@ -1,7 +1,7 @@ [package] authors = ["Kevin Boos "] name = "scheduler" -description = "Provides scheduling functionality for selecting the next task and causing a task switch" +description = "Provides basic scheduling functionality for preemptive task switching." version = "0.1.0" edition = "2018" @@ -16,3 +16,8 @@ task = { path = "../task" } [target.'cfg(target_arch = "x86_64")'.dependencies] x86_64 = "0.14.8" + +[target.'cfg(target_arch = "aarch64")'.dependencies] +generic_timer_aarch64 = { path = "../generic_timer_aarch64" } +kernel_config = { path = "../kernel_config" } +spin = "0.9.4" diff --git a/kernel/scheduler/src/lib.rs b/kernel/scheduler/src/lib.rs index dce7b756bb..6ea07bf0ea 100644 --- a/kernel/scheduler/src/lib.rs +++ b/kernel/scheduler/src/lib.rs @@ -41,8 +41,8 @@ pub fn init() -> Result<(), &'static str> { } #[cfg(target_arch = "aarch64")] { - interrupts::init_timer(timer_tick_handler)?; - interrupts::enable_timer(true); + interrupts::setup_timer_interrupt(timer_tick_handler)?; + generic_timer_aarch64::enable_timer_interrupt(true); Ok(()) } } @@ -50,7 +50,7 @@ pub fn init() -> Result<(), &'static str> { // Architecture-independent timer interrupt handler for preemptive scheduling. interrupt_handler!(timer_tick_handler, None, _stack_frame, { #[cfg(target_arch = "aarch64")] - interrupts::schedule_next_timer_tick(); + generic_timer_aarch64::set_next_timer_interrupt(get_timeslice_ticks()); // tick count, only used for debugging if false { @@ -64,17 +64,32 @@ interrupt_handler!(timer_tick_handler, None, _stack_frame, { // in order to unblock any tasks that are done sleeping. sleep::unblock_sleeping_tasks(); - // We must acknowledge the interrupt before the end of this handler + // We must acknowledge the interrupt *before* the end of this handler // because we switch tasks here, which doesn't return. - { - #[cfg(target_arch = "x86_64")] - eoi(None); // None, because IRQ 0x22 cannot possibly be a PIC interrupt - - #[cfg(target_arch = "aarch64")] - eoi(CPU_LOCAL_TIMER_IRQ); - } + #[cfg(target_arch = "x86_64")] + eoi(Some(CPU_LOCAL_TIMER_IRQ)); + #[cfg(target_arch = "aarch64")] + eoi(CPU_LOCAL_TIMER_IRQ); schedule(); EoiBehaviour::HandlerSentEoi }); + + +/// Returns the (cached) number of system timer ticks needed for the scheduling timeslice interval. +/// +/// This is only needed on aarch64 because it only effectively offers a one-shot timer; +/// x86_64 can be configured once as a recurring periodic timer. +#[cfg(target_arch = "aarch64")] +fn get_timeslice_ticks() -> u64 { + use kernel_config::time::CONFIG_TIMESLICE_PERIOD_MICROSECONDS; + + static TIMESLICE_TICKS: spin::Once = spin::Once::new(); + + *TIMESLICE_TICKS.call_once(|| { + let timeslice_femtosecs = (CONFIG_TIMESLICE_PERIOD_MICROSECONDS as u64) * 1_000_000_000; + let tick_period_femtosecs = generic_timer_aarch64::timer_period_femtoseconds(); + timeslice_femtosecs / tick_period_femtosecs + }) +} diff --git a/kernel/sleep/src/lib.rs b/kernel/sleep/src/lib.rs index 171c93d139..ca264cf267 100644 --- a/kernel/sleep/src/lib.rs +++ b/kernel/sleep/src/lib.rs @@ -110,8 +110,7 @@ fn remove_next_task_from_delayed_tasklist() { } } -/// Remove all tasks that have been delayed but are able to be unblocked now, -/// the current tick count is provided by the system's interrupt tick count. +/// Remove all tasks that have been delayed but are able to be unblocked now. pub fn unblock_sleeping_tasks() { let time = now::(); while time > NEXT_DELAYED_TASK_UNBLOCK_TIME.load() { From 8b27c1139eaf590ca2b8d79bcfacaa6c299a47a9 Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Tue, 17 Oct 2023 04:43:23 +0800 Subject: [PATCH 13/25] Make `eoi` (end of interrupt) consistent across architectures (#1057) * Remove the `Option` from `eoi` signature on x86_64, since the active interrupt chip (PIC vs. APIC) will correctly ignore that value even if it is always provided. * This makes the `eoi` interface much simpler. * Augment the `interrupt_handler!()` macro to allow the IRQ number parameter to be `_`, in order to indicate that the handler doesn't care or doesn't need to actually provide an IRQ number for EOI. --- kernel/ata/src/lib.rs | 4 +-- kernel/e1000/src/lib.rs | 2 +- kernel/generic_timer_aarch64/src/lib.rs | 9 ++--- kernel/interrupts/src/aarch64/mod.rs | 6 ++-- kernel/interrupts/src/macro-doc.md | 46 ++++++++++--------------- kernel/interrupts/src/x86_64/mod.rs | 25 +++++++------- kernel/keyboard/src/lib.rs | 2 +- kernel/mouse/src/lib.rs | 2 +- kernel/pit_clock/src/lib.rs | 2 +- kernel/scheduler/src/lib.rs | 5 +-- kernel/serial_port/src/lib.rs | 4 +-- 11 files changed, 48 insertions(+), 59 deletions(-) diff --git a/kernel/ata/src/lib.rs b/kernel/ata/src/lib.rs index 356f2155b8..0742ca585d 100644 --- a/kernel/ata/src/lib.rs +++ b/kernel/ata/src/lib.rs @@ -894,13 +894,13 @@ const ATA_SECONDARY_IRQ: u8 = interrupts::IRQ_BASE_OFFSET + 0xF; /// The primary ATA interrupt handler. Not yet used for anything, but useful for DMA. extern "x86-interrupt" fn primary_ata_handler(_stack_frame: InterruptStackFrame ) { info!("Primary ATA Interrupt ({:#X})", ATA_PRIMARY_IRQ); - interrupts::eoi(Some(ATA_PRIMARY_IRQ)); + interrupts::eoi(ATA_PRIMARY_IRQ); } /// The primary ATA interrupt handler. Not yet used for anything, but useful for DMA. extern "x86-interrupt" fn secondary_ata_handler(_stack_frame: InterruptStackFrame ) { info!("Secondary ATA Interrupt ({:#X})", ATA_SECONDARY_IRQ); - interrupts::eoi(Some(ATA_SECONDARY_IRQ)); + interrupts::eoi(ATA_SECONDARY_IRQ); } diff --git a/kernel/e1000/src/lib.rs b/kernel/e1000/src/lib.rs index 1b40638159..715e470d00 100644 --- a/kernel/e1000/src/lib.rs +++ b/kernel/e1000/src/lib.rs @@ -462,7 +462,7 @@ extern "x86-interrupt" fn e1000_handler(_stack_frame: InterruptStackFrame) { if let Err(e) = e1000_nic.handle_interrupt() { error!("e1000_handler(): error handling interrupt: {:?}", e); } - eoi(Some(e1000_nic.interrupt_num)); + eoi(e1000_nic.interrupt_num); } else { error!("BUG: e1000_handler(): E1000 NIC hasn't yet been initialized!"); } diff --git a/kernel/generic_timer_aarch64/src/lib.rs b/kernel/generic_timer_aarch64/src/lib.rs index b8a21594f5..502bc6d0e4 100644 --- a/kernel/generic_timer_aarch64/src/lib.rs +++ b/kernel/generic_timer_aarch64/src/lib.rs @@ -69,14 +69,15 @@ pub fn set_next_timer_interrupt(ticks_to_elapse: u64) { enable_timer_interrupt(true); } -/// Enables/disables the generic system timer interrupt on the current CPU. +/// Enables/disables the generic system timer and its interrupt on the current CPU. /// /// This writes the `CNTP_CTL_EL0` system register. pub fn enable_timer_interrupt(enable: bool) { - // Unmask the interrupt (to enable it), and enable the timer. + // If enable: unmask the interrupt (set bit to 0), and enable the timer. + // If disable: mask the interrupt (set bit to 1), and disable the timer. CNTP_CTL_EL0.write( - CNTP_CTL_EL0::IMASK.val(!enable as u64) - + CNTP_CTL_EL0::ENABLE.val(enable as u64) + CNTP_CTL_EL0::ENABLE.val(enable as u64) + + CNTP_CTL_EL0::IMASK.val(!enable as u64) ); if false { diff --git a/kernel/interrupts/src/aarch64/mod.rs b/kernel/interrupts/src/aarch64/mod.rs index abe9dd6ae2..7d3936f2f5 100644 --- a/kernel/interrupts/src/aarch64/mod.rs +++ b/kernel/interrupts/src/aarch64/mod.rs @@ -54,13 +54,15 @@ struct SpsrEL1(InMemoryRegister); #[repr(transparent)] struct EsrEL1(InMemoryRegister); -#[cfg(target_arch = "aarch64")] #[macro_export] #[doc = include_str!("../macro-doc.md")] macro_rules! interrupt_handler { + ($name:ident, _, $stack_frame:ident, $code:block) => { + interrupt_handler!($name, 0, $stack_frame, $code); + }; ($name:ident, $x86_64_eoi_param:expr, $stack_frame:ident, $code:block) => { extern "C" fn $name($stack_frame: &$crate::InterruptStackFrame) -> $crate::EoiBehaviour $code - } + }; } /// The exception context as it is stored on the stack on exception entry. diff --git a/kernel/interrupts/src/macro-doc.md b/kernel/interrupts/src/macro-doc.md index 6f6ef58245..dfba84d425 100644 --- a/kernel/interrupts/src/macro-doc.md +++ b/kernel/interrupts/src/macro-doc.md @@ -2,52 +2,42 @@ Macro which helps writing cross-platform interrupt handlers. # Arguments -- `$name`: the name of the function -- `$x86_64_eoi_param`: `Some(irq_num)` if this interrupt can be handled while - the PIC chip is active and the handler returns `HandlerDidNotSendEoi`; `None` otherwise. - Ignored on `aarch64`. See [`eoi`] for more information. If the IRQ number isn't - constant and this interrupt can happen with the PIC chip active, call [`eoi`] - manually as in Example 2. +- `$name`: the name of the interrupt handler function. +- `$x86_64_eoi_param`: one of two possible values: + 1. the literal underscore character `_`, used to indicate that this value isn't used + and the interrupt handler does not care about its value. + * This is useful for interrupt handlers that are aarch64 specific + or can only occur on x86_64's newer APIC interrupt chips, which do not require + specifying a specific IRQ number when sending an end of interrupt (EOI). + 2. a valid [`InterruptNumber`] if this interrupt may be handled by the legacy PIC chip + on x86_64, which is used if the handler returns `HandlerDidNotSendEoi`. - `$stack_frame`: Name for the [`InterruptStackFrame`] parameter. -- `$code`: The code for the interrupt handler itself. It must return an [`crate::EoiBehaviour`] enum. +- `$code`: The code for the interrupt handler itself, which must return [`crate::EoiBehaviour`]. -# Example 1 +## Example 1 This simply logs the stack frame to the console. ```ignore -interrupt_handler!(my_int_0x29_handler, Some(interrupts::IRQ_BASE_OFFSET + 0x9), stack_frame, { +interrupt_handler!(my_int_0x29_handler, interrupts::IRQ_BASE_OFFSET + 0x9, stack_frame, { trace!("my_int_0x29_handler running! stack frame: {:?}", stack_frame); - // loop {} - EoiBehaviour::HandlerDidNotSendEoi }); ``` -# Example 2 +## Example 2 -Here's how [`eoi`] can be called manually. Note how we can pass `None` as -`$x86_64_eoi_param`, since we call [`eoi`] in the handler. +Here's how [`eoi`] can be called manually. Note how we use `_` for the second parameter +in the macro (the `$x86_64_eoi_param`), since we call [`eoi`] in the handler. ```ignore -interrupt_handler!(my_int_0x29_handler, None, stack_frame, { +interrupt_handler!(my_int_0x29_handler, _, stack_frame, { trace!("my_int_0x29_handler running! stack frame: {:?}", stack_frame); - #[cfg(target_arch = "x86_64")] - let irq_num = 0x29; - - #[cfg(target_arch = "aarch64")] + // Call `eoi` manually. let irq_num = 0x29; - - // Calling `eoi` manually: - { - #[cfg(target_arch = "x86_64")] - eoi(Some(irq_num)); - - #[cfg(target_arch = "aarch64")] - eoi(irq_num); - } + eoi(irq_num); EoiBehaviour::HandlerSentEoi }); diff --git a/kernel/interrupts/src/x86_64/mod.rs b/kernel/interrupts/src/x86_64/mod.rs index 0744dde3b1..db9d0ed3dc 100644 --- a/kernel/interrupts/src/x86_64/mod.rs +++ b/kernel/interrupts/src/x86_64/mod.rs @@ -37,6 +37,9 @@ static RESERVED_IRQ_LIST: [u8; 3] = [ #[macro_export] #[doc = include_str!("../macro-doc.md")] macro_rules! interrupt_handler { + ($name:ident, _, $stack_frame:ident, $code:block) => { + interrupt_handler!($name, 0, $stack_frame, $code); + }; ($name:ident, $x86_64_eoi_param:expr, $stack_frame:ident, $code:block) => { extern "x86-interrupt" fn $name(sf: $crate::InterruptStackFrame) { let $stack_frame = &sf; @@ -44,7 +47,7 @@ macro_rules! interrupt_handler { $crate::eoi($x86_64_eoi_param); } } - } + }; } @@ -279,9 +282,9 @@ pub fn deregister_interrupt(interrupt_num: u8, func: InterruptHandler) -> Result /// This function supports all types of interrupt chips -- APIC, x2apic, PIC -- /// and will perform the correct EOI operation based on which chip is currently active. /// -/// The `irq` argument is only used if the `PIC` chip is active, -/// but it doesn't hurt to always provide it. -pub fn eoi(irq: Option) { +/// The `irq` argument is only used if the legacy `PIC` chip is active on this system; +/// newer APIC chips do not use this. +pub fn eoi(irq: InterruptNumber) { match INTERRUPT_CHIP.load() { InterruptChip::APIC | InterruptChip::X2APIC => { if let Some(my_apic) = apic::get_my_apic() { @@ -292,11 +295,7 @@ pub fn eoi(irq: Option) { } InterruptChip::PIC => { if let Some(_pic) = PIC.get() { - if let Some(irq) = irq { - _pic.notify_end_of_interrupt(irq); - } else { - error!("BUG: missing required IRQ argument for PIC EOI!"); - } + _pic.notify_end_of_interrupt(irq); } else { error!("BUG: couldn't get PIC instance to send EOI!"); } @@ -307,7 +306,7 @@ pub fn eoi(irq: Option) { extern "x86-interrupt" fn apic_spurious_interrupt_handler(_stack_frame: InterruptStackFrame) { warn!("APIC SPURIOUS INTERRUPT HANDLER!"); - eoi(Some(apic::APIC_SPURIOUS_INTERRUPT_IRQ)); + eoi(apic::APIC_SPURIOUS_INTERRUPT_IRQ); } extern "x86-interrupt" fn unimplemented_interrupt_handler(_stack_frame: InterruptStackFrame) { @@ -334,7 +333,7 @@ extern "x86-interrupt" fn unimplemented_interrupt_handler(_stack_frame: Interrup } }; - eoi(Some(0xFF)); + eoi(0xFF); } @@ -353,7 +352,7 @@ extern "x86-interrupt" fn pic_spurious_interrupt_handler(_stack_frame: Interrupt if irq_regs.master_isr & 0x80 == 0x80 { println!("\nGot real IRQ7, not spurious! (Unexpected behavior)"); error!("Got real IRQ7, not spurious! (Unexpected behavior)"); - eoi(Some(IRQ_BASE_OFFSET + 0x7)); + eoi(IRQ_BASE_OFFSET + 0x7); } else { // do nothing. Do not send an EOI. @@ -378,7 +377,7 @@ extern "x86-interrupt" fn pic_spurious_interrupt_handler(_stack_frame: Interrupt // // we must ack the interrupt and send EOI before calling the handler, // // because the handler will not return. // rtc::rtc_ack_irq(); -// eoi(Some(IRQ_BASE_OFFSET + 0x8)); +// eoi(IRQ_BASE_OFFSET + 0x8); // rtc::handle_rtc_interrupt(); // } diff --git a/kernel/keyboard/src/lib.rs b/kernel/keyboard/src/lib.rs index 78f743568b..1a6ca62634 100644 --- a/kernel/keyboard/src/lib.rs +++ b/kernel/keyboard/src/lib.rs @@ -105,7 +105,7 @@ extern "x86-interrupt" fn ps2_keyboard_handler(_stack_frame: InterruptStackFrame warn!("ps2_keyboard_handler(): KEYBOARD isn't initialized yet, skipping interrupt."); } - interrupts::eoi(Some(PS2_KEYBOARD_IRQ)); + interrupts::eoi(PS2_KEYBOARD_IRQ); } diff --git a/kernel/mouse/src/lib.rs b/kernel/mouse/src/lib.rs index 15bf86a167..2368abfa06 100644 --- a/kernel/mouse/src/lib.rs +++ b/kernel/mouse/src/lib.rs @@ -74,7 +74,7 @@ extern "x86-interrupt" fn ps2_mouse_handler(_stack_frame: InterruptStackFrame) { warn!("ps2_mouse_handler(): MOUSE isn't initialized yet, skipping interrupt."); } - interrupts::eoi(Some(PS2_MOUSE_IRQ)); + interrupts::eoi(PS2_MOUSE_IRQ); } diff --git a/kernel/pit_clock/src/lib.rs b/kernel/pit_clock/src/lib.rs index 45ce1782ff..ff083c095b 100644 --- a/kernel/pit_clock/src/lib.rs +++ b/kernel/pit_clock/src/lib.rs @@ -72,5 +72,5 @@ extern "x86-interrupt" fn pit_timer_handler(_stack_frame: InterruptStackFrame) { let ticks = PIT_TICKS.fetch_add(1, Ordering::Acquire); trace!("PIT timer interrupt, ticks: {}", ticks); - interrupts::eoi(Some(PIT_CHANNEL_0_IRQ)); + interrupts::eoi(PIT_CHANNEL_0_IRQ); } diff --git a/kernel/scheduler/src/lib.rs b/kernel/scheduler/src/lib.rs index 6ea07bf0ea..94f454e366 100644 --- a/kernel/scheduler/src/lib.rs +++ b/kernel/scheduler/src/lib.rs @@ -48,7 +48,7 @@ pub fn init() -> Result<(), &'static str> { } // Architecture-independent timer interrupt handler for preemptive scheduling. -interrupt_handler!(timer_tick_handler, None, _stack_frame, { +interrupt_handler!(timer_tick_handler, _, _stack_frame, { #[cfg(target_arch = "aarch64")] generic_timer_aarch64::set_next_timer_interrupt(get_timeslice_ticks()); @@ -66,9 +66,6 @@ interrupt_handler!(timer_tick_handler, None, _stack_frame, { // We must acknowledge the interrupt *before* the end of this handler // because we switch tasks here, which doesn't return. - #[cfg(target_arch = "x86_64")] - eoi(Some(CPU_LOCAL_TIMER_IRQ)); - #[cfg(target_arch = "aarch64")] eoi(CPU_LOCAL_TIMER_IRQ); schedule(); diff --git a/kernel/serial_port/src/lib.rs b/kernel/serial_port/src/lib.rs index 2c4eccc904..00416bd6b9 100644 --- a/kernel/serial_port/src/lib.rs +++ b/kernel/serial_port/src/lib.rs @@ -388,7 +388,7 @@ static INTERRUPT_ACTION_COM2_COM4: Once> = Once::new // // * On x86_64, this is IRQ 0x24, used for COM1 and COM3 serial ports. // * On aarch64, this is interrupt 0x21, used for the PL011 UART serial port. -interrupt_handler!(primary_serial_port_interrupt_handler, Some(interrupts::IRQ_BASE_OFFSET + 0x4), _stack_frame, { +interrupt_handler!(primary_serial_port_interrupt_handler, interrupts::IRQ_BASE_OFFSET + 0x4, _stack_frame, { // log::trace!("COM1/COM3 serial handler"); #[cfg(target_arch = "aarch64")] { @@ -405,7 +405,7 @@ interrupt_handler!(primary_serial_port_interrupt_handler, Some(interrupts::IRQ_B }); // Cross-platform interrupt handler, only used on x86_64 for COM2 and COM4 (IRQ 0x23). -interrupt_handler!(secondary_serial_port_interrupt_handler, Some(interrupts::IRQ_BASE_OFFSET + 0x3), _stack_frame, { +interrupt_handler!(secondary_serial_port_interrupt_handler, interrupts::IRQ_BASE_OFFSET + 0x3, _stack_frame, { // trace!("COM2/COM4 serial handler"); if let Some(func) = INTERRUPT_ACTION_COM2_COM4.get() { func() From d5b2660df7a836ef89c579c29cb780a468f9bb40 Mon Sep 17 00:00:00 2001 From: Nathan Royer <61582713+NathanRoyer@users.noreply.github.com> Date: Tue, 17 Oct 2023 20:42:23 +0200 Subject: [PATCH 14/25] aarch64: minor improvements to interrupts module (#1052) * Remove unnecessary casts to `*const InterruptHandler` in favor of using `fn` function pointer type directly. * Closes #1047. --- kernel/interrupts/src/aarch64/mod.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/kernel/interrupts/src/aarch64/mod.rs b/kernel/interrupts/src/aarch64/mod.rs index 7d3936f2f5..bba771d27b 100644 --- a/kernel/interrupts/src/aarch64/mod.rs +++ b/kernel/interrupts/src/aarch64/mod.rs @@ -150,7 +150,7 @@ pub fn init() -> Result<(), &'static str> { pub fn setup_timer_interrupt(timer_tick_handler: InterruptHandler) -> Result<(), &'static str> { // register/deregister the handler for the timer IRQ. if let Err(existing_handler) = register_interrupt(CPU_LOCAL_TIMER_IRQ, timer_tick_handler) { - if timer_tick_handler as *const InterruptHandler != existing_handler { + if timer_tick_handler as InterruptHandler != existing_handler { return Err("A different interrupt handler has already been setup for the timer IRQ number"); } } @@ -174,7 +174,7 @@ pub fn setup_timer_interrupt(timer_tick_handler: InterruptHandler) -> Result<(), pub fn setup_ipi_handler(handler: InterruptHandler, local_num: InterruptNumber) -> Result<(), &'static str> { // register the handler if let Err(existing_handler) = register_interrupt(local_num, handler) { - if handler as *const InterruptHandler != existing_handler { + if handler as InterruptHandler != existing_handler { return Err("A different interrupt handler has already been setup for that IPI"); } } @@ -195,7 +195,7 @@ pub fn setup_ipi_handler(handler: InterruptHandler, local_num: InterruptNumber) /// Returns an error if the TLB Shootdown interrupt number already has a registered handler. pub fn setup_tlb_shootdown_handler(handler: InterruptHandler) -> Result<(), &'static str> { if let Err(existing_handler) = register_interrupt(TLB_SHOOTDOWN_IPI, handler) { - if handler as *const InterruptHandler != existing_handler { + if handler as InterruptHandler != existing_handler { return Err("A different interrupt handler has already been setup for that IPI"); } } @@ -229,13 +229,13 @@ pub fn init_pl011_rx_interrupt() -> Result<(), &'static str> { /// # Return /// * `Ok(())` if successfully registered, or /// * `Err(existing_handler_address)` if the given `irq_num` was already in use. -pub fn register_interrupt(int_num: InterruptNumber, func: InterruptHandler) -> Result<(), *const InterruptHandler> { +pub fn register_interrupt(int_num: InterruptNumber, func: InterruptHandler) -> Result<(), InterruptHandler> { let mut handlers = IRQ_HANDLERS.write(); let index = int_num as usize; if let Some(handler) = handlers[index] { error!("register_interrupt: the requested interrupt IRQ {} was already in use", index); - Err(handler as *const _) + Err(handler) } else { handlers[index] = Some(func); Ok(()) @@ -251,12 +251,12 @@ pub fn register_interrupt(int_num: InterruptNumber, func: InterruptHandler) -> R /// # Arguments /// * `int_num`: the interrupt number that needs to be deregistered /// * `func`: the handler that should currently be stored for 'interrupt_num' -pub fn deregister_interrupt(int_num: InterruptNumber, func: InterruptHandler) -> Result<(), Option<*const InterruptHandler>> { +pub fn deregister_interrupt(int_num: InterruptNumber, func: InterruptHandler) -> Result<(), Option> { let mut handlers = IRQ_HANDLERS.write(); let index = int_num as usize; - let func = func as *const InterruptHandler; - let handler = handlers[index].map(|h| h as *const InterruptHandler); + let func = func as InterruptHandler; + let handler = handlers[index].map(|h| h as InterruptHandler); if handler != Some(func) { error!("deregister_interrupt: Cannot free interrupt due to incorrect handler function"); From 27c5ce54e1287c741ece0f72180ba7052b53311e Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Sat, 21 Oct 2023 09:03:11 +0800 Subject: [PATCH 15/25] Add and improve README badges (#1060) --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 755699159f..17ed68345c 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,14 @@ # Theseus OS -[![Documentation Action](https://img.shields.io/github/actions/workflow/status/theseus-os/Theseus/docs.yaml?label=docs%20build)](https://github.com/theseus-os/Theseus/actions/workflows/docs.yaml) [![Documentation](https://img.shields.io/badge/view-docs-blue)](https://theseus-os.github.io/Theseus/doc/___Theseus_Crates___/index.html) [![Book](https://img.shields.io/badge/view-book-blueviolet)](https://theseus-os.github.io/Theseus/book/index.html) [![Blog](https://img.shields.io/badge/view-blog-orange)](https://theseus-os.com) [![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?style=flat&logo=discord&logoColor=white)](https://discord.gg/NuUnqeYT8R) +
+[![Build Action](https://img.shields.io/github/actions/workflow/status/theseus-os/Theseus/docs.yaml?label=build)](https://github.com/theseus-os/Theseus/actions/workflows/docs.yaml) +[![Clippy Action](https://img.shields.io/github/actions/workflow/status/theseus-os/Theseus/check-clippy.yaml?label=clippy)](https://github.com/theseus-os/Theseus/actions/workflows/check-clippy.yaml) +[![QEMU tests](https://img.shields.io/github/actions/workflow/status/theseus-os/Theseus/test.yaml?label=QEMU%20tests)](https://github.com/theseus-os/Theseus/actions/workflows/test.yaml) + Theseus is a new OS written from scratch in [Rust](https://www.rust-lang.org/) to experiment with novel OS structure, better state management, and how to leverage **intralingual design** principles to shift OS responsibilities like resource management into the compiler. From a859ff585e87cee6da50cbad2b2c1788a5be12c1 Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Mon, 23 Oct 2023 14:48:25 -0700 Subject: [PATCH 16/25] book: Add links to KISV 2023 paper and GOSIM 2023 presentation (#1062) --- book/src/misc/papers_presentations.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/book/src/misc/papers_presentations.md b/book/src/misc/papers_presentations.md index 2431af26e1..c41491bf2d 100644 --- a/book/src/misc/papers_presentations.md +++ b/book/src/misc/papers_presentations.md @@ -11,13 +11,17 @@ This page offers a selected collection of the slide decks from those talks (incl [OSDI 2020 Video Talk](https://www.youtube.com/watch?v=i1pLDZKtlBI) — [OSDI 2020 Short Video](https://www.youtube.com/watch?v=T0Du5vps9aU) — [Slides (PDF)](https://www.usenix.org/sites/default/files/conference/protected-files/osdi20_slides_boos.pdf) -* \[OSDI 2022\] Poster: [Correct and Performant Device Drivers via Intralingual Design](https://www.usenix.org/conference/osdi22/poster-session) - * An overview of in-progress work to use formal verification + intralingual design for better device drivers. - * [Poster PDF](https://www.theseus-os.com/kevinaboos/docs/OSDI%202022%20Poster.pdf) +* \[[KISV 2023](https://kisv-workshop.github.io/program/)\] **[Leveraging Rust for Lightweight OS Correctness](https://dl.acm.org/doi/10.1145/3625275.3625398)** + * A paper about extending intralingual design and applying a hybrid approach (type systems plus formal verification) for proving lightweight correctness guarantees about Theseus's memory management subsystem. + * [Paper (PDF)](https://dl.acm.org/doi/pdf/10.1145/3625275.3625398) + * Kevin Boos PhD Dissertation: [Theseus: Rethinking Operating Systems Structure and State Management](https://scholarship.rice.edu/handle/1911/109201) * Ramla Ijaz Master's Thesis: [Exploring Intralingual Design in Operating Systems](https://scholarship.rice.edu/handle/1911/109609) -### Older papers +### Other published works +* \[OSDI 2022\] Poster: [Correct and Performant Device Drivers via Intralingual Design](https://www.usenix.org/conference/osdi22/poster-session) + * An overview of in-progress work to use formal verification + intralingual design for better device drivers. + * [Poster PDF](https://www.theseus-os.com/kevinaboos/docs/OSDI%202022%20Poster.pdf) * \[PLOS 2017\] [Theseus: A State Spill-free Operating System](https://www.sigops.org/s/conferences/sosp/2017/workshops.html) * [Paper PDF](https://www.theseus-os.com/kevinaboos/docs/theseus_plos2017.pdf) — a shorter, outdated ideas paper about early Theseus design. * Superseded by the OSDI 2020 paper. @@ -28,6 +32,7 @@ This page offers a selected collection of the slide decks from those talks (incl ## Selected Presentations and Slide Decks +* [Theseus: a Rust-native OS for Safety and Reliability (Sept 2023)](https://docs.google.com/presentation/d/e/2PACX-1vSq144Pl5Ql02OP9zq80wuy7iI1GwUNfCwUelpKay2qeIis4uMY2qOfSgIKeG7Rb053fMoVXXHa3ka9/pub?start=false&loop=false) – [[Video talk](https://www.bilibili.com/video/BV1d34y1373n/)] * [How Theseus uses Rust, plus Rust challenges (early 2022)](https://docs.google.com/presentation/d/e/2PACX-1vQ2InjW_5kpdepoJ9vdsH-B1G4mvcjohcj_CA2dzx-tVRz0ee52qo1bwCQ7TnDGE9PiE5doW4sIO_7W/pub?start=false&loop=false) * [How Safe-language OSes work, with Theseus examples](https://docs.google.com/presentation/d/e/2PACX-1vSa0gp8sbq8S9MB4V-FYjs6xJGIPm0fsZSVdtZ9U2bQWRX9gngwztXTIJiRwxtAosLWPk0v60abDMTU/pub?start=false&loop=false) — [[Video Talk](https://www.youtube.com/watch?v=n7r8zO7SodE)] @@ -48,4 +53,4 @@ This page offers a selected collection of the slide decks from those talks (incl * [Rust vs. C, with short Rust intro](https://docs.google.com/presentation/d/e/2PACX-1vQYomAnfTNucuCqYgNkPaxpIdrhPxil9Qzle_6-xd7TYfdEBlgML0B3vztdNC2odwc25dLzW3XsithZ/pub?start=false&loop=false) — [[Video Talk](https://www.youtube.com/watch?v=mmJiwscpB4o)] - + From 3fd2e4c63c19bdb6747023995083e9b59e92ba22 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Tue, 24 Oct 2023 13:15:24 +1100 Subject: [PATCH 17/25] Redesign `path` crate to follow `std::path` (#1061) * The `path` crate now exports a `Path` and `PathBuf` struct, akin to `std::path`. This makes it easier to support `std`, but also removes unnecessary allocations throughout Theseus. * Notably, `Path::file_stem()` now returns an `Option<&str>`. * This causes `crate_name_from_path()` to return a `Result`, which is responsible for most of the logic changes required. Signed-off-by: Klimenty Tsoutsman --- Cargo.lock | 4 - applications/bm/src/lib.rs | 14 +- applications/cat/src/lib.rs | 4 +- applications/cd/src/lib.rs | 6 +- applications/hull/src/builtin.rs | 14 +- applications/hull/src/lib.rs | 6 +- applications/loadc/src/lib.rs | 5 +- applications/ls/src/lib.rs | 3 +- applications/ns/src/lib.rs | 4 +- applications/qemu_test/src/lib.rs | 8 +- applications/rm/src/lib.rs | 4 +- applications/shell/src/lib.rs | 6 +- applications/swap/src/lib.rs | 4 +- applications/test_wasmtime/src/lib.rs | 2 +- applications/upd/src/lib.rs | 10 +- applications/wasm/src/lib.rs | 4 +- kernel/console/src/lib.rs | 4 +- kernel/crate_name_utils/src/lib.rs | 8 +- kernel/crate_swap/src/lib.rs | 34 +- kernel/environment/src/lib.rs | 3 +- kernel/fault_crate_swap/src/lib.rs | 4 +- kernel/first_application/src/lib.rs | 5 +- kernel/mod_mgmt/src/lib.rs | 15 +- kernel/mod_mgmt/src/parse_nano_core.rs | 4 +- kernel/path/Cargo.toml | 28 +- kernel/path/src/component.rs | 339 ++++++++ kernel/path/src/lib.rs | 763 +++++++++++++----- kernel/spawn/src/lib.rs | 6 +- kernel/task_fs/src/lib.rs | 24 +- .../wasi_interpreter/src/posix_file_system.rs | 19 +- kernel/window_manager/src/lib.rs | 9 +- ports/theseus_std/src/env.rs | 2 +- ports/theseus_std/src/fs_imp.rs | 9 +- 33 files changed, 1021 insertions(+), 353 deletions(-) create mode 100644 kernel/path/src/component.rs diff --git a/Cargo.lock b/Cargo.lock index c3b9f24bad..6baff26288 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2593,11 +2593,7 @@ name = "path" version = "0.1.0" dependencies = [ "fs_node", - "lazy_static", - "log", "root", - "spin 0.9.4", - "vfs_node", ] [[package]] diff --git a/applications/bm/src/lib.rs b/applications/bm/src/lib.rs index fab5fd45cf..a75331dd34 100644 --- a/applications/bm/src/lib.rs +++ b/applications/bm/src/lib.rs @@ -35,7 +35,7 @@ use alloc::string::{String, ToString}; use alloc::sync::Arc; use hpet::get_hpet; use heapfile::HeapFile; -use path::Path; +use path::{Path, PathBuf}; use fs_node::{DirRef, FileOrDir, FileRef}; use libtest::*; use memory::{create_mapping, PteFlags}; @@ -324,9 +324,9 @@ fn do_spawn_inner(overhead_ct: u64, th: usize, nr: usize, on_cpu: CpuId) -> Resu .map_err(|_| "could not find the application namespace")?; let namespace_dir = namespace.dir(); let app_path = namespace_dir.get_file_starting_with("hello-") - .map(|f| Path::new(f.lock().get_absolute_path())) + .map(|f| PathBuf::from(f.lock().get_absolute_path())) .ok_or("Could not find the application 'hello'")?; - let crate_name = crate_name_from_path(&app_path).to_string(); + let crate_name = crate_name_from_path(&app_path).ok_or("invalid app path")?.to_string(); // here we are taking the time at every iteration. // otherwise the crate is not fully unloaded from the namespace before the next iteration starts @@ -336,7 +336,7 @@ fn do_spawn_inner(overhead_ct: u64, th: usize, nr: usize, on_cpu: CpuId) -> Resu while namespace.get_crate(&crate_name).is_some() { } start_hpet = hpet.get_counter(); - let child = spawn::new_application_task_builder(app_path.clone(), None)? + let child = spawn::new_application_task_builder(&app_path, None)? .pin_on_cpu(on_cpu) .spawn()?; @@ -1237,7 +1237,7 @@ fn do_fs_read_with_open_inner(filename: &str, overhead_ct: u64, th: usize, nr: u let hpet = get_hpet().ok_or("Could not retrieve hpet counter")?; - let path = Path::new(filename.to_string()); + let path = PathBuf::from(filename.to_string()); let mut _dummy_sum: u64 = 0; let mut buf = vec![0; READ_BUF_SIZE]; let size = match get_file(filename) { @@ -1302,7 +1302,7 @@ fn do_fs_read_only_inner(filename: &str, overhead_ct: u64, th: usize, nr: usize) let hpet = get_hpet().ok_or("Could not retrieve hpet counter")?; - let path = Path::new(filename.to_string()); + let path = PathBuf::from(filename.to_string()); let _dummy_sum: u64 = 0; let mut buf = vec![0; READ_BUF_SIZE]; let size = match get_file(filename) { @@ -1640,7 +1640,7 @@ fn test_file_inner(fileref: FileRef) { /// Wrapper function to get a file provided a string. /// Not used in measurements fn get_file(filename: &str) -> Option { - let path = Path::new(filename.to_string()); + let path: &Path = filename.as_ref(); match path.get(&get_cwd().unwrap()) { Some(file_dir_enum) => { match file_dir_enum { diff --git a/applications/cat/src/lib.rs b/applications/cat/src/lib.rs index a387f9766a..ba4de221b3 100644 --- a/applications/cat/src/lib.rs +++ b/applications/cat/src/lib.rs @@ -11,7 +11,7 @@ extern crate core2; use core::str; use alloc::{ - string::{String, ToString}, + string::String, vec::Vec, }; use getopts::Options; @@ -47,7 +47,7 @@ pub fn main(args: Vec) -> isize { println!("failed to get current task"); return -1; }; - let path = Path::new(matches.free[0].to_string()); + let path: &Path = matches.free[0].as_ref(); // navigate to the filepath specified by first argument match path.get(&cwd) { diff --git a/applications/cd/src/lib.rs b/applications/cd/src/lib.rs index 47e57e5063..c2d9647ccd 100644 --- a/applications/cd/src/lib.rs +++ b/applications/cd/src/lib.rs @@ -10,11 +10,9 @@ extern crate root; extern crate task; use alloc::string::String; -use alloc::string::ToString; use alloc::sync::Arc; use alloc::vec::Vec; use getopts::Options; -use path::Path; pub fn main(args: Vec) -> isize { let mut opts = Options::new(); @@ -38,8 +36,8 @@ pub fn main(args: Vec) -> isize { if matches.free.is_empty() { curr_env.lock().working_dir = Arc::clone(root::get_root()); } else { - let path = Path::new(matches.free[0].to_string()); - match curr_env.lock().chdir(&path) { + let path = matches.free[0].as_ref(); + match curr_env.lock().chdir(path) { Err(environment::Error::NotADirectory) => { println!("not a directory: {}", path); return -1; diff --git a/applications/hull/src/builtin.rs b/applications/hull/src/builtin.rs index 436d6a6c7b..9b0ddf9f76 100644 --- a/applications/hull/src/builtin.rs +++ b/applications/hull/src/builtin.rs @@ -1,9 +1,8 @@ //! Builtin shell commands. use crate::{Error, Result, Shell}; -use alloc::{borrow::ToOwned, string::ToString}; +use alloc::string::ToString; use app_io::println; -use path::Path; // TODO: Decide which builtins we don't need. @@ -64,15 +63,16 @@ impl Shell { return Err(Error::Command(1)); } - let path = Path::new(if let Some(arg) = args.first() { - (*arg).to_owned() + let path = if let Some(arg) = args.first() { + *arg } else { - "/".to_owned() - }); + "/" + } + .as_ref(); let task = task::get_my_current_task().ok_or(Error::CurrentTaskUnavailable)?; - match task.get_env().lock().chdir(&path) { + match task.get_env().lock().chdir(path) { Ok(()) => Ok(()), Err(_) => { println!("no such file or directory: {path}"); diff --git a/applications/hull/src/lib.rs b/applications/hull/src/lib.rs index 49d1a3a32e..7e994937b4 100644 --- a/applications/hull/src/lib.rs +++ b/applications/hull/src/lib.rs @@ -39,7 +39,7 @@ use hashbrown::HashMap; use job::Job; use log::{error, warn}; use noline::{builder::EditorBuilder, sync::embedded::IO as Io}; -use path::Path; +use path::PathBuf; use stdio::Stdio; use sync_block::Mutex; use task::{ExitValue, KillReason}; @@ -306,7 +306,7 @@ impl Shell { .into_iter(); let app_path = match matching_files.next() { - Some(f) => Path::new(f.lock().get_absolute_path()), + Some(f) => PathBuf::from(f.lock().get_absolute_path()), None => return Err(Error::CommandNotFound(cmd.to_owned())), }; @@ -314,7 +314,7 @@ impl Shell { println!("multiple matching files found, running: {app_path}"); } - let task = spawn::new_application_task_builder(app_path, None) + let task = spawn::new_application_task_builder(&app_path, None) .map_err(Error::SpawnFailed)? .argument(args.into_iter().map(ToOwned::to_owned).collect::>()) .block() diff --git a/applications/loadc/src/lib.rs b/applications/loadc/src/lib.rs index c9ee6b1dde..4fdc62f406 100644 --- a/applications/loadc/src/lib.rs +++ b/applications/loadc/src/lib.rs @@ -26,8 +26,8 @@ use alloc::{collections::BTreeSet, string::{String, ToString}, sync::Arc, vec::V use getopts::{Matches, Options}; use memory::{Page, MappedPages, VirtualAddress, PteFlagsArch, PteFlags}; use mod_mgmt::{CrateNamespace, StrongDependency, find_symbol_table, RelocationEntry, write_relocation}; -use rustc_demangle::demangle; use path::Path; +use rustc_demangle::demangle; use xmas_elf::{ ElfFile, program::SegmentData, @@ -72,8 +72,7 @@ fn rmain(matches: Matches) -> Result { ).map_err(|_| String::from("failed to get current task"))?; let path = matches.free.get(0).ok_or_else(|| "Missing path to ELF executable".to_string())?; - let path = Path::new(path.clone()); - let file_ref = path.get_file(&curr_wd) + let file_ref = Path::new(path).get_file(&curr_wd) .ok_or_else(|| format!("Failed to access file at {path:?}"))?; let file = file_ref.lock(); diff --git a/applications/ls/src/lib.rs b/applications/ls/src/lib.rs index 0e885956ca..dd6cb40bef 100644 --- a/applications/ls/src/lib.rs +++ b/applications/ls/src/lib.rs @@ -9,7 +9,6 @@ extern crate path; use alloc::{ string::String, - string::ToString, vec::Vec, }; use core::fmt::Write; @@ -45,7 +44,7 @@ pub fn main(args: Vec) -> isize { return 0; } - let path = Path::new(matches.free[0].to_string()); + let path: &Path = matches.free[0].as_ref(); // Navigate to the path specified by first argument match path.get(&curr_wd) { diff --git a/applications/ns/src/lib.rs b/applications/ns/src/lib.rs index 1334ed5df4..c49baad568 100644 --- a/applications/ns/src/lib.rs +++ b/applications/ns/src/lib.rs @@ -25,7 +25,7 @@ use alloc::{ use getopts::{Options, Matches}; use mod_mgmt::CrateNamespace; use fs_node::FileRef; -use path::Path; +use path::PathBuf; pub fn main(args: Vec) -> isize { @@ -68,7 +68,7 @@ fn rmain(matches: Matches) -> Result<(), String> { let mut output = String::new(); if let Some(crate_obj_file_path) = matches.opt_str("load") { - let path = Path::new(crate_obj_file_path); + let path = PathBuf::from(crate_obj_file_path); let file = path.get_file(&curr_wd).ok_or_else(|| format!("Couldn't resolve path to crate object file at {path:?}") )?; diff --git a/applications/qemu_test/src/lib.rs b/applications/qemu_test/src/lib.rs index e6a46f726f..c79d2fc778 100644 --- a/applications/qemu_test/src/lib.rs +++ b/applications/qemu_test/src/lib.rs @@ -9,9 +9,9 @@ use alloc::{boxed::Box, string::String, vec::Vec}; use app_io::{print, println}; -use path::Path; use qemu_exit::{QEMUExit, X86}; use task::{ExitValue, KillReason}; +use path::{Path, PathBuf}; extern crate alloc; @@ -37,7 +37,7 @@ pub fn main(_: Vec) -> isize { // deadlock. let file = dir.lock().get_file(file_name.as_ref()).unwrap(); let path = file.lock().get_absolute_path(); - Some((file_name, Path::new(path))) + Some((file_name, PathBuf::from(path))) } else { None } @@ -56,7 +56,7 @@ pub fn main(_: Vec) -> isize { num_ignored += 1; println!("ignored"); } else { - match run_test(path) { + match run_test(&path) { Ok(_) => println!("ok"), Err(_) => { num_failed += 1; @@ -81,7 +81,7 @@ pub fn main(_: Vec) -> isize { } #[allow(clippy::result_unit_err)] -pub fn run_test(path: Path) -> Result<(), ()> { +pub fn run_test(path: &Path) -> Result<(), ()> { match spawn::new_application_task_builder(path, None) .unwrap() .argument(Vec::new()) diff --git a/applications/rm/src/lib.rs b/applications/rm/src/lib.rs index b2c440688e..3ad41784bd 100644 --- a/applications/rm/src/lib.rs +++ b/applications/rm/src/lib.rs @@ -13,7 +13,7 @@ use alloc::vec::Vec; use alloc::string::String; use alloc::string::ToString; use getopts::Options; -use path::Path; +use path::PathBuf; use fs_node::{FsNode, FileOrDir}; @@ -56,7 +56,7 @@ pub fn remove_node(args: Vec) -> Result<(), String> { } for path_string in &matches.free { - let path = Path::new(path_string.clone()); + let path = PathBuf::from(path_string.clone()); let node_to_delete = match path.get(&working_dir) { Some(node) => node, _ => return Err(format!("Couldn't find path {path}")), diff --git a/applications/shell/src/lib.rs b/applications/shell/src/lib.rs index 748f7ec303..6080e8a25c 100644 --- a/applications/shell/src/lib.rs +++ b/applications/shell/src/lib.rs @@ -638,10 +638,10 @@ impl Shell { let app_file = matching_apps.next(); let second_match = matching_apps.next(); // return an error if there are multiple matching apps let app_path = app_file.xor(second_match) - .map(|f| Path::new(f.lock().get_absolute_path())) + .map(|f| f.lock().get_absolute_path()) .ok_or(AppErr::NotFound(cmd))?; - let taskref = spawn::new_application_task_builder(app_path, None) + let taskref = spawn::new_application_task_builder(app_path.as_ref(), None) .map_err(|e| AppErr::SpawnErr(e.to_string()))? .argument(args) .block() @@ -860,7 +860,7 @@ impl Shell { // Walk through nodes existing in the command. for node in &nodes { - let path = Path::new(node.to_string()); + let path: &Path = node.as_ref(); match path.get(&curr_wd) { Some(file_dir_enum) => { match file_dir_enum { diff --git a/applications/swap/src/lib.rs b/applications/swap/src/lib.rs index 16c3867997..675340a0ba 100644 --- a/applications/swap/src/lib.rs +++ b/applications/swap/src/lib.rs @@ -72,7 +72,7 @@ fn rmain(matches: Matches) -> Result<(), String> { return Err("failed to get current task".to_string()); }; let override_namespace_crate_dir = if let Some(path) = matches.opt_str("d") { - let path = Path::new(path); + let path: &Path = path.as_ref(); let dir = match path.get(&curr_dir) { Some(FileOrDir::Dir(dir)) => dir, _ => return Err(format!("Error: could not find specified namespace crate directory: {path}.")), @@ -166,7 +166,7 @@ fn do_swap( let (into_new_crate_file, new_namespace) = { if let Some(f) = override_namespace_crate_dir.as_ref().and_then(|ns_dir| ns_dir.get_file_starting_with(new_crate_str)) { (IntoCrateObjectFile::File(f), None) - } else if let Some(FileOrDir::File(f)) = Path::new(String::from(new_crate_str)).get(curr_dir) { + } else if let Some(FileOrDir::File(f)) = Path::new(new_crate_str).get(curr_dir) { (IntoCrateObjectFile::File(f), None) } else { (IntoCrateObjectFile::Prefix(String::from(new_crate_str)), None) diff --git a/applications/test_wasmtime/src/lib.rs b/applications/test_wasmtime/src/lib.rs index 2893b36d5a..858d7c204f 100644 --- a/applications/test_wasmtime/src/lib.rs +++ b/applications/test_wasmtime/src/lib.rs @@ -29,7 +29,7 @@ pub fn main(args: Vec) -> isize { fn rmain(args: Vec) -> Result<(), String> { - let path_to_hello_cwasm = Path::new(args.get(0).cloned().unwrap_or("/extra_files/wasm/hello.cwasm".to_string())); + let path_to_hello_cwasm: &Path = args.get(0).map(|arg| &arg[..]).unwrap_or("/extra_files/wasm/hello.cwasm").as_ref(); let Ok(curr_wd) = task::with_current_task(|t| t.get_env().lock().working_dir.clone()) else { return Err("failed to get current task".to_string()); }; diff --git a/applications/upd/src/lib.rs b/applications/upd/src/lib.rs index d82fdce0c8..562f6af8cc 100644 --- a/applications/upd/src/lib.rs +++ b/applications/upd/src/lib.rs @@ -123,7 +123,7 @@ fn rmain(matches: Matches) -> Result<(), String> { } "apply" | "ap" => { let base_dir_path = matches.free.get(1).ok_or_else(|| String::from("missing BASE_DIR path argument"))?; - apply(&Path::new(base_dir_path.clone())) + apply(base_dir_path.as_ref()) } other => { Err(format!("unrecognized command {other:?}")) @@ -197,8 +197,8 @@ fn download(remote_endpoint: IpEndpoint, update_build: &str, crate_list: Option< let size = content.len(); // The name of the crate file that we downloaded is something like: "/keyboard_log/k#keyboard-36be916209949cef.o". // We need to get just the basename of the file, then remove the crate type prefix ("k#"). - let df_path = Path::new(df.name); - let cfile = new_namespace_dir.write_crate_object_file(df_path.basename(), content)?; + let file_name = Path::new(&df.name).file_name().ok_or("crate file path did not have file name")?; + let cfile = new_namespace_dir.write_crate_object_file(file_name, content)?; println!("Downloaded crate: {:?}, size {}", cfile.lock().get_absolute_path(), size); } @@ -257,7 +257,9 @@ fn apply(base_dir_path: &Path) -> Result<(), String> { // An empty old_crate_name indicates that there is no old crate or object file to remove, we are just loading a new crate (or inserting its object file) None } else { - let old_crate_name = mod_mgmt::crate_name_from_path(&Path::new(old_crate_module_file_name)).to_string(); + let old_crate_name = mod_mgmt::crate_name_from_path(old_crate_module_file_name.as_ref()) + .ok_or("invalid old crate module file name")? + .to_string(); if curr_namespace.get_crate(&old_crate_name).is_none() { println!("\t Note: old crate {:?} was not currently loaded into namespace {:?}.", old_crate_name, curr_namespace.name()); } diff --git a/applications/wasm/src/lib.rs b/applications/wasm/src/lib.rs index 15acd8fdfa..d52d82c0da 100644 --- a/applications/wasm/src/lib.rs +++ b/applications/wasm/src/lib.rs @@ -66,7 +66,7 @@ pub fn main(args: Vec) -> isize { // Verify passed preopened directories are real directories. for dir in preopened_dirs.iter() { - let dir_path = Path::new(dir.clone()); + let dir_path: &Path = dir.as_ref(); match dir_path.get(&curr_wd) { Some(file_dir_enum) => match file_dir_enum { @@ -92,7 +92,7 @@ pub fn main(args: Vec) -> isize { return -1; } - let wasm_binary_path = Path::new(args[0].clone()); + let wasm_binary_path: &Path = args[0].as_ref(); // Parse inputted WebAssembly binary path into byte array. let wasm_binary: Vec = match wasm_binary_path.get(&curr_wd) { diff --git a/kernel/console/src/lib.rs b/kernel/console/src/lib.rs index 38c2bf1ab8..723e628fca 100644 --- a/kernel/console/src/lib.rs +++ b/kernel/console/src/lib.rs @@ -116,8 +116,8 @@ fn shell_loop( mod_mgmt::CrateNamespace::get_crate_object_file_starting_with(&new_app_ns, "hull-") .expect("Couldn't find hull in default app namespace"); - let path = path::Path::new(app_file.lock().get_absolute_path()); - let task = spawn::new_application_task_builder(path, Some(new_app_ns))? + let path = app_file.lock().get_absolute_path(); + let task = spawn::new_application_task_builder(path.as_ref(), Some(new_app_ns))? .name(format!("{address:?}_hull")) .block() .spawn()?; diff --git a/kernel/crate_name_utils/src/lib.rs b/kernel/crate_name_utils/src/lib.rs index 25c2c75256..6de325cd40 100644 --- a/kernel/crate_name_utils/src/lib.rs +++ b/kernel/crate_name_utils/src/lib.rs @@ -25,12 +25,12 @@ use crate_metadata::CrateType; /// * be absolute or relative, /// * optionally end with an extension, e.g., `".o"`, optionally start /// * optionally start with a module file prefix, e.g., `"k#my_crate-.o"`. -pub fn crate_name_from_path(object_file_path: &Path) -> &str { - let stem = object_file_path.file_stem(); +pub fn crate_name_from_path(object_file_path: &Path) -> Option<&str> { + let stem = object_file_path.file_stem()?; if let Ok((_crate_type, _prefix, name)) = CrateType::from_module_name(stem) { - name + Some(name) } else { - stem + Some(stem) } } diff --git a/kernel/crate_swap/src/lib.rs b/kernel/crate_swap/src/lib.rs index db629c0d13..c9f943e87d 100644 --- a/kernel/crate_swap/src/lib.rs +++ b/kernel/crate_swap/src/lib.rs @@ -24,7 +24,7 @@ use core::{ ops::Deref, }; use alloc::{ - borrow::Cow, + borrow::{Cow, ToOwned}, collections::BTreeSet, string::{String, ToString}, sync::Arc, @@ -44,7 +44,7 @@ use mod_mgmt::{ StrongSectionRef, WeakDependent, StrRef, }; -use path::Path; +use path::{Path, PathBuf, Component}; use by_address::ByAddress; @@ -231,7 +231,7 @@ pub fn swap_crates( let reexport_new_symbols_as_old = *reexport_new_symbols_as_old; // Populate the list of new crate names for future usage. - let new_crate_name = crate_name_from_path(&Path::new(new_crate_object_file.lock().get_name())).to_string(); + let new_crate_name = crate_name_from_path(&PathBuf::from(new_crate_object_file.lock().get_name())).ok_or("invalid crate path")?.to_owned(); new_crate_names.push(new_crate_name.clone()); // Get a reference to the old crate that is currently loaded into the `old_namespace`. @@ -651,8 +651,8 @@ pub fn swap_crates( // FIXME: currently we use a hack to determine which namespace this freshly-loaded crate should be added to, // based on which directory its object file { - let objfile_path = Path::new(new_crate_ref.lock_as_ref().object_file.lock().get_absolute_path()); - if objfile_path.components().nth(1) == Some(mod_mgmt::CrateType::Kernel.default_namespace_name()) { + let objfile_path = PathBuf::from(new_crate_ref.lock_as_ref().object_file.lock().get_absolute_path()); + if objfile_path.components().nth(1) == Some(Component::Normal(mod_mgmt::CrateType::Kernel.default_namespace_name())) { let new_target_ns = this_namespace.recursive_namespace().unwrap_or(this_namespace); #[cfg(not(loscd_eval))] warn!("temp fix: changing target_ns from {} to {}, for crate {:?}", this_namespace.name(), new_target_ns.name(), new_crate_ref); @@ -919,7 +919,7 @@ impl SwapRequest { /// as the `this_namespace` argument that `swap_crates()` is invoked with. /// /// * `new_crate_object_file`: a type that can be converted into a crate object file. - /// This can either be a direct reference to the file, an absolute `Path` that points to the file, + /// This can either be a direct reference to the file, an absolute `PathBuf` that points to the file, /// or a prefix string used to find the file in the new namespace's directory of crate object files. /// /// * `new_namespace`: the `CrateNamespace` to which the new crate will be loaded and its symbols added. @@ -957,8 +957,10 @@ impl SwapRequest { let mut matching_files = CrateNamespace::get_crate_object_files_starting_with(&old_namespace, ocn); if matching_files.len() == 1 { let (old_crate_file, real_old_namespace) = matching_files.remove(0); - let old_crate_file_path = Path::new(old_crate_file.lock().get_name()); - let old_crate_full_name = crate_name_from_path(&old_crate_file_path).to_string(); + let old_crate_file_path = PathBuf::from(old_crate_file.lock().get_name()); + let old_crate_full_name = crate_name_from_path(&old_crate_file_path) + .ok_or(InvalidSwapRequest::OldCrateNotFound(old_crate_name.map(|name| name.to_owned()), old_namespace.clone(), Vec::new()))? + .into(); (Some(old_crate_full_name), real_old_namespace) } else { // Here, we couldn't find a single matching loaded crate or crate object file, so we return an error. @@ -1004,7 +1006,7 @@ impl SwapRequest { _ => if path.is_absolute() { return Err(InvalidSwapRequest::NewCrateAbsolutePathNotFound(path)); } else { - return Err(InvalidSwapRequest::NewCratePathNotAbsolute(path)); + return Err(InvalidSwapRequest::NewCratePathBufNotAbsolute(path)); }, } IntoCrateObjectFile::Prefix(prefix) => { @@ -1045,10 +1047,10 @@ pub enum InvalidSwapRequest { /// The enclosed vector is the list of matching crate names or crate object file names /// along with the `CrateNamespace` in which they were found. OldCrateNotFound(Option, Arc, Vec<(String, Arc)>), - /// The given absolute `Path` for the new crate object file could not be resolved. - NewCrateAbsolutePathNotFound(Path), - /// The given `Path` for the new crate object file was not an absolute path, as expected. - NewCratePathNotAbsolute(Path), + /// The given absolute `PathBuf` for the new crate object file could not be resolved. + NewCrateAbsolutePathNotFound(PathBuf), + /// The given `PathBuf` for the new crate object file was not an absolute path, as expected. + NewCratePathBufNotAbsolute(PathBuf), /// A single crate object file could not be found by matching the given prefix `String` /// within the given new `CrateNamespace` (which was searched recursively). /// Either zero or multiple crate object files matched the prefix, @@ -1072,11 +1074,11 @@ impl fmt::Debug for InvalidSwapRequest { } } Self::NewCrateAbsolutePathNotFound(path) => { - dbg.field("reason", &"New Crate Absolute Path Not Found") + dbg.field("reason", &"New Crate Absolute PathBuf Not Found") .field("path", &path); } - Self::NewCratePathNotAbsolute(path) => { - dbg.field("reason", &"New Crate Path Not Absolute") + Self::NewCratePathBufNotAbsolute(path) => { + dbg.field("reason", &"New Crate PathBuf Not Absolute") .field("path", &path); } Self::NewCratePrefixNotFound(prefix, new_namespace, matches) => { diff --git a/kernel/environment/src/lib.rs b/kernel/environment/src/lib.rs index b569283fd0..df3e23d1dc 100644 --- a/kernel/environment/src/lib.rs +++ b/kernel/environment/src/lib.rs @@ -31,7 +31,8 @@ impl Environment { /// Changes the current working directory. #[doc(alias("change"))] pub fn chdir(&mut self, path: &Path) -> Result<()> { - match path.get(&self.working_dir) { + let new_dir = self.working_dir.lock().get(path.as_ref()); + match new_dir { Some(FileOrDir::Dir(dir_ref)) => { self.working_dir = dir_ref; Ok(()) diff --git a/kernel/fault_crate_swap/src/lib.rs b/kernel/fault_crate_swap/src/lib.rs index a05a48156d..775b24a269 100644 --- a/kernel/fault_crate_swap/src/lib.rs +++ b/kernel/fault_crate_swap/src/lib.rs @@ -29,7 +29,7 @@ use mod_mgmt::{ NamespaceDir, IntoCrateObjectFile, }; -use path::Path; +use path::PathBuf; use crate_swap::{SwapRequest, swap_crates}; use fault_log::{RecoveryAction, FaultEntry, remove_unhandled_exceptions, log_handled_fault}; @@ -73,7 +73,7 @@ pub fn do_self_swap( let (into_new_crate_file, new_namespace) = { if let Some(f) = override_namespace_crate_dir.as_ref().and_then(|ns_dir| ns_dir.get_file_starting_with(crate_name)) { (IntoCrateObjectFile::File(f), None) - } else if let Some(FileOrDir::File(f)) = Path::new(String::from(crate_name)).get(curr_dir) { + } else if let Some(FileOrDir::File(f)) = PathBuf::from(String::from(crate_name)).get(curr_dir) { (IntoCrateObjectFile::File(f), None) } else { (IntoCrateObjectFile::Prefix(String::from(crate_name)), None) diff --git a/kernel/first_application/src/lib.rs b/kernel/first_application/src/lib.rs index 817e7f0352..ef2c402355 100644 --- a/kernel/first_application/src/lib.rs +++ b/kernel/first_application/src/lib.rs @@ -24,7 +24,6 @@ extern crate path; use alloc::format; use mod_mgmt::CrateNamespace; -use path::Path; /// See the crate-level docs and this crate's `Cargo.toml` for more. const FIRST_APPLICATION_CRATE_NAME: &str = { @@ -50,9 +49,9 @@ pub fn start() -> Result<(), &'static str> { FIRST_APPLICATION_CRATE_NAME, ).ok_or("Couldn't find first application in default app namespace")?; - let path = Path::new(app_file.lock().get_absolute_path()); + let path = app_file.lock().get_absolute_path(); info!("Starting first application: crate at {:?}", path); - spawn::new_application_task_builder(path, Some(new_app_ns))? + spawn::new_application_task_builder(path.as_ref(), Some(new_app_ns))? .name(format!("first_{}", &FIRST_APPLICATION_CRATE_NAME[.. FIRST_APPLICATION_CRATE_NAME.len() - 1])) .spawn()?; diff --git a/kernel/mod_mgmt/src/lib.rs b/kernel/mod_mgmt/src/lib.rs index 737e1c3e3d..a06cbff74e 100644 --- a/kernel/mod_mgmt/src/lib.rs +++ b/kernel/mod_mgmt/src/lib.rs @@ -21,7 +21,7 @@ use rustc_demangle::demangle; use qp_trie::Trie; use fs_node::{FileOrDir, File, FileRef, DirRef}; use vfs_node::VFSDirectory; -use path::Path; +use path::{Path, PathBuf}; use memfs::MemFile; use hashbrown::HashMap; use crate_metadata_serde::{CLS_SECTION_FLAG, CLS_SYMBOL_TYPE}; @@ -387,7 +387,7 @@ pub enum IntoCrateObjectFile { /// A direct reference to the crate object file. This will be used as-is. File(FileRef), /// An absolute path that points to the crate object file. - AbsolutePath(Path), + AbsolutePath(PathBuf), /// A string prefix that will be used to search for the crate object file in the namespace. /// This must be able to uniquely identify a single crate object file in the namespace directory (recursively searched). Prefix(String), @@ -1052,8 +1052,11 @@ impl CrateNamespace { ) -> Result<(StrongCrateRef, ElfFile<'f>), &'static str> { let mapped_pages = crate_file.as_mapping()?; let size_in_bytes = crate_file.len(); - let abs_path = Path::new(crate_file.get_absolute_path()); - let crate_name = StrRef::from(crate_name_from_path(&abs_path)); + let abs_path = PathBuf::from(crate_file.get_absolute_path()); + let crate_name = StrRef::from( + crate_name_from_path(&abs_path) + .ok_or("failed to get crate name from path")? + ); // First, check to make sure this crate hasn't already been loaded. // Application crates are now added to the CrateNamespace just like kernel crates, @@ -2839,9 +2842,9 @@ impl CrateNamespace { // The object files from the recursive namespace(s) are appended after the files in the initial namespace, // so they'll only be searched if the symbol isn't found in the current namespace. for (potential_crate_file, ns_of_crate_file) in self.method_get_crate_object_files_starting_with(&potential_crate_name) { - let potential_crate_file_path = Path::new(potential_crate_file.lock().get_absolute_path()); + let potential_crate_file_path = PathBuf::from(potential_crate_file.lock().get_absolute_path()); // Check to make sure this crate is not already loaded into this namespace (or its recursive namespace). - if self.get_crate(crate_name_from_path(&potential_crate_file_path)).is_some() { + if self.get_crate(crate_name_from_path(&potential_crate_file_path)?).is_some() { trace!(" (skipping already-loaded crate {:?})", potential_crate_file_path); continue; } diff --git a/kernel/mod_mgmt/src/parse_nano_core.rs b/kernel/mod_mgmt/src/parse_nano_core.rs index 604ea2549e..f3bf0eee7f 100644 --- a/kernel/mod_mgmt/src/parse_nano_core.rs +++ b/kernel/mod_mgmt/src/parse_nano_core.rs @@ -8,7 +8,7 @@ use alloc::{collections::{BTreeMap, BTreeSet}, string::{String, ToString}, sync::Arc}; use crate::{CrateNamespace, mp_range, CLS_SECTION_FLAG}; use fs_node::FileRef; -use path::Path; +use path::PathBuf; use rustc_demangle::demangle; use spin::Mutex; use cow_arc::{CowArc, CowWeak}; @@ -78,7 +78,7 @@ pub fn parse_nano_core( CrateNamespace::get_crate_object_file_starting_with(namespace, NANO_CORE_FILENAME_PREFIX) .ok_or("couldn't find the expected \"nano_core\" kernel file") ); - let nano_core_file_path = Path::new(nano_core_file.lock().get_absolute_path()); + let nano_core_file_path = PathBuf::from(nano_core_file.lock().get_absolute_path()); debug!( "parse_nano_core(): trying to load and parse the nano_core file: {:?}", nano_core_file_path diff --git a/kernel/path/Cargo.toml b/kernel/path/Cargo.toml index 6f23768517..c4119e1d5b 100644 --- a/kernel/path/Cargo.toml +++ b/kernel/path/Cargo.toml @@ -1,28 +1,10 @@ [package] name = "path" version = "0.1.0" -authors = ["Andrew Pham , Christine Wang , Christine Wang "] +description = "File system path manipulation" +edition = "2021" [dependencies] -spin = "0.9.4" - -[dependencies.lazy_static] -features = ["spin_no_std"] -version = "1.4.0" - -[dependencies.fs_node] -path = "../fs_node" - -[dependencies.vfs_node] -path = "../vfs_node" - -[dependencies.root] -path = "../root" - -[dependencies.log] -version = "0.4.8" - -[lib] -crate-type = ["rlib"] +fs_node = { path = "../fs_node" } +root = { path = "../root" } diff --git a/kernel/path/src/component.rs b/kernel/path/src/component.rs new file mode 100644 index 0000000000..0c5afad754 --- /dev/null +++ b/kernel/path/src/component.rs @@ -0,0 +1,339 @@ +/// The implementation is heavily based on `std`. +use crate::Path; + +pub const SEPARATOR: char = '/'; +pub const SEPARATOR_STR: &str = "/"; +pub const CURRENT_DIR_WITH_SEPARATOR: &str = "./"; + +/// An iterator over the components of a path. +/// +/// This struct is created by the [`components`] method on Path. See its +/// documentation for more details. +/// +/// [`components`]: Path::components +#[derive(Clone, PartialEq, PartialOrd, Debug)] +pub struct Components<'a> { + path: &'a Path, + front: State, + back: State, +} + +#[derive(Copy, Clone, PartialEq, PartialOrd, Debug)] +enum State { + StartDir = 0, + Body = 1, + Done = 2, +} + +impl<'a> Iterator for Components<'a> { + type Item = Component<'a>; + + #[inline] + fn next(&mut self) -> Option { + while !self.finished() { + match self.front { + State::StartDir => { + self.front = State::Body; + if self.path.inner.starts_with(SEPARATOR) { + // Trim the starting slash. Even if there are subsequent slashes, they will + // be ignored as we change our state to State::Body. + self.path = Path::new(&self.path.inner[1..]); + return Some(Component::RootDir); + } else if self.include_cur_dir() { + // Trim the dot. + self.path = Path::new(&self.path.inner[1..]); + return Some(Component::CurDir); + } + } + State::Body if !self.path.inner.is_empty() => { + let (rest, component) = self.peek(); + self.path = rest; + if component.is_some() { + return component; + } + } + State::Body => { + self.front = State::Done; + } + State::Done => unreachable!(), + } + } + None + } +} + +impl<'a> DoubleEndedIterator for Components<'a> { + #[inline] + fn next_back(&mut self) -> Option { + while !self.finished() { + match self.back { + State::Body if self.path.inner.len() > self.len_before_body() => { + let (rest, component) = self.peek_back(); + self.path = rest; + if component.is_some() { + return component; + } + } + State::Body => { + self.back = State::StartDir; + } + State::StartDir => { + self.back = State::Done; + if self.has_root() { + self.path = Path::new(&self.path.inner[..self.path.inner.len() - 1]); + return Some(Component::RootDir); + } else if self.include_cur_dir() { + self.path = Path::new(&self.path.inner[..self.path.inner.len() - 1]); + return Some(Component::CurDir); + } + } + State::Done => unreachable!(), + } + } + None + } +} + +impl<'a> Components<'a> { + pub(crate) fn new(path: &'a Path) -> Self { + Self { + path, + front: State::StartDir, + back: State::Body, + } + } + + /// Extracts a slice corresponding to the portion of the path remaining for + /// iteration. + #[inline] + pub fn as_path(&self) -> &'a Path { + let mut components = self.clone(); + if components.front == State::Body { + components.trim_left(); + } + if components.back == State::Body { + components.trim_right(); + } + components.path + } + + fn include_cur_dir(&self) -> bool { + self.path == ".".as_ref() || self.path.inner.starts_with(CURRENT_DIR_WITH_SEPARATOR) + } + + fn has_root(&self) -> bool { + self.path.inner.starts_with(SEPARATOR) + } + + fn len_before_body(&self) -> usize { + let root = if self.front == State::StartDir && self.has_root() { + 1 + } else { + 0 + }; + let cur_dir = if self.front == State::StartDir && self.include_cur_dir() { + 1 + } else { + 0 + }; + root + cur_dir + } + + fn trim_left(&mut self) { + while !self.path.inner.is_empty() { + let (rest, comp) = self.peek(); + if comp.is_some() { + return; + } else { + self.path = rest; + } + } + } + + fn peek(&self) -> (&'a Path, Option>) { + match self.path.inner.split_once(SEPARATOR) { + Some((next, rest)) => (Path::new(rest), component(next)), + None => (Path::new(""), component(self.path.as_ref())), + } + } + + fn trim_right(&mut self) { + while self.path.inner.len() > self.len_before_body() { + let (rest, comp) = self.peek_back(); + if comp.is_some() { + return; + } else { + self.path = rest; + } + } + } + + fn peek_back(&self) -> (&'a Path, Option>) { + match self.path.inner[self.len_before_body()..].rsplit_once(SEPARATOR) { + Some((rest, next)) => ( + Path::new(&self.path.inner[..(self.len_before_body() + rest.len())]), + component(next), + ), + None => ( + Path::new(&self.path.inner[..self.len_before_body()]), + component(&self.path.inner[self.len_before_body()..]), + ), + } + } + + fn finished(&self) -> bool { + self.front == State::Done || self.back == State::Done || self.front > self.back + } +} + +fn component(component: &str) -> Option> { + match component { + "." | "" => None, + ".." => Some(Component::ParentDir), + _ => Some(Component::Normal(component)), + } +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum Component<'a> { + RootDir, + CurDir, + ParentDir, + Normal(&'a str), +} + +impl<'a> AsRef for Component<'a> { + #[inline] + fn as_ref(&self) -> &'a Path { + // TODO: Why is this a lifetime error? + // Path::new(AsRef::::as_ref(self)) + match self { + Component::RootDir => Path::new(SEPARATOR_STR), + Component::CurDir => Path::new("."), + Component::ParentDir => Path::new(".."), + Component::Normal(path) => Path::new(*path), + } + } +} + +impl<'a> AsRef for Component<'a> { + #[inline] + fn as_ref(&self) -> &'a str { + match self { + Component::RootDir => SEPARATOR_STR, + Component::CurDir => ".", + Component::ParentDir => "..", + Component::Normal(path) => path, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_components_iter_front() { + let mut components = Path::new("/tmp/foo/bar.txt").components(); + assert_eq!(components.as_path(), "/tmp/foo/bar.txt".as_ref()); + assert_eq!(components.next(), Some(Component::RootDir)); + assert_eq!(components.as_path(), "tmp/foo/bar.txt".as_ref()); + assert_eq!(components.next(), Some(Component::Normal("tmp"))); + assert_eq!(components.as_path(), "foo/bar.txt".as_ref()); + assert_eq!(components.next(), Some(Component::Normal("foo"))); + assert_eq!(components.as_path(), "bar.txt".as_ref()); + assert_eq!(components.next(), Some(Component::Normal("bar.txt"))); + assert_eq!(components.as_path(), "".as_ref()); + assert_eq!(components.next(), None); + + let mut components = Path::new("//tmp//../foo/./").components(); + assert_eq!(components.as_path(), "//tmp//../foo".as_ref()); + assert_eq!(components.next(), Some(Component::RootDir)); + assert_eq!(components.as_path(), "tmp//../foo".as_ref()); + assert_eq!(components.next(), Some(Component::Normal("tmp"))); + assert_eq!(components.as_path(), "../foo".as_ref()); + assert_eq!(components.next(), Some(Component::ParentDir)); + assert_eq!(components.as_path(), "foo".as_ref()); + assert_eq!(components.next(), Some(Component::Normal("foo"))); + assert_eq!(components.as_path(), "".as_ref()); + assert_eq!(components.next(), None); + + let mut components = Path::new("..//./foo").components(); + assert_eq!(components.as_path(), "..//./foo".as_ref()); + assert_eq!(components.next(), Some(Component::ParentDir)); + assert_eq!(components.as_path(), "foo".as_ref()); + assert_eq!(components.next(), Some(Component::Normal("foo"))); + assert_eq!(components.as_path(), "".as_ref()); + assert_eq!(components.next(), None); + } + + #[test] + fn test_components_iter_back() { + let mut components = Path::new("/tmp/foo/bar.txt").components(); + assert_eq!(components.as_path(), "/tmp/foo/bar.txt".as_ref()); + assert_eq!(components.next_back(), Some(Component::Normal("bar.txt"))); + assert_eq!(components.as_path(), "/tmp/foo".as_ref()); + assert_eq!(components.next_back(), Some(Component::Normal("foo"))); + assert_eq!(components.as_path(), "/tmp".as_ref()); + assert_eq!(components.next_back(), Some(Component::Normal("tmp"))); + assert_eq!(components.as_path(), "/".as_ref()); + assert_eq!(components.next_back(), Some(Component::RootDir)); + assert_eq!(components.as_path(), "".as_ref()); + assert_eq!(components.next_back(), None); + + let mut components = Path::new("//tmp//../foo/./").components(); + assert_eq!(components.as_path(), "//tmp//../foo".as_ref()); + assert_eq!(components.next_back(), Some(Component::Normal("foo"))); + assert_eq!(components.as_path(), "//tmp//..".as_ref()); + assert_eq!(components.next_back(), Some(Component::ParentDir)); + assert_eq!(components.as_path(), "//tmp".as_ref()); + assert_eq!(components.next_back(), Some(Component::Normal("tmp"))); + assert_eq!(components.as_path(), "/".as_ref()); + assert_eq!(components.next_back(), Some(Component::RootDir)); + assert_eq!(components.as_path(), "".as_ref()); + assert_eq!(components.next_back(), None); + + let mut components = Path::new("..//./foo").components(); + assert_eq!(components.as_path(), "..//./foo".as_ref()); + assert_eq!(components.next_back(), Some(Component::Normal("foo"))); + assert_eq!(components.as_path(), "..".as_ref()); + assert_eq!(components.next_back(), Some(Component::ParentDir)); + assert_eq!(components.as_path(), "".as_ref()); + assert_eq!(components.next_back(), None); + } + + #[test] + fn test_components_iter_front_back() { + let mut components = Path::new("/tmp/foo/bar.txt").components(); + assert_eq!(components.as_path(), "/tmp/foo/bar.txt".as_ref()); + assert_eq!(components.next(), Some(Component::RootDir)); + assert_eq!(components.as_path(), "tmp/foo/bar.txt".as_ref()); + assert_eq!(components.next_back(), Some(Component::Normal("bar.txt"))); + assert_eq!(components.as_path(), "tmp/foo".as_ref()); + assert_eq!(components.next(), Some(Component::Normal("tmp"))); + assert_eq!(components.as_path(), "foo".as_ref()); + assert_eq!(components.next_back(), Some(Component::Normal("foo"))); + assert_eq!(components.as_path(), "".as_ref()); + assert_eq!(components.next_back(), None); + + let mut components = Path::new("//tmp//../foo/./").components(); + assert_eq!(components.as_path(), "//tmp//../foo".as_ref()); + assert_eq!(components.next(), Some(Component::RootDir)); + assert_eq!(components.as_path(), "tmp//../foo".as_ref()); + assert_eq!(components.next_back(), Some(Component::Normal("foo"))); + assert_eq!(components.as_path(), "tmp//..".as_ref()); + assert_eq!(components.next_back(), Some(Component::ParentDir)); + assert_eq!(components.as_path(), "tmp".as_ref()); + assert_eq!(components.next(), Some(Component::Normal("tmp"))); + assert_eq!(components.as_path(), "".as_ref()); + assert_eq!(components.next_back(), None); + + let mut components = Path::new("..//./foo").components(); + assert_eq!(components.as_path(), "..//./foo".as_ref()); + assert_eq!(components.next_back(), Some(Component::Normal("foo"))); + assert_eq!(components.as_path(), "..".as_ref()); + assert_eq!(components.next(), Some(Component::ParentDir)); + assert_eq!(components.as_path(), "".as_ref()); + assert_eq!(components.next_back(), None); + } +} diff --git a/kernel/path/src/lib.rs b/kernel/path/src/lib.rs index dfc9a431d0..3791ee497a 100644 --- a/kernel/path/src/lib.rs +++ b/kernel/path/src/lib.rs @@ -1,279 +1,628 @@ +//! File system paths. +//! +//! This crate is designed to mimic `std::path` and as such, much of the +//! documentation and implementation is the same. + #![no_std] -/// This crate contains all the necessary functions for navigating the virtual filesystem / obtaining specific -/// directories via the Path struct -// #[macro_use] extern crate log; + extern crate alloc; -extern crate spin; -extern crate fs_node; -extern crate root; +mod component; + +use alloc::{borrow::ToOwned, string::String, vec, vec::Vec}; use core::{ - fmt, - fmt::Write, + borrow::Borrow, + fmt::{self, Display}, ops::{Deref, DerefMut}, }; -use alloc::{ - string::{String, ToString}, - vec::Vec, - sync::Arc, -}; -use fs_node::{FileOrDir, FileRef, DirRef}; -pub const PATH_DELIMITER: &str = "/"; -pub const EXTENSION_DELIMITER: &str = "."; +pub use component::{Component, Components}; - -/// A structure that represents a relative or absolute path -/// to a file or directory. -#[derive(Debug, Clone, Eq, PartialEq, Hash)] +/// A slice of a path. +/// +/// This type is just a wrapper around a [`str`]. +#[derive(Debug, Eq, Ord, PartialEq, PartialOrd)] +#[repr(transparent)] pub struct Path { - path: String + inner: str, +} + +impl AsRef for Path { + #[inline] + fn as_ref(&self) -> &Path { + self + } +} + +impl AsMut for Path { + #[inline] + fn as_mut(&mut self) -> &mut Path { + self + } } -impl Deref for Path { - type Target = String; +impl AsRef for Path { + #[inline] + fn as_ref(&self) -> &str { + &self.inner + } +} - fn deref(&self) -> &String { - &self.path +impl AsMut for Path { + #[inline] + fn as_mut(&mut self) -> &mut str { + &mut self.inner } } -impl DerefMut for Path { - fn deref_mut(&mut self) -> &mut String { - &mut self.path + +impl AsRef for str { + #[inline] + fn as_ref(&self) -> &Path { + Path::new(self) } } -impl fmt::Display for Path { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!(f, "{}", self.path) +impl AsMut for str { + #[inline] + fn as_mut(&mut self) -> &mut Path { + // SAFETY: Path has the same type layout as str. This is the same + // implementation as std: https://github.com/rust-lang/rust/blob/f654229c27267334023a22233795b88b75fc340e/library/std/src/path.rs#L2047 + unsafe { &mut *(self as *mut str as *mut Path) } } } -impl From for Path { +impl AsRef for String { #[inline] - fn from(path: String) -> Self { - Path { path } + fn as_ref(&self) -> &Path { + self[..].as_ref() } } -impl From for String { +impl AsMut for String { #[inline] - fn from(path: Path) -> String { - path.path + fn as_mut(&mut self) -> &mut Path { + self[..].as_mut() + } +} + +impl Display for Path { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +impl ToOwned for Path { + type Owned = PathBuf; + + #[inline] + fn to_owned(&self) -> Self::Owned { + PathBuf { + inner: self.inner.to_owned(), + } } } impl Path { - /// Creates a new `Path` from the given String. - pub fn new(path: String) -> Self { - Path { path } + /// Wraps a string slice as a path slice. + /// + /// This is a cost-free conversion. + #[inline] + pub fn new(s: &S) -> &Self + where + S: AsRef + ?Sized, + { + // SAFETY: Path has the same type layout as str. This is the same + // implementation as std: https://github.com/rust-lang/rust/blob/f654229c27267334023a22233795b88b75fc340e/library/std/src/path.rs#L2041 + unsafe { &*(s.as_ref() as *const str as *const Path) } } - - /// Returns an iterator over the components of this `Path`, - /// split by the path delimiter `"/"`. - pub fn components(&self) -> impl Iterator { - self.path.split(PATH_DELIMITER) - .filter(|&x| !x.is_empty()) + + /// Produces an iterator over the [`Component`]s of the path. + /// + /// When parsing the path there is a small amount of normalization: + /// - Repeated separators are ignored, so `a/b` and `a//b` both have `a` and + /// `b` as components. + /// - Occurrences of `.` are normalized away, except if they are at the + /// beginning of the path. For example, `a/./b`, `a/b/`, `a/b/.` and `a/b` + /// all have `a` and `b` as components, but `./a/b` starts with an + /// additional [`CurDir`] component. + /// - A trailing slash is normalized away, `/a/b` and `/a/b/` are + /// equivalent. + /// + /// # Examples + /// + /// ``` + /// # use path::{Component, Path}; + /// let mut components = Path::new("/tmp/foo.txt").components(); + /// + /// assert_eq!(components.next(), Some(Component::RootDir)); + /// assert_eq!(components.next(), Some(Component::Normal("tmp"))); + /// assert_eq!(components.next(), Some(Component::Normal("foo.txt"))); + /// assert_eq!(components.next(), None) + /// ``` + /// + /// [`CurDir`]: Component::CurDir + #[inline] + pub fn components(&self) -> Components<'_> { + Components::new(self) } - /// Returns a reverse iterator over the components of this `Path`, - /// split by the path delimiter `"/"`. - pub fn rcomponents(&self) -> impl Iterator { - self.path.rsplit(PATH_DELIMITER) - .filter(|&x| !x.is_empty()) + /// Returns true if the path starts with the root. + /// + /// # Examples + /// + /// ``` + /// # use path::Path; + /// assert!(Path::new("/foo.txt").is_absolute()); + /// assert!(!Path::new("foo.txt").is_absolute()); + /// ``` + #[inline] + pub fn is_absolute(&self) -> bool { + self.inner.starts_with('/') } - /// Returns just the file name, i.e., the trailling component of the path. + /// Creates an owned [`PathBuf`] with `path` adjoined to `self`. + /// + /// If `path` is absolute, it replaces the current path. + /// /// # Examples - /// `"/path/to/my/file.a"` -> "file.a" - /// `"my/file.a"` -> "file.a" - /// `"file.a"` -> "file.a" - pub fn basename(&self) -> &str { - self.rcomponents() - .next() - .unwrap_or(&self.path) - } - - /// Like [`basename()`](#method.basename), but excludes the file extension, if present. - pub fn file_stem(&self) -> &str { - self.basename() - .split(EXTENSION_DELIMITER) - .find(|&x| !x.is_empty()) - .unwrap_or(&self.path) - } - - /// Returns the file extension, if present. - /// If there are multiple extensions as defined by the extension delimiter, `'.'`, - /// then the last one will be treated as the extension. - pub fn extension(&self) -> Option<&str> { - self.basename() - .rsplit(EXTENSION_DELIMITER) - .find(|&x| !x.is_empty()) - } - - /// Returns a canonical and absolute form of the current path (i.e. the path of the working directory) - /// TODO: FIXME: this doesn't work if the `current_path` is absolute. - #[allow(dead_code)] - fn canonicalize(&self, current_path: &Path) -> Path { - let mut new_components = Vec::new(); - // Push the components of the working directory to the components of the new path - new_components.extend(current_path.components()); - // Push components of the path to the components of the new path - for component in self.components() { - if component == "." { - continue; - } else if component == ".." { - new_components.pop(); - } else { - new_components.push(component); - } - } - // Create the new path from its components - let mut new_path = String::new(); - let mut first_cmpnt = true; - for component in new_components { - if first_cmpnt { - new_path.push_str(component); - first_cmpnt = false; - } - else { - write!(new_path, "/{component}").expect("Failed to create new path from its components"); - } - } - Path::new(new_path) - } - - /// Returns a `Path` that expresses a relative path from this `Path` (`self`) - /// to the given `other` `Path`. - // An example algorithm: https://docs.rs/pathdiff/0.1.0/src/pathdiff/lib.rs.html#32-74 - pub fn relative(&self, other: &Path) -> Option { - let mut ita_iter = self.components(); - let mut itb_iter = other.components(); - let mut comps: Vec = Vec::new(); - loop { - match (ita_iter.next(), itb_iter.next()) { - (None, None) => break, - (Some(a), None) => { - comps.push(a.to_string()); - for remaining_a in ita_iter { - comps.push(remaining_a.to_string()); - } - break; - } - (None, _) => comps.push("..".to_string()), - (Some(ref a), Some(ref b)) if comps.is_empty() && a == b => continue, - (Some(_a), Some(ref b)) if b == &".".to_string() => comps.push("..".to_string()), - (Some(_), Some(ref b)) if b == &"..".to_string() => return None, - (Some(a), Some(_)) => { - comps.push("..".to_string()); - for _ in itb_iter { - comps.push("..".to_string()); - } - comps.push(a.to_string()); - for remaining_a in ita_iter { - comps.push(remaining_a.to_string()); - } - break; - } + /// + /// ``` + /// # use path::{Path, PathBuf}; + /// assert_eq!( + /// Path::new("/etc").join("passwd"), + /// PathBuf::from("/etc/passwd") + /// ); + /// assert_eq!(Path::new("/etc").join("/bin/sh"), PathBuf::from("/bin/sh")); + /// ``` + #[inline] + pub fn join

(&self, path: P) -> PathBuf + where + P: AsRef, + { + let mut buf = self.to_owned(); + buf.push(path); + buf + } + + /// Returns the path without its final component, if there is one. + /// + /// # Examples + /// + /// ``` + /// # use path::Path; + /// let path = Path::new("/foo/bar"); + /// let parent = path.parent().unwrap(); + /// assert_eq!(parent, Path::new("/foo")); + /// + /// let grand_parent = parent.parent().unwrap(); + /// assert_eq!(grand_parent, Path::new("/")); + /// assert_eq!(grand_parent.parent(), None); + /// + /// let relative_path = Path::new("foo/bar"); + /// let parent = relative_path.parent(); + /// assert_eq!(parent, Some(Path::new("foo"))); + /// let grand_parent = parent.and_then(Path::parent); + /// assert_eq!(grand_parent, Some(Path::new(""))); + /// assert_eq!(grand_parent, Some(Path::new(""))); + /// let great_grand_parent = grand_parent.and_then(Path::parent); + /// assert_eq!(great_grand_parent, None); + /// ``` + #[inline] + pub fn parent(&self) -> Option<&Self> { + let mut components = self.components(); + + let component = components.next_back(); + component.and_then(|p| match p { + Component::Normal(_) | Component::CurDir | Component::ParentDir => { + Some(components.as_path()) } - } - // Create the new path from its components - let mut new_path = String::new(); - for component in comps.iter() { - write!(new_path, "{component}/").expect("Failed to create new path from its components"); - } - // Remove the trailing slash after the final path component - new_path.pop(); - Some(Path::new(new_path)) + _ => None, + }) } - - /// Returns a boolean indicating whether this Path is absolute, - /// i.e., whether it starts with the root directory. - pub fn is_absolute(&self) -> bool { - self.path.starts_with(PATH_DELIMITER) + + /// Returns the final component of the `Path`, if there is one. + /// + /// If the path is a normal file, this is the file name. If it's the path of + /// a directory, this is the directory name. + /// + /// Returns [`None`] if the path terminates in `..`. + /// + /// # Examples + /// + /// ``` + /// # use path::Path; + /// assert_eq!(Some("bin"), Path::new("/usr/bin/").file_name()); + /// assert_eq!(Some("foo.txt"), Path::new("tmp/foo.txt").file_name()); + /// assert_eq!(Some("foo.txt"), Path::new("foo.txt/.").file_name()); + /// assert_eq!(Some("foo.txt"), Path::new("foo.txt/.//").file_name()); + /// assert_eq!(None, Path::new("foo.txt/..").file_name()); + /// assert_eq!(None, Path::new("/").file_name()); + /// ``` + #[inline] + pub fn file_name(&self) -> Option<&str> { + self.components().next_back().and_then(|p| match p { + Component::Normal(p) => Some(p), + _ => None, + }) } - /// Returns the file or directory specified by the given path, - /// which can either be absolute or relative from the given starting directory. - pub fn get(&self, starting_dir: &DirRef) -> Option { - // let current_path = { Path::new(starting_dir.lock().get_absolute_path()) }; - let mut curr_dir = { - if self.is_absolute() { - Arc::clone(root::get_root()) + /// Extracts the stem (non-extension) portion of [`self.file_name`]. + /// + /// [`self.file_name`]: Path::file_name + /// + /// The stem is: + /// + /// - [`None`], if there is no file name; + /// - The entire file name if there is no embedded `.`; + /// - The entire file name if the file name begins with `.` and has no other + /// `.`s within; + /// - Otherwise, the portion of the file name before the final `.` + /// + /// # Examples + /// + /// ``` + /// # use path::Path; + /// assert_eq!("foo", Path::new("foo.rs").file_stem().unwrap()); + /// assert_eq!(".foo", Path::new(".foo").file_stem().unwrap()); + /// assert_eq!("foo.tar", Path::new("foo.tar.gz").file_stem().unwrap()); + /// ``` + #[inline] + pub fn file_stem(&self) -> Option<&str> { + self.file_name().map(|name| match name.rsplit_once('.') { + Some((before, _)) => { + if before.is_empty() { + // The file starts with a `.` and has no other `.`s within. + name + } else { + before + } } - else { - Arc::clone(starting_dir) + None => name, + }) + } + + // TODO: Move out of path crate. + + /// Returns the file or directory at the given path. + /// + /// The path can be relative or absolute. + /// + /// If the path does not point to a file system object, `None` is returned. + #[inline] + pub fn get(&self, cwd: &fs_node::DirRef) -> Option { + let mut iter = self.components().peekable(); + let mut current = match iter.peek() { + Some(Component::RootDir) => { + iter.next(); + root::get_root().clone() } + _ => cwd.clone(), }; - for component in self.components() { + while let Some(component) = iter.next() { match component { - "." => { - // stay in the current directory, do nothing. + Component::RootDir => current = root::get_root().clone(), + Component::CurDir => {} + Component::ParentDir => { + let temp = current.lock().get_parent_dir()?; + current = temp; } - ".." => { - // navigate to parent directory - let parent_dir = curr_dir.lock().get_parent_dir()?; - curr_dir = parent_dir; - } - cmpnt => { - // navigate to child directory, or return the child file - let child_dir = match curr_dir.lock().get(cmpnt) { - Some(FileOrDir::File(f)) => return Some(FileOrDir::File(f)), - Some(FileOrDir::Dir(d)) => d, - None => return None, - }; - curr_dir = child_dir; + Component::Normal(name) => { + if iter.peek().is_none() { + return current.lock().get(name); + } else { + let temp = match current.lock().get(name) { + Some(fs_node::FileOrDir::Dir(directory)) => directory, + // Path didn't exist or had a file in the middle e.g. /dir/file/dir + _ => return None, + }; + current = temp; + } } } } - Some(FileOrDir::Dir(curr_dir)) + + Some(fs_node::FileOrDir::Dir(current)) } - /// Returns the file specified by the given path, which can be either absolute, - /// or relative from the given starting directory. + // TODO: Move out of path crate. + /// Returns the file at the given path. /// - /// If the path is invalid or points to a directory, then `None` is returned. - pub fn get_file(&self, starting_dir: &DirRef) -> Option { - match self.get(starting_dir) { - Some(FileOrDir::File(file)) => Some(file), + /// The path can be relative or absolute. + /// + /// If the path does not point to a file, `None` is returned. + #[inline] + pub fn get_file(&self, cwd: &fs_node::DirRef) -> Option { + match self.get(cwd) { + Some(fs_node::FileOrDir::File(file)) => Some(file), _ => None, } } - /// Returns the file specified by the given path, which can be either absolute, - /// or relative from the given starting directory. + // TODO: Move out of path crate. + /// Returns the directory at the given path. + /// + /// The path can be relative or absolute. /// - /// If the path is invalid or points to a directory, then `None` is returned. - pub fn get_dir(&self, starting_dir: &DirRef) -> Option { - match self.get(starting_dir) { - Some(FileOrDir::Dir(dir)) => Some(dir), + /// If the path does not point to a directory, `None` is returned. + #[inline] + pub fn get_dir(&self, cwd: &fs_node::DirRef) -> Option { + match self.get(cwd) { + Some(fs_node::FileOrDir::Dir(dir)) => Some(dir), _ => None, } } - /// Returns the file or directory specified by the given absolute path - pub fn get_absolute(path: &Path) -> Option { + // TODO: Move out of path crate. + /// Returns the file or directory at the given absolute path. + /// + /// If the path does not point to a file system object or the path is + /// relative, `None` is returned. + #[inline] + pub fn get_absolute(path: &Path) -> Option { if path.is_absolute() { path.get(root::get_root()) } else { None } } + + /// Construct a relative path from a provided base directory path to the + /// provided path. + #[inline] + pub fn relative

(&self, base: P) -> Option + where + P: AsRef, + { + let base = base.as_ref(); + + if self.is_absolute() != base.is_absolute() { + if self.is_absolute() { + Some(self.to_owned()) + } else { + None + } + } else { + let mut ita = self.components(); + let mut itb = base.components(); + let mut comps: Vec = vec![]; + loop { + match (ita.next(), itb.next()) { + (None, None) => break, + (Some(a), None) => { + comps.push(a); + comps.extend(ita.by_ref()); + break; + } + (None, _) => comps.push(Component::ParentDir), + (Some(a), Some(b)) if comps.is_empty() && a == b => (), + (Some(a), Some(b)) if b == Component::CurDir => comps.push(a), + (Some(_), Some(b)) if b == Component::ParentDir => return None, + (Some(a), Some(_)) => { + comps.push(Component::ParentDir); + for _ in itb { + comps.push(Component::ParentDir); + } + comps.push(a); + comps.extend(ita.by_ref()); + break; + } + } + } + Some(comps.iter().map(|c| -> &Path { c.as_ref() }).collect()) + } + } + + /// Extracts the extension (without the leading dot) of [`self.file_name`], + /// if possible. + /// + /// The extension is: + /// + /// - [`None`], if there is no file name; + /// - [`None`], if there is no embedded `.`; + /// - [`None`], if the file name begins with `.` and has no other `.`s + /// within; + /// - Otherwise, the portion of the file name after the final `.` + /// + /// [`self.file_name`]: Path::file_name + /// + /// # Examples + /// + /// ``` + /// # use path::Path; + /// assert_eq!(None, Path::new("foo").extension()); + /// assert_eq!(None, Path::new(".foo").extension()); + /// assert_eq!("rs", Path::new("foo.rs").extension().unwrap()); + /// assert_eq!("gz", Path::new("foo.tar.gz").extension().unwrap()); + /// ``` + #[inline] + pub fn extension(&self) -> Option<&str> { + self.file_name() + .and_then(|file_name| file_name.rsplit_once('.')) + .and_then(|(before, after)| if before.is_empty() { None } else { Some(after) }) + } } -pub enum PathComponent { - RootDir, - ParentDir, - CurrentDir, +/// An owned, mutable path. +/// +/// This type is just a wrapper around a [`String`]. +#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub struct PathBuf { + inner: String, +} + +impl AsRef for PathBuf { + #[inline] + fn as_ref(&self) -> &str { + AsRef::::as_ref(self).as_ref() + } +} + +impl AsRef for PathBuf { + #[inline] + fn as_ref(&self) -> &Path { + self.deref() + } +} + +impl Borrow for PathBuf { + #[inline] + fn borrow(&self) -> &Path { + self.deref() + } +} + +impl Default for PathBuf { + #[inline] + fn default() -> Self { + Self::new() + } +} + +impl Deref for PathBuf { + type Target = Path; + + #[inline] + fn deref(&self) -> &Self::Target { + self.inner.deref().as_ref() + } +} + +impl DerefMut for PathBuf { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + self.inner.deref_mut().as_mut() + } } -impl PathComponent { - pub fn as_string(self) -> String { - match self { - PathComponent::RootDir => String::from(root::ROOT_DIRECTORY_NAME), - PathComponent::CurrentDir => String::from("."), - PathComponent::ParentDir => String::from(".."), +impl Display for PathBuf { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +impl From for PathBuf { + #[inline] + fn from(value: String) -> Self { + Self { inner: value } + } +} + +impl From for String { + #[inline] + fn from(value: PathBuf) -> Self { + value.inner + } +} + +impl From<&T> for PathBuf +where + T: ?Sized + AsRef, +{ + fn from(value: &T) -> Self { + Self { + inner: value.as_ref().to_owned(), + } + } +} + +impl

FromIterator

for PathBuf +where + P: AsRef, +{ + #[inline] + fn from_iter(iter: T) -> Self + where + T: IntoIterator, + { + let mut inner = String::new(); + let mut iter = iter.into_iter().peekable(); + while let Some(path) = iter.next() { + inner.push_str(path.as_ref().as_ref()); + if iter.peek().is_some() { + inner.push('/'); + } + } + Self { inner } + } +} + +impl PathBuf { + /// Allocates an empty `PathBuf`. + #[inline] + pub fn new() -> Self { + Self { + inner: String::new(), + } + } + + /// Extends self with path. + /// + /// If path is absolute, it replaces the current path. + /// + /// # Examples + /// + /// Pushing a relative path extends the existing path: + /// + /// ``` + /// use std::path::PathBuf; + /// + /// let mut path = PathBuf::from("/tmp"); + /// path.push("file.bk"); + /// assert_eq!(path, PathBuf::from("/tmp/file.bk")); + /// ``` + /// + /// Pushing an absolute path replaces the existing path: + /// + /// ``` + /// use std::path::PathBuf; + /// + /// let mut path = PathBuf::from("/tmp"); + /// path.push("/etc"); + /// assert_eq!(path, PathBuf::from("/etc")); + /// ``` + #[inline] + pub fn push

(&mut self, path: P) + where + P: AsRef, + { + if path.as_ref().is_absolute() { + *self = path.as_ref().to_owned(); + } else { + self.inner.push('/'); + self.inner.push_str(path.as_ref().as_ref()); } } -} \ No newline at end of file + + /// Truncates `self` to [`self.parent`]. + /// + /// Returns `false` and does nothing if [`self.parent`] is [`None`]. + /// Otherwise, returns `true`. + /// + /// [`self.parent`]: Path::parent + /// + /// # Examples + /// + /// ``` + /// use std::path::{Path, PathBuf}; + /// + /// let mut p = PathBuf::from("/spirited/away.rs"); + /// + /// p.pop(); + /// assert_eq!(Path::new("/spirited"), p); + /// p.pop(); + /// assert_eq!(Path::new("/"), p); + /// ``` + #[inline] + pub fn pop(&mut self) -> bool { + match self.parent().map(|p| p.inner.len()) { + Some(len) => { + self.inner.truncate(len); + true + } + None => false, + } + } +} diff --git a/kernel/spawn/src/lib.rs b/kernel/spawn/src/lib.rs index d0ff408b87..ba5ee60d77 100755 --- a/kernel/spawn/src/lib.rs +++ b/kernel/spawn/src/lib.rs @@ -33,7 +33,7 @@ use stack::Stack; use task::{Task, TaskRef, RestartInfo, RunState, JoinableTaskRef, ExitableTaskRef, FailureCleanupFunction}; use task_struct::ExposedTask; use mod_mgmt::{CrateNamespace, SectionType, SECTION_HASH_DELIMITER}; -use path::Path; +use path::{Path, PathBuf}; use fs_node::FileOrDir; use preemption::{hold_preemption, PreemptionGuard}; use no_drop::NoDrop; @@ -213,7 +213,7 @@ type MainFunc = fn(MainFuncArg) -> MainFuncRet; /// If not provided, the new Task will be spawned within the same namespace as the current task. /// pub fn new_application_task_builder( - crate_object_file: Path, // TODO FIXME: use `mod_mgmt::IntoCrateObjectFile`, + crate_object_file: &Path, // TODO FIXME: use `mod_mgmt::IntoCrateObjectFile`, new_namespace: Option>, ) -> Result, &'static str> { @@ -222,7 +222,7 @@ pub fn new_application_task_builder( .ok_or("spawn::new_application_task_builder(): couldn't get current task")?; let crate_object_file = match crate_object_file.get(namespace.dir()) - .or_else(|| Path::new(format!("{}.o", &crate_object_file)).get(namespace.dir())) // retry with ".o" extension + .or_else(|| PathBuf::from(format!("{}.o", &crate_object_file)).get(namespace.dir())) // retry with ".o" extension { Some(FileOrDir::File(f)) => f, _ => return Err("Couldn't find specified file path for new application crate"), diff --git a/kernel/task_fs/src/lib.rs b/kernel/task_fs/src/lib.rs index 80dcced539..2d76b06837 100644 --- a/kernel/task_fs/src/lib.rs +++ b/kernel/task_fs/src/lib.rs @@ -42,7 +42,7 @@ use alloc::sync::Arc; use fs_node::{DirRef, WeakDirRef, Directory, FileOrDir, File, FileRef, FsNode}; use memory::MappedPages; use task::WeakTaskRef; -use path::Path; +use path::{Path, PathBuf}; use io::{ByteReader, ByteWriter, KnownLength, IoError}; @@ -146,7 +146,7 @@ pub struct TaskDir { /// The name of the directory pub name: String, /// The absolute path of the TaskDir - path: Path, + path: PathBuf, task_id: usize, taskref: WeakTaskRef, /// We can store the parent (TaskFs) because it is a persistent directory @@ -163,7 +163,7 @@ impl TaskDir { ) -> Result { let directory = TaskDir { name, - path: Path::new(format!("{TASKS_DIRECTORY_PATH}/{task_id}")), + path: PathBuf::from(format!("{TASKS_DIRECTORY_PATH}/{task_id}")), task_id, taskref, parent: Arc::clone(parent), @@ -227,7 +227,7 @@ impl FsNode for TaskDir { pub struct TaskFile { taskref: WeakTaskRef, task_id: usize, - path: Path, + path: PathBuf, } impl TaskFile { @@ -235,7 +235,7 @@ impl TaskFile { TaskFile { taskref, task_id, - path: Path::new(format!("{TASKS_DIRECTORY_PATH}/{task_id}/task_info")), + path: PathBuf::from(format!("{TASKS_DIRECTORY_PATH}/{task_id}/task_info")), } } @@ -280,7 +280,7 @@ impl FsNode for TaskFile { } fn get_parent_dir(&self) -> Option { - let path = Path::new(format!("{}/{}", TASKS_DIRECTORY_PATH, self.task_id)); + let path = PathBuf::from(format!("{}/{}", TASKS_DIRECTORY_PATH, self.task_id)); match Path::get_absolute(&path) { Some(FileOrDir::Dir(d)) => Some(d), _ => None, @@ -333,7 +333,7 @@ impl File for TaskFile { pub struct MmiDir { taskref: WeakTaskRef, task_id: usize, - path: Path, + path: PathBuf, } impl MmiDir { @@ -342,7 +342,7 @@ impl MmiDir { MmiDir { taskref, task_id, - path: Path::new(format!("{TASKS_DIRECTORY_PATH}/{task_id}/mmi")), + path: PathBuf::from(format!("{TASKS_DIRECTORY_PATH}/{task_id}/mmi")), } } } @@ -383,7 +383,7 @@ impl FsNode for MmiDir { } fn get_parent_dir(&self) -> Option { - let path = Path::new(format!("{}/{}", TASKS_DIRECTORY_PATH, self.task_id)); + let path = PathBuf::from(format!("{}/{}", TASKS_DIRECTORY_PATH, self.task_id)); match Path::get_absolute(&path) { Some(FileOrDir::Dir(d)) => Some(d), _ => None, @@ -402,7 +402,7 @@ impl FsNode for MmiDir { pub struct MmiFile { taskref: WeakTaskRef, task_id: usize, - path: Path, + path: PathBuf, } impl MmiFile { @@ -410,7 +410,7 @@ impl MmiFile { MmiFile { taskref, task_id, - path: Path::new(format!("{TASKS_DIRECTORY_PATH}/{task_id}/mmi/MmiInfo")), + path: PathBuf::from(format!("{TASKS_DIRECTORY_PATH}/{task_id}/mmi/MmiInfo")), } } @@ -434,7 +434,7 @@ impl FsNode for MmiFile { } fn get_parent_dir(&self) -> Option { - let path = Path::new(format!("{}/{}/mmi", TASKS_DIRECTORY_PATH, self.task_id)); + let path = PathBuf::from(format!("{}/{}/mmi", TASKS_DIRECTORY_PATH, self.task_id)); match Path::get_absolute(&path) { Some(FileOrDir::Dir(d)) => Some(d), _ => None, diff --git a/kernel/wasi_interpreter/src/posix_file_system.rs b/kernel/wasi_interpreter/src/posix_file_system.rs index bcd1ea9829..8484e339b1 100644 --- a/kernel/wasi_interpreter/src/posix_file_system.rs +++ b/kernel/wasi_interpreter/src/posix_file_system.rs @@ -8,12 +8,11 @@ //! use alloc::string::String; -use alloc::vec::Vec; use core::{cmp, convert::TryFrom as _}; use fs_node::{DirRef, FileOrDir, FileRef, FsNode}; use hashbrown::HashMap; use memfs::MemFile; -use path::Path; +use path::{PathBuf, Path}; const FIRST_NONRESERVED_FD: wasi::Fd = 3; @@ -146,12 +145,12 @@ impl PosixNode { /// # Return /// Returns relative path of file descriptor as a string. pub fn get_relative_path(&self) -> String { - let absolute_path = Path::new(self.theseus_file_or_dir.get_absolute_path()); + let absolute_path = PathBuf::from(self.theseus_file_or_dir.get_absolute_path()); let wd_path = task::with_current_task(|t| - Path::new(t.get_env().lock().cwd()) + PathBuf::from(t.get_env().lock().cwd()) ).expect("couldn't get current task"); - let relative_path: Path = absolute_path.relative(&wd_path).unwrap(); + let relative_path = absolute_path.relative(wd_path).unwrap(); String::from(relative_path) } @@ -381,12 +380,10 @@ impl FileDescriptorTable { } // Split path into parent directory path and base path. - let file_path: Path = Path::new(String::from(path)); - let mut file_path_tokens: Vec<&str> = file_path.components().collect(); - file_path_tokens.truncate(file_path_tokens.len().saturating_sub(1)); - let parent_dir_path: Path = Path::new(file_path_tokens.join("/")); - let base_name: &str = file_path.basename(); - let base_path: Path = Path::new(String::from(base_name)); + let file_path: &Path = path.as_ref(); + let parent_dir_path = file_path.parent().ok_or(wasi::ERRNO_NOENT)?; + let base_name = file_path.file_name().ok_or(wasi::ERRNO_NOENT)?; + let base_path: &Path = base_name.as_ref(); // Get parent directory. let parent_dir: DirRef = match parent_dir_path.get(&starting_dir) { diff --git a/kernel/window_manager/src/lib.rs b/kernel/window_manager/src/lib.rs index 39d4ec8103..005627557b 100644 --- a/kernel/window_manager/src/lib.rs +++ b/kernel/window_manager/src/lib.rs @@ -31,18 +31,17 @@ extern crate color; use alloc::collections::VecDeque; use alloc::string::ToString; use alloc::sync::{Arc, Weak}; -use alloc::vec::{Vec}; +use alloc::vec::Vec; use compositor::{Compositor, FramebufferUpdates, CompositableRegion}; use mpmc::Queue; use event_types::{Event, MousePositionEvent}; use framebuffer::{Framebuffer, AlphaPixel}; -use color::{Color}; +use color::Color; use shapes::{Coord, Rectangle}; use framebuffer_compositor::{FRAME_COMPOSITOR}; use keycodes_ascii::{KeyAction, KeyEvent, Keycode}; use mouse_data::MouseEvent; -use path::Path; use spin::{Mutex, Once}; use window_inner::{WindowInner, WindowMovingStatus}; @@ -769,8 +768,8 @@ fn keyboard_handle_application(key_input: KeyEvent) -> Result<(), &'static str> let new_app_namespace = mod_mgmt::create_application_namespace(None)?; let shell_objfile = new_app_namespace.dir().get_file_starting_with("shell-") .ok_or("Couldn't find shell application file to run upon Ctrl+Alt+T")?; - let path = Path::new(shell_objfile.lock().get_absolute_path()); - spawn::new_application_task_builder(path, Some(new_app_namespace))? + let path = shell_objfile.lock().get_absolute_path(); + spawn::new_application_task_builder(path.as_ref(), Some(new_app_namespace))? .name("shell".to_string()) .spawn()?; diff --git a/ports/theseus_std/src/env.rs b/ports/theseus_std/src/env.rs index db9349ded6..11e7e546f8 100644 --- a/ports/theseus_std/src/env.rs +++ b/ports/theseus_std/src/env.rs @@ -12,7 +12,7 @@ pub fn current_dir() -> io::Result { ) .and_then(|task| match theseus_path::Path::get_absolute( - &task.get_env().lock().cwd().into() + &task.get_env().lock().cwd().as_ref() ) { Some(FileOrDir::File(_)) => Err(io::Error::new( io::ErrorKind::Other, diff --git a/ports/theseus_std/src/fs_imp.rs b/ports/theseus_std/src/fs_imp.rs index b61c575bbd..0d83aac78d 100644 --- a/ports/theseus_std/src/fs_imp.rs +++ b/ports/theseus_std/src/fs_imp.rs @@ -317,8 +317,11 @@ impl File { let parent_dir_of_file = path.parent() .ok_or(io::Error::from(io::ErrorKind::NotFound))?; - let theseus_file_path = theseus_path::Path::new(path.to_string_lossy().into()); - let theseus_dir_path = theseus_path::Path::new(parent_dir_of_file.to_string_lossy().into()); + let cow_file_path = path.to_string_lossy(); + let theseus_file_path: &theseus_path::Path = cow_file_path.as_ref().as_ref(); + + let cow_dir_path = parent_dir_of_file.to_string_lossy(); + let theseus_dir_path: &theseus_path::Path = cow_dir_path.as_ref().as_ref(); // `create_new` requires that the file must not previously exist at all. if opts.create_new && theseus_file_path.get(&curr_dir).is_some() { @@ -350,7 +353,7 @@ impl File { // Handle accessing a file that must exist (in any mode) else if opts.read || opts.write || opts.append { let working_dir = crate::env::current_dir()?; - theseus_path::Path::new(path.to_string_lossy().into()).get(&working_dir) + theseus_path::Path::new(path.to_string_lossy().as_ref()).get(&working_dir) .ok_or(io::ErrorKind::NotFound.into()) .map(|theseus_file_or_dir| match theseus_file_or_dir { theseus_fs_node::FileOrDir::File(f) => theseus_file_ref_to_file(f, opts.clone()), From c025bd1056131e3dbe4f1fda3657f2cf93fc6bec Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Tue, 24 Oct 2023 13:37:15 +1100 Subject: [PATCH 18/25] Ignore non-code paths in QEMU test CI action (#1063) * The QEMU test workflow won't run if the changes only include: - 'book/**' - 'c_test/**' - 'docker/**' - 'github_pages/**' - 'old_crates/**' - 'scripts/**' - '.gitignore' - 'LICENSE-MIT' - 'README.md' - 'bochsrc.txt' - 'rustfmt.toml' - 'slirp.conf' We specify ignore paths conservatively in order to avoid other paths inadvertently affecting the CI action, which could lead to issues. Signed-off-by: Klimenty Tsoutsman --- .github/workflows/test.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index b2e2f638f1..b292ddcacc 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -2,6 +2,19 @@ name: QEMU Test on: pull_request: types: [synchronize, opened, reopened] + paths-ignore: + - 'book/**' + - 'c_test/**' + - 'docker/**' + - 'github_pages/**' + - 'old_crates/**' + - 'scripts/**' + - '.gitignore' + - 'LICENSE-MIT' + - 'README.md' + - 'bochsrc.txt' + - 'rustfmt.toml' + - 'slirp.conf' jobs: run-tests: runs-on: ubuntu-latest From 7ee342ebfd6b427d7bfeeb7f75d6960cf312388d Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Mon, 30 Oct 2023 15:13:11 -0700 Subject: [PATCH 19/25] Upgrade to latest Rust nightly v1.75 (#1065) Co-authored-by: Klimenty Tsoutsman --- Cargo.lock | 38 +++++++++++++---------- applications/hull/src/builtin.rs | 2 +- applications/loadc/src/lib.rs | 2 +- applications/ping/src/lib.rs | 2 +- applications/serial_echo/src/lib.rs | 2 +- applications/shell/src/lib.rs | 2 +- kernel/ata/Cargo.toml | 2 +- kernel/ata/src/lib.rs | 3 ++ kernel/boot_info/Cargo.toml | 2 +- kernel/frame_allocator/src/lib.rs | 6 ++-- kernel/framebuffer_compositor/src/lib.rs | 7 ++--- kernel/gdt/Cargo.toml | 2 +- kernel/iommu/Cargo.toml | 2 +- kernel/iommu/src/regs.rs | 1 + kernel/logger/src/lib.rs | 4 +-- kernel/memory/Cargo.toml | 2 +- kernel/mod_mgmt/src/lib.rs | 26 ++++++++++++---- kernel/net/src/lib.rs | 2 +- kernel/panic_entry/src/lib.rs | 1 + kernel/path/src/lib.rs | 4 +-- kernel/pte_flags/Cargo.toml | 2 +- kernel/pte_flags/src/lib.rs | 5 +-- kernel/pte_flags/src/pte_flags_aarch64.rs | 23 +++++++------- kernel/pte_flags/src/pte_flags_x86_64.rs | 7 +++-- kernel/scheduler_priority/src/lib.rs | 12 +++---- kernel/spawn/src/lib.rs | 2 +- kernel/text_terminal/Cargo.toml | 2 +- kernel/text_terminal/src/ansi_style.rs | 2 +- kernel/thread_local_macro/src/lib.rs | 7 +++-- kernel/wasi_interpreter/src/lib.rs | 2 +- kernel/window_manager/src/lib.rs | 2 +- libs/keycodes_ascii/Cargo.toml | 2 +- libs/keycodes_ascii/src/lib.rs | 1 + rust-toolchain.toml | 2 +- tools/uefi_builder/aarch64/Cargo.lock | 4 +-- tools/uefi_builder/common/Cargo.lock | 11 ++----- tools/uefi_builder/x86_64/Cargo.lock | 4 +-- 37 files changed, 111 insertions(+), 91 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6baff26288..35c2f6ea2c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -177,7 +177,7 @@ dependencies = [ name = "ata" version = "0.1.0" dependencies = [ - "bitflags", + "bitflags 2.4.1", "interrupts", "io", "log", @@ -282,6 +282,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" + [[package]] name = "block-buffer" version = "0.10.3" @@ -337,7 +343,7 @@ dependencies = [ name = "boot_info" version = "0.1.0" dependencies = [ - "bitflags", + "bitflags 2.4.1", "kernel_config", "memory_structs", "multiboot2", @@ -652,7 +658,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd420c52d86c5b08c494e7e3d16bce23f08f3f6544cccce2d6cc986d3144dca1" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -838,7 +844,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3a0ae7494d9bff013d7b89471f4c424356a71e9752e0c78abe7e6c608a16bb3" dependencies = [ - "bitflags", + "bitflags 1.3.2", "defmt-macros", ] @@ -1142,7 +1148,7 @@ name = "fatfs" version = "0.4.0" source = "git+https://github.com/rafalh/rust-fatfs#87fc1ed5074a32b4e0344fcdde77359ef9e75432" dependencies = [ - "bitflags", + "bitflags 1.3.2", "log", ] @@ -1338,7 +1344,7 @@ version = "0.1.0" dependencies = [ "atomic_linked_list", "bit_field 0.7.0", - "bitflags", + "bitflags 2.4.1", "cpu", "log", "memory", @@ -1663,7 +1669,7 @@ dependencies = [ name = "iommu" version = "0.1.0" dependencies = [ - "bitflags", + "bitflags 2.4.1", "log", "memory", "spin 0.9.4", @@ -1749,7 +1755,7 @@ dependencies = [ name = "keycodes_ascii" version = "0.1.0" dependencies = [ - "bitflags", + "bitflags 2.4.1", "num_enum", ] @@ -2010,7 +2016,7 @@ version = "0.1.0" dependencies = [ "atomic_linked_list", "bit_field 0.7.0", - "bitflags", + "bitflags 2.4.1", "boot_info", "frame_allocator", "kernel_config", @@ -2252,7 +2258,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6170b6f12ea75d8d0f5621e3ed780b041a666c4a5b904c77261fe343d0e798d" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -2822,7 +2828,7 @@ dependencies = [ name = "pte_flags" version = "0.1.0" dependencies = [ - "bitflags", + "bitflags 2.4.1", "cfg-if 1.0.0", ] @@ -2924,7 +2930,7 @@ version = "10.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6823ea29436221176fe662da99998ad3b4db2c7f31e7b6f5fe43adccd6320bb" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -2965,7 +2971,7 @@ checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" name = "region" version = "3.0.0" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core2", "libc", "mach", @@ -3413,7 +3419,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d2e3a36ac8fea7b94e666dfa3871063d6e0a5c9d5d4fec9a1a6b7b6760f0229" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "cfg-if 1.0.0", "defmt", @@ -3955,7 +3961,7 @@ dependencies = [ name = "text_terminal" version = "0.1.0" dependencies = [ - "bitflags", + "bitflags 2.4.1", "core2", "derive_more", "event_types", @@ -4666,7 +4672,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "958cd5cb28e720db2f59ee9dc4235b5f82a183d079fb0e6caf43ad074cfdc66a" dependencies = [ "bit_field 0.10.1", - "bitflags", + "bitflags 1.3.2", "rustversion", "volatile 0.4.4", ] diff --git a/applications/hull/src/builtin.rs b/applications/hull/src/builtin.rs index 9b0ddf9f76..33d559cb4b 100644 --- a/applications/hull/src/builtin.rs +++ b/applications/hull/src/builtin.rs @@ -135,7 +135,7 @@ impl Shell { // TODO: Sort IDs. for (id, job) in self.jobs.lock().iter() { // TODO: Separate job parts if they are in different states. - let Some(state) = &job.parts.get(0).map(|part| &part.state) else { + let Some(state) = &job.parts.first().map(|part| &part.state) else { continue; }; let line = &job.string; diff --git a/applications/loadc/src/lib.rs b/applications/loadc/src/lib.rs index 4fdc62f406..e715705f7a 100644 --- a/applications/loadc/src/lib.rs +++ b/applications/loadc/src/lib.rs @@ -71,7 +71,7 @@ fn rmain(matches: Matches) -> Result { ) ).map_err(|_| String::from("failed to get current task"))?; - let path = matches.free.get(0).ok_or_else(|| "Missing path to ELF executable".to_string())?; + let path = matches.free.first().ok_or_else(|| "Missing path to ELF executable".to_string())?; let file_ref = Path::new(path).get_file(&curr_wd) .ok_or_else(|| format!("Failed to access file at {path:?}"))?; let file = file_ref.lock(); diff --git a/applications/ping/src/lib.rs b/applications/ping/src/lib.rs index cd00999fa7..dd5e53523b 100644 --- a/applications/ping/src/lib.rs +++ b/applications/ping/src/lib.rs @@ -67,7 +67,7 @@ pub fn main(args: Vec) -> isize { } fn _main(matches: Matches) -> Result<(), &'static str> { - let remote = IpAddress::from_str(matches.free.get(0).ok_or("no arguments_provided")?) + let remote = IpAddress::from_str(matches.free.first().ok_or("no arguments_provided")?) .map_err(|_| "invalid argument")?; let interface = net::get_default_interface().ok_or("no network interfaces available")?; diff --git a/applications/serial_echo/src/lib.rs b/applications/serial_echo/src/lib.rs index 2a9c52b48d..2cf4f21660 100644 --- a/applications/serial_echo/src/lib.rs +++ b/applications/serial_echo/src/lib.rs @@ -25,7 +25,7 @@ use serial_port::{SerialPort, SerialPortAddress, get_serial_port}; pub fn main(args: Vec) -> isize { - let serial_port_address = args.get(0) + let serial_port_address = args.first() .and_then(|s| SerialPortAddress::try_from(&**s).ok()) .unwrap_or(SerialPortAddress::COM1); diff --git a/applications/shell/src/lib.rs b/applications/shell/src/lib.rs index 6080e8a25c..ea8197dd84 100644 --- a/applications/shell/src/lib.rs +++ b/applications/shell/src/lib.rs @@ -792,7 +792,7 @@ impl Shell { /// Try to match the incomplete command against all internal commands. Returns a /// vector that contains all matching results. fn find_internal_cmd_match(&mut self, incomplete_cmd: &String) -> Result, &'static str> { - let internal_cmds = vec!["fg", "bg", "jobs", "clear"]; + let internal_cmds = ["fg", "bg", "jobs", "clear"]; let mut match_cmds = Vec::new(); for cmd in internal_cmds.iter() { if cmd.starts_with(incomplete_cmd) { diff --git a/kernel/ata/Cargo.toml b/kernel/ata/Cargo.toml index 2c5b978dc4..663f7805ad 100644 --- a/kernel/ata/Cargo.toml +++ b/kernel/ata/Cargo.toml @@ -6,7 +6,7 @@ version = "0.1.0" edition = "2018" [dependencies] -bitflags = "1.1.0" +bitflags = "2.4.1" log = "0.4.8" spin = "0.9.4" x86_64 = "0.14.8" diff --git a/kernel/ata/src/lib.rs b/kernel/ata/src/lib.rs index 0742ca585d..8c98a90807 100644 --- a/kernel/ata/src/lib.rs +++ b/kernel/ata/src/lib.rs @@ -41,6 +41,7 @@ const PCI_BAR_PORT_MASK: u16 = 0xFFFC; bitflags! { /// The possible error values found in an ATA drive's error port. + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct AtaError: u8 { const BAD_BLOCK = 0x80; const UNCORRECTABLE_DATA = 0x40; @@ -55,6 +56,7 @@ bitflags! { bitflags! { /// The possible status values found in an ATA drive's status port. + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct AtaStatus: u8 { /// When set, the drive's port values are still changing, so ports shouldn't be accessed. const BUSY = 0x80; @@ -73,6 +75,7 @@ bitflags! { bitflags! { /// The possible control values used in an ATA drive's status port. + #[derive(Clone, Copy, Debug, Eq, PartialEq)] struct AtaControl: u8 { /// Set this to read back the High Order Byte of the last-written LBA48 value. const HOB = 0x80; diff --git a/kernel/boot_info/Cargo.toml b/kernel/boot_info/Cargo.toml index 835a0ce11a..ef6e2bf3f8 100644 --- a/kernel/boot_info/Cargo.toml +++ b/kernel/boot_info/Cargo.toml @@ -6,7 +6,7 @@ description = "Abstraction over multiboot2 and UEFI boot information" edition = "2021" [dependencies] -bitflags = "1.3" +bitflags = "2.4.1" kernel_config = { path = "../kernel_config" } memory_structs = { path = "../memory_structs" } multiboot2 = { version = "0.14", optional = true } diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 2bc3a5de4a..90d46b3091 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -1196,13 +1196,13 @@ pub fn allocate_frames_deferred( // because we are searching the general frames list, it doesn't matter if part of the chunk was found // since we only create new reserved frames. trace!("Only part of the requested allocation was found in the general frames list."); - return Err(alloc_err).map_err(From::from); + return Err(From::from(alloc_err)); } - Err(_other) => return Err(alloc_err).map_err(From::from), + Err(_other) => return Err(From::from(alloc_err)), } }, AllocationError::ContiguousChunkNotFound(f, numf) => (f, numf), - _ => return Err(alloc_err).map_err(From::from), + _ => return Err(From::from(alloc_err)), } }; diff --git a/kernel/framebuffer_compositor/src/lib.rs b/kernel/framebuffer_compositor/src/lib.rs index 391efeef39..5b7780a59f 100644 --- a/kernel/framebuffer_compositor/src/lib.rs +++ b/kernel/framebuffer_compositor/src/lib.rs @@ -29,7 +29,7 @@ extern crate shapes; use alloc::collections::BTreeMap; use alloc::vec::{Vec}; -use core::hash::{Hash, Hasher, BuildHasher}; +use core::hash::{Hash, BuildHasher}; use hashbrown::hash_map::{DefaultHashBuilder}; use compositor::{Compositor, FramebufferUpdates, CompositableRegion}; use framebuffer::{Framebuffer, Pixel}; @@ -268,8 +268,5 @@ impl Compositor for FrameCompositor { /// Gets the hash of an item fn hash(item: T) -> u64 { - let builder = DefaultHashBuilder::default(); - let mut hasher = builder.build_hasher(); - item.hash(&mut hasher); - hasher.finish() + DefaultHashBuilder::default().hash_one(&item) } diff --git a/kernel/gdt/Cargo.toml b/kernel/gdt/Cargo.toml index d20598ebe7..216652e635 100644 --- a/kernel/gdt/Cargo.toml +++ b/kernel/gdt/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" spin = "0.9.4" x86_64 = "0.14.8" bit_field = "0.7.0" -bitflags = "1.1.0" +bitflags = "2.4.1" log = "0.4.8" [dependencies.atomic_linked_list] diff --git a/kernel/iommu/Cargo.toml b/kernel/iommu/Cargo.toml index c2672f185f..17d5a6c902 100644 --- a/kernel/iommu/Cargo.toml +++ b/kernel/iommu/Cargo.toml @@ -9,7 +9,7 @@ log = "0.4.8" spin = "0.9.4" volatile = "0.2.7" zerocopy = "0.5.0" -bitflags = "1.3.2" +bitflags = "2.4.1" [dependencies.sync_irq] path = "../../libs/sync_irq" diff --git a/kernel/iommu/src/regs.rs b/kernel/iommu/src/regs.rs index 52e169cbdb..277e21a38e 100644 --- a/kernel/iommu/src/regs.rs +++ b/kernel/iommu/src/regs.rs @@ -183,6 +183,7 @@ bitflags! { /// /// The least significant bits `[22:0]` are `RsvdZ`, /// meaning that they are reserved for future usage and must be set to 0. + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct GlobalStatus: u32 { /// Compatibility Format Interrupt Status const CFIS = 1 << 23; diff --git a/kernel/logger/src/lib.rs b/kernel/logger/src/lib.rs index 44499e6e89..75b7d9993f 100644 --- a/kernel/logger/src/lib.rs +++ b/kernel/logger/src/lib.rs @@ -20,7 +20,7 @@ extern crate sync_irq; extern crate serial_port_basic; use log::{Record, Level, Metadata, Log}; -use core::{borrow::Borrow, fmt::{self, Write}, ops::Deref}; +use core::{fmt::{self, Write}, ops::Deref}; use sync_irq::IrqSafeMutex; use serial_port_basic::SerialPort; use alloc::{sync::Arc, vec::Vec}; @@ -210,7 +210,7 @@ impl DummyLogger { fn write_fmt(&self, arguments: fmt::Arguments) -> fmt::Result { if let Some(logger) = &*LOGGER.lock() { for writer in logger.writers.iter() { - let _ = writer.deref().borrow().lock().write_fmt(arguments); + let _ = writer.deref().lock().write_fmt(arguments); } } else { let _ = EARLY_LOGGER.lock().write_fmt(arguments); diff --git a/kernel/memory/Cargo.toml b/kernel/memory/Cargo.toml index e88940a918..899e8abaa0 100644 --- a/kernel/memory/Cargo.toml +++ b/kernel/memory/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] spin = "0.9.4" -bitflags = "1.1.0" +bitflags = "2.4.1" xmas-elf = { version = "0.6.2", git = "https://github.com/theseus-os/xmas-elf.git" } bit_field = "0.7.0" zerocopy = "0.5.0" diff --git a/kernel/mod_mgmt/src/lib.rs b/kernel/mod_mgmt/src/lib.rs index a06cbff74e..382b348f0c 100644 --- a/kernel/mod_mgmt/src/lib.rs +++ b/kernel/mod_mgmt/src/lib.rs @@ -141,7 +141,7 @@ fn parse_bootloader_modules_into_files( // Closure to create the directory for a new namespace. let create_dir = |dir_name: &str| -> Result { - VFSDirectory::create(dir_name.to_string(), &namespaces_dir).map(|d| NamespaceDir(d)) + VFSDirectory::create(dir_name.to_string(), &namespaces_dir).map(NamespaceDir) }; let mut process_module = |name: &str, size, pages| -> Result<_, &'static str> { @@ -1085,13 +1085,27 @@ impl CrateNamespace { return Err("not a relocatable elf file"); } - // If a `.theseus_merged` section exists, then the object file's sections have been merged by a partial relinking step. + // If a `.theseus_merged` section exists (it should come before any .text section), + // then the object file's sections have been merged by a partial relinking step. // If so, then we can use a much faster version of loading/linking. const THESEUS_MERGED_SEC_NAME: &str = ".theseus_merged"; - const THESEUS_MERGED_SEC_SHNDX: u16 = 1; - let sections_are_merged = elf_file.section_header(THESEUS_MERGED_SEC_SHNDX) - .map(|sec| sec.get_name(&elf_file) == Ok(THESEUS_MERGED_SEC_NAME)) - .unwrap_or(false); + let sections_are_merged = { + let mut found = false; + for sec_name in elf_file + .section_iter() + .filter_map(|sec| sec.get_name(&elf_file).ok()) + { + if sec_name == THESEUS_MERGED_SEC_NAME { + found = true; + break; + } + else if sec_name.starts_with(TEXT_SECTION_NAME) { + found = false; + break; + } + } + found + }; // Allocate enough space to load the sections let section_pages = allocate_section_pages(&elf_file, kernel_mmi_ref)?; diff --git a/kernel/net/src/lib.rs b/kernel/net/src/lib.rs index ea8b632b6e..1760150fef 100644 --- a/kernel/net/src/lib.rs +++ b/kernel/net/src/lib.rs @@ -63,7 +63,7 @@ pub fn get_interfaces() -> &'static Mutex>> { /// Returns the first available interface. pub fn get_default_interface() -> Option> { - NETWORK_INTERFACES.lock().get(0).cloned() + NETWORK_INTERFACES.lock().first().cloned() } /// Returns a port in the range reserved for private, dynamic, and ephemeral diff --git a/kernel/panic_entry/src/lib.rs b/kernel/panic_entry/src/lib.rs index 95dee82617..68a6416703 100644 --- a/kernel/panic_entry/src/lib.rs +++ b/kernel/panic_entry/src/lib.rs @@ -6,6 +6,7 @@ #![no_std] #![feature(alloc_error_handler)] +#![allow(internal_features)] #![feature(lang_items)] #![feature(panic_info_message)] diff --git a/kernel/path/src/lib.rs b/kernel/path/src/lib.rs index 3791ee497a..6bf49fa80a 100644 --- a/kernel/path/src/lib.rs +++ b/kernel/path/src/lib.rs @@ -397,8 +397,8 @@ impl Path { } (None, _) => comps.push(Component::ParentDir), (Some(a), Some(b)) if comps.is_empty() && a == b => (), - (Some(a), Some(b)) if b == Component::CurDir => comps.push(a), - (Some(_), Some(b)) if b == Component::ParentDir => return None, + (Some(a), Some(Component::CurDir)) => comps.push(a), + (Some(_), Some(Component::ParentDir)) => return None, (Some(a), Some(_)) => { comps.push(Component::ParentDir); for _ in itb { diff --git a/kernel/pte_flags/Cargo.toml b/kernel/pte_flags/Cargo.toml index 27605766da..e9a7cdd086 100644 --- a/kernel/pte_flags/Cargo.toml +++ b/kernel/pte_flags/Cargo.toml @@ -7,4 +7,4 @@ edition = "2021" [dependencies] cfg-if = "1.0.0" -bitflags = "1.3.2" +bitflags = "2.4.1" diff --git a/kernel/pte_flags/src/lib.rs b/kernel/pte_flags/src/lib.rs index bd499b84f6..65a87fac12 100644 --- a/kernel/pte_flags/src/lib.rs +++ b/kernel/pte_flags/src/lib.rs @@ -64,6 +64,7 @@ bitflags! { /// /// This type can also be converted *from* `PteFlagsX86_64` and `PteFlagsAarch64`, /// but it may be lossy as only the bit flags defined herein are preserved. + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct PteFlags: u64 { /// * If set, this page is currently "present" in memory. /// * If not set, this page is not in memory, which could mean one of several things: @@ -184,8 +185,8 @@ impl PteFlags { /// that we don't care about. pub const fn new() -> Self { Self::from_bits_truncate( - Self::ACCESSED.bits - | Self::NOT_EXECUTABLE.bits + Self::ACCESSED.bits() + | Self::NOT_EXECUTABLE.bits() ) } diff --git a/kernel/pte_flags/src/pte_flags_aarch64.rs b/kernel/pte_flags/src/pte_flags_aarch64.rs index 00ff1d6a6a..3204731b61 100644 --- a/kernel/pte_flags/src/pte_flags_aarch64.rs +++ b/kernel/pte_flags/src/pte_flags_aarch64.rs @@ -33,6 +33,7 @@ bitflags! { /// /// [MAIR]: https://docs.rs/cortex-a/latest/cortex_a/registers/MAIR_EL1/index.html #[doc(cfg(target_arch = "aarch64"))] + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct PteFlagsAarch64: u64 { /// * If set, this page is currently "present" in memory. /// * If not set, this page is not in memory, which could mean one of several things: @@ -52,7 +53,7 @@ bitflags! { /// This page maps "normal" memory, i.e., non-device memory. /// /// Theseus uses `MAIR_INDEX_0` for this type of memory. - const NORMAL_MEMORY = Self::_MAIR_INDEX_0.bits; + const NORMAL_MEMORY = Self::_MAIR_INDEX_0.bits(); /// Indicates the page's cacheability is described by MAIR Index 1. /// /// Theseus uses this index for "device" memory. @@ -60,7 +61,7 @@ bitflags! { /// This page maps device memory, i.e., memory-mapped I/O registers. /// /// Theseus uses `MAIR_INDEX_1` for this type of memory. - const DEVICE_MEMORY = Self::_MAIR_INDEX_1.bits; + const DEVICE_MEMORY = Self::_MAIR_INDEX_1.bits(); /// Indicates the page's cacheability is described by MAIR Index 2. /// /// This is unused in Theseus. @@ -179,7 +180,7 @@ bitflags! { const _USER_EXEC_NEVER = 1 << 54; /// * If set, this page is not executable. /// * If not set, this page is executable. - const NOT_EXECUTABLE = Self::_PRIV_EXEC_NEVER.bits | Self::_USER_EXEC_NEVER.bits; + const NOT_EXECUTABLE = Self::_PRIV_EXEC_NEVER.bits() | Self::_USER_EXEC_NEVER.bits(); /// See [PteFlags::EXCLUSIVE]. /// We use bit 55 because it is available for custom OS usage on both x86_64 and aarch64. @@ -217,13 +218,13 @@ impl PteFlagsAarch64 { /// that we don't care about. pub const fn new() -> Self { Self::from_bits_truncate( - Self::NORMAL_MEMORY.bits - | Self::OUTER_SHAREABLE.bits - | Self::READ_ONLY.bits - | Self::PAGE_DESCRIPTOR.bits - | Self::ACCESSED.bits - | Self::_NOT_GLOBAL.bits - | Self::NOT_EXECUTABLE.bits + Self::NORMAL_MEMORY.bits() + | Self::OUTER_SHAREABLE.bits() + | Self::READ_ONLY.bits() + | Self::PAGE_DESCRIPTOR.bits() + | Self::ACCESSED.bits() + | Self::_NOT_GLOBAL.bits() + | Self::NOT_EXECUTABLE.bits() ) } @@ -361,7 +362,7 @@ impl PteFlagsAarch64 { /// * The three bits `[2:4]` for MAIR index values. /// * The two bits `[8:9]` for shareability. pub const MASKED_BITS_FOR_CONVERSION: PteFlagsAarch64 = PteFlagsAarch64::from_bits_truncate( - SHAREABLE_BITS_MASK.bits | MAIR_BITS_MASK.bits + SHAREABLE_BITS_MASK.bits() | MAIR_BITS_MASK.bits() ); /// Returns a copy of this `PteFlagsAarch64` with its flags adjusted diff --git a/kernel/pte_flags/src/pte_flags_x86_64.rs b/kernel/pte_flags/src/pte_flags_x86_64.rs index d8c0ffc568..c47a6880d0 100644 --- a/kernel/pte_flags/src/pte_flags_x86_64.rs +++ b/kernel/pte_flags/src/pte_flags_x86_64.rs @@ -22,6 +22,7 @@ bitflags! { /// * Bits `[52:62]` (inclusive) are available for custom OS usage. /// * Bit `63` is reserved by hardware for access flags (noexec). #[doc(cfg(target_arch = "x86_64"))] + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct PteFlagsX86_64: u64 { /// * If set, this page is currently "present" in memory. /// * If not set, this page is not in memory, which could mean one of several things: @@ -49,7 +50,7 @@ bitflags! { /// that index is used to determine the PAT entry that holds the /// memory caching type that is applied to this page. const WRITE_THROUGH = 1 << 3; - const PAT_BIT0 = Self::WRITE_THROUGH.bits; + const PAT_BIT0 = Self::WRITE_THROUGH.bits(); /// * If set, this page's content is never cached, neither for read nor writes. /// * If not set, this page's content is cached as normal, both for read nor writes. @@ -60,8 +61,8 @@ bitflags! { /// memory caching type that is applied to this page. const CACHE_DISABLE = 1 << 4; /// An alias for [`Self::CACHE_DISABLE`] in order to ease compatibility with aarch64. - const DEVICE_MEMORY = Self::CACHE_DISABLE.bits; - const PAT_BIT1 = Self::CACHE_DISABLE.bits; + const DEVICE_MEMORY = Self::CACHE_DISABLE.bits(); + const PAT_BIT1 = Self::CACHE_DISABLE.bits(); /// * The hardware will set this bit when the page is accessed. /// * The OS can then clear this bit once it has acknowledged that the page was accessed, diff --git a/kernel/scheduler_priority/src/lib.rs b/kernel/scheduler_priority/src/lib.rs index 1c11e6b6b8..85107ac951 100644 --- a/kernel/scheduler_priority/src/lib.rs +++ b/kernel/scheduler_priority/src/lib.rs @@ -144,16 +144,16 @@ impl PartialEq for PriorityTaskRef { impl PartialOrd for PriorityTaskRef { fn partial_cmp(&self, other: &Self) -> Option { - match self.priority.cmp(&other.priority) { - // Tasks that were ran longer ago should be prioritised. - Ordering::Equal => Some(self.last_ran.cmp(&other.last_ran).reverse()), - ordering => Some(ordering), - } + Some(self.cmp(other)) } } impl Ord for PriorityTaskRef { fn cmp(&self, other: &Self) -> core::cmp::Ordering { - self.priority.cmp(&other.priority) + match self.priority.cmp(&other.priority) { + // Tasks that were ran longer ago should be prioritised. + Ordering::Equal => self.last_ran.cmp(&other.last_ran).reverse(), + ordering => ordering, + } } } diff --git a/kernel/spawn/src/lib.rs b/kernel/spawn/src/lib.rs index ba5ee60d77..c89966572b 100755 --- a/kernel/spawn/src/lib.rs +++ b/kernel/spawn/src/lib.rs @@ -889,7 +889,7 @@ fn task_cleanup_final_internal(current_task: &ExitableTaskRef) { // that were lazily initialized during this execution of this task. for tls_dtor in thread_local_macro::take_current_tls_destructors().into_iter() { unsafe { - (tls_dtor.dtor)(tls_dtor.object_ptr as *mut u8); + (tls_dtor.dtor)(tls_dtor.object_ptr); } } diff --git a/kernel/text_terminal/Cargo.toml b/kernel/text_terminal/Cargo.toml index a79b5af16d..95ab31da2f 100644 --- a/kernel/text_terminal/Cargo.toml +++ b/kernel/text_terminal/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Kevin Boos "] description = "A text-based terminal emulator that supports ANSI, VT100, xterm, and other standards." [dependencies] -bitflags = "1.1.0" +bitflags = "2.4.1" core2 = { version = "0.4.0", default-features = false, features = ["alloc", "nightly"] } unicode-width = "0.1.8" vte = "0.10.1" diff --git a/kernel/text_terminal/src/ansi_style.rs b/kernel/text_terminal/src/ansi_style.rs index e2913f4755..1178c0fe38 100644 --- a/kernel/text_terminal/src/ansi_style.rs +++ b/kernel/text_terminal/src/ansi_style.rs @@ -419,7 +419,7 @@ bitflags! { /// /// This set of flags is completely self-contained within each `Unit` /// and does not need to reference any previous `Unit`'s flag as an anchor. - #[derive(Default)] + #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] pub struct FormatFlags: u8 { /// If set, this character is displayed in a bright color, which is sometimes called "bold". const BRIGHT = 1 << 0; diff --git a/kernel/thread_local_macro/src/lib.rs b/kernel/thread_local_macro/src/lib.rs index f3ccf2b2ac..e59ae985c3 100644 --- a/kernel/thread_local_macro/src/lib.rs +++ b/kernel/thread_local_macro/src/lib.rs @@ -29,6 +29,7 @@ #![no_std] #![feature(thread_local)] +#![allow(internal_features)] #![feature(allow_internal_unstable)] // The code from Rust std uses unsafe blocks within unsafe functions, @@ -95,9 +96,9 @@ fn register_dtor(object_ptr: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { } -////////////////////////////////////////////////////////////////////////////////////// -//// Everything below here is a modified version of thread_local!() from Rust std //// -////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////// +// Everything below here is a modified version of thread_local!() from Rust std // +////////////////////////////////////////////////////////////////////////////////// use core::cell::{Cell, UnsafeCell}; use core::fmt; diff --git a/kernel/wasi_interpreter/src/lib.rs b/kernel/wasi_interpreter/src/lib.rs index 3441239ecc..88ab2b95a5 100644 --- a/kernel/wasi_interpreter/src/lib.rs +++ b/kernel/wasi_interpreter/src/lib.rs @@ -80,7 +80,7 @@ impl Externals for HostExternals { /// pub fn execute_binary(wasm_binary: Vec, args: Vec, preopen_dirs: Vec) -> isize { // Load wasm binary and prepare it for instantiation. - let module = Module::from_buffer(&wasm_binary).unwrap(); + let module = Module::from_buffer(wasm_binary).unwrap(); // Construct wasmi WebAssembly state machine. let state_machine = wasmi_state_machine::ProcessStateMachine::new( diff --git a/kernel/window_manager/src/lib.rs b/kernel/window_manager/src/lib.rs index 005627557b..470f3ec29d 100644 --- a/kernel/window_manager/src/lib.rs +++ b/kernel/window_manager/src/lib.rs @@ -492,7 +492,7 @@ impl WindowManager { bottom_right: self.mouse + (MOUSE_POINTER_SIZE_X as isize, MOUSE_POINTER_SIZE_Y as isize) }); - self.refresh_top(bounding_box.into_iter()) + self.refresh_top(bounding_box) } /// Move mouse. `relative` indicates the new position relative to current position. diff --git a/libs/keycodes_ascii/Cargo.toml b/libs/keycodes_ascii/Cargo.toml index 5b12474c42..4414cc2ba2 100644 --- a/libs/keycodes_ascii/Cargo.toml +++ b/libs/keycodes_ascii/Cargo.toml @@ -8,7 +8,7 @@ license = "MIT" edition = "2021" [dependencies] -bitflags = "1.1.0" +bitflags = "2.4.1" [dependencies.num_enum] version = "0.5.7" diff --git a/libs/keycodes_ascii/src/lib.rs b/libs/keycodes_ascii/src/lib.rs index 2c43634191..7d3c87c2ba 100644 --- a/libs/keycodes_ascii/src/lib.rs +++ b/libs/keycodes_ascii/src/lib.rs @@ -12,6 +12,7 @@ bitflags! { /// To save space, this is expressed using bitflags /// rather than a series of individual booleans, /// because Rust's `bool` type is a whole byte. + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct KeyboardModifiers: u16 { const CONTROL_LEFT = 1 << 0; const CONTROL_RIGHT = 1 << 1; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 5fbcf5cc1e..088ea75c22 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "nightly-2023-06-22" +channel = "nightly-2023-10-27" components = [ "rust-src", "clippy" ] ## Rustup always installs the host target by default, so we don't need to specify it here. ## All we need to specify is the uefi targets used to build our `uefi-bootloader`. diff --git a/tools/uefi_builder/aarch64/Cargo.lock b/tools/uefi_builder/aarch64/Cargo.lock index 1bfabe0256..3cc98f3b7e 100644 --- a/tools/uefi_builder/aarch64/Cargo.lock +++ b/tools/uefi_builder/aarch64/Cargo.lock @@ -481,9 +481,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.52" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] diff --git a/tools/uefi_builder/common/Cargo.lock b/tools/uefi_builder/common/Cargo.lock index 4bff603e8b..5560ddfd80 100644 --- a/tools/uefi_builder/common/Cargo.lock +++ b/tools/uefi_builder/common/Cargo.lock @@ -385,12 +385,6 @@ version = "6.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" -[[package]] -name = "ovmf-prebuilt" -version = "0.1.0-alpha.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa50141d081512ab30fd9e7e7692476866df5098b028536ad6680212e717fa8d" - [[package]] name = "proc-macro-error" version = "1.0.4" @@ -417,9 +411,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.49" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] @@ -530,7 +524,6 @@ dependencies = [ "clap", "fatfs", "gpt", - "ovmf-prebuilt", "tempfile", ] diff --git a/tools/uefi_builder/x86_64/Cargo.lock b/tools/uefi_builder/x86_64/Cargo.lock index 10dea21aa4..469d3af8e2 100644 --- a/tools/uefi_builder/x86_64/Cargo.lock +++ b/tools/uefi_builder/x86_64/Cargo.lock @@ -481,9 +481,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.52" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] From bafc2e8de64417bd6155d81a41e66c9a3fcdda30 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Tue, 31 Oct 2023 10:01:28 +1100 Subject: [PATCH 20/25] `heap`: fix bug dealing with large allocations (#1067) The implementation of `GlobalAlloc` for `Heap` assumed that all memory under `INITIAL_HEAP_END_ADDR` was allocated using the initial allocator, but this isn't true. `MultipleHeaps` allocates large objects using mapped pages leading to objects allocated in the lower half of memory. When deallocating these objects, `Heap` tried to deallocate them using the initial allocator rather than `MultipleHeaps`. Signed-off-by: Klimenty Tsoutsman --- kernel/heap/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/heap/src/lib.rs b/kernel/heap/src/lib.rs index d5e9700d1c..9d93a8336a 100644 --- a/kernel/heap/src/lib.rs +++ b/kernel/heap/src/lib.rs @@ -89,7 +89,7 @@ unsafe impl GlobalAlloc for Heap { } unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - if (ptr as usize) < INITIAL_HEAP_END_ADDR { + if KERNEL_HEAP_START <= (ptr as usize) && (ptr as usize) < INITIAL_HEAP_END_ADDR { self.initial_allocator.lock().deallocate(ptr, layout); } else { From 8883597de78ebd59359971724497852b8f79dba3 Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Tue, 7 Nov 2023 17:13:46 -0800 Subject: [PATCH 21/25] Parameterize `Frames` (`AllocatedFrames`) with `` (#1070) * This is another step towards huge pages support. * The next step is to parameterize `AllocatedPages` too, and then support varying `PageSize`s within `MappedPages`. * Then, we will add simple routines to convert a 4K-sized `AllocatedPages`/`Frames` into 2M or 1G sized chunks, which keeps the allocation routines simpler because they only have to deal with 4k-sized chunks of pages or frames. * This design enables only the `mapper` module and the related implementation of `MappedPages` to contain most of the complexity around handling multiple page sizes. * This is a better design choice than #1064, which attempts to contain that complexity within `AllocatedPages`/`Frames`, because that design still requires complexity within `mapper`. --- kernel/frame_allocator/src/lib.rs | 284 ++++++++++++++++------------- kernel/memory/src/lib.rs | 1 + kernel/memory/src/paging/mapper.rs | 200 +++++++++++++++++--- kernel/memory/src/paging/mod.rs | 3 +- kernel/memory_structs/src/lib.rs | 139 ++++++++++---- kernel/page_allocator/src/lib.rs | 17 +- kernel/page_table_entry/src/lib.rs | 4 +- 7 files changed, 442 insertions(+), 206 deletions(-) diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 90d46b3091..a9c09425d7 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -31,18 +31,16 @@ mod test; mod static_array_rb_tree; // mod static_array_linked_list; -use core::{borrow::Borrow, cmp::{Ordering, min, max}, ops::{Deref, DerefMut}, fmt}; +use core::{borrow::Borrow, cmp::{Ordering, min, max}, fmt, mem, ops::{Deref, DerefMut}}; use intrusive_collections::Bound; use kernel_config::memory::*; use log::{error, warn, debug, trace}; -use memory_structs::{PhysicalAddress, Frame, FrameRange, MemoryState}; +use memory_structs::{PhysicalAddress, Frame, FrameRange, MemoryState, PageSize, Page4K, Page2M, Page1G}; use spin::Mutex; use static_array_rb_tree::*; use static_assertions::assert_not_impl_any; -const FRAME_SIZE: usize = PAGE_SIZE; -const MIN_FRAME: Frame = Frame::containing_address(PhysicalAddress::zero()); -const MAX_FRAME: Frame = Frame::containing_address(PhysicalAddress::new_canonical(usize::MAX)); +const FRAME_4K_SIZE_IN_BYTES: usize = PAGE_SIZE; // Note: we keep separate lists for "free, general-purpose" areas and "reserved" areas, as it's much faster. @@ -261,7 +259,7 @@ fn check_and_add_free_region( #[derive(Clone, Debug, Eq)] pub struct PhysicalMemoryRegion { /// The Frames covered by this region, an inclusive range. - pub frames: FrameRange, + pub frames: FrameRange, /// The type of this memory region, e.g., whether it's in a free or reserved region. pub typ: MemoryRegionType, } @@ -361,62 +359,69 @@ pub enum MemoryRegionType { /// using a `Frame` value. /// It differs from the behavior of the `Deref` trait which returns a `FrameRange`. #[derive(Eq)] -pub struct Frames { +pub struct Frames { /// The type of this memory chunk, e.g., whether it's in a free or reserved region. typ: MemoryRegionType, - /// The Frames covered by this chunk, an inclusive range. - frames: FrameRange + /// The specific (inclusive) range of frames covered by this memory chunk. + frame_range: FrameRange

, } -/// A type alias for `Frames` in the `Free` state. -pub type FreeFrames = Frames<{MemoryState::Free}>; +/// A type alias for `Frames` in the `Free` state, which only suppports 4K pages. +pub type FreeFrames = Frames<{MemoryState::Free}, Page4K>; + /// A type alias for `Frames` in the `Allocated` state. -pub type AllocatedFrames = Frames<{MemoryState::Allocated}>; +#[allow(type_alias_bounds)] +pub type AllocatedFrames = Frames<{MemoryState::Allocated}, P>; /// A type alias for `Frames` in the `Mapped` state. -pub type MappedFrames = Frames<{MemoryState::Mapped}>; +#[allow(type_alias_bounds)] +pub type MappedFrames = Frames<{MemoryState::Mapped}, P>; /// A type alias for `Frames` in the `Unmapped` state. -pub type UnmappedFrames = Frames<{MemoryState::Unmapped}>; +#[allow(type_alias_bounds)] +pub type UnmappedFrames = Frames<{MemoryState::Unmapped}, P>; // Frames must not be Cloneable, and it must not expose its inner frames as mutable. -assert_not_impl_any!(Frames<{MemoryState::Free}>: DerefMut, Clone); -assert_not_impl_any!(Frames<{MemoryState::Allocated}>: DerefMut, Clone); -assert_not_impl_any!(Frames<{MemoryState::Mapped}>: DerefMut, Clone); -assert_not_impl_any!(Frames<{MemoryState::Unmapped}>: DerefMut, Clone); +assert_not_impl_any!(FreeFrames: DerefMut, Clone); +assert_not_impl_any!(Frames<{MemoryState::Allocated}, Page4K>: DerefMut, Clone); +assert_not_impl_any!(Frames<{MemoryState::Allocated}, Page2M>: DerefMut, Clone); +assert_not_impl_any!(Frames<{MemoryState::Allocated}, Page1G>: DerefMut, Clone); +assert_not_impl_any!(Frames<{MemoryState::Mapped}, Page4K>: DerefMut, Clone); +assert_not_impl_any!(Frames<{MemoryState::Mapped}, Page2M>: DerefMut, Clone); +assert_not_impl_any!(Frames<{MemoryState::Mapped}, Page1G>: DerefMut, Clone); +assert_not_impl_any!(Frames<{MemoryState::Unmapped}, Page4K>: DerefMut, Clone); +assert_not_impl_any!(Frames<{MemoryState::Unmapped}, Page2M>: DerefMut, Clone); +assert_not_impl_any!(Frames<{MemoryState::Unmapped}, Page1G>: DerefMut, Clone); impl FreeFrames { /// Creates a new `Frames` object in the `Free` state. /// /// The frame allocator logic is responsible for ensuring that no two `Frames` objects overlap. - pub(crate) fn new(typ: MemoryRegionType, frames: FrameRange) -> Self { - Frames { - typ, - frames, - } + pub(crate) fn new(typ: MemoryRegionType, frame_range: FrameRange) -> Self { + Frames { typ, frame_range } } /// Consumes this `Frames` in the `Free` state and converts them into the `Allocated` state. - pub fn into_allocated_frames(mut self) -> AllocatedFrames { - let frames = core::mem::replace(&mut self.frames, FrameRange::empty()); + pub fn into_allocated_frames(mut self) -> AllocatedFrames { + let frame_range = mem::take(&mut self.frame_range); let af = Frames { typ: self.typ, - frames, + frame_range, }; - core::mem::forget(self); + mem::forget(self); // TODO: is this necessary? we already replaced self with an empty range. af } } -impl AllocatedFrames { +impl AllocatedFrames

{ /// Consumes this `Frames` in the `Allocated` state and converts them into the `Mapped` state. /// This should only be called once a `MappedPages` has been created from the `Frames`. - pub fn into_mapped_frames(mut self) -> MappedFrames { - let frames = core::mem::replace(&mut self.frames, FrameRange::empty()); + pub fn into_mapped_frames(mut self) -> MappedFrames

{ + let frame_range = mem::take(&mut self.frame_range); let mf = Frames { typ: self.typ, - frames, + frame_range, }; - core::mem::forget(self); + mem::forget(self); // TODO: is this necessary? we already replaced self with an empty range. mf } @@ -424,7 +429,7 @@ impl AllocatedFrames { /// /// ## Panic /// Panics if this `AllocatedFrame` contains multiple frames or zero frames. - pub fn as_allocated_frame(&self) -> AllocatedFrame { + pub fn as_allocated_frame(&self) -> AllocatedFrame

{ assert!(self.size_in_frames() == 1); AllocatedFrame { frame: *self.start(), @@ -436,12 +441,12 @@ impl AllocatedFrames { impl UnmappedFrames { /// Consumes this `Frames` in the `Unmapped` state and converts them into the `Allocated` state. pub fn into_allocated_frames(mut self) -> AllocatedFrames { - let frames = core::mem::replace(&mut self.frames, FrameRange::empty()); + let frame_range = mem::take(&mut self.frame_range); let af = Frames { typ: self.typ, - frames + frame_range, }; - core::mem::forget(self); + mem::forget(self); // TODO: is this necessary? we already replaced self with an empty range. af } } @@ -456,24 +461,28 @@ impl UnmappedFrames { /// This exists to break the cyclic dependency chain between this crate and /// the `page_table_entry` crate, since `page_table_entry` must depend on types /// from this crate in order to enforce safety when modifying page table entries. -pub(crate) fn into_unmapped_frames(frames: FrameRange) -> UnmappedFrames { - let typ = if contains_any(&RESERVED_REGIONS.lock(), &frames) { +pub(crate) fn into_unmapped_frames(frame_range: FrameRange) -> UnmappedFrames { + let typ = if contains_any(&RESERVED_REGIONS.lock(), &frame_range) { MemoryRegionType::Reserved } else { MemoryRegionType::Free }; - Frames{ typ, frames } + Frames { typ, frame_range } } -impl Drop for Frames { +impl Drop for Frames { fn drop(&mut self) { match S { + // Dropping free frames returns them to the allocator's free list. MemoryState::Free => { if self.size_in_frames() == 0 { return; } - let frames = core::mem::replace(&mut self.frames, FrameRange::empty()); - let free_frames: FreeFrames = Frames { typ: self.typ, frames }; + let frame_range = mem::take(&mut self.frame_range); + let free_frames: FreeFrames = Frames { + typ: self.typ, + frame_range: frame_range.into_4k_frames(), + }; let mut list = if free_frames.typ == MemoryRegionType::Reserved { FREE_RESERVED_FRAMES_LIST.lock() @@ -542,29 +551,40 @@ impl Drop for Frames { return; } } - log::error!("BUG: couldn't insert deallocated {:?} into free frames list", self.frames); + log::error!("BUG: couldn't insert deallocated {:?} into free frames list", self.frame_range); } + // Dropping allocated frames converts them into a 4K-sized `FreeFrames`, + // which itself is then dropped. MemoryState::Allocated => { - // trace!("Converting AllocatedFrames to FreeFrames. Drop handler will be called again {:?}", self.frames); - let frames = core::mem::replace(&mut self.frames, FrameRange::empty()); - let _to_drop = FreeFrames { typ: self.typ, frames }; + // trace!("Converting AllocatedFrames to FreeFrames. Drop handler will be called again {:?}", self.frame_range); + let frame_range = mem::take(&mut self.frame_range); + let _to_drop = Frames::<{MemoryState::Free}, P> { + typ: self.typ, + frame_range, + }; } + // Dropping mapped frames currently should not ever happen. MemoryState::Mapped => panic!("We should never drop a mapped frame! It should be forgotten instead."), + // Dropping unmapped frames converts them to `AllocatedFrames`, + // which are then also dropped. MemoryState::Unmapped => { - let frames = core::mem::replace(&mut self.frames, FrameRange::empty()); - let _to_drop = AllocatedFrames { typ: self.typ, frames }; + let frame_range = mem::take(&mut self.frame_range); + let _to_drop = Frames::<{MemoryState::Allocated}, P> { + typ: self.typ, + frame_range, + }; } } } } -impl<'f> IntoIterator for &'f AllocatedFrames { - type IntoIter = AllocatedFramesIter<'f>; - type Item = AllocatedFrame<'f>; +impl<'f, P: PageSize> IntoIterator for &'f AllocatedFrames

{ + type IntoIter = AllocatedFramesIter<'f, P>; + type Item = AllocatedFrame<'f, P>; fn into_iter(self) -> Self::IntoIter { AllocatedFramesIter { _owner: self, - range: self.frames.iter(), + range: self.frame_range.iter(), } } } @@ -579,12 +599,12 @@ impl<'f> IntoIterator for &'f AllocatedFrames { /// [`RangeInclusive`] instances rather than borrowing a reference to it. /// /// [`RangeInclusive`]: range_inclusive::RangeInclusive -pub struct AllocatedFramesIter<'f> { - _owner: &'f AllocatedFrames, - range: range_inclusive::RangeInclusiveIterator, +pub struct AllocatedFramesIter<'f, P: PageSize> { + _owner: &'f AllocatedFrames

, + range: range_inclusive::RangeInclusiveIterator>, } -impl<'f> Iterator for AllocatedFramesIter<'f> { - type Item = AllocatedFrame<'f>; +impl<'f, P: PageSize> Iterator for AllocatedFramesIter<'f, P> { + type Item = AllocatedFrame<'f, P>; fn next(&mut self) -> Option { self.range.next().map(|frame| AllocatedFrame { @@ -598,36 +618,38 @@ impl<'f> Iterator for AllocatedFramesIter<'f> { /// /// The lifetime of this type is tied to the lifetime of its owning `AllocatedFrames`. #[derive(Debug)] -pub struct AllocatedFrame<'f> { - frame: Frame, +pub struct AllocatedFrame<'f, P: PageSize> { + frame: Frame

, _phantom: core::marker::PhantomData<&'f Frame>, } -impl<'f> Deref for AllocatedFrame<'f> { - type Target = Frame; +impl<'f, P: PageSize> Deref for AllocatedFrame<'f, P> { + type Target = Frame

; fn deref(&self) -> &Self::Target { &self.frame } } -assert_not_impl_any!(AllocatedFrame: DerefMut, Clone); +assert_not_impl_any!(AllocatedFrame: DerefMut, Clone); +assert_not_impl_any!(AllocatedFrame: DerefMut, Clone); +assert_not_impl_any!(AllocatedFrame: DerefMut, Clone); /// The result of splitting a `Frames` object into multiple smaller `Frames` objects. -pub struct SplitFrames { - before_start: Option>, - start_to_end: Frames, - after_end: Option>, +pub struct SplitFrames { + before_start: Option>, + start_to_end: Frames, + after_end: Option>, } -impl Frames { +impl Frames { pub(crate) fn typ(&self) -> MemoryRegionType { self.typ } /// Returns a new `Frames` with an empty range of frames. /// Can be used as a placeholder, but will not permit any real usage. - pub const fn empty() -> Frames { + pub const fn empty() -> Frames { Frames { typ: MemoryRegionType::Unknown, - frames: FrameRange::empty(), + frame_range: FrameRange::empty(), } } @@ -659,8 +681,8 @@ impl Frames { }; // ensure the now-merged Frames doesn't run its drop handler - core::mem::forget(other); - self.frames = frames; + mem::forget(other); + self.frame_range = frames; Ok(()) } @@ -674,8 +696,8 @@ impl Frames { /// If `frames_to_extract` is not contained within `self`, then `self` is returned unchanged within an `Err`. pub fn split_range( self, - frames_to_extract: FrameRange - ) -> Result, Self> { + frames_to_extract: FrameRange

+ ) -> Result, Self> { if !self.contains_range(&frames_to_extract) { return Err(self); @@ -684,25 +706,25 @@ impl Frames { let start_frame = *frames_to_extract.start(); let start_to_end = frames_to_extract; - let before_start = if start_frame == MIN_FRAME || start_frame == *self.start() { + let before_start = if start_frame == Frame::

::MIN || start_frame == *self.start() { None } else { - Some(FrameRange::new(*self.start(), *start_to_end.start() - 1)) + Some(FrameRange::

::new(*self.start(), *start_to_end.start() - 1)) }; - let after_end = if *start_to_end.end() == MAX_FRAME || *start_to_end.end() == *self.end() { + let after_end = if *start_to_end.end() == Frame::

::MAX || *start_to_end.end() == *self.end() { None } else { - Some(FrameRange::new(*start_to_end.end() + 1, *self.end())) + Some(FrameRange::

::new(*start_to_end.end() + 1, *self.end())) }; let typ = self.typ; // ensure the original Frames doesn't run its drop handler and free its frames. - core::mem::forget(self); + mem::forget(self); Ok(SplitFrames { - before_start: before_start.map(|frames| Frames { typ, frames }), - start_to_end: Frames { typ, frames: start_to_end }, - after_end: after_end.map(|frames| Frames { typ, frames }), + before_start: before_start.map(|frame_range| Frames { typ, frame_range }), + start_to_end: Frames { typ, frame_range: start_to_end }, + after_end: after_end.map(|frame_range| Frames { typ, frame_range }), }) } @@ -718,24 +740,24 @@ impl Frames { /// Returns an `Err` containing this `Frames` if `at_frame` is otherwise out of bounds, or if `self` was empty. /// /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at - pub fn split_at(self, at_frame: Frame) -> Result<(Self, Self), Self> { + pub fn split_at(self, at_frame: Frame

) -> Result<(Self, Self), Self> { if self.is_empty() { return Err(self); } let end_of_first = at_frame - 1; let (first, second) = if at_frame == *self.start() && at_frame <= *self.end() { - let first = FrameRange::empty(); - let second = FrameRange::new(at_frame, *self.end()); + let first = FrameRange::

::empty(); + let second = FrameRange::

::new(at_frame, *self.end()); (first, second) } else if at_frame == (*self.end() + 1) && end_of_first >= *self.start() { - let first = FrameRange::new(*self.start(), *self.end()); - let second = FrameRange::empty(); + let first = FrameRange::

::new(*self.start(), *self.end()); + let second = FrameRange::

::empty(); (first, second) } else if at_frame > *self.start() && end_of_first <= *self.end() { - let first = FrameRange::new(*self.start(), end_of_first); - let second = FrameRange::new(at_frame, *self.end()); + let first = FrameRange::

::new(*self.start(), end_of_first); + let second = FrameRange::

::new(at_frame, *self.end()); (first, second) } else { @@ -744,44 +766,43 @@ impl Frames { let typ = self.typ; // ensure the original Frames doesn't run its drop handler and free its frames. - core::mem::forget(self); + mem::forget(self); Ok(( - Frames { typ, frames: first }, - Frames { typ, frames: second }, + Frames { typ, frame_range: first }, + Frames { typ, frame_range: second }, )) } } -impl Deref for Frames { - type Target = FrameRange; - fn deref(&self) -> &FrameRange { - &self.frames +impl Deref for Frames { + type Target = FrameRange

; + fn deref(&self) -> &Self::Target { + &self.frame_range } } -impl Ord for Frames { +impl Ord for Frames { fn cmp(&self, other: &Self) -> Ordering { - self.frames.start().cmp(other.frames.start()) + self.frame_range.start().cmp(other.frame_range.start()) } } -impl PartialOrd for Frames { +impl PartialOrd for Frames { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl PartialEq for Frames { +impl PartialEq for Frames { fn eq(&self, other: &Self) -> bool { - self.frames.start() == other.frames.start() + self.frame_range.start() == other.frame_range.start() } } -impl Borrow for &'_ Frames { - fn borrow(&self) -> &Frame { - self.frames.start() +impl Borrow> for &'_ Frames { + fn borrow(&self) -> &Frame

{ + self.frame_range.start() } } - -impl fmt::Debug for Frames { +impl fmt::Debug for Frames { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Frames({:?}, {:?})", self.frames, self.typ) + write!(f, "Frames({:?}, {:?})", self.frame_range, self.typ) } } @@ -825,8 +846,8 @@ impl<'list> DeferredAllocAction<'list> { } impl<'list> Drop for DeferredAllocAction<'list> { fn drop(&mut self) { - let frames1 = core::mem::replace(&mut self.free1, Frames::empty()); - let frames2 = core::mem::replace(&mut self.free2, Frames::empty()); + let frames1 = mem::replace(&mut self.free1, Frames::empty()); + let frames2 = mem::replace(&mut self.free2, Frames::empty()); // Insert all of the chunks, both allocated and free ones, into the list. if frames1.size_in_frames() > 0 { @@ -851,14 +872,14 @@ impl<'list> Drop for DeferredAllocAction<'list> { #[derive(Debug)] enum AllocationError { /// The requested address was not free: it was already allocated. - AddressNotFree(Frame, usize), + AddressNotFree(Frame, usize), /// The requested address was outside the range of this allocator. - AddressNotFound(Frame, usize), + AddressNotFound(Frame, usize), /// The address space was full, or there was not a large-enough chunk /// or enough remaining chunks that could satisfy the requested allocation size. OutOfAddressSpace(usize), /// The starting address was found, but not all successive contiguous frames were available. - ContiguousChunkNotFound(Frame, usize), + ContiguousChunkNotFound(Frame, usize), } impl From for &'static str { fn from(alloc_err: AllocationError) -> &'static str { @@ -876,9 +897,9 @@ impl From for &'static str { /// `requested_frame` to `requested_frame + num_frames`. fn find_specific_chunk( list: &mut StaticArrayRBTree, - requested_frame: Frame, + requested_frame: Frame, num_frames: usize -) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { +) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { // The end frame is an inclusive bound, hence the -1. Parentheses are needed to avoid overflow. let requested_end_frame = requested_frame + (num_frames - 1); @@ -960,7 +981,7 @@ fn find_specific_chunk( fn find_any_chunk( list: &mut StaticArrayRBTree, num_frames: usize -) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { +) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { // During the first pass, we ignore designated regions. match list.0 { Inner::Array(ref mut arr) => { @@ -1022,10 +1043,10 @@ fn retrieve_frames_from_ref(mut frames_ref: ValueRefMut) -> Option, initial_chunk_ref: ValueRefMut, next_chunk: Option, -) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { +) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { // Remove the initial chunk from the free frame list. let mut chosen_chunk = retrieve_frames_from_ref(initial_chunk_ref) .expect("BUG: Failed to retrieve chunk from free list"); @@ -1055,7 +1076,7 @@ fn allocate_from_chosen_chunk( /// Returns `true` if the given list contains *any* of the given `frames`. fn contains_any( list: &StaticArrayRBTree, - frames: &FrameRange, + frames: &FrameRange, ) -> bool { match &list.0 { Inner::Array(ref arr) => { @@ -1095,11 +1116,12 @@ fn contains_any( fn add_reserved_region_to_lists( regions_list: &mut StaticArrayRBTree, frames_list: &mut StaticArrayRBTree, - frames: FrameRange, -) -> Result { + frames: FrameRange, +) -> Result, &'static str> { // first check the regions list for overlaps and proceed only if there are none. - if contains_any(regions_list, &frames){ + if contains_any(regions_list, &frames) { + error!("Failed to add reserved region {frames:X?} due to overlap with existing regions."); return Err("Failed to add reserved region that overlapped with existing reserved regions."); } @@ -1173,7 +1195,7 @@ fn add_reserved_region_to_lists( pub fn allocate_frames_deferred( requested_paddr: Option, num_frames: usize, -) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), &'static str> { +) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), &'static str> { if num_frames == 0 { warn!("frame_allocator: requested an allocation of 0 frames... stupid!"); return Err("cannot allocate zero frames"); @@ -1230,13 +1252,13 @@ pub fn allocate_frames_deferred( pub fn allocate_frames_by_bytes_deferred( requested_paddr: Option, num_bytes: usize, -) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), &'static str> { +) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), &'static str> { let actual_num_bytes = if let Some(paddr) = requested_paddr { - num_bytes + (paddr.value() % FRAME_SIZE) + num_bytes + (paddr.value() % FRAME_4K_SIZE_IN_BYTES) } else { num_bytes }; - let num_frames = (actual_num_bytes + FRAME_SIZE - 1) / FRAME_SIZE; // round up + let num_frames = (actual_num_bytes + FRAME_4K_SIZE_IN_BYTES - 1) / FRAME_4K_SIZE_IN_BYTES; // round up allocate_frames_deferred(requested_paddr, num_frames) } @@ -1244,7 +1266,7 @@ pub fn allocate_frames_by_bytes_deferred( /// Allocates the given number of frames with no constraints on the starting physical address. /// /// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. -pub fn allocate_frames(num_frames: usize) -> Option { +pub fn allocate_frames(num_frames: usize) -> Option> { allocate_frames_deferred(None, num_frames) .map(|(af, _action)| af) .ok() @@ -1256,7 +1278,7 @@ pub fn allocate_frames(num_frames: usize) -> Option { /// /// This function still allocates whole frames by rounding up the number of bytes. /// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. -pub fn allocate_frames_by_bytes(num_bytes: usize) -> Option { +pub fn allocate_frames_by_bytes(num_bytes: usize) -> Option> { allocate_frames_by_bytes_deferred(None, num_bytes) .map(|(af, _action)| af) .ok() @@ -1267,7 +1289,7 @@ pub fn allocate_frames_by_bytes(num_bytes: usize) -> Option { /// /// This function still allocates whole frames by rounding up the number of bytes. /// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. -pub fn allocate_frames_by_bytes_at(paddr: PhysicalAddress, num_bytes: usize) -> Result { +pub fn allocate_frames_by_bytes_at(paddr: PhysicalAddress, num_bytes: usize) -> Result, &'static str> { allocate_frames_by_bytes_deferred(Some(paddr), num_bytes) .map(|(af, _action)| af) } @@ -1276,13 +1298,13 @@ pub fn allocate_frames_by_bytes_at(paddr: PhysicalAddress, num_bytes: usize) -> /// Allocates the given number of frames starting at (inclusive of) the frame containing the given `PhysicalAddress`. /// /// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. -pub fn allocate_frames_at(paddr: PhysicalAddress, num_frames: usize) -> Result { +pub fn allocate_frames_at(paddr: PhysicalAddress, num_frames: usize) -> Result, &'static str> { allocate_frames_deferred(Some(paddr), num_frames) .map(|(af, _action)| af) } -/// An enum that must be returned by the function passed into [`iter_free_frames()`] +/// An enum that must be returned by the function passed into [`inspect_then_allocate_free_frames()`] /// in order to define the post-iteration behavior. pub enum FramesIteratorRequest { /// Keep iterating to the next chunk of frames. @@ -1291,7 +1313,7 @@ pub enum FramesIteratorRequest { Stop, /// Stop iterating, and then attempt to allocate the specified frames. AllocateAt { - requested_frame: Frame, + requested_frame: Frame, num_frames: usize, } } @@ -1302,7 +1324,7 @@ pub enum FramesIteratorRequest { /// See [`FramesIteratorRequest`] for more detail. pub fn inspect_then_allocate_free_frames( func: &mut F, -) -> Result, &'static str> +) -> Result>, &'static str> where F: FnMut(&FreeFrames) -> FramesIteratorRequest { diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs index a3161bd58e..cbd2e7d6b0 100644 --- a/kernel/memory/src/lib.rs +++ b/kernel/memory/src/lib.rs @@ -12,6 +12,7 @@ #![no_std] #![feature(ptr_internals)] +#![feature(more_qualified_paths)] extern crate alloc; diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index e5d03bb1d7..565407321f 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -18,6 +18,7 @@ use core::{ slice, }; use log::{error, warn, debug, trace}; +use memory_structs::{PageSize, Page4K}; use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, Frame, FrameRange, AllocatedPages, AllocatedFrames, UnmappedFrames}; use crate::paging::{ get_current_p4, @@ -50,7 +51,8 @@ use kernel_config::memory::ENTRIES_PER_PAGE_TABLE; /// This is safe because the frame allocator can only be initialized once, and also because /// only this crate has access to that function callback and can thus guarantee /// that it is only invoked for `UnmappedFrameRange`. -pub(super) static INTO_UNMAPPED_FRAMES_FUNC: Once UnmappedFrames> = Once::new(); +pub(super) static INTO_UNMAPPED_FRAMES_FUNC: + Once< fn(FrameRange) -> UnmappedFrames > = Once::new(); /// A convenience function to translate the given virtual address into a /// physical address using the currently-active page table. @@ -61,7 +63,7 @@ pub fn translate(virtual_address: VirtualAddress) -> Option { pub struct Mapper { p4: Unique>, /// The Frame contaning the top-level P4 page table. - pub(crate) target_p4: Frame, + pub(crate) target_p4: Frame, } impl Mapper { @@ -75,7 +77,7 @@ impl Mapper { /// to map the given `p4` frame. /// /// The given `p4` frame is the root frame of that upcoming page table. - pub(crate) fn with_p4_frame(p4: Frame) -> Mapper { + pub(crate) fn with_p4_frame(p4: Frame) -> Mapper { Mapper { p4: Unique::new(P4).unwrap(), // cannot panic; the P4 value is valid target_p4: p4, @@ -87,7 +89,7 @@ impl Mapper { /// to map that new page table. /// /// The given `p4` frame is the root frame of that upcoming page table. - pub(crate) fn upcoming(p4: Frame) -> Mapper { + pub(crate) fn upcoming(p4: Frame) -> Mapper { Mapper { p4: Unique::new(UPCOMING_P4).unwrap(), target_p4: p4, @@ -141,6 +143,9 @@ impl Mapper { } /// Translates a virtual memory `Page` to a physical memory `Frame` by walking the page tables. + /// + /// Note that this only supports translating a 4K page into a 4K frame, + /// but it still correctly handles the cases where huge pages are used in the page tables. pub fn translate_page(&self, page: Page) -> Option { let p3 = self.p4().next_table(page.p4_index()); @@ -183,21 +188,76 @@ impl Mapper { .or_else(huge_page) } + /* + * An unfinished implementation of a generically-sized translate routine that handles huge pages. + * + /// Translates a virtual memory `Page` to a physical memory `Frame` by walking the page tables. + pub fn translate_page(&self, page: Page

) -> Option> { + let p3 = self.p4().next_table(page.p4_index()); + + #[cfg(target_arch = "x86_64")] + let huge_page = || { + p3.and_then(|p3| { + let p3_entry = &p3[page.p3_index()]; + // 1GiB page? + if let Some(start_frame) = p3_entry.pointed_frame() { + if p3_entry.flags().is_huge() { + // address must be 1GiB aligned + assert!(start_frame.number() % (ENTRIES_PER_PAGE_TABLE * ENTRIES_PER_PAGE_TABLE) == 0); + return Some( + Frame::containing_address_1gb(PhysicalAddress::new_canonical( + PAGE_SIZE * (start_frame.number() + page.p2_index() * ENTRIES_PER_PAGE_TABLE + page.p1_index()) + )) + .from_1g_into_generic() + ); + } + } + if let Some(p2) = p3.next_table(page.p3_index()) { + let p2_entry = &p2[page.p2_index()]; + // 2MiB page? + if let Some(start_frame) = p2_entry.pointed_frame() { + if p2_entry.flags().is_huge() { + // address must be 2MiB aligned + assert!(start_frame.number() % ENTRIES_PER_PAGE_TABLE == 0); + return Some( + Frame::containing_address_2mb(PhysicalAddress::new_canonical( + PAGE_SIZE * (start_frame.number() + page.p1_index()) + )) + .from_2m_into_generic() + ); + } + } + } + None + }) + }; + #[cfg(target_arch = "aarch64")] + let huge_page = || { todo!("huge page (block descriptor) translation for aarch64") }; + + p3.and_then(|p3| p3.next_table(page.p3_index())) + .and_then(|p2| p2.next_table(page.p2_index())) + .and_then(|p1| p1[page.p1_index()].pointed_frame()) + .map(Frame::from_4k_into_generic) + .or_else(huge_page) + } + */ + /// An internal function that performs the actual mapping of a range of allocated `pages` /// to a range of allocated `frames`. /// /// Returns a tuple of the new `MappedPages` object containing the allocated `pages` /// and the allocated `frames` object. - pub(super) fn internal_map_to( + pub(super) fn internal_map_to( &mut self, - pages: AllocatedPages, - frames: Frames, - flags: Flags, - ) -> Result<(MappedPages, Frames::Inner), &'static str> - where - Frames: OwnedOrBorrowed, - Flags: Into, + pages: AllocatedPages/*

*/, + frames: BF, + flags: FL, + ) -> Result<(MappedPages, BF::Inner), &'static str> + where + P: PageSize, + BF: OwnedOrBorrowed>, + FL: Into, { let frames = frames.into_inner(); let flags = flags.into(); @@ -207,7 +267,7 @@ impl Mapper { // we are mapping it exclusively (i.e., owned `AllocatedFrames` are passed in). let actual_flags = flags .valid(true) - .exclusive(Frames::OWNED); + .exclusive(BF::OWNED); let pages_count = pages.size_in_pages(); let frames_count = frames.borrow().size_in_frames(); @@ -218,6 +278,8 @@ impl Mapper { return Err("map_allocated_pages_to(): page count must equal frame count"); } + // TODO FIXME: implement huge pages here. + // iterate over pages and frames in lockstep for (page, frame) in pages.range().clone().into_iter().zip(frames.borrow().into_iter()) { let p3 = self.p4_mut().next_table_create(page.p4_index(), higher_level_flags); @@ -246,12 +308,16 @@ impl Mapper { /// Maps the given virtual `AllocatedPages` to the given physical `AllocatedFrames`. /// /// Consumes the given `AllocatedPages` and returns a `MappedPages` object which contains those `AllocatedPages`. - pub fn map_allocated_pages_to>( + pub fn map_allocated_pages_to( &mut self, - pages: AllocatedPages, - frames: AllocatedFrames, - flags: F, - ) -> Result { + pages: AllocatedPages /*

*/, + frames: AllocatedFrames

, + flags: FL, + ) -> Result + where + P: PageSize, + FL: Into, + { let (mapped_pages, frames) = self.internal_map_to(pages, Owned(frames), flags)?; // Currently we forget the actual `AllocatedFrames` object because @@ -264,13 +330,17 @@ impl Mapper { } - /// Maps the given `AllocatedPages` to randomly chosen (allocated) physical frames. - /// + /// Maps the given 4K-sized `AllocatedPages` to randomly chosen (allocated) physical frames. + /// /// Consumes the given `AllocatedPages` and returns a `MappedPages` object which contains those `AllocatedPages`. - pub fn map_allocated_pages>( + /// + /// ## Note on huge pages + /// This function only supports 4K-sized pages, not huge pages. + /// To use huge pages, you must provide the huge frames and call [`Self::map_allocated_pages_to()`]. + pub fn map_allocated_pages>( &mut self, pages: AllocatedPages, - flags: F, + flags: FL, ) -> Result { let flags = flags.into(); let higher_level_flags = flags.adjust_for_higher_level_pte(); @@ -325,11 +395,11 @@ impl Mapper { /// Consumes the given `AllocatedPages` and returns a `MappedPages` object /// which contains those `AllocatedPages`. #[doc(hidden)] - pub unsafe fn map_to_non_exclusive>( + pub unsafe fn map_to_non_exclusive>( mapper: &mut Self, pages: AllocatedPages, - frames: &AllocatedFrames, - flags: F, + frames: &AllocatedFrames, + flags: FL, ) -> Result { // In this function, none of the frames can be mapped as exclusive // because we're accepting a *reference* to an `AllocatedFrames`, not consuming it. @@ -339,6 +409,84 @@ impl Mapper { } +/// A macro for applying the same field/method accessors to all variants +/// in an enum based on the three possible [`PageSize`]s. +#[macro_export] +macro_rules! chunk_sized_expr { + ($t:ty, $chunk:ident, .$($method:tt)*) => { + match $chunk { + <$t>::Normal4K(c) => c.$($method)*, + <$t>::Huge2M(c) => c.$($method)*, + <$t>::Huge1G(c) => c.$($method)*, + } + }; +} + +/// A version of [`AllocatedPages`] that encodes its [`PageSize`] with internal enum variants. +#[derive(Debug)] +#[allow(dead_code)] +pub enum AllocatedPagesSized { + // TODO: support huge pages via the `P: PageSize` parameter. + + /// A range of normal 4K-sized allocated pages. + Normal4K(AllocatedPages /* */), + /// A range of huge 2M-sized allocated pages. + Huge2M(AllocatedPages /* */), + /// A range of huge 1G-sized allocated pages. + Huge1G(AllocatedPages /* */), +} +impl Default for AllocatedPagesSized { + fn default() -> Self { + Self::empty() + } +} +impl From*/> for AllocatedPagesSized { + fn from(p: AllocatedPages/* */) -> Self { + Self::Normal4K(p) + } +} +/* + * TODO: support huge pages via the `P: PageSize` parameter. + * +impl From> for AllocatedPagesSized { + fn from(p: AllocatedPages) -> Self { + Self::Huge2M(chunk) + } +} +impl From> for AllocatedPagesSized { + fn from(p: AllocatedPages) -> Self { + Self::Huge1G(chunk) + } +} +*/ +#[allow(dead_code)] +impl AllocatedPagesSized { + /// Returns an empty `AllocatedPagesSized` object that performs no page allocation. + /// Can be used as a placeholder, but will not permit any real usage. + pub const fn empty() -> Self { + Self::Normal4K(AllocatedPages::empty()) + } + /// Returns the 4K-sized number of the starting page of the enclosed `AllocatedPages`. + pub const fn number(&self) -> usize { + chunk_sized_expr!(Self, self, .start().number()) + } + /// Returns the virtual address of the starting page of the enclosed `AllocatedPages`. + pub const fn start_address(&self) -> VirtualAddress { + chunk_sized_expr!(Self, self, .start_address()) + } + /// Converts this into a 4K-sized `AllocatedPages`. + pub fn into_4k(self) -> AllocatedPages /* */ { + // To make this a const fn, we cannot use the implementations of `Into`. + match self { + Self::Normal4K(p) => p, + Self::Huge2M(p) => p, /* TODO: support huge page range conversions */ + Self::Huge1G(p) => p, /* TODO: support huge page range conversions */ + } + } +} + + + /// Represents a contiguous range of virtual memory pages that are currently mapped. /// A `MappedPages` object can only have a single range of contiguous pages, not multiple disjoint ranges. /// This does not guarantee that its pages are mapped to frames that are contiguous in physical memory. @@ -350,7 +498,7 @@ impl Mapper { #[derive(Debug)] pub struct MappedPages { /// The Frame containing the top-level P4 page table that this MappedPages was originally mapped into. - page_table_p4: Frame, + page_table_p4: Frame, /// The range of allocated virtual pages contained by this mapping. pages: AllocatedPages, // The PTE flags that define the page permissions of this mapping. diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index 92093c1f49..cd1daaa400 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -30,6 +30,7 @@ use core::{ use log::debug; use super::{ Frame, FrameRange, PageRange, VirtualAddress, PhysicalAddress, + Page4K, AllocatedPages, allocate_pages, AllocatedFrames, UnmappedFrames, PteFlags, InitialMemoryMappings, tlb_flush_all, tlb_flush_virt_addr, get_p4, find_section_memory_bounds, @@ -214,7 +215,7 @@ impl PageTable { /// Returns the current top-level (P4) root page table frame. -pub fn get_current_p4() -> Frame { +pub fn get_current_p4() -> Frame { Frame::containing_address(get_p4()) } diff --git a/kernel/memory_structs/src/lib.rs b/kernel/memory_structs/src/lib.rs index 20181c16eb..6d0bef8625 100644 --- a/kernel/memory_structs/src/lib.rs +++ b/kernel/memory_structs/src/lib.rs @@ -21,7 +21,7 @@ use core::{ marker::{ConstParamTy, PhantomData}, ops::{Add, AddAssign, Deref, DerefMut, Sub, SubAssign}, }; -use kernel_config::memory::{MAX_PAGE_NUMBER, PAGE_SIZE, ENTRIES_PER_PAGE_TABLE}; +use kernel_config::memory::{MAX_PAGE_NUMBER, MAX_VIRTUAL_ADDRESS, PAGE_SIZE, ENTRIES_PER_PAGE_TABLE}; use zerocopy::FromBytes; use paste::paste; use derive_more::*; @@ -39,7 +39,7 @@ pub enum MemChunkSize { /// /// This is used to parameterize `Page`- and `Frame`-related types with a page size, /// in order to define normal and huge pages in a generic manner. -pub trait PageSize: Ord + PartialOrd + Clone + Copy + private::Sealed { +pub trait PageSize: Ord + PartialOrd + Clone + Copy + private::Sealed + 'static { const SIZE: MemChunkSize; const NUM_4K_PAGES: usize; const SIZE_IN_BYTES: usize; @@ -312,7 +312,6 @@ implement_address!( ); - /// A macro for defining `Page` and `Frame` structs /// and implementing their common traits, which are generally identical. macro_rules! implement_page_frame { @@ -345,6 +344,15 @@ macro_rules! implement_page_frame { size: PhantomData } } + + #[doc = "Converts a known 4K-sized `" $TypeName "` into a + `" $TypeName "

` with a generic `PageSize` parameter."] + pub const fn from_4k_into_generic(self) -> $TypeName

{ + $TypeName::

{ + number: self.number, + size: PhantomData + } + } } impl $TypeName { #[doc = "Returns the 2MiB huge `" $TypeName "` containing the given `" $address "`."] @@ -354,6 +362,15 @@ macro_rules! implement_page_frame { size: PhantomData, } } + + #[doc = "Converts a known 2M-sized `" $TypeName "` into a + `" $TypeName "

` with a generic `PageSize` parameter."] + pub const fn from_2m_into_generic(self) -> $TypeName

{ + $TypeName::

{ + number: self.number, + size: PhantomData + } + } } impl $TypeName { #[doc = "Returns the 1GiB huge `" $TypeName "` containing the given `" $address "`."] @@ -363,8 +380,29 @@ macro_rules! implement_page_frame { size: PhantomData, } } + + #[doc = "Converts a known 1G-sized `" $TypeName "` into a + `" $TypeName "

` with a generic `PageSize` parameter."] + pub const fn from_1g_into_generic(self) -> $TypeName

{ + $TypeName::

{ + number: self.number, + size: PhantomData + } + } } - impl $TypeName

{ + impl $TypeName

{ + #[doc = "The minimum (smallest) valid value a `" $TypeName "` can have."] + pub const MIN: $TypeName

= $TypeName { + number: 0, + size: PhantomData, + }; + + #[doc = "The maximum (largest) valid value a `" $TypeName "` can have."] + pub const MAX: $TypeName

= $TypeName { + number: (MAX_VIRTUAL_ADDRESS / P::SIZE_IN_BYTES) * P::NUM_4K_PAGES, + size: PhantomData, + }; + #[doc = "Returns the 4K-sized number of this `" $TypeName "`."] #[inline(always)] pub const fn number(&self) -> usize { @@ -381,12 +419,12 @@ macro_rules! implement_page_frame { P::SIZE } } - impl fmt::Debug for $TypeName

{ + impl fmt::Debug for $TypeName

{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, concat!(stringify!($TypeName), "(", $prefix, "{:#X})"), self.start_address()) } } - impl Add for $TypeName

{ + impl Add for $TypeName

{ type Output = $TypeName

; fn add(self, rhs: usize) -> $TypeName

{ // cannot exceed max page number (which is also max frame number) @@ -399,7 +437,7 @@ macro_rules! implement_page_frame { } } } - impl AddAssign for $TypeName

{ + impl AddAssign for $TypeName

{ fn add_assign(&mut self, rhs: usize) { *self = $TypeName { number: core::cmp::min( @@ -410,7 +448,7 @@ macro_rules! implement_page_frame { } } } - impl Sub for $TypeName

{ + impl Sub for $TypeName

{ type Output = $TypeName

; fn sub(self, rhs: usize) -> $TypeName

{ $TypeName { @@ -419,7 +457,7 @@ macro_rules! implement_page_frame { } } } - impl SubAssign for $TypeName

{ + impl SubAssign for $TypeName

{ fn sub_assign(&mut self, rhs: usize) { *self = $TypeName { number: self.number.saturating_sub(rhs.saturating_mul(P::NUM_4K_PAGES)), @@ -427,7 +465,7 @@ macro_rules! implement_page_frame { } } } - impl Step for $TypeName

{ + impl Step for $TypeName

{ #[inline] fn steps_between(start: &$TypeName

, end: &$TypeName

) -> Option { Step::steps_between(&start.number, &end.number) @@ -494,7 +532,7 @@ implement_page_frame!(Page, "virtual", "v", VirtualAddress); implement_page_frame!(Frame, "physical", "p", PhysicalAddress); // Implement other functions for the `Page` type that aren't relevant for `Frame. -impl Page

{ +impl Page

{ /// Returns the 9-bit part of this `Page`'s [`VirtualAddress`] that is the index into the P4 page table entries list. pub const fn p4_index(&self) -> usize { (self.number >> 27) & 0x1FF @@ -532,11 +570,6 @@ macro_rules! implement_page_frame_range { pub struct $TypeName(RangeInclusive<$chunk::

>); impl $TypeName { - #[doc = "Creates a `" $TypeName "` that will always yield `None` when iterated."] - pub const fn empty() -> Self { - Self::new($chunk { number: 1, size: PhantomData }, $chunk { number: 0, size: PhantomData }) - } - #[doc = "A convenience method for creating a new `" $TypeName "` that spans \ all [`" $chunk "`]s from the given [`" $address "`] to an end bound based on the given size."] pub const fn [](starting_addr: $address, size_in_bytes: usize) -> $TypeName { @@ -552,12 +585,30 @@ macro_rules! implement_page_frame_range { } } } - impl $TypeName

{ + impl $TypeName

{ + #[doc = "Creates an empty `" $TypeName "` that will always yield `None` when iterated."] + pub const fn empty() -> Self { + Self::new( + $chunk { number: 1, size: PhantomData }, + $chunk { number: 0, size: PhantomData }, + ) + } + #[doc = "Creates a new range of [`" $chunk "`]s that spans from `start` to `end`, both inclusive bounds."] pub const fn new(start: $chunk

, end: $chunk

) -> $TypeName

{ $TypeName(RangeInclusive::new(start, end)) } + #[doc = "Returns the starting [`" $chunk "`] in this `" $TypeName "`."] + pub const fn start(&self) -> &$chunk

{ + self.0.start() + } + + #[doc = "Returns the ending [`" $chunk "`] in this `" $TypeName "`."] + pub const fn end(&self) -> &$chunk

{ + self.0.end() + } + #[doc = "Returns the [`" $address "`] of the starting [`" $chunk "`] in this `" $TypeName "`."] pub const fn start_address(&self) -> $address { self.0.start().start_address() @@ -630,8 +681,7 @@ macro_rules! implement_page_frame_range { && (other.start() >= self.start()) && (other.end() <= self.end()) } - } - impl $TypeName

{ + #[doc = "Returns an inclusive `" $TypeName "` representing the [`" $chunk "`]s that overlap \ across this `" $TypeName "` and the given other `" $TypeName "`.\n\n \ If there is no overlap between the two ranges, `None` is returned."] @@ -644,13 +694,32 @@ macro_rules! implement_page_frame_range { None } } + + #[doc = "Converts this range of [`" $chunk "`]s into an identical 4K-sized range."] + pub fn [](self) -> $TypeName { + $TypeName::::new( + $chunk:: { number: self.0.start().number, size: PhantomData }, + $chunk:: { + // Add 1 because the end bound is inclusive; + // Subtract 1 because the 4K end bound should extend right up to the end + // of the 2M or 1G chunk, not one past it. + number: (self.0.end().number + (1 * P::NUM_4K_PAGES) - 1), + size: PhantomData, + }, + ) + } + } + impl Default for $TypeName

{ + fn default() -> Self { + Self::empty() + } } - impl fmt::Debug for $TypeName

{ + impl fmt::Debug for $TypeName

{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self.0) } } - impl Deref for $TypeName

{ + impl Deref for $TypeName

{ type Target = RangeInclusive<$chunk

>; fn deref(&self) -> &RangeInclusive<$chunk

> { &self.0 @@ -661,39 +730,29 @@ macro_rules! implement_page_frame_range { &mut self.0 } } - impl IntoIterator for $TypeName

{ + impl IntoIterator for &'_ $TypeName

{ type Item = $chunk

; type IntoIter = RangeInclusiveIterator<$chunk

>; fn into_iter(self) -> Self::IntoIter { self.0.iter() } } - - - #[doc = "A `" $TypeName "` that implements `Copy`."] - #[derive(Clone, Copy)] - pub struct [] { - start: $chunk

, - end: $chunk

, - } - impl From<$TypeName

> for []

{ - fn from(r: $TypeName

) -> Self { - Self { start: *r.start(), end: *r.end() } - } - } - impl From<[]

> for $TypeName

{ - fn from(cr: []

) -> Self { - Self::new(cr.start, cr.end) + impl IntoIterator for $TypeName

{ + type Item = $chunk

; + type IntoIter = RangeInclusiveIterator<$chunk

>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter() } } + impl From<$TypeName> for $TypeName { fn from(r: $TypeName) -> Self { - Self::new($chunk::from(*r.start()), $chunk::from(*r.end())) + r.[]() } } impl From<$TypeName> for $TypeName { fn from(r: $TypeName) -> Self { - Self::new($chunk::from(*r.start()), $chunk::from(*r.end())) + r.[]() } } impl TryFrom<$TypeName> for $TypeName { diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index cdb476c768..7c15ec9f6e 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -217,6 +217,11 @@ impl fmt::Debug for AllocatedPages { write!(f, "AllocatedPages({:?})", self.pages) } } +impl Default for AllocatedPages { + fn default() -> Self { + Self::empty() + } +} impl AllocatedPages { /// Returns an empty AllocatedPages object that performs no page allocation. @@ -228,32 +233,32 @@ impl AllocatedPages { } /// Returns the starting `VirtualAddress` in this range of pages. - pub fn start_address(&self) -> VirtualAddress { + pub const fn start_address(&self) -> VirtualAddress { self.pages.start_address() } /// Returns the size in bytes of this range of pages. - pub fn size_in_bytes(&self) -> usize { + pub const fn size_in_bytes(&self) -> usize { self.pages.size_in_bytes() } /// Returns the size in number of pages of this range of pages. - pub fn size_in_pages(&self) -> usize { + pub const fn size_in_pages(&self) -> usize { self.pages.size_in_pages() } /// Returns the starting `Page` in this range of pages. - pub fn start(&self) -> &Page { + pub const fn start(&self) -> &Page { self.pages.start() } /// Returns the ending `Page` (inclusive) in this range of pages. - pub fn end(&self) -> &Page { + pub const fn end(&self) -> &Page { self.pages.end() } /// Returns a reference to the inner `PageRange`, which is cloneable/iterable. - pub fn range(&self) -> &PageRange { + pub const fn range(&self) -> &PageRange { &self.pages } diff --git a/kernel/page_table_entry/src/lib.rs b/kernel/page_table_entry/src/lib.rs index 2606ed3039..07bb703d69 100644 --- a/kernel/page_table_entry/src/lib.rs +++ b/kernel/page_table_entry/src/lib.rs @@ -13,7 +13,7 @@ #![no_std] use core::ops::Deref; -use memory_structs::{Frame, FrameRange, PhysicalAddress}; +use memory_structs::{Frame, FrameRange, PhysicalAddress, PageSize}; use zerocopy::FromBytes; use frame_allocator::AllocatedFrame; use pte_flags::{PteFlagsArch, PTE_FRAME_MASK}; @@ -90,7 +90,7 @@ impl PageTableEntry { /// This is the actual mapping action that informs the MMU of a new mapping. /// /// Note: this performs no checks about the current value of this page table entry. - pub fn set_entry(&mut self, frame: AllocatedFrame, flags: PteFlagsArch) { + pub fn set_entry(&mut self, frame: AllocatedFrame

, flags: PteFlagsArch) { self.0 = (frame.start_address().value() as u64) | flags.bits(); } From 029b538523dd02f0a4d4efe574d412c88a4d4157 Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Wed, 8 Nov 2023 13:17:25 -0800 Subject: [PATCH 22/25] Parameterize `AllocatedPages` with `P: PageSize` (#1072) * This is another small step in the effort to support huge pages. * 4K-sized pages are always the default, so no other code has to change yet. * We always specify `

` or `` parameters even when not needed, just for the sake of explicit clarity. * Just like the frame allocator, the page allocator only deals with 4K-sized page chunks. Huge pages can be obtained by attempting to convert a range of 4K-sized pages into a range of huge 2M or 1G sized pages. --- kernel/page_allocator/src/lib.rs | 119 ++++++++++++++++--------------- 1 file changed, 62 insertions(+), 57 deletions(-) diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index 7c15ec9f6e..558c8d7d28 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -32,7 +32,7 @@ mod static_array_rb_tree; use core::{borrow::Borrow, cmp::{Ordering, max, min}, fmt, ops::{Deref, DerefMut}}; use kernel_config::memory::*; -use memory_structs::{VirtualAddress, Page, PageRange}; +use memory_structs::{VirtualAddress, Page, PageRange, PageSize, Page4K, Page2M, Page1G}; use spin::{Mutex, Once}; use static_array_rb_tree::*; @@ -140,7 +140,7 @@ pub fn init(end_vaddr_of_low_designated_region: VirtualAddress) -> Result<(), &' } -/// A range of contiguous pages. +/// A range of contiguous 4K-sized pages. /// /// # Ordering and Equality /// @@ -154,11 +154,11 @@ pub fn init(end_vaddr_of_low_designated_region: VirtualAddress) -> Result<(), &' #[derive(Debug, Clone, Eq)] struct Chunk { /// The Pages covered by this chunk, an inclusive range. - pages: PageRange, + pages: PageRange, } impl Chunk { - fn as_allocated_pages(&self) -> AllocatedPages { - AllocatedPages { + fn as_allocated_pages(&self) -> AllocatedPages { + AllocatedPages:: { pages: self.pages.clone(), } } @@ -166,13 +166,13 @@ impl Chunk { /// Returns a new `Chunk` with an empty range of pages. fn empty() -> Chunk { Chunk { - pages: PageRange::empty(), + pages: PageRange::::empty(), } } } impl Deref for Chunk { - type Target = PageRange; - fn deref(&self) -> &PageRange { + type Target = PageRange; + fn deref(&self) -> &PageRange { &self.pages } } @@ -191,8 +191,8 @@ impl PartialEq for Chunk { self.pages.start() == other.pages.start() } } -impl Borrow for &'_ Chunk { - fn borrow(&self) -> &Page { +impl Borrow> for &'_ Chunk { + fn borrow(&self) -> &Page { self.pages.start() } } @@ -205,30 +205,32 @@ impl Borrow for &'_ Chunk { /// /// This object represents ownership of the allocated virtual pages; /// if this object falls out of scope, its allocated pages will be auto-deallocated upon drop. -pub struct AllocatedPages { - pages: PageRange, +pub struct AllocatedPages { + pages: PageRange

, } // AllocatedPages must not be Cloneable, and it must not expose its inner pages as mutable. -assert_not_impl_any!(AllocatedPages: DerefMut, Clone); +assert_not_impl_any!(AllocatedPages: DerefMut, Clone); +assert_not_impl_any!(AllocatedPages: DerefMut, Clone); +assert_not_impl_any!(AllocatedPages: DerefMut, Clone); -impl fmt::Debug for AllocatedPages { +impl fmt::Debug for AllocatedPages

{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "AllocatedPages({:?})", self.pages) } } -impl Default for AllocatedPages { - fn default() -> Self { +impl Default for AllocatedPages

{ + fn default() -> AllocatedPages

{ Self::empty() } } -impl AllocatedPages { +impl AllocatedPages

{ /// Returns an empty AllocatedPages object that performs no page allocation. /// Can be used as a placeholder, but will not permit any real usage. - pub const fn empty() -> AllocatedPages { + pub const fn empty() -> AllocatedPages

{ AllocatedPages { - pages: PageRange::empty() + pages: PageRange::

::empty() } } @@ -248,17 +250,17 @@ impl AllocatedPages { } /// Returns the starting `Page` in this range of pages. - pub const fn start(&self) -> &Page { + pub const fn start(&self) -> &Page

{ self.pages.start() } /// Returns the ending `Page` (inclusive) in this range of pages. - pub const fn end(&self) -> &Page { + pub const fn end(&self) -> &Page

{ self.pages.end() } /// Returns a reference to the inner `PageRange`, which is cloneable/iterable. - pub const fn range(&self) -> &PageRange { + pub const fn range(&self) -> &PageRange

{ &self.pages } @@ -294,12 +296,12 @@ impl AllocatedPages { /// that is, `self.end` must equal `ap.start`. /// If this condition is met, `self` is modified and `Ok(())` is returned, /// otherwise `Err(ap)` is returned. - pub fn merge(&mut self, ap: AllocatedPages) -> Result<(), AllocatedPages> { + pub fn merge(&mut self, ap: AllocatedPages

) -> Result<(), AllocatedPages

> { // make sure the pages are contiguous if *ap.start() != (*self.end() + 1) { return Err(ap); } - self.pages = PageRange::new(*self.start(), *ap.end()); + self.pages = PageRange::

::new(*self.start(), *ap.end()); // ensure the now-merged AllocatedPages doesn't run its drop handler and free its pages. core::mem::forget(ap); Ok(()) @@ -317,22 +319,25 @@ impl AllocatedPages { /// Returns an `Err` containing this `AllocatedPages` if `at_page` is otherwise out of bounds. /// /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at - pub fn split(self, at_page: Page) -> Result<(AllocatedPages, AllocatedPages), AllocatedPages> { + pub fn split( + self, + at_page: Page

, + ) -> Result<(AllocatedPages

, AllocatedPages

), AllocatedPages

> { let end_of_first = at_page - 1; let (first, second) = if at_page == *self.start() && at_page <= *self.end() { - let first = PageRange::empty(); - let second = PageRange::new(at_page, *self.end()); + let first = PageRange::

::empty(); + let second = PageRange::

::new(at_page, *self.end()); (first, second) } else if at_page == (*self.end() + 1) && end_of_first >= *self.start() { - let first = PageRange::new(*self.start(), *self.end()); - let second = PageRange::empty(); + let first = PageRange::

::new(*self.start(), *self.end()); + let second = PageRange::

::empty(); (first, second) } else if at_page > *self.start() && end_of_first <= *self.end() { - let first = PageRange::new(*self.start(), end_of_first); - let second = PageRange::new(at_page, *self.end()); + let first = PageRange::

::new(*self.start(), end_of_first); + let second = PageRange::

::new(at_page, *self.end()); (first, second) } else { @@ -342,19 +347,19 @@ impl AllocatedPages { // ensure the original AllocatedPages doesn't run its drop handler and free its pages. core::mem::forget(self); Ok(( - AllocatedPages { pages: first }, - AllocatedPages { pages: second }, + AllocatedPages::

{ pages: first }, + AllocatedPages::

{ pages: second }, )) } } -impl Drop for AllocatedPages { +impl Drop for AllocatedPages

{ fn drop(&mut self) { if self.size_in_pages() == 0 { return; } // trace!("page_allocator: deallocating {:?}", self); let chunk = Chunk { - pages: self.pages.clone(), + pages: self.pages.clone().into_4k_pages(), }; let mut list = FREE_PAGE_LIST.lock(); match &mut list.0 { @@ -452,11 +457,11 @@ impl<'list> Drop for DeferredAllocAction<'list> { #[derive(Debug)] pub enum AllocationError { /// The requested address was not free: it was already allocated, or is outside the range of this allocator. - AddressNotFree(Page, usize), + AddressNotFree(Page, usize), /// The address space was full, or there was not a large-enough chunk /// or enough remaining chunks (within the given `PageRange`, if any) /// that could satisfy the requested allocation size. - OutOfAddressSpace(usize, Option), + OutOfAddressSpace(usize, Option>), /// The allocator has not yet been initialized. NotInitialized, } @@ -476,9 +481,9 @@ impl From for &'static str { /// `requested_page` to `requested_page + num_pages`. fn find_specific_chunk( list: &mut StaticArrayRBTree, - requested_page: Page, + requested_page: Page, num_pages: usize -) -> Result<(AllocatedPages, DeferredAllocAction<'static>), AllocationError> { +) -> Result<(AllocatedPages, DeferredAllocAction<'static>), AllocationError> { // The end page is an inclusive bound, hence the -1. Parentheses are needed to avoid overflow. let requested_end_page = requested_page + (num_pages - 1); @@ -548,12 +553,12 @@ fn find_specific_chunk( fn find_any_chunk( list: &mut StaticArrayRBTree, num_pages: usize, - within_range: Option<&PageRange>, + within_range: Option<&PageRange>, alignment_4k_pages: usize, ) -> Result<(AllocatedPages, DeferredAllocAction<'static>), AllocationError> { let designated_low_end = DESIGNATED_PAGES_LOW_END.get() .ok_or(AllocationError::NotInitialized)?; - let full_range = PageRange::new(*designated_low_end + 1, DESIGNATED_PAGES_HIGH_START - 1); + let full_range = PageRange::::new(*designated_low_end + 1, DESIGNATED_PAGES_HIGH_START - 1); let range = within_range.unwrap_or(&full_range); // During the first pass, we only search within the given range. @@ -705,11 +710,11 @@ fn find_any_chunk( /// This function breaks up that chunk into multiple ones and returns an `AllocatedPages` /// from (part of) that chunk, ranging from `start_page` to `start_page + num_pages`. fn adjust_chosen_chunk( - start_page: Page, + start_page: Page, num_pages: usize, chosen_chunk: &Chunk, mut chosen_chunk_ref: ValueRefMut, -) -> Result<(AllocatedPages, DeferredAllocAction<'static>), AllocationError> { +) -> Result<(AllocatedPages, DeferredAllocAction<'static>), AllocationError> { // The new allocated chunk might start in the middle of an existing chunk, // so we need to break up that existing chunk into 3 possible chunks: before, newly-allocated, and after. @@ -718,20 +723,20 @@ fn adjust_chosen_chunk( // an overlapping duplicate Chunk at either the very minimum or the very maximum of the address space. let new_allocation = Chunk { // The end page is an inclusive bound, hence the -1. Parentheses are needed to avoid overflow. - pages: PageRange::new(start_page, start_page + (num_pages - 1)), + pages: PageRange::::new(start_page, start_page + (num_pages - 1)), }; let before = if start_page == MIN_PAGE { None } else { Some(Chunk { - pages: PageRange::new(*chosen_chunk.start(), *new_allocation.start() - 1), + pages: PageRange::::new(*chosen_chunk.start(), *new_allocation.start() - 1), }) }; let after = if new_allocation.end() == &MAX_PAGE { None } else { Some(Chunk { - pages: PageRange::new(*new_allocation.end() + 1, *chosen_chunk.end()), + pages: PageRange::::new(*new_allocation.end() + 1, *chosen_chunk.end()), }) }; @@ -770,7 +775,7 @@ pub enum AllocationRequest<'r> { /// Note: alignment is specified in number of 4KiB pages, not number of bytes. AlignedTo { alignment_4k_pages: usize }, /// The allocated pages can be located anywhere within the given range. - WithinRange(&'r PageRange), + WithinRange(&'r PageRange), /// The allocated pages can be located at any virtual address /// and have no special alignment requirements beyond a single page. Any, @@ -801,7 +806,7 @@ pub enum AllocationRequest<'r> { pub fn allocate_pages_deferred( request: AllocationRequest, num_pages: usize, -) -> Result<(AllocatedPages, DeferredAllocAction<'static>), &'static str> { +) -> Result<(AllocatedPages, DeferredAllocAction<'static>), &'static str> { if num_pages == 0 { warn!("PageAllocator: requested an allocation of 0 pages... stupid!"); return Err("cannot allocate zero pages"); @@ -839,7 +844,7 @@ pub fn allocate_pages_deferred( pub fn allocate_pages_by_bytes_deferred( request: AllocationRequest, num_bytes: usize, -) -> Result<(AllocatedPages, DeferredAllocAction<'static>), &'static str> { +) -> Result<(AllocatedPages, DeferredAllocAction<'static>), &'static str> { let actual_num_bytes = if let AllocationRequest::AtVirtualAddress(vaddr) = request { num_bytes + (vaddr.value() % PAGE_SIZE) } else { @@ -853,7 +858,7 @@ pub fn allocate_pages_by_bytes_deferred( /// Allocates the given number of pages with no constraints on the starting virtual address. /// /// See [`allocate_pages_deferred()`](fn.allocate_pages_deferred.html) for more details. -pub fn allocate_pages(num_pages: usize) -> Option { +pub fn allocate_pages(num_pages: usize) -> Option> { allocate_pages_deferred(AllocationRequest::Any, num_pages) .map(|(ap, _action)| ap) .ok() @@ -865,7 +870,7 @@ pub fn allocate_pages(num_pages: usize) -> Option { /// /// This function still allocates whole pages by rounding up the number of bytes. /// See [`allocate_pages_deferred()`](fn.allocate_pages_deferred.html) for more details. -pub fn allocate_pages_by_bytes(num_bytes: usize) -> Option { +pub fn allocate_pages_by_bytes(num_bytes: usize) -> Option> { allocate_pages_by_bytes_deferred(AllocationRequest::Any, num_bytes) .map(|(ap, _action)| ap) .ok() @@ -876,7 +881,7 @@ pub fn allocate_pages_by_bytes(num_bytes: usize) -> Option { /// /// This function still allocates whole pages by rounding up the number of bytes. /// See [`allocate_pages_deferred()`](fn.allocate_pages_deferred.html) for more details. -pub fn allocate_pages_by_bytes_at(vaddr: VirtualAddress, num_bytes: usize) -> Result { +pub fn allocate_pages_by_bytes_at(vaddr: VirtualAddress, num_bytes: usize) -> Result, &'static str> { allocate_pages_by_bytes_deferred(AllocationRequest::AtVirtualAddress(vaddr), num_bytes) .map(|(ap, _action)| ap) } @@ -885,7 +890,7 @@ pub fn allocate_pages_by_bytes_at(vaddr: VirtualAddress, num_bytes: usize) -> Re /// Allocates the given number of pages starting at (inclusive of) the page containing the given `VirtualAddress`. /// /// See [`allocate_pages_deferred()`](fn.allocate_pages_deferred.html) for more details. -pub fn allocate_pages_at(vaddr: VirtualAddress, num_pages: usize) -> Result { +pub fn allocate_pages_at(vaddr: VirtualAddress, num_pages: usize) -> Result, &'static str> { allocate_pages_deferred(AllocationRequest::AtVirtualAddress(vaddr), num_pages) .map(|(ap, _action)| ap) } @@ -895,8 +900,8 @@ pub fn allocate_pages_at(vaddr: VirtualAddress, num_pages: usize) -> Result Result { + range: &PageRange, +) -> Result, &'static str> { allocate_pages_deferred(AllocationRequest::WithinRange(range), num_pages) .map(|(ap, _action)| ap) } @@ -906,8 +911,8 @@ pub fn allocate_pages_in_range( /// they must be within the given inclusive `range` of pages. pub fn allocate_pages_by_bytes_in_range( num_bytes: usize, - range: &PageRange, -) -> Result { + range: &PageRange, +) -> Result, &'static str> { allocate_pages_by_bytes_deferred(AllocationRequest::WithinRange(range), num_bytes) .map(|(ap, _action)| ap) } From af51e78477ec87f9552b51ccb976ff9ef0071ee9 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Wed, 15 Nov 2023 14:44:03 +1100 Subject: [PATCH 23/25] Add `.DS_Store` files to `.gitignore` (#1074) Signed-off-by: Klimenty Tsoutsman --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index b070989ee3..30a858d3b3 100644 --- a/.gitignore +++ b/.gitignore @@ -39,3 +39,6 @@ github_pages/doc/ # library lock files /libs/**/Cargo.lock + +# macOS directory stores +/**/.DS_Store From 562a39cf6c662738f7718473f6bbc010970dce53 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Wed, 15 Nov 2023 17:40:59 +1100 Subject: [PATCH 24/25] Remove old irrelevant items from `Cargo.toml` (#1075) Some editors may complain about non-existent directories. Signed-off-by: Klimenty Tsoutsman --- Cargo.toml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 250e103cc9..6ac877dcce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,9 +34,6 @@ exclude = [ "build", "target", - ## Exclude the `aarch64` directory, which is a WIP port that currently must be built separately. - "aarch64", - ## Exclude configuration, tools, scripts, etc "cfg", "compiler_plugins", @@ -45,7 +42,6 @@ exclude = [ ## Exclude old components "old_crates", - "userspace", ## Exclude third-party libs and ports for now. ## This allows Theseus crates that *are* included in a build to pull these @@ -73,6 +69,7 @@ exclude = [ ## Exclude benchmark-related crates in all builds; they must be explicitly included via features. ## TODO: move these to a specific "benches" folder so we can exclude that entire folder. "applications/bm", + "applications/channel_eval", "applications/heap_eval", "applications/rq_eval", "applications/scheduler_eval", @@ -80,6 +77,7 @@ exclude = [ ## Exclude application crates used for testing specific Theseus functionality. ## TODO: move these to a specific "tests" folder so we can exclude that entire folder. "applications/test_aligned_page_allocation", + "applications/test_async", "applications/test_backtrace", "applications/test_block_io", "applications/test_channel", @@ -89,13 +87,15 @@ exclude = [ "applications/test_libc", "applications/test_mlx5", "applications/test_panic", + "applications/test_preemption_counter", "applications/test_restartable", - "applications/test_serial_echo", + "applications/test_scheduler", "applications/test_std_fs", "applications/test_sync_block", + "applications/test_task_cancel", + "applications/test_tls", "applications/test_wait_queue", "applications/test_wasmtime", - "applications/tls_test", "applications/unwind_test", ] From 3f339e1a7fe2e852d6978315d1a8b93b8f81bb9f Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Tue, 21 Nov 2023 14:38:55 -0800 Subject: [PATCH 25/25] Clarify MacOS build instructions (#1077) Apparently Intel-based Macs also need `gmake`, not `make`; this is not specific to Apple silicon-based Macs (e.g., M1). --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 17ed68345c..ac748ca685 100644 --- a/README.md +++ b/README.md @@ -104,8 +104,10 @@ If you're on WSL, also do the following steps: ```sh rm -rf /tmp/theseus_tools_src ``` - - * If you're building Theseus on an M1-based Mac, you may need to use `gmake` instead of `make` for build commands. Alternatively, you can use `bash` with x86 emulation, but this is generally not necessary. + * **NOTE**: on MacOS, you need to run `gmake` instead of `make` for build commands (or you can simply create a shell alias). + * This is because HomeBrew installs its binaries in a way that doesn't conflict with built-in versions of system utilities. + + * *(This is typically not necessary)*: if you're building Theseus on older Apple Silicon (M1 chips), you may need to use `bash` with x86 emulation: ```sh arch -x86_64 bash # or another shell of your choice ```