From 7ffdfae3732527f4e0cbd3c39e745a8e0a16eba5 Mon Sep 17 00:00:00 2001
From: Klimenty Tsoutsman
Date: Sat, 9 Sep 2023 02:01:00 +1000
Subject: [PATCH] Cleanup `scheduler.rs`

Signed-off-by: Klimenty Tsoutsman
---
 kernel/scheduler_epoch/src/lib.rs    |  1 -
 kernel/scheduler_priority/src/lib.rs |  3 +--
 kernel/task/src/scheduler.rs         | 34 +++++++++++++++++++++-------
 3 files changed, 27 insertions(+), 11 deletions(-)

diff --git a/kernel/scheduler_epoch/src/lib.rs b/kernel/scheduler_epoch/src/lib.rs
index d1ff29d838..3baa9016f0 100644
--- a/kernel/scheduler_epoch/src/lib.rs
+++ b/kernel/scheduler_epoch/src/lib.rs
@@ -21,7 +21,6 @@ const MAX_PRIORITY: u8 = 40;
 const DEFAULT_PRIORITY: u8 = 20;
 const INITIAL_TOKENS: usize = 10;
 
-#[derive(Clone)]
 pub struct Scheduler {
     idle_task: TaskRef,
     queue: VecDeque<EpochTaskRef>,
diff --git a/kernel/scheduler_priority/src/lib.rs b/kernel/scheduler_priority/src/lib.rs
index b07643d6d7..9e237f7b0d 100644
--- a/kernel/scheduler_priority/src/lib.rs
+++ b/kernel/scheduler_priority/src/lib.rs
@@ -12,7 +12,6 @@ use time::Instant;
 
 const DEFAULT_PRIORITY: u8 = 0;
 
-#[derive(Clone)]
 pub struct Scheduler {
     idle_task: TaskRef,
     queue: BinaryHeap<PriorityTaskRef>,
@@ -64,7 +63,7 @@ impl task::scheduler::Scheduler for Scheduler {
         self.queue
             .retain(|priority_task| priority_task.task != *task);
         let new_len = self.queue.len();
-        // We should have at most removed one task from the run queue.
+        // We should have removed at most one task from the run queue.
         debug_assert!(
             old_len - new_len < 2,
             "difference between run queue lengths was: {}",
diff --git a/kernel/task/src/scheduler.rs b/kernel/task/src/scheduler.rs
index cefe7e18cd..e48050855c 100644
--- a/kernel/task/src/scheduler.rs
+++ b/kernel/task/src/scheduler.rs
@@ -11,6 +11,10 @@ use crate::TaskRef;
 ///
 /// This is primarily used for spawning tasks, either to find the least busy CPU
 /// or spawn a task pinned to a particular CPU.
+///
+/// The outer mutex does not need to be preemption-safe, because it is never
+/// accessed from `schedule`. In fact, ideally it would be a blocking mutex, but
+/// that leads to circular dependencies.
 static SCHEDULERS: Mutex<Vec<(CpuId, Arc<ConcurrentScheduler>)>> = Mutex::new(Vec::new());
 
 /// A reference to the current CPUs scheduler.
@@ -119,7 +123,6 @@ pub fn add_task(task: TaskRef) {
         }
     }
 
-    // TODO
     locked[least_busy_index.unwrap()].1.lock().add(task);
 }
 
@@ -178,13 +181,20 @@ pub trait Scheduler: Send + Sync + 'static {
     /// Removes a task from the run queue.
     fn remove(&mut self, task: &TaskRef) -> bool;
 
+    /// Returns the scheduler as a priority scheduler, if it is one.
     fn as_priority_scheduler(&mut self) -> Option<&mut dyn PriorityScheduler>;
 
+    /// Clears the scheduler, returning all contained tasks as an iterator.
     fn drain(&mut self) -> Box<dyn Iterator<Item = TaskRef> + '_>;
 
+    /// Returns a list of contained tasks.
+    ///
+    /// The list should be considered out-of-date as soon as it is returned, but
+    /// can be useful as a heuristic.
     fn dump(&self) -> Vec<TaskRef>;
 }
 
+/// A task scheduler with some notion of priority.
 pub trait PriorityScheduler {
     /// Sets the priority of the given task.
     fn set_priority(&mut self, task: &TaskRef, priority: u8) -> bool;
@@ -276,14 +286,22 @@ impl<'a> Drop for PriorityInheritanceGuard<'a> {
     }
 }
 
-/// Returns a list of
+/// Returns the list of tasks running on each CPU.
 ///
-/// This should only be used for debugging.
+/// To avoid race conditions with migrating tasks, this function takes a lock
+/// over all system schedulers. This is incredibly disruptive and should be
+/// avoided at all costs.
 pub fn dump() -> Vec<(CpuId, Vec<TaskRef>)> {
     let schedulers = SCHEDULERS.lock().clone();
-    schedulers
-        .into_iter()
-        .map(|(cpu, scheduler)| (cpu, scheduler.lock().dump()))
-        // We want to eagerly collect so that all the locking predictably happens in this function.
-        .collect()
+    let locked = schedulers
+        .iter()
+        .map(|(cpu, scheduler)| (cpu, scheduler.lock()))
+        // We eagerly collect so that all schedulers are actually locked.
+        .collect::<Vec<_>>();
+    let result = locked
+        .iter()
+        .map(|(cpu, locked_scheduler)| (**cpu, locked_scheduler.dump()))
+        .collect();
+    drop(locked);
+    result
 }
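
Note on the new `dump` body: it uses a two-phase locking pattern. All per-CPU scheduler locks are acquired eagerly first, and only then is each run queue read, so no task can migrate between run queues halfway through the snapshot. Below is a minimal, self-contained sketch of that pattern, using `std::sync::Mutex` and integer stand-ins for the real `CpuId` and `TaskRef` types (the names and signatures here are illustrative assumptions, not Theseus code):

use std::sync::Mutex;

// Integer stand-ins for the real `CpuId` and `TaskRef` types (illustration only).
type CpuId = u32;
type TaskRef = usize;

// Snapshot every per-CPU run queue with no task migration in between.
fn dump(schedulers: &[(CpuId, Mutex<Vec<TaskRef>>)]) -> Vec<(CpuId, Vec<TaskRef>)> {
    // Phase 1: eagerly lock *all* schedulers before reading any of them.
    let locked: Vec<_> = schedulers
        .iter()
        .map(|(cpu, queue)| (*cpu, queue.lock().unwrap()))
        .collect();

    // Phase 2: read each queue while every lock is still held.
    let result = locked
        .iter()
        .map(|(cpu, guard)| (*cpu, (**guard).clone()))
        .collect();

    // All guards are dropped here, releasing every lock together.
    drop(locked);
    result
}

fn main() {
    let schedulers = [(0, Mutex::new(vec![1, 2])), (1, Mutex::new(vec![3]))];
    assert_eq!(dump(&schedulers), [(0, vec![1, 2]), (1, vec![3])]);
}

Collecting the guards into a `Vec` in phase 1 is what forces every lock to actually be held at once; a lazy iterator chain like the one the patch removes locks and releases one scheduler at a time, which lets a task migrate mid-snapshot and appear on two CPUs (or none).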