rustubs/proc/sched.rs

use crate::arch::x86_64::is_int_enabled;
use crate::machine::interrupt::{irq_restore, irq_save};
use crate::proc::sync::*;
use crate::proc::task::*;
use alloc::collections::VecDeque;
use core::sync::atomic::{AtomicBool, Ordering};

pub static GLOBAL_SCHEDULER: L2Sync<Scheduler> = L2Sync::new(Scheduler::new());
/// A global flag indicating whether a reschedule is required.
pub static NEED_RESCHEDULE: AtomicBool = AtomicBool::new(false);

/// Set NEED_RESCHEDULE to true regardless of its value; return the previous
/// state.
#[inline(always)]
#[allow(non_snake_case)]
pub fn SET_NEED_RESCHEDULE() -> bool {
	NEED_RESCHEDULE.swap(true, Ordering::Relaxed)
}

/// Set NEED_RESCHEDULE to false regardless of its value; return the previous
/// state.
#[inline(always)]
#[allow(non_snake_case)]
pub fn CLEAR_NEED_RESCHEDULE() -> bool {
	NEED_RESCHEDULE.swap(false, Ordering::Relaxed)
}
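
// A minimal usage sketch of the flag protocol (hypothetical handler names,
// not part of this module): an interrupt prologue requests a reschedule,
// and the epilogue level consumes the flag later:
//
//     fn timer_prologue() {
//         SET_NEED_RESCHEDULE();
//     }
//
//     fn timer_epilogue() {
//         // linearization point on L2: consumes NEED_RESCHEDULE if set
//         unsafe { Scheduler::try_reschedule() };
//     }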

pub struct Scheduler {
	pub run_queue: VecDeque<TaskId>,
	pub need_schedule: bool,
}

impl Scheduler {
	pub const MIN_TASK_CAP: usize = 16;
	pub const fn new() -> Self {
		return Self {
			run_queue: VecDeque::new(),
			need_schedule: false,
		};
	}

	// TODO: maybe reject inserting a task that is already in the run queue?
	pub fn insert_task(&mut self, tid: TaskId) {
		self.run_queue.push_back(tid);
	}

	pub fn try_remove(&mut self, _tid: TaskId) {
		todo!("not implemented");
	}

	/// Unsafe because this must be called at a linearization point on the
	/// epilogue level (L2); it checks (and consumes) the NEED_RESCHEDULE flag.
	pub unsafe fn try_reschedule() {
		// this assert doesn't prove that we own the L2, but it's at least a
		// sanity check.
		debug_assert!(is_int_enabled());
		// TODO maybe refine memory ordering here
		let r = NEED_RESCHEDULE.compare_exchange(
			true,
			false,
			Ordering::Relaxed,
			Ordering::Relaxed,
		);
		// the exchange succeeds only if the flag was set; it has now been
		// cleared, so go on and schedule.
		if r.is_err() {
			return;
		}
		Self::do_schedule();
	}

	/// do_schedule is only called from the epilogue level, so we don't need
	/// to lock here. For cooperative scheduling call [Self::yield_cpu]
	/// instead.
	pub unsafe fn do_schedule() {
		let me = Task::current().unwrap();
		let next_task;
		let next_tid;
		{
			let r = irq_save();
			// begin L3 critical section;
			// make sure we drop the mutable borrow before doing context swap
			let sched = GLOBAL_SCHEDULER.get_ref_mut_unguarded();
			if sched.run_queue.is_empty() && me.state == TaskState::Run {
				// I'm the only runnable task, just return
				irq_restore(r);
				return;
			}
			next_tid = sched.run_queue.pop_front().expect("no runnable task");
			next_task = next_tid.get_task_ref_mut();
			debug_assert_eq!(next_task.state, TaskState::Run);
			if me.state == TaskState::Run {
				sched.run_queue.push_back(me.taskid());
			}
			// end L3 critical section
			irq_restore(r);
		}
		if me.taskid() == next_task.taskid() {
			return;
		}
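		// Swap register contexts: save the current state into me.context and
		// load next_task.context. Execution returns here only when another
		// task later swaps back to this one.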
		unsafe {
			context_swap(
				&(me.context) as *const _ as u64,
				&(next_task.context) as *const _ as u64,
			);
		}
	}

	/// Guards do_schedule and makes sure it is also sequentialized at L2.
	/// Must not be called in interrupt context.
	pub fn yield_cpu() {
		debug_assert!(is_int_enabled());
		ENTER_L2();
		unsafe {
			Self::do_schedule();
		}
		LEAVE_L2();
	}
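
	// A minimal cooperative-scheduling sketch (hypothetical task body; it
	// must run in task context with interrupts enabled):
	//
	//     fn busy_task_entry() {
	//         loop {
	//             // ... do some work ...
	//             Scheduler::yield_cpu();
	//         }
	//     }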

	/// Like do_schedule, but there is no running context to save.
	pub unsafe fn kickoff() {
		let irq = irq_save();
		// must not lock the GLOBAL_SCHEDULER here because we never return.
		// well, the "LEAVE_L2" call in the task entries logically releases
		// the GLOBAL_SCHEDULER, but semantically that's too weird.
		let sched = GLOBAL_SCHEDULER.get_ref_mut_unguarded();
		let tid = sched
			.run_queue
			.pop_front()
			.expect("run queue empty, can't start");
		let first_task = tid.get_task_ref_mut();
		irq_restore(irq);
		// kickoff simulates a do_schedule, so we need to enter L2 here.
		// new tasks must leave L2 explicitly on their first run.
		ENTER_L2();
		unsafe {
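			// load the first task's saved context; unlike context_swap there
			// is no current context to save, so this call never returns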
			context_swap_to(&(first_task.context) as *const _ as u64);
		}
	}
}
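
// A minimal boot-time sketch (hypothetical startup code; the task creation
// API is assumed, not defined in this file):
//
//     pub fn init_scheduling() {
//         let tid = Task::create(first_task_entry); // assumed constructor
//         GLOBAL_SCHEDULER.get_ref_mut_unguarded().insert_task(tid);
//         unsafe { Scheduler::kickoff() }; // never returns
//     }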