// rustubs/proc/sched.rs

use crate::arch::x86_64::is_int_enabled;
use crate::machine::interrupt::{irq_restore, irq_save};
use crate::proc::sync::*;
use crate::proc::task::*;
use alloc::collections::VecDeque;
use core::sync::atomic::AtomicBool;
use core::sync::atomic::Ordering;

/// The global scheduler instance, synchronized at the epilogue (L2) level.
pub static GLOBAL_SCHEDULER: L2Sync<Scheduler> = L2Sync::new(Scheduler::new());
/// A global flag indicating whether reschedule is required.
pub static NEED_RESCHEDULE: AtomicBool = AtomicBool::new(false);

/// set NEED_RESCHEDULE to true regardless of its current value; return the
/// previous state.
#[inline(always)]
#[allow(non_snake_case)]
pub fn SET_NEED_RESCHEDULE() -> bool {
	NEED_RESCHEDULE.swap(true, Ordering::Relaxed)
}

/// set NEED_RESCHEDULE to false regardless of its current value; return the
/// previous state.
#[inline(always)]
#[allow(non_snake_case)]
pub fn CLEAR_NEED_RESCHEDULE() -> bool {
	NEED_RESCHEDULE.swap(false, Ordering::Relaxed)
}
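
// A hedged usage sketch: a hypothetical interrupt epilogue (not part of
// this module) requests a reschedule by setting the flag;
// [Scheduler::try_reschedule] later consumes it:
//
//     fn timer_epilogue() {
//         SET_NEED_RESCHEDULE();
//         // when the epilogue level is left, try_reschedule() observes
//         // the flag, clears it and calls do_schedule().
//     }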

pub struct Scheduler {
	pub run_queue: VecDeque<TaskId>,
	pub need_schedule: bool,
}

impl Scheduler {
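	/// Minimum run-queue capacity (presumably reserved on first real use;
	/// [VecDeque::with_capacity] is not const, so the const [Self::new]
	/// cannot pre-allocate).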
	pub const MIN_TASK_CAP: usize = 16;
	pub const fn new() -> Self {
		return Self {
			run_queue: VecDeque::new(),
			need_schedule: false,
		};
	}

	// TODO: should we reject inserting a task that is already queued?
	pub fn insert_task(&mut self, tid: TaskId) {
		self.run_queue.push_back(tid);
	}
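
	// A hedged usage sketch, mirroring how [Self::do_schedule] accesses the
	// queue (`tid` stands for a TaskId obtained from proc::task):
	//
	//     let irq = irq_save();
	//     GLOBAL_SCHEDULER.get_ref_mut_unguarded().insert_task(tid);
	//     irq_restore(irq);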

	pub fn try_remove(&mut self, _tid: TaskId) {
		todo!("not implemented");
	}

	/// Unsafe because this must be called at a linearization point on the
	/// epilogue level (L2). It checks (and consumes) the NEED_RESCHEDULE
	/// flag before rescheduling.
	pub unsafe fn try_reschedule() {
		// this assert doesn't check that you own L2, but it's at least a
		// sanity check.
		debug_assert!(is_int_enabled());
		// TODO maybe refine memory ordering here
		let r = NEED_RESCHEDULE.compare_exchange(
			true,
			false,
			Ordering::Relaxed,
			Ordering::Relaxed,
		);
		// on success the previous value is necessarily `true`, so an Err
		// simply means no reschedule was requested.
		if r.is_err() {
			return;
		}
		Self::do_schedule();
	}
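
	// A hedged sketch of the intended call site: a hypothetical
	// epilogue-exit path would call this while still on L2, right before
	// leaving the epilogue level:
	//
	//     unsafe { Scheduler::try_reschedule() };
	//     LEAVE_L2();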

	/// do_schedule is only called from the epilogue level, so we don't need
	/// to lock here. For cooperative scheduling, call [Self::yield_cpu]
	/// instead.
	pub unsafe fn do_schedule() {
		let me = Task::current().unwrap();
		let next_task;
		let next_tid;
		{
			let r = irq_save();
			// begin L3 critical section
			// make sure we drop the mutable borrow before doing context swap
			let sched = GLOBAL_SCHEDULER.get_ref_mut_unguarded();
			if sched.run_queue.is_empty() && me.state == TaskState::Run {
				// I'm the only one, just return;
				irq_restore(r);
				return;
			}
			next_tid = sched.run_queue.pop_front().expect("no runnable task");
			next_task = next_tid.get_task_ref_mut();
			debug_assert_eq!(next_task.state, TaskState::Run);
			if me.state == TaskState::Run {
				sched.run_queue.push_back(me.taskid());
			}
			// end L3 critical section
			irq_restore(r);
		}
		if me.taskid() == next_task.taskid() {
			return;
		}
		unsafe {
			context_swap(
				&(me.context) as *const _ as u64,
				&(next_task.context) as *const _ as u64,
			);
		}
	}

	/// Guards [Self::do_schedule] and makes sure it is also sequentialized
	/// at L2. Must not be called in interrupt context.
	pub fn yield_cpu() {
		debug_assert!(is_int_enabled());
		ENTER_L2();
		unsafe {
			Self::do_schedule();
		}
		LEAVE_L2();
	}
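
	// A hedged usage sketch: a cooperative task hands over the CPU in its
	// main loop (`busy_work` is a hypothetical placeholder):
	//
	//     loop {
	//         busy_work();
	//         Scheduler::yield_cpu();
	//     }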

	/// Like [Self::do_schedule], but there is no running context to save:
	/// this starts the very first task.
	pub unsafe fn kickoff() {
		let irq = irq_save();
		// must not lock the GLOBAL_SCHEDULER here because we never return.
		// admittedly, the LEAVE_L2 call in the task entry code logically
		// releases the GLOBAL_SCHEDULER, but relying on that semantically
		// would be too weird.
		let sched = GLOBAL_SCHEDULER.get_ref_mut_unguarded();
		let tid = sched
			.run_queue
			.pop_front()
			.expect("run queue empty, can't start");
		let first_task = tid.get_task_ref_mut();
		irq_restore(irq);
		// kickoff simulates a do_schedule, so we need to enter L2 here.
		// new tasks must leave L2 explicitly on their first run.
		ENTER_L2();
		unsafe {
			context_swap_to(&(first_task.context) as *const _ as u64);
		}
	}
}
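
// A hedged sketch of boot-time bring-up, assuming a hypothetical
// `create_task` helper (the real task-creation API lives in proc::task):
//
//     pub fn start_scheduling() -> ! {
//         let irq = irq_save();
//         let sched = GLOBAL_SCHEDULER.get_ref_mut_unguarded();
//         sched.insert_task(create_task(idle_entry));
//         sched.insert_task(create_task(worker_entry));
//         irq_restore(irq);
//         unsafe { Scheduler::kickoff() };
//         unreachable!("kickoff does not return");
//     }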