use crate::arch::x86_64::is_int_enabled;
use crate::machine::interrupt::{irq_restore, irq_save};
use crate::proc::sync::*;
use crate::proc::task::*;
use alloc::collections::VecDeque;
use core::sync::atomic::{AtomicBool, Ordering};

/// The single global scheduler instance, wrapped in its L2Sync guard.
pub static GLOBAL_SCHEDULER: L2Sync<Scheduler> = L2Sync::new(Scheduler::new());
/// Set when a reschedule has been requested but not yet carried out.
pub static NEED_RESCHEDULE: AtomicBool = AtomicBool::new(false);

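/// Marks that a reschedule is pending; returns the previous value of the flag.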
#[inline(always)]
#[allow(non_snake_case)]
pub fn SET_NEED_RESCHEDULE() -> bool {
    NEED_RESCHEDULE.swap(true, Ordering::Relaxed)
}

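/// Clears the pending-reschedule flag; returns the previous value of the flag.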
#[inline(always)]
#[allow(non_snake_case)]
pub fn CLEAR_NEED_RESCHEDULE() -> bool {
    NEED_RESCHEDULE.swap(false, Ordering::Relaxed)
}

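/// A round-robin scheduler: runnable tasks wait in a FIFO run queue of task IDs.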
pub struct Scheduler {
    pub run_queue: VecDeque<TaskId>,
    pub need_schedule: bool,
}

impl Scheduler {
    pub const MIN_TASK_CAP: usize = 16;

    pub const fn new() -> Self {
        Self {
            run_queue: VecDeque::new(),
            need_schedule: false,
        }
    }

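    /// Adds a task to the back of the run queue.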
    pub fn insert_task(&mut self, tid: TaskId) {
        self.run_queue.push_back(tid);
    }

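    /// Removes a task from the run queue; not implemented yet.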
    pub fn try_remove(&mut self, _tid: TaskId) {
        todo!("not implemented");
    }

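    /// Reschedules only if NEED_RESCHEDULE is set; the flag is cleared atomically
    /// before switching. Must be called with interrupts enabled.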
    pub unsafe fn try_reschedule() {
        debug_assert!(is_int_enabled());
        // Atomically consume the flag: only proceed if it was actually set.
        let r = NEED_RESCHEDULE.compare_exchange(
            true,
            false,
            Ordering::Relaxed,
            Ordering::Relaxed,
        );
        if r.is_err() {
            // The flag was not set: nothing to do.
            return;
        }
        Self::do_schedule();
    }

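    /// Picks the next task from the run queue and switches to it. The current
    /// task is re-queued at the back if it is still runnable; interrupts are
    /// disabled only while the run queue is being touched.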
    pub unsafe fn do_schedule() {
        let me = Task::current().unwrap();
        let next_task;
        let next_tid;
        {
            // Manipulate the run queue with interrupts disabled.
            let r = irq_save();
            let sched = GLOBAL_SCHEDULER.get_ref_mut_unguarded();
            if sched.run_queue.is_empty() && me.state == TaskState::Run {
                // Nothing else to run and the current task may keep going.
                irq_restore(r);
                return;
            }
            next_tid = sched.run_queue.pop_front().expect("no runnable task");
            next_task = next_tid.get_task_ref_mut();
            debug_assert_eq!(next_task.state, TaskState::Run);
            if me.state == TaskState::Run {
                // The current task is still runnable: put it at the back of the queue.
                sched.run_queue.push_back(me.taskid());
            }
            irq_restore(r);
        }
        if me.taskid() == next_task.taskid() {
            // The queue handed back the current task; no switch needed.
            return;
        }
        unsafe {
            context_swap(
                &(me.context) as *const _ as u64,
                &(next_task.context) as *const _ as u64,
            );
        }
    }

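    /// Voluntarily gives up the CPU: enters the L2 synchronization level, runs
    /// the scheduler, and leaves L2 again afterwards.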
    pub fn yield_cpu() {
        debug_assert!(is_int_enabled());
        ENTER_L2();
        unsafe {
            Self::do_schedule();
        }
        LEAVE_L2();
    }

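    /// Starts scheduling: pops the first task off the run queue and switches to
    /// its context. Intended to be called once at boot; it does not return.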
    pub unsafe fn kickoff() {
        let irq = irq_save();
        let sched = GLOBAL_SCHEDULER.get_ref_mut_unguarded();
        let tid = sched
            .run_queue
            .pop_front()
            .expect("run queue empty, can't start");
        let first_task = tid.get_task_ref_mut();
        irq_restore(irq);
        // Enter L2 before switching to the first task's context.
        ENTER_L2();
        unsafe {
            context_swap_to(&(first_task.context) as *const _ as u64);
        }
    }
}
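
// Rough usage sketch (an assumption about the surrounding boot code, not part of
// this module): tasks get queued first, then `kickoff` hands the CPU over, e.g.
//
//     let irq = irq_save();
//     GLOBAL_SCHEDULER.get_ref_mut_unguarded().insert_task(tid);
//     irq_restore(irq);
//     unsafe { Scheduler::kickoff() };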