use crate::arch::x86_64::arch_regs::Context64;
use crate::arch::x86_64::{arch_regs, is_int_enabled};
use crate::mm::vmm::{VMArea, VMMan, VMPerms, VMType};
use crate::mm::KSTACK_ALLOCATOR;
use crate::proc::sched::GLOBAL_SCHEDULER;
use crate::proc::sync::bellringer::{BellRinger, Sleeper};
use crate::{defs::*, Scheduler};
use alloc::collections::VecDeque;
use alloc::string::String;
use core::ops::Range;
use core::ptr;
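/// A kernel task. The `Task` struct lives at the base (lowest address) of
/// its own kernel stack: `settle_on_stack` writes it there and `current()`
/// recovers it by masking the stack pointer. The `magic` field lets
/// `current()` reject a stack that does not (yet) host a task.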
#[repr(C)]
pub struct Task {
    pub magic: u64,
    pub pid: u32,
    pub kernel_stack: u64,
    pub mm: VMMan,
    pub state: TaskState,
    pub context: arch_regs::Context64,
}

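/// An opaque task handle. Since every `Task` is stored at the base of its
/// kernel stack, the task's address doubles as a stable identifier.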
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct TaskId(u64);

impl TaskId {
    pub fn new(addr: u64) -> Self { Self(addr) }

    /// Dereferences the stored address. The caller must guarantee the
    /// task is still alive; nothing here checks the `magic` field.
    pub fn get_task_ref(&self) -> &Task {
        unsafe { &*(self.0 as *const Task) }
    }

    /// Like [`get_task_ref`](Self::get_task_ref), but mutable. The caller
    /// must also ensure no other reference to the task is live.
    pub fn get_task_ref_mut(&self) -> &mut Task {
        unsafe { &mut *(self.0 as *mut Task) }
    }
}

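/// Scheduling states. Only `Run` and `Wait` are exercised by the code in
/// this file; the remaining variants are not touched here.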
#[derive(Debug, PartialEq)]
pub enum TaskState {
    Run,
    Wait,
    Block,
    Dead,
    Eating,
    Purr,
    Meow,
    Angry,
}

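/// Context-switch primitives, presumably implemented in assembly. Both
/// take the *address* of a saved `Context64` as a plain `u64`. A minimal
/// usage sketch (illustrative; `prev_task` and `next_task` are assumed
/// `&mut Task` references, the real call sites live in the scheduler):
///
/// ```ignore
/// let from = &mut prev_task.context as *mut Context64 as u64;
/// let to = &next_task.context as *const Context64 as u64;
/// unsafe { context_swap(from, to) };
/// ```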
extern "C" {
    pub fn context_swap(from_ctx: u64, to_ctx: u64);
    pub fn context_swap_to(to_ctx: u64);
}

impl Task {
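    /// Writes `t` to the base of its kernel stack and returns a reference
    /// to the settled copy.
    ///
    /// # Safety
    ///
    /// `stack_addr` must point to the base of a freshly allocated kernel
    /// stack that is large enough to hold a `Task` plus the stack itself.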
    #[inline(always)]
    unsafe fn settle_on_stack<'a>(stack_addr: u64, t: Task) -> &'a mut Task {
        // Place the task header at the stack base; the volatile write
        // keeps the compiler from eliding or reordering it.
        ptr::write_volatile(stack_addr as *mut Task, t);
        &mut *(stack_addr as *mut Task)
    }

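    /// Builds the initial stack frame so that the first `context_swap`
    /// into this task pops `entry` as its return address. The zero slot
    /// above it serves as a (null) terminating return address should
    /// `entry` ever return.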
    #[inline(always)]
    fn prepare_context(&mut self, entry: u64) {
        let mut sp = self.get_init_kernel_sp();
        unsafe {
            // Null return address: a `ret` out of `entry` lands on 0.
            sp -= 8;
            *(sp as *mut u64) = 0;
            // `context_swap` will `ret` into `entry`.
            sp -= 8;
            *(sp as *mut u64) = entry;
        }
        self.context.rsp = sp;
    }

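    /// Returns the highest usable, 8-byte-aligned stack pointer of this
    /// task's kernel stack (the stack grows downward toward the `Task`
    /// header at the base).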
    #[inline(always)]
    fn get_init_kernel_sp(&self) -> u64 {
        let mut sp = self.kernel_stack + Mem::KERNEL_STACK_SIZE;
        // Align down to 8 bytes.
        sp &= !0b111;
        sp
    }

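    /// Returns the task owning the current kernel stack, found by masking
    /// the stack pointer down to the stack base and checking the magic
    /// value stored there. Returns `None` on a stack that hosts no task
    /// (e.g. during early boot). A sketch of typical use:
    ///
    /// ```ignore
    /// if let Some(t) = Task::current() {
    ///     println!("running task {}", t.pid);
    /// }
    /// ```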
    pub fn current<'a>() -> Option<&'a mut Task> {
        // Mask the stack pointer down to the stack base, where the Task
        // header was placed by `settle_on_stack`.
        let addr = arch_regs::get_sp() & !Mem::KERNEL_STACK_MASK;
        let t = unsafe { &mut *(addr as *mut Task) };
        if t.magic != Mem::KERNEL_STACK_TASK_MAGIC {
            return None;
        }
        Some(t)
    }

    #[inline]
    pub fn taskid(&self) -> TaskId { TaskId::new(self as *const _ as u64) }

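    /// Parks the current task in `wait_room`. The caller is responsible
    /// for synchronization (hence `unsafe`): presumably the lock guarding
    /// `wait_room` must be held, and a subsequent yield is expected.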
    pub unsafe fn curr_wait_in(wait_room: &mut VecDeque<TaskId>) {
        let t = Task::current().unwrap();
        debug_assert_ne!(t.state, TaskState::Wait);
        t.state = TaskState::Wait;
        wait_room.push_back(t.taskid());
    }

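    /// Moves a waiting task back onto the run queue. Unsafe because it
    /// grabs the global scheduler without a guard; the caller must ensure
    /// this cannot race (e.g. interrupts disabled).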
    pub unsafe fn wakeup(&mut self) {
        // Only tasks parked in Wait state can be woken.
        if self.state != TaskState::Wait {
            return;
        }
        self.state = TaskState::Run;
        let sched = GLOBAL_SCHEDULER.get_ref_mut_unguarded();
        sched.insert_task(self.taskid());
    }

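    /// Puts the current task to sleep for at least `ns` nanoseconds by
    /// checking it in with the `BellRinger` and yielding. Illustrative
    /// call:
    ///
    /// ```ignore
    /// // sleep roughly one millisecond
    /// Task::current().unwrap().nanosleep(1_000_000);
    /// ```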
    pub fn nanosleep(&mut self, ns: u64) {
        debug_assert!(self.state == TaskState::Run);
        self.state = TaskState::Wait;
        BellRinger::check_in(Sleeper::new(self.taskid(), ns));
        // The wakeup is interrupt driven; yielding with interrupts
        // disabled would sleep forever.
        debug_assert!(is_int_enabled());
        Scheduler::yield_cpu();
    }

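    /// Allocates a kernel stack, settles a new `Task` at its base, maps
    /// the default kernel and user-stack VM areas, and prepares the
    /// initial context so the task starts at `entry`. A hypothetical
    /// spawn (assuming `idle_loop` is a kernel function symbol):
    ///
    /// ```ignore
    /// let tid = Task::create_task(1, idle_loop as u64);
    /// // hypothetical: hand the new task to the scheduler
    /// unsafe { GLOBAL_SCHEDULER.get_ref_mut_unguarded().insert_task(tid) };
    /// ```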
    pub fn create_task(pid: u32, entry: u64) -> TaskId {
        let sp = unsafe { KSTACK_ALLOCATOR.lock().allocate() };
        let tid = TaskId::new(sp);
        println!("new task on {:#X}", sp);
        let nt = unsafe {
            Task::settle_on_stack(
                sp,
                Task {
                    magic: Mem::KERNEL_STACK_TASK_MAGIC,
                    pid,
                    kernel_stack: sp,
                    state: TaskState::Run,
                    context: Context64::default(),
                    mm: VMMan::new(),
                },
            )
        };
        // Identity-mapped physical memory.
        nt.mm.vmas.push(VMArea {
            vm_range: Range::<u64> {
                start: Mem::ID_MAP_START,
                end: Mem::ID_MAP_END,
            },
            tag: String::from("KERNEL IDMAP"),
            user_perms: VMPerms::NONE,
            backing: VMType::ANOM,
        });
        // The kernel mapping: a 64 GiB window above KERNEL_OFFSET.
        nt.mm.vmas.push(VMArea {
            vm_range: Range::<u64> {
                start: Mem::KERNEL_OFFSET,
                end: Mem::KERNEL_OFFSET + 64 * Mem::G,
            },
            tag: String::from("KERNEL"),
            user_perms: VMPerms::NONE,
            backing: VMType::ANOM,
        });
        // User stack, readable and writable from user mode.
        nt.mm.vmas.push(VMArea {
            vm_range: Range::<u64> {
                start: Mem::USER_STACK_START,
                end: Mem::USER_STACK_START + Mem::USER_STACK_SIZE,
            },
            tag: String::from("USER STACK"),
            user_perms: VMPerms::R | VMPerms::W,
            backing: VMType::ANOM,
        });
        nt.prepare_context(entry);
        tid
    }
}
213}