rustubs/proc/task.rs

use crate::arch::x86_64::arch_regs::Context64;
use crate::arch::x86_64::{arch_regs, is_int_enabled};
use crate::mm::vmm::{VMArea, VMMan, VMPerms, VMType};
use crate::mm::KSTACK_ALLOCATOR;
use crate::proc::sched::GLOBAL_SCHEDULER;
use crate::proc::sync::bellringer::{BellRinger, Sleeper};
use crate::{defs::*, Scheduler};
use alloc::collections::VecDeque;
use alloc::string::String;
use core::ops::Range;
use core::ptr;
use core::str::FromStr;
/// currently only `kernel_stack` and `context` are important.
/// the task struct is placed at the starting addr (low addr) of the kernel
/// stack, therefore we can retrieve the task struct at any time by masking the
/// stack pointer.
/// NOTE: we assume all fields in [Task] are only modified by the task itself,
/// i.e. no task should modify another task's state. (this may change though, in
/// which case we will need some atomics)
/// TODO: the mm is a heap allocated object (vec of vmas), but the task struct
/// doesn't have a lifetime. Must clean up the memory used by the mm itself when
/// exiting a task.
#[repr(C)]
pub struct Task {
	pub magic: u64,
	pub pid: u32,
	/// note that this points to the stack bottom (low addr)
	pub kernel_stack: u64,
	pub mm: VMMan,
	// pub user_stack: u64,
	pub state: TaskState,
	pub context: arch_regs::Context64,
}

/// not to be confused with an integer TID: a TaskId both identifies a task and
/// __locates__ it, by wrapping the task struct's address. The reason why the
/// scheduler doesn't directly store `Box<Task>` (or alike) is that the smart
/// pointer types automatically drop the owned values when their lifetime ends.
/// For now I want to have manual control of when, where and how I drop the
/// Task, because there could be more plans than just freeing the memory.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct TaskId(u64);

impl TaskId {
	pub fn new(addr: u64) -> Self { Self(addr) }

	pub fn get_task_ref(&self) -> &Task {
		return unsafe { &*(self.0 as *mut Task) };
	}

	pub fn get_task_ref_mut(&self) -> &mut Task {
		return unsafe { &mut *(self.0 as *mut Task) };
	}
}
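
// A usage sketch: a TaskId round-trips the task struct's address, so the task
// can be recovered from the id as long as its kernel stack is still alive:
//
//     let tid = task.taskid();                    // wrap the task's address
//     let t: &mut Task = tid.get_task_ref_mut();  // recover the task later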

/// we currently don't differentiate between the running and ready states
/// because the scheduler pushes the next task to the back of the queue, i.e.
/// the running task is also "ready" in the run_queue
#[derive(Debug, PartialEq)]
pub enum TaskState {
	Run,
	Wait,
	Block,
	Dead,
	Eating,
	Purr,
	Meow,
	Angry,
}

70extern "C" {
71	pub fn context_swap(from_ctx: u64, to_ctx: u64);
72	pub fn context_swap_to(to_ctx: u64);
73}
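
// Presumably implemented in assembly: `from_ctx`/`to_ctx` are passed as raw
// u64 addresses of the tasks' [Context64] structs (see `context` in [Task] and
// [Task::prepare_context], which seeds the stack consumed by the first swap
// into a new task).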

// NOTE: the Task struct is manually placed on the stack, so new() or default()
// is not provided.
impl Task {
	/// unsafe because you have to make sure the stack pointer is valid
	/// i.e. allocated through KStackAllocator.
	#[inline(always)]
	unsafe fn settle_on_stack<'a>(stack_addr: u64, t: Task) -> &'a mut Task {
		ptr::write_volatile(stack_addr as *mut Task, t);
		return &mut *(stack_addr as *mut Task);
	}

	/// settle_on_stack and prepare_context must be called before switching to
	/// the task. TODO: combine them into one single API
	#[inline(always)]
	fn prepare_context(&mut self, entry: u64) {
		let mut sp = self.get_init_kernel_sp();
		unsafe {
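			// initial stack layout, relative to the aligned stack top
			// (a sketch, assuming context_swap restores rsp from the saved
			// context and then executes `ret`):
			//   top -  8 : 0      dummy return address in case `entry` returns
			//   top - 16 : entry  popped by the first `ret` into this task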
			sp -= 8;
			*(sp as *mut u64) = 0;
			sp -= 8;
			*(sp as *mut u64) = entry;
		}
		self.context.rsp = sp;
	}

	/// get the kernel stack top (high addr) to initialize the new task. Note
	/// that the stack pointer usually has alignment requirements; we use
	/// 8 bytes here.
	#[inline(always)]
	fn get_init_kernel_sp(&self) -> u64 {
		let mut sp = self.kernel_stack + Mem::KERNEL_STACK_SIZE;
		sp &= !0b111;
		sp
	}

	/// return a reference to the currently running task struct. Returns None
	/// if the magic number on the kernel stack is corrupted; this happens when
	/// 1. the task struct is not correctly put on the stack
	/// 2. trying to get the current of the initial task, which has no task
	///    struct on the stack
	/// 3. the stack is corrupted (due to e.g. stack overflow)
	///
	/// TODO: also add a canary at the end of the task struct and check it.
	pub fn current<'a>() -> Option<&'a mut Task> {
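		// masking the stack pointer gives the stack bottom (see the note on
		// [Task]), i.e. the address where settle_on_stack placed the task
		// struct.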
		let addr = arch_regs::get_sp() & !Mem::KERNEL_STACK_MASK;
		let t = unsafe { &mut *(addr as *mut Task) };
		if t.magic != Mem::KERNEL_STACK_TASK_MAGIC {
			return None;
		}
		return Some(t);
	}

	#[inline]
	pub fn taskid(&self) -> TaskId { TaskId::new(self as *const _ as u64) }

	/// a task may be present in multiple wait rooms; this is not possible in
	/// the current logic, but would be necessary for stuff like EPoll.
	/// Requires manual attention for synchronization.
	pub unsafe fn curr_wait_in(wait_room: &mut VecDeque<TaskId>) {
		let t = Task::current().unwrap();
		debug_assert_ne!(t.state, TaskState::Wait);
		t.state = TaskState::Wait;
		wait_room.push_back(t.taskid());
	}

	/// does not lock the GLOBAL_SCHEDULER; the caller is responsible for doing
	/// that, e.g. when calling task.wakeup() from the epilogue
	pub unsafe fn wakeup(&mut self) {
		if self.state != TaskState::Wait {
			// already awake. why? I don't know.
			return;
		}
		// TODO: make sure you don't put a task in the run queue more than once.
		self.state = TaskState::Run;
		let sched = GLOBAL_SCHEDULER.get_ref_mut_unguarded();
		sched.insert_task(self.taskid());
	}
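
	// A typical pairing with curr_wait_in (a sketch; locking the wait room and
	// the GLOBAL_SCHEDULER is the caller's responsibility, as noted above):
	//
	//     unsafe { Task::curr_wait_in(&mut wait_room) };
	//     Scheduler::yield_cpu();
	//     // ... later, e.g. from an epilogue, someone wakes it up:
	//     unsafe { wait_room.pop_front().unwrap().get_task_ref_mut().wakeup() };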

	pub fn nanosleep(&mut self, ns: u64) {
		debug_assert!(self.state == TaskState::Run);
		self.state = TaskState::Wait;
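		// check in with the BellRinger, which is expected to wake this task
		// up again (via wakeup()) once roughly `ns` nanoseconds have passed;
		// until then, give up the CPU.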
		BellRinger::check_in(Sleeper::new(self.taskid(), ns));
		debug_assert!(is_int_enabled());
		Scheduler::yield_cpu();
	}

	/// create a kernel thread; you need to add it to the scheduler run queue
	/// manually
	pub fn create_task(pid: u32, entry: u64) -> TaskId {
		let sp = unsafe { KSTACK_ALLOCATOR.lock().allocate() };
		let tid = TaskId::new(sp);
		println!("new task on {:#X}", sp);
		let nt = unsafe {
			Task::settle_on_stack(
				sp,
				Task {
					magic: Mem::KERNEL_STACK_TASK_MAGIC,
					pid,
					kernel_stack: sp,
					state: TaskState::Run,
					context: Context64::default(),
					mm: VMMan::new(),
				},
			)
		};
		// KERNEL ID MAPPING
		nt.mm.vmas.push(VMArea {
			vm_range: Range::<u64> {
				start: Mem::ID_MAP_START,
				end: Mem::ID_MAP_END,
			},
			tag: String::from_str("KERNEL IDMAP").unwrap(),
			user_perms: VMPerms::NONE,
			backing: VMType::ANOM,
		});
		// KERNEL
		nt.mm.vmas.push(VMArea {
			vm_range: Range::<u64> {
				start: Mem::KERNEL_OFFSET,
				end: Mem::KERNEL_OFFSET + 64 * Mem::G,
			},
			tag: String::from_str("KERNEL").unwrap(),
			user_perms: VMPerms::NONE,
			backing: VMType::ANOM,
		});
		// USER STACK
		nt.mm.vmas.push(VMArea {
			vm_range: Range::<u64> {
				start: Mem::USER_STACK_START,
				end: Mem::USER_STACK_START + Mem::USER_STACK_SIZE,
			},
			tag: String::from_str("USER STACK").unwrap(),
			user_perms: VMPerms::R | VMPerms::W,
			backing: VMType::ANOM,
		});
		nt.prepare_context(entry);
		tid
	}
}
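
// A usage sketch: create_task only settles the task on a fresh kernel stack;
// the caller still queues it by hand (`my_entry` is a placeholder for a kernel
// thread entry function, and, as with wakeup(), locking the scheduler is the
// caller's business):
//
//     let tid = Task::create_task(1, my_entry as u64);
//     GLOBAL_SCHEDULER.get_ref_mut_unguarded().insert_task(tid);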