rustubs/proc/sync.rs

//! The sync module defines the OOStuBS prologue/epilogue synchronization
//! model for interrupt handling and preemptive scheduling. Read
//! `docs/sync_model.md` for details.
#![doc = include_str!("../../docs/sync_model.md")]
pub mod bellringer;
pub mod irq;
pub mod semaphore;
use crate::arch::x86_64::is_int_enabled;
use crate::black_magic::Void;
use core::cell::SyncUnsafeCell;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicBool, Ordering};
pub use irq::*;
/// Indicates whether a task is running in L2 (the epilogue level). Maybe
/// make it an L3SyncCell as well.
static L2_AVAILABLE: AtomicBool = AtomicBool::new(true);
/// RAII lock guard for the global L2 flag; the inner `Void` value is not to
/// be used.
static L2_GUARD: L2Sync<Void> = L2Sync::new(Void::new());

#[inline(always)]
#[allow(non_snake_case)]
pub fn IS_L2_AVAILABLE() -> bool {
	L2_AVAILABLE.load(Ordering::Relaxed)
}

/// Claim the epilogue level (L2). The caller must make sure L2 is actually
/// available, e.g. via [IS_L2_AVAILABLE]; this is only debug-asserted here.
#[allow(non_snake_case)]
#[inline(always)]
pub fn ENTER_L2() {
	let r = L2_AVAILABLE.compare_exchange(
		true,
		false,
		Ordering::Relaxed,
		Ordering::Relaxed,
	);
	debug_assert_eq!(r, Ok(true));
}

/// Release the epilogue level (L2). Must only be called while L2 is held;
/// this is only debug-asserted here.
#[inline(always)]
#[allow(non_snake_case)]
pub fn LEAVE_L2() {
	let r = L2_AVAILABLE.compare_exchange(
		false,
		true,
		Ordering::Relaxed,
		Ordering::Relaxed,
	);
	debug_assert_eq!(r, Ok(false));
}

/// Like [LEAVE_L2], but also drain the epilogue queue before actually
/// leaving the epilogue level.
#[inline(always)]
#[allow(non_snake_case)]
pub fn LEAVE_L2_CLEAR_QUEUE() {
	todo!();
}
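
// A minimal sketch (illustration only, compiled but never called) of the
// prologue/epilogue hand-off described in docs/sync_model.md: a prologue
// either claims L2 and runs its epilogue directly, or relays the epilogue
// to whoever currently holds L2. The epilogue body and the relay path are
// left as comments because the queue types live elsewhere.
fn _prologue_handoff_sketch() {
	// we are on L1 here: interrupts are masked, so check-then-enter on the
	// L2_AVAILABLE flag cannot race with another prologue on this CPU
	if IS_L2_AVAILABLE() {
		ENTER_L2();
		// ... run this interrupt's epilogue ...
		LEAVE_L2_CLEAR_QUEUE(); // also drains epilogues relayed meanwhile
	} else {
		// ... enqueue the epilogue for the current L2 holder (relaying) ...
	}
}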

/// RAII guard for L2Sync objects.
pub struct L2Guard<'a, T: 'a> {
	lock: &'a L2Sync<T>,
	// poisoning is implicit (via the L2_AVAILABLE flag)
}

impl<T> Deref for L2Guard<'_, T> {
	type Target = T;
	fn deref(&self) -> &T { unsafe { &*self.lock.data.get() } }
}

impl<T> DerefMut for L2Guard<'_, T> {
	fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.lock.data.get() } }
}

impl<T> Drop for L2Guard<'_, T> {
	fn drop(&mut self) { LEAVE_L2(); }
}

/// All L2Sync objects are guaranteed to be synchronized on the epilogue
/// level (L2).
pub struct L2Sync<T> {
	data: SyncUnsafeCell<T>,
}

impl<T> L2Sync<T> {
	pub const fn new(data: T) -> Self {
		Self { data: SyncUnsafeCell::new(data) }
	}
	pub fn lock(&self) -> L2Guard<T> {
		ENTER_L2();
		L2Guard { lock: self }
	}

	/// This breaks synchronization: the caller is responsible for checking
	/// the global L2_AVAILABLE flag and doing other work (like relaying)
	/// when the epilogue level is occupied.
	pub unsafe fn get_ref_unguarded(&self) -> &T { &*self.data.get() }

	pub unsafe fn get_ref_mut_unguarded(&self) -> &mut T {
		&mut *self.data.get()
	}
}
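
// Usage sketch for L2Sync (illustration only, never called): lock() claims
// the epilogue level via ENTER_L2, the guard dereferences to the protected
// data, and dropping the guard releases L2. `_EXAMPLE` is a made-up static,
// not part of the kernel.
static _EXAMPLE: L2Sync<u64> = L2Sync::new(0);
fn _l2sync_usage_sketch() {
	let mut guard = _EXAMPLE.lock(); // ENTER_L2: debug-asserts L2 was free
	*guard += 1; // mutate through DerefMut
} // guard dropped here -> LEAVE_L2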

/// L3Sync is like RefCell: instead of counting references, we check that
/// interrupts are disabled, e.g. for the epilogue queue.
///
/// TODO: implement reference counting to make sure the sync model is followed
pub struct L3Sync<T> {
	data: SyncUnsafeCell<T>,
}

impl<T> L3Sync<T> {
	pub const fn new(data: T) -> Self {
		Self { data: SyncUnsafeCell::new(data) }
	}
	/// Get a read-only reference to the protected data. It should be fine to
	/// get a read-only ref without masking interrupts, but we haven't
	/// implemented reference counting yet, so the same interrupt check
	/// applies here for now.
	pub fn l3_get_ref(&self) -> &T {
		debug_assert!(
			!is_int_enabled(),
			"trying to get a ref to L3 synced object with interrupt enabled"
		);
		unsafe { &*self.data.get() }
	}
	/// Get a mutable reference to the protected data. Panics (in debug
	/// builds) if called with interrupts enabled.
	pub fn l3_get_ref_mut(&self) -> &mut T {
		debug_assert!(
			!is_int_enabled(),
			"trying to get a mut ref to L3 synced object with interrupt enabled"
		);
		unsafe { &mut *self.data.get() }
	}
	/// Get a mutable reference without checking sync/borrow conditions.
	pub unsafe fn l3_get_ref_mut_unchecked(&self) -> &mut T {
		unsafe { &mut *self.data.get() }
	}
}
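
// Usage sketch for L3Sync (illustration only, never called): accesses must
// happen with interrupts disabled, e.g. in a prologue or another
// interrupt-masked critical section; the debug_asserts above enforce this.
// `_SLOTS` is a made-up static, not part of the kernel.
static _SLOTS: L3Sync<[u32; 4]> = L3Sync::new([0; 4]);
fn _l3sync_usage_sketch(val: u32) {
	// assuming the caller (e.g. a prologue) already masked interrupts,
	// the debug_assert in l3_get_ref_mut passes and this access is race-free
	let slots = _SLOTS.l3_get_ref_mut();
	slots[0] = val;
}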