rustubs/proc/sync.rs

//! The sync module defines the OOStuBS prologue/epilogue synchronization
//! model for interrupt handling and preemptive scheduling. Read
//! `docs/sync_model.md` for details.
#![doc = include_str!("../../docs/sync_model.md")]
pub mod bellringer;
pub mod irq;
pub mod semaphore;
use crate::arch::x86_64::is_int_enabled;
use crate::black_magic::Void;
use core::cell::SyncUnsafeCell;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicBool, Ordering};
pub use irq::*;
/// Indicates whether a task is currently running in L2 (the epilogue level).
/// Maybe make this an L3SyncCell as well.
static L2_AVAILABLE: AtomicBool = AtomicBool::new(true);
/// RAII lock for the global L2 level; the wrapped [`Void`] is a placeholder
/// and is never used.
static L2_GUARD: L2Sync<Void> = L2Sync::new(Void::new());

#[inline(always)]
#[allow(non_snake_case)]
pub fn IS_L2_AVAILABLE() -> bool {
	L2_AVAILABLE.load(Ordering::Relaxed)
}

#[allow(non_snake_case)]
#[inline(always)]
pub fn ENTER_L2() {
	let r = L2_AVAILABLE.compare_exchange(
		true,
		false,
		Ordering::Relaxed,
		Ordering::Relaxed,
	);
	debug_assert_eq!(r, Ok(true));
}

#[inline(always)]
#[allow(non_snake_case)]
pub fn LEAVE_L2() {
	let r = L2_AVAILABLE.compare_exchange(
		false,
		true,
		Ordering::Relaxed,
		Ordering::Relaxed,
	);
	debug_assert_eq!(r, Ok(false));
}
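
// A hedged sketch of how ENTER_L2/LEAVE_L2 pair up around epilogue
// processing; normally the pairing is done through [`L2Sync::lock`], and
// `process_epilogue` below is a hypothetical placeholder:
//
// if IS_L2_AVAILABLE() {
// 	ENTER_L2();
// 	process_epilogue();
// 	LEAVE_L2();
// } else {
// 	// L2 is occupied: enqueue the epilogue so the current holder
// 	// processes it before leaving L2.
// }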

/// Also drain the epilogue queue before actually leaving L2.
#[inline(always)]
#[allow(non_snake_case)]
pub fn LEAVE_L2_CLEAR_QUEUE() {
	todo!();
}

/// RAII guard for L2Sync objects
pub struct L2Guard<'a, T: 'a> {
	lock: &'a L2Sync<T>,
	// poison is implicit (using the L2_AVAILABLE flag)
}

impl<T> Deref for L2Guard<'_, T> {
	type Target = T;
	fn deref(&self) -> &T { unsafe { &*self.lock.data.get() } }
}

impl<T> DerefMut for L2Guard<'_, T> {
	fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.lock.data.get() } }
}

impl<T> Drop for L2Guard<'_, T> {
	fn drop(&mut self) { LEAVE_L2(); }
}

/// All L2Sync objects are guaranteed to be synchronized on the epilogue level.
pub struct L2Sync<T> {
	data: SyncUnsafeCell<T>,
}

impl<T> L2Sync<T> {
	pub const fn new(data: T) -> Self {
		Self { data: SyncUnsafeCell::new(data) }
	}
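	/// Enter the epilogue level (L2) and return an RAII guard; dropping the
	/// guard leaves L2 again. A minimal usage sketch (the `COUNT` static is a
	/// hypothetical example, not part of this module):
	///
	/// ```ignore
	/// static COUNT: L2Sync<u64> = L2Sync::new(0);
	/// {
	/// 	let mut c = COUNT.lock(); // ENTER_L2()
	/// 	*c += 1;
	/// } // guard dropped here: LEAVE_L2()
	/// ```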
	pub fn lock(&self) -> L2Guard<T> {
		ENTER_L2();
		L2Guard { lock: self }
	}

	/// This breaks synchronization: the caller is responsible for checking
	/// the global L2_AVAILABLE flag and for doing other work (such as
	/// relaying) when the epilogue level is occupied.
	pub unsafe fn get_ref_unguarded(&self) -> &T { &*self.data.get() }

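	/// Mutable variant of [`Self::get_ref_unguarded`]; the same caveats
	/// apply, plus the caller must guarantee exclusive access.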
	pub unsafe fn get_ref_mut_unguarded(&self) -> &mut T {
		&mut *self.data.get()
	}
}

/// L3Sync is like RefCell, but instead of counting references, it checks that
/// interrupts are disabled (e.g. for the epilogue queue).
///
/// TODO: implement reference counting to make sure the sync model is followed
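///
/// A minimal usage sketch (the `EPILOGUE_QUEUE` static, the `Queue` type and
/// the `epilogue` value are hypothetical placeholders):
///
/// ```ignore
/// static EPILOGUE_QUEUE: L3Sync<Queue> = L3Sync::new(Queue::new());
/// // interrupts must be disabled here, otherwise the debug assertions
/// // in the accessors fire:
/// EPILOGUE_QUEUE.l3_get_ref_mut().push(epilogue);
/// ```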
pub struct L3Sync<T> {
	data: SyncUnsafeCell<T>,
}

impl<T> L3Sync<T> {
	pub const fn new(data: T) -> Self {
		Self { data: SyncUnsafeCell::new(data) }
	}
	/// get a read-only reference to the protected data. It should be fine to
	/// take a read-only reference without masking interrupts, but reference
	/// counting is not implemented yet, so the accessor conservatively
	/// asserts that interrupts are disabled.
	pub fn l3_get_ref(&self) -> &T {
		debug_assert!(
			!is_int_enabled(),
			"trying to get a ref to L3 synced object with interrupt enabled"
		);
		unsafe { &*self.data.get() }
	}
	/// get a mutable reference to the protected data. Panics in debug builds
	/// if called with interrupts enabled.
	pub fn l3_get_ref_mut(&self) -> &mut T {
		debug_assert!(
			!is_int_enabled(),
			"trying to get a mut ref to L3 synced object with interrupt enabled"
		);
		unsafe { &mut *self.data.get() }
	}
	/// get a mutable reference without checking sync/borrow conditions.
	pub unsafe fn l3_get_ref_mut_unchecked(&self) -> &mut T {
		unsafe { &mut *self.data.get() }
	}
}