rustubs/mm.rs

//! memory management unit

mod pma;
pub mod vmm;

use crate::arch::x86_64::paging::{get_root, Pagetable};
use crate::defs::*;
use crate::machine::multiboot;
use alloc::alloc::{alloc, alloc_zeroed, dealloc, Layout};
use alloc::vec::Vec;
use core::arch::asm;
use core::ops::Range;
use lazy_static::lazy_static;
use linked_list_allocator::LockedHeap;
use spin::Mutex;

#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();

lazy_static! {
	pub static ref KSTACK_ALLOCATOR: Mutex<KStackAllocator> =
		Mutex::new(KStackAllocator::new());
}

/// half measure: simply initialize the linked list allocator
pub fn init() {
	let mbi = multiboot::get_mb_info().unwrap();
	let mmapinfo = unsafe { mbi.get_mmap() }.unwrap();
	let buf_start = mmapinfo.mmap_addr;
	let buf_len = mmapinfo.mmap_length;
	let buf_end = buf_start + buf_len;
	let mut curr = buf_start as u64;
	// initialize the heap allocator with the largest physical memory block
	let mut largest_phy_range: Option<Range<u64>> = None;
	while curr < buf_end as u64 {
		let mblock = unsafe { &*(curr as *const multiboot::MultibootMmap) };
		// the mmap entry's `size` field does not count the size field itself,
		// hence the extra 4 bytes when advancing to the next entry
		curr += mblock.size as u64;
		curr += 4;
		if mblock.mtype != multiboot::MultibootMmap::MTYPE_RAM {
			continue;
		}
		// skip blocks that lie entirely below the kernel image
		if mblock.get_end() <= ExternSyms::___KERNEL_PM_START__ as u64 {
			continue;
		}
		let mut r = mblock.get_range();
		// if the block contains the kernel image, only use the part above it
		if r.contains(&(ExternSyms::___KERNEL_PM_END__ as u64)) {
			assert!(
				r.contains(&(ExternSyms::___KERNEL_PM_START__ as u64)),
				"FATAL: kernel physical map crosses physical blocks, how?"
			);
			r.start = ExternSyms::___KERNEL_PM_END__ as u64;
		}
		// TODO this is pretty ugly...
		match largest_phy_range {
			None => largest_phy_range = Some(r),
			Some(ref lr) => {
				if (r.end - r.start) > (lr.end - lr.start) {
					largest_phy_range = Some(r);
				}
			}
		}
	}

	let pr = &largest_phy_range.expect("Can't find usable physical block");
	assert!((pr.end - pr.start) >= Mem::MIN_PHY_MEM, "TOO LITTLE RAM ...");
	// initialize the heap allocator on the identity map
	unsafe {
		ALLOCATOR.lock().init(
			P2V(pr.start).unwrap() as *mut u8,
			(pr.end - pr.start) as usize,
		);
	}
	println!(
		"[init] mm: heap alloc initialized @ {:#X} - {:#X}",
		P2V(pr.start).unwrap(),
		P2V(pr.end).unwrap()
	);
}
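
// Once `init()` has run, the `#[global_allocator]` above is backed by real
// memory, so ordinary `alloc` types work in kernel code. A minimal sketch
// (hypothetical caller, not part of this module):
//
//     mm::init();
//     let mut v = alloc::vec::Vec::new();
//     v.push(42u64); // served by the LockedHeap global allocator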

/// wrapper around the global allocator with caching
pub struct KStackAllocator {
	pool: Vec<u64>,
}

/// TODO: the heap allocator is primitive atm and may fail to allocate a new
/// kernel stack (64K here) due to fragmentation. It may be a good idea to
/// reserve some memory during system init so that we can at least always
/// allocate a few kernel stacks.
impl KStackAllocator {
	const KSTACK_ALLOC_POOL_CAP: usize = 16;
	const KSTACK_LAYOUT: Layout = unsafe {
		Layout::from_size_align_unchecked(
			Mem::KERNEL_STACK_SIZE as usize,
			Mem::KERNEL_STACK_SIZE as usize,
		)
	};

	pub fn new() -> Self {
		let p = Vec::with_capacity(Self::KSTACK_ALLOC_POOL_CAP);
		Self { pool: p }
	}

	/// unsafe because the underlying allocation may fail (same as populate)
	pub unsafe fn allocate(&mut self) -> u64 {
		if let Some(addr) = self.pool.pop() {
			return addr;
		} else {
			return alloc(Self::KSTACK_LAYOUT) as u64;
		}
	}

	/// unsafe because you must give back something this allocator gave you;
	/// otherwise you break the kernel heap allocator.
	pub unsafe fn free(&mut self, addr: u64) {
		if self.pool.len() < Self::KSTACK_ALLOC_POOL_CAP {
			self.pool.push(addr);
		} else {
			dealloc(addr as *mut u8, Self::KSTACK_LAYOUT);
		}
	}

	/// unsafe because this could OOM if you stress the allocator too much
	/// (although unlikely)
	pub unsafe fn populate(&mut self) {
		for _ in 0..Self::KSTACK_ALLOC_POOL_CAP {
			self.pool.push(alloc(Self::KSTACK_LAYOUT) as u64);
		}
	}
}
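
// Usage sketch (hypothetical caller, e.g. task creation code): kernel stacks
// come from the global KSTACK_ALLOCATOR, and every address obtained from
// `allocate` should eventually be returned via `free`.
//
//     let stack = unsafe { KSTACK_ALLOCATOR.lock().allocate() };
//     // ... use [stack, stack + Mem::KERNEL_STACK_SIZE) as a kernel stack ...
//     unsafe { KSTACK_ALLOCATOR.lock().free(stack) };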

const LAYOUT_4K_ALIGNED: Layout =
	unsafe { Layout::from_size_align_unchecked(0x1000, 0x1000) };
/// allocate 4k aligned memory.
/// TODO create a buffer (like in KStackAllocator) for performance.
pub fn allocate_4k() -> u64 {
	return unsafe { alloc(LAYOUT_4K_ALIGNED) } as u64;
}
pub fn allocate_4k_zeroed() -> u64 {
	return unsafe { alloc_zeroed(LAYOUT_4K_ALIGNED) } as u64;
}
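
// A minimal sketch (hypothetical use, assuming the paging code wants a zeroed,
// 4K-aligned frame for a fresh page table page). Note that `alloc_zeroed`
// returns a null pointer on failure, so the result should be checked:
//
//     let pt_page = allocate_4k_zeroed();
//     assert!(pt_page != 0, "heap allocator returned null");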

/// invalidate a single page mapping in tlb
pub fn invlpg(va: u64) { unsafe { asm!("invlpg [{0}]", in(reg) va) }; }

/// flush the whole tlb
pub fn flush_tlb() {
	// reloading CR3 with its current value flushes all (non-global) TLB entries
	unsafe {
		asm!(
			"push rax",
			"mov rax, cr3",
			"mov cr3, rax",
			"pop rax"
		)
	}
}
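
// Note: after changing a single mapping it is usually enough (and cheaper) to
// invalidate just that page instead of reloading CR3, e.g. (sketch):
//
//     invlpg(va); // after editing the PTE that maps `va`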

/// drop the low memory mapping from the current pagetable by removing the
/// first entry from the pml4 table (which maps 0~512G). The PDP table itself
/// is unchanged, wasting 4K of memory, but there is nothing we can do for now
/// since the heap allocator doesn't manage this address.
///
/// after calling this function, the system can no longer directly access
/// memory by physical address
pub unsafe fn drop_init_mapping() {
	let pt: &mut Pagetable = unsafe { &mut *(get_root() as *mut Pagetable) };
	pt.entries[0].set_unused();
	flush_tlb();
}
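
// Hypothetical ordering during early boot (a sketch, not necessarily the
// actual boot path of this kernel): the identity mapping must only be dropped
// once all accesses go through the kernel's high virtual mappings.
//
//     mm::init();                         // heap usable from here on
//     // ... set up kernel mappings via mm::vmm ...
//     unsafe { mm::drop_init_mapping() }; // physical addresses unreachable after this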