mod pma;
pub mod vmm;

use crate::arch::x86_64::paging::{get_root, Pagetable};
use crate::defs::*;
use crate::machine::multiboot;
use alloc::alloc::{alloc, alloc_zeroed, dealloc, Layout};
use alloc::vec::Vec;
use core::arch::asm;
use core::ops::Range;
use lazy_static::lazy_static;
use linked_list_allocator::LockedHeap;
use spin::Mutex;

#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();

lazy_static! {
    pub static ref KSTACK_ALLOCATOR: Mutex<KStackAllocator> =
        Mutex::new(KStackAllocator::new());
}

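/// Initialize the kernel heap: parse the multiboot memory map, pick the
/// largest block of usable RAM (minus the kernel image itself) and hand it
/// to the global allocator.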
pub fn init() {
    let mbi = multiboot::get_mb_info().unwrap();
    let mmapinfo = unsafe { mbi.get_mmap() }.unwrap();
    let buf_start = mmapinfo.mmap_addr;
    let buf_len = mmapinfo.mmap_length;
    let buf_end = buf_start + buf_len;
    let mut curr = buf_start as u64;
    let mut largest_phy_range: Option<Range<u64>> = None;
    // walk the multiboot memory map and remember the largest usable RAM block
    loop {
        if curr >= buf_end as u64 {
            break;
        }
        let mblock = unsafe { &*(curr as *const multiboot::MultibootMmap) };
        // the `size` field of a multiboot mmap entry does not include the
        // size field itself, hence the extra 4 bytes
        curr += mblock.size as u64;
        curr += 4;
        if mblock.mtype != multiboot::MultibootMmap::MTYPE_RAM {
            continue;
        }
        if mblock.get_end() <= ExternSyms::___KERNEL_PM_START__ as u64 {
            continue;
        }
        let mut r = mblock.get_range();
        // if this block contains the kernel image, cut off everything up to
        // the end of the kernel's physical mapping
        if r.contains(&(ExternSyms::___KERNEL_PM_END__ as u64)) {
            assert!(
                r.contains(&(ExternSyms::___KERNEL_PM_START__ as u64)),
                "FATAL: kernel physical map crosses physical blocks, how?"
            );
            r.start = ExternSyms::___KERNEL_PM_END__ as u64;
        }
        match largest_phy_range {
            None => largest_phy_range = Some(r),
            Some(ref lr) => {
                if (r.end - r.start) > (lr.end - lr.start) {
                    largest_phy_range = Some(r);
                }
            }
        }
    }

    let pr = &largest_phy_range.expect("Can't find usable physical block");
    assert!((pr.end - pr.start) >= Mem::MIN_PHY_MEM, "TOO LITTLE RAM ...");
    unsafe {
        ALLOCATOR.lock().init(
            P2V(pr.start).unwrap() as *mut u8,
            (pr.end - pr.start) as usize,
        );
    }
    println!(
        "[init] mm: heap alloc initialized @ {:#X} - {:#X}",
        P2V(pr.start).unwrap(),
        P2V(pr.end).unwrap()
    );
}

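/// Allocator for kernel stacks. Stacks come from the global heap allocator,
/// and a small pool of freed stacks is kept around so they can be reused
/// without going through the heap every time.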
pub struct KStackAllocator {
    pool: Vec<u64>,
}

impl KStackAllocator {
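    // Cache at most 16 freed stacks; the stack size doubles as the alignment,
    // so every kernel stack is aligned to its own size.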
    const KSTACK_ALLOC_POOL_CAP: usize = 16;
    const KSTACK_LAYOUT: Layout = unsafe {
        Layout::from_size_align_unchecked(
            Mem::KERNEL_STACK_SIZE as usize,
            Mem::KERNEL_STACK_SIZE as usize,
        )
    };

    pub fn new() -> Self {
        let p = Vec::with_capacity(Self::KSTACK_ALLOC_POOL_CAP);
        Self { pool: p }
    }

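    /// Hand out a kernel stack: pop one from the pool if available, otherwise
    /// allocate a fresh one from the heap. Returns the lowest address of the
    /// stack memory.
    ///
    /// # Safety
    /// The returned memory is uninitialized.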
    pub unsafe fn allocate(&mut self) -> u64 {
        match self.pool.pop() {
            Some(addr) => addr,
            None => alloc(Self::KSTACK_LAYOUT) as u64,
        }
    }

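    /// Take back a kernel stack: cache it in the pool if there is room,
    /// otherwise return it to the heap.
    ///
    /// # Safety
    /// `addr` must have been obtained from [`Self::allocate`].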
    pub unsafe fn free(&mut self, addr: u64) {
        if self.pool.len() < Self::KSTACK_ALLOC_POOL_CAP {
            self.pool.push(addr);
        } else {
            dealloc(addr as *mut u8, Self::KSTACK_LAYOUT);
        }
    }

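    /// Pre-fill the pool with freshly allocated stacks. Note that this pushes
    /// `KSTACK_ALLOC_POOL_CAP` stacks regardless of how many are already
    /// cached.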
    pub unsafe fn populate(&mut self) {
        for _ in 0..Self::KSTACK_ALLOC_POOL_CAP {
            self.pool.push(alloc(Self::KSTACK_LAYOUT) as u64);
        }
    }
}

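// 4 KiB size with 4 KiB alignment: page-granular allocations from the kernel heap.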
const LAYOUT_4K_ALIGNED: Layout =
    unsafe { Layout::from_size_align_unchecked(0x1000, 0x1000) };
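/// Allocate one 4 KiB-aligned page from the kernel heap; the contents are
/// uninitialized.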
pub fn allocate_4k() -> u64 {
    unsafe { alloc(LAYOUT_4K_ALIGNED) } as u64
}
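/// Allocate one 4 KiB-aligned, zero-filled page from the kernel heap.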
pub fn allocate_4k_zeroed() -> u64 {
    unsafe { alloc_zeroed(LAYOUT_4K_ALIGNED) } as u64
}

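/// Invalidate the TLB entry for the page containing the virtual address `va`.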
pub fn invlpg(va: u64) {
    unsafe { asm!("invlpg [{0}]", in(reg) va) };
}

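/// Flush the TLB by reloading CR3 (note that global pages are not flushed
/// this way).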
pub fn flush_tlb() {
    unsafe {
        asm!(
            "push rax",
            "mov rax, cr3",
            "mov cr3, rax",
            "pop rax",
        )
    }
}

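/// Drop the initial low mapping by clearing the first entry of the root page
/// table and flushing the TLB.
///
/// # Safety
/// Nothing may still rely on that mapping (e.g. code or data reached through
/// low, identity-mapped addresses) when this is called.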
pub unsafe fn drop_init_mapping() {
    let pt: &mut Pagetable = unsafe { &mut *(get_root() as *mut Pagetable) };
    pt.entries[0].set_unused();
    flush_tlb();
}