rustubs/mm/pma.rs
//! (deprecated) a simple stack-based 4K physical frame allocator.
//!
//! Advantages
//! 1. allocation and deallocation are O(1).
//! 2. can manage fragmented (non-consecutive) memory regions and doesn't
//! suffer from fragmentation because memory is fully virtualized.
//!
//! Limitations
//! 1. slow initialization: it needs to traverse all available pages and push
//! their addresses onto the stack one by one.
//! 2. can't hand out contiguous physical memory larger than 4K.
//! 3. the allocator stack itself requires contiguous space (it's an array) and
//! has a higher storage overhead: every page requires 8 bytes for its
//! address, that is `8/4096 == 1/512`.
//! 4. (kinda?) conflicts with the kmalloc/vmalloc design in e.g. the Linux
//! kernel: kmalloc manages the __identity mapped__ virtual memory in the
//! kernel address space, which is `0xffff800000000000 + 64G` in rustubs. In
//! other words, kmalloc effectively manages physical memory, and this
//! allocator only hands out single 4K frames, so it can't serve as a kmalloc
//! backend.
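//!
//! A minimal usage sketch (not a compilable doctest; `free_start`/`free_end`
//! stand for a free physical region taken from the boot memory map):
//!
//! ```ignore
//! let mut pma = PageStackAllocator::new();
//! // feed a free physical region into the allocator, one 4K page at a time
//! let inserted = pma.insert_range(&(free_start..free_end));
//! // hand out a single 4K frame ...
//! let frame = pma.alloc_page().expect("out of physical frames");
//! // ... and give it back later
//! assert!(pma.free_page(frame));
//! ```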

use crate::defs::*;
use core::ops::Range;
use core::slice;
// disabled for now
// lazy_static! {
// 	pub static ref GLOBAL_PMA: Mutex<pma::PageStackAllocator> =
// 		Mutex::new(pma::PageStackAllocator::new());
// }

/// There should only be one global instance of this.
pub struct PageStackAllocator {
	page_stack: &'static mut [u64],
	size: usize,
	head: usize,
}

#[allow(dead_code)]
impl PageStackAllocator {
	// covering 4GiB physical memory of 4K frames
	const STACK_SIZE: usize = 0x100000;

	pub fn new() -> Self {
		let ps = Self {
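			// backing storage: the (presumably linker-reserved) region at the
			// ___FREE_PAGE_STACK__ symbol, accessed via its P2V mapping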
			page_stack: unsafe {
				slice::from_raw_parts_mut(
					P2V(ExternSyms::___FREE_PAGE_STACK__ as u64).unwrap()
						as *mut u64,
					Self::STACK_SIZE,
				)
			},
			size: Self::STACK_SIZE,
			head: 0,
		};
		return ps;
	}

	/// push an addr onto the free page stack
	/// MUST be atomic (i.e. externally synchronized) or bad things happen...
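	///
	/// A sketch of the intended call pattern, assuming a lock-protected global
	/// instance like the (currently disabled) `GLOBAL_PMA` above:
	///
	/// ```ignore
	/// // give the 4K frame at physical address `pa` back to the allocator;
	/// // a spinlock-style `lock()` returning the guard directly is assumed
	/// assert!(GLOBAL_PMA.lock().free_page(pa));
	/// ```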
	pub fn free_page(&mut self, addr: u64) -> bool {
		if self.head >= self.size {
			return false;
		}
		self.page_stack[self.head] = addr;
		self.head += 1;
		return true;
	}

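	/// pop a free 4K frame (its physical address) off the stack, or `None` if
	/// the allocator has run out of pages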
	pub fn alloc_page(&mut self) -> Option<u64> {
		if self.head == 0 {
			return None;
		}
		self.head -= 1;
		Some(self.page_stack[self.head])
	}

	/// insert all 4K pages that lie completely within the range into the free
	/// page stack; the start address is rounded up to a 4K boundary. Returns
	/// the number of pages inserted.
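	///
	/// A hypothetical example of the rounding, assuming `pma` is a fresh
	/// `PageStackAllocator`:
	///
	/// ```ignore
	/// // 0x1234 is rounded up to 0x2000; pages 0x2000, 0x3000 and 0x4000 fit
	/// // entirely inside the range, so 3 pages are inserted
	/// assert_eq!(pma.insert_range(&(0x1234..0x5000)), 3);
	/// ```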
	pub fn insert_range(&mut self, r: &Range<u64>) -> u64 {
		let mut inserted = 0;
		let mut page = roundup_4k(r.start);
		// only take pages that fit entirely inside the range: stop as soon as
		// a page would reach past the (exclusive) end
		while page + 0x1000 <= r.end {
			if !self.free_page(page) {
				break;
			}
			inserted += 1;
			page += 0x1000;
		}
		return inserted;
	}
}