rustubs/arch/x86_64/paging.rs

pub mod fault;
pub mod pagetable;
use crate::defs;
use crate::defs::rounddown_4k;
use crate::defs::P2V;
use crate::io::*;
use crate::mm::allocate_4k_zeroed;
use crate::mm::vmm::VMArea;
use crate::mm::vmm::VMType;
use core::arch::asm;
use core::ops::Range;
use core::ptr;
pub use pagetable::*;
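/// Read the CR3 register, which holds the physical address of the currently
/// active root page table.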
#[inline]
pub fn get_cr3() -> u64 {
    let cr3: u64;
    unsafe { asm!("mov {}, cr3", out(reg) cr3) };
    cr3
}

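/// Kernel-virtual address of the current root page table (CR3 translated
/// through the physical-to-virtual mapping).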
#[inline]
pub fn get_root() -> u64 { P2V(get_cr3()).unwrap() }

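/// Map the pages covered by `vma` into the page table rooted at `pt_root`;
/// for file-backed areas, optionally copy the file contents into the newly
/// mapped range.
///
/// # Safety
/// `pt_root` must be the (kernel-virtual) address of a valid top-level page
/// table. When `do_copy` is set, the target range must be accessible through
/// the currently active address space, since the copy writes through the
/// virtual addresses directly.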
pub unsafe fn map_vma(pt_root: u64, vma: &VMArea, do_copy: bool) -> bool {
    let flags = PTEFlags::PRESENT | PTEFlags::WRITABLE | PTEFlags::USER;
    if !map_range(pt_root, &vma.vm_range, flags) {
        println!("failed to map range");
        return false;
    }
    if !do_copy {
        return true;
    }
    match vma.backing {
        VMType::ANOM => {
            // anonymous mapping: the freshly allocated pages are already
            // zeroed, nothing to copy
            return true;
        }
        VMType::FILE(f) => {
            let sz = (vma.vm_range.end - vma.vm_range.start) as usize;
            sprintln!("copy from {:p} to {:#X}", &f[0], vma.vm_range.start);
            unsafe {
                ptr::copy_nonoverlapping(
                    &f[0] as *const u8,
                    vma.vm_range.start as *mut u8,
                    sz,
                )
            }
            return true;
        }
        _ => {
            println!("unknown backing");
            return false;
        }
    }
}

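/// Map every 4 KiB page overlapping the range `r` into the page table rooted
/// at `pt_root`, backing each page with a newly allocated zeroed frame.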
pub fn map_range(pt_root: u64, r: &Range<u64>, flags: PTEFlags) -> bool {
    // align the start down so the whole range is covered page by page
    let mut va_aligned = rounddown_4k(r.start);
    while va_aligned < r.end {
        if !map_page(pt_root, va_aligned, flags) {
            println!("failed to map page @ {:#X}", va_aligned);
            return false;
        }
        va_aligned += defs::Mem::PAGE_SIZE;
    }
    return true;
}

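/// Map a single 4 KiB page at virtual address `va` into the page table rooted
/// at `pt_root`, allocating intermediate tables on demand. Panics if the page
/// is already mapped.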
pub fn map_page(pt_root: u64, va: u64, _flags: PTEFlags) -> bool {
    let pt = pt_root as *mut Pagetable;
    if !defs::is_aligned_4k(va) {
        println!("not aligned");
        return false;
    }
    let flags: u64 = _flags.bits();
    // indices into the four page-table levels for this virtual address
    let l4idx = pagetable::p4idx(va) as usize;
    let l3idx = pagetable::p3idx(va) as usize;
    let l2idx = pagetable::p2idx(va) as usize;
    let l1idx = pagetable::p1idx(va) as usize;
    // once a new table is allocated at one level, all lower levels must be
    // allocated as well
    let mut require_new = false;
    unsafe {
        let l4_ent = &mut (*pt).entries[l4idx];
        let l3_tbl: *mut Pagetable;
        if l4_ent.is_unused() || require_new {
            l3_tbl = allocate_4k_zeroed() as *mut Pagetable;
            l4_ent.entry = defs::V2P(l3_tbl as u64).unwrap() | flags;
            require_new = true;
        } else {
            l3_tbl = defs::P2V(l4_ent.addr()).unwrap() as *mut Pagetable;
        }
        let l3_ent = &mut (*l3_tbl).entries[l3idx];
        let l2_tbl: *mut Pagetable;
        if l3_ent.is_unused() || require_new {
            l2_tbl = allocate_4k_zeroed() as *mut Pagetable;
            l3_ent.entry = defs::V2P(l2_tbl as u64).unwrap() | flags;
            require_new = true;
        } else {
            l2_tbl = defs::P2V(l3_ent.addr()).unwrap() as *mut Pagetable;
        }
        let l2_ent = &mut (*l2_tbl).entries[l2idx];
        let l1_tbl: *mut Pagetable;
        if l2_ent.is_unused() || require_new {
            l1_tbl = allocate_4k_zeroed() as *mut Pagetable;
            l2_ent.entry = defs::V2P(l1_tbl as u64).unwrap() | flags;
            require_new = true;
        } else {
            l1_tbl = defs::P2V(l2_ent.addr()).unwrap() as *mut Pagetable;
        }
        let pte = &mut (*l1_tbl).entries[l1idx];
        if pte.is_unused() || require_new {
            // back the page with a newly allocated, zeroed frame
            let page = allocate_4k_zeroed();
            pte.entry = defs::V2P(page).unwrap() | flags;
        } else {
            panic!("PTE already taken: {:#X}", pte.entry);
        }
        // flush the TLB entry for this virtual address
        asm!("invlpg [{0}]", in(reg) va);
    }
    return true;
}