// kernel/arch/riscv64/vm/mmu/sv48.rs
use core::arch::asm;
2use core::result::Result;
3
4use crate::arch::vm::new_raw_pagetable;
5use crate::environment::PAGE_SIZE;
6use crate::vm::vmem::VirtualMemoryMap;
7use crate::vm::vmem::VirtualMemoryPermission;
8
// Highest non-leaf page-table level index. Sv48 uses 4 levels (3..=0);
// `walk` iterates levels 3..=1 in its loop and handles leaf level 0 afterwards.
const MAX_PAGING_LEVEL: usize = 3;
10
/// A single RISC-V page-table entry (PTE).
///
/// Layout per the RISC-V privileged specification: bits 9:0 are the flag
/// field (V/R/W/X/U/G/A/D plus two software bits) and bits 53:10 hold the
/// 44-bit physical page number (PPN).
#[repr(align(8))]
#[derive(Clone, Copy, Debug)]
pub struct PageTableEntry {
    pub entry: u64,
}

impl PageTableEntry {
    /// PPN field width: 44 bits (PTE bits 53:10) for Sv39/Sv48/Sv57.
    const PPN_MASK: u64 = 0xfff_ffff_ffff;
    /// Flag field: the low 10 bits of the entry.
    const FLAG_MASK: u64 = 0x3ff;

    /// Creates an empty (all-zero, therefore invalid) entry.
    pub const fn new() -> Self {
        PageTableEntry { entry: 0 }
    }

    /// Returns the physical page number stored in bits 53:10.
    pub fn get_ppn(&self) -> usize {
        // BUGFIX: mask was 0x3ffffffffff (42 bits); the PPN field is 44 bits
        // wide, matching the 0xfffffffffff mask already used by `map()`.
        ((self.entry >> 10) & Self::PPN_MASK) as usize
    }

    /// Returns the low 10 flag bits (V/R/W/X/U/G/A/D + software bits).
    pub fn get_flags(&self) -> u64 {
        self.entry & Self::FLAG_MASK
    }

    /// True when the V (valid) bit, bit 0, is set.
    pub fn is_valid(&self) -> bool {
        self.entry & 1 == 1
    }

    /// True when this entry is a valid leaf mapping.
    ///
    /// Per the spec, an entry with R=1 or X=1 is a leaf; R=0/X=0 (pointer)
    /// entries descend to the next table level.
    pub fn is_leaf(&self) -> bool {
        if !self.is_valid() {
            return false;
        }
        let r_bit = (self.entry >> 1) & 1;
        let x_bit = (self.entry >> 3) & 1;
        r_bit == 1 || x_bit == 1
    }

    /// Sets the V (valid) bit.
    pub fn validate(&mut self) {
        self.entry |= 1;
    }

    /// Clears the V (valid) bit, leaving all other bits intact.
    pub fn invalidate(&mut self) {
        self.entry &= !1;
    }

    /// Replaces the PPN field (bits 53:10) with `ppn`, preserving the flags.
    pub fn set_ppn(&mut self, ppn: usize) -> &mut Self {
        // BUGFIX: widened from a 42-bit to the architectural 44-bit mask so
        // physical addresses above 2^54 bytes are not silently truncated.
        let masked_ppn = (ppn as u64) & Self::PPN_MASK;
        self.entry &= !(Self::PPN_MASK << 10);
        self.entry |= masked_ppn << 10;
        self
    }

    /// ORs `flags` (low 10 bits) into the entry.
    ///
    /// NOTE: despite the name this does not clear existing flag bits; callers
    /// that need a clean slate must call `clear_all()`/`clear_flags()` first.
    pub fn set_flags(&mut self, flags: u64) -> &mut Self {
        let mask = Self::FLAG_MASK;
        self.entry |= flags & mask;
        self
    }

    /// Clears only the permission bits R/W/X/U/G (mask 0x3E),
    /// deliberately preserving V, A, D and the software bits.
    pub fn clear_flags(&mut self) -> &mut Self {
        self.entry &= !0x3E;
        self
    }

    /// Zeroes the whole entry (PPN and all flags).
    pub fn clear_all(&mut self) -> &mut Self {
        self.entry = 0;
        self
    }

    /// Sets the W (writable) bit.
    pub fn writable(&mut self) -> &mut Self {
        self.entry |= 0x4;
        self
    }

    /// Sets the R (readable) bit.
    pub fn readable(&mut self) -> &mut Self {
        self.entry |= 0x2;
        self
    }

    /// Sets the X (executable) bit.
    pub fn executable(&mut self) -> &mut Self {
        self.entry |= 0x8;
        self
    }

    /// Sets the U (user-accessible) bit.
    /// (Name keeps the existing "accesible" spelling for caller compatibility.)
    pub fn accesible_from_user(&mut self) -> &mut Self {
        self.entry |= 0x10;
        self
    }

    /// Sets the A (accessed) bit.
    pub fn accessed(&mut self) -> &mut Self {
        self.entry |= 0x40;
        self
    }

    /// Sets the D (dirty) bit.
    pub fn dirty(&mut self) -> &mut Self {
        self.entry |= 0x80;
        self
    }
}
109
/// One page-table page: 512 eight-byte entries, page-aligned so the table's
/// base address can be used directly as a PPN (address >> 12).
#[repr(align(4096))]
#[derive(Debug)]
pub struct PageTable {
    pub entries: [PageTableEntry; 512],
}
115
impl PageTable {
    /// Creates a table with all 512 entries zeroed (invalid).
    pub fn new() -> Self {
        PageTable {
            entries: [PageTableEntry::new(); 512],
        }
    }

    /// Installs this table as the active root by writing satp, then flushes
    /// the TLB with a global `sfence.vma` (all ASIDs, all addresses).
    pub fn switch(&self, asid: u16) {
        let satp = self.get_val_for_satp(asid);
        // SAFETY: writes a privileged CSR; caller must be in a mode allowed
        // to write satp and `self` must point at a valid, live root table.
        unsafe {
            asm!(
                "
                csrw satp, {0}
                sfence.vma zero, zero
                ",

                in(reg) satp,
            );
        }
    }

    /// Builds the satp value for this root table:
    /// MODE (bits 63:60) = 9 selects Sv48, ASID in bits 59:44,
    /// root-table PPN (this table's address >> 12) in bits 43:0.
    pub fn get_val_for_satp(&self, asid: u16) -> u64 {
        let asid = asid as usize;
        let mode = 9;
        // NOTE(review): assumes `self` is identity-addressable, i.e. this
        // table's virtual address equals its physical address — confirm.
        let ppn = self as *const _ as usize >> 12;
        (mode << 60 | asid << 44 | ppn) as u64
    }

    /// Maps a whole virtual memory area page-by-page onto its physical area.
    ///
    /// Returns `Err` if either area's start or size is not PAGE_SIZE-aligned.
    /// `accessed`/`dirty` pre-set the A/D bits on every created PTE.
    pub fn map_memory_area(
        &mut self,
        asid: u16,
        mmap: VirtualMemoryMap,
        accessed: bool,
        dirty: bool,
    ) -> Result<(), &'static str> {
        if mmap.vmarea.start % PAGE_SIZE != 0
            || mmap.pmarea.start % PAGE_SIZE != 0
            || mmap.vmarea.size() % PAGE_SIZE != 0
            || mmap.pmarea.size() % PAGE_SIZE != 0
        {
            return Err("Address is not aligned to PAGE_SIZE");
        }

        let mut vaddr = mmap.vmarea.start;
        let mut paddr = mmap.pmarea.start;
        // Loop while a full page still fits below `end` (end appears to be
        // inclusive here — the `vaddr + (PAGE_SIZE - 1) <= end` test maps the
        // last page when end is the area's final byte).
        // NOTE(review): `vaddr + (PAGE_SIZE - 1)` itself is an unchecked add
        // and could overflow for areas ending at the top of the address
        // space, even though the increments below use checked_add — confirm.
        while vaddr + (PAGE_SIZE - 1) <= mmap.vmarea.end {
            self.map(asid, vaddr, paddr, mmap.permissions, accessed, dirty);
            match vaddr.checked_add(PAGE_SIZE) {
                Some(addr) => vaddr = addr,
                None => break,
            }
            match paddr.checked_add(PAGE_SIZE) {
                Some(addr) => paddr = addr,
                None => break,
            }
        }

        Ok(())
    }

    /// Maps a single 4 KiB page `vaddr` -> `paddr` with the given permission
    /// bits, allocating intermediate tables as needed.
    ///
    /// Panics on a non-canonical `vaddr` (bits 63:47 must be a sign
    /// extension of bit 47 in Sv48) or when an intermediate table cannot be
    /// allocated.
    pub fn map(
        &mut self,
        asid: u16,
        vaddr: usize,
        paddr: usize,
        permissions: usize,
        accessed: bool,
        dirty: bool,
    ) {
        // Canonical check: bits 63:48 must all equal bit 47.
        let canonical_check = (vaddr >> 47) & 1;
        let upper_bits = (vaddr >> 48) & 0xffff;
        if canonical_check == 1 && upper_bits != 0xffff {
            panic!("Non-canonical virtual address: {:#x}", vaddr);
        } else if canonical_check == 0 && upper_bits != 0 {
            panic!("Non-canonical virtual address: {:#x}", vaddr);
        }

        // Truncate both addresses to their 4 KiB page boundary.
        let vaddr = vaddr & 0xffff_ffff_ffff_f000;
        let paddr = paddr & 0xffff_ffff_ffff_f000;

        let pte = match self.walk(vaddr, true, asid) {
            Some(pte) => pte,
            None => panic!("map: walk() couldn't allocate a needed page-table page"),
        };

        // 44-bit PPN of the target physical page.
        let ppn = (paddr >> 12) & 0xfffffffffff;

        // Start from a zeroed entry, then build up flags.
        pte.clear_all();

        if VirtualMemoryPermission::Read.contained_in(permissions) {
            pte.readable();
        }
        if VirtualMemoryPermission::Write.contained_in(permissions) {
            pte.writable();
        }
        if VirtualMemoryPermission::Execute.contained_in(permissions) {
            pte.executable();
        }
        if VirtualMemoryPermission::User.contained_in(permissions) {
            pte.accesible_from_user();
        }
        if accessed {
            pte.accessed();
        }
        if dirty {
            pte.dirty();
        }

        // Validate last so the entry only becomes visible fully formed,
        // then flush the TLB (global fence: all ASIDs, all addresses).
        pte.set_ppn(ppn);
        pte.validate();
        unsafe { asm!("sfence.vma zero,zero") };
    }

    /// Walks the page-table tree for `vaddr` and returns the leaf-level PTE.
    ///
    /// With `alloc == true`, missing intermediate tables are allocated via
    /// `new_raw_pagetable(asid)`. Returns `None` on a non-canonical address,
    /// a failed allocation, `alloc == false` with a missing table, or when a
    /// superpage leaf is hit above level 0 (superpages are not traversed).
    pub fn walk(&mut self, vaddr: usize, alloc: bool, asid: u16) -> Option<&mut PageTableEntry> {
        let mut pagetable = self as *mut PageTable;

        // Same canonical check as `map`, but non-panicking.
        let canonical_check = (vaddr >> 47) & 1;
        let upper_bits = (vaddr >> 48) & 0xffff;
        if canonical_check == 1 && upper_bits != 0xffff {
            return None;
        } else if canonical_check == 0 && upper_bits != 0 {
            return None;
        }

        // SAFETY: `pagetable` starts as `self` and is then rebuilt from PPNs
        // stored in valid non-leaf PTEs; assumes table physical addresses are
        // directly dereferenceable (identity mapping) — see NOTE in
        // get_val_for_satp.
        unsafe {
            // Descend levels 3..=1; the leaf level (0) is indexed after the loop.
            for level in (1..=MAX_PAGING_LEVEL).rev() {
                // 9-bit VPN slice for this level.
                let vpn = (vaddr >> (12 + 9 * level)) & 0x1ff;
                let pte = &mut (*pagetable).entries[vpn];

                if pte.is_valid() {
                    if pte.is_leaf() {
                        // Superpage mapped above level 0: refuse to descend.
                        return None;
                    }
                    // Non-leaf: follow the pointer to the next-level table.
                    pagetable = (pte.get_ppn() << 12) as *mut PageTable;
                } else {
                    if !alloc {
                        return None;
                    }
                    let new_table = new_raw_pagetable(asid);
                    if new_table.is_null() {
                        return None;
                    }
                    // NOTE(review): assumes new_raw_pagetable returns a zeroed
                    // table — confirm, otherwise stale entries leak through.
                    pte.clear_all();
                    pte.set_ppn(new_table as usize >> 12);
                    pte.validate();
                    pagetable = new_table;
                }
            }

            // Level-0 (leaf) index.
            let vpn = (vaddr >> 12) & 0x1ff;
            Some(&mut (*pagetable).entries[vpn])
        }
    }

    /// Unmaps the single page containing `vaddr`, if it is mapped.
    ///
    /// Panics on a non-canonical address; silently does nothing when the
    /// translation does not exist.
    pub fn unmap(&mut self, _asid: u16, vaddr: usize) {
        let canonical_check = (vaddr >> 47) & 1;
        let upper_bits = (vaddr >> 48) & 0xffff;
        if canonical_check == 1 && upper_bits != 0xffff {
            panic!("Non-canonical virtual address: {:#x}", vaddr);
        } else if canonical_check == 0 && upper_bits != 0 {
            panic!("Non-canonical virtual address: {:#x}", vaddr);
        }

        let vaddr = vaddr & 0xffff_ffff_ffff_f000;
        // asid 0 is fine here: walk only uses asid when alloc == true.
        match self.walk(vaddr, false, 0) {
            Some(pte) => {
                if pte.is_valid() {
                    pte.clear_all();
                    unsafe { asm!("sfence.vma zero,zero") };
                }
            }
            None => {
            }
        }
    }

    /// Invalidates every root-level entry and flushes the TLB.
    ///
    /// NOTE(review): only the root table is cleared; lower-level tables
    /// allocated by `walk` are not freed here — confirm their lifetime is
    /// managed by the `new_raw_pagetable` allocator.
    pub fn unmap_all(&mut self) {
        for i in 0..512 {
            let entry = &mut self.entries[i];
            entry.clear_all();
        }
        unsafe { asm!("sfence.vma zero,zero") };
    }
}