// kernel/arch/riscv64/vm/mmu/sv48.rs

use core::arch::asm;

use crate::arch::vm::new_raw_pagetable;
use crate::environment::PAGE_SIZE;
use crate::vm::vmem::VirtualMemoryMap;
use crate::vm::vmem::VirtualMemoryPermission;

/// Sv48 page tables have four levels, indexed 0 (leaf) through 3 (root).
const MAX_PAGING_LEVEL: usize = 3;

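/// A single Sv48 page-table entry. Bit layout (RISC-V privileged spec):
/// - bit 0: V (valid)
/// - bits 1..=3: R/W/X; all three zero marks a pointer to the next-level table
/// - bit 4: U (user), bit 5: G (global), bit 6: A (accessed), bit 7: D (dirty)
/// - bits 8..=9: RSW (reserved for software)
/// - bits 10..=53: PPN (physical page number)
///
/// The flag setters below return `&mut Self`, so calls can be chained.
/// A minimal sketch (`ppn` stands in for a real physical page number):
///
/// ```ignore
/// let mut pte = PageTableEntry::new();
/// pte.set_ppn(ppn).readable().writable().accessed().dirty();
/// pte.validate();
/// ```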
#[repr(align(8))]
#[derive(Clone, Copy, Debug)]
pub struct PageTableEntry {
    pub entry: u64,
}

impl PageTableEntry {
    pub const fn new() -> Self {
        PageTableEntry { entry: 0 }
    }

    pub fn get_ppn(&self) -> usize {
        ((self.entry >> 10) & 0xfff_ffff_ffff) as usize // Mask to the 44-bit PPN field
    }

    pub fn get_flags(&self) -> u64 {
        self.entry & 0x3ff
    }

    pub fn is_valid(&self) -> bool {
        self.entry & 1 == 1
    }

    pub fn is_leaf(&self) -> bool {
        // An entry is a leaf if it's valid and has R=1 or X=1 (RISC-V spec, step 4
        // of the address-translation algorithm)
        if !self.is_valid() {
            return false;
        }
        let r_bit = (self.entry >> 1) & 1; // Read bit
        let x_bit = (self.entry >> 3) & 1; // Execute bit
        r_bit == 1 || x_bit == 1
    }

    pub fn validate(&mut self) {
        self.entry |= 1;
    }

    pub fn invalidate(&mut self) {
        self.entry &= !1;
    }
    pub fn set_ppn(&mut self, ppn: usize) -> &mut Self {
        let ppn_mask = 0xfff_ffff_ffff; // Mask for the 44-bit PPN field
        let masked_ppn = (ppn as u64) & ppn_mask; // Truncate the PPN to fit the field

        self.entry &= !(ppn_mask << 10); // Clear the old PPN bits
        self.entry |= masked_ppn << 10; // Set the new PPN bits
        self
    }

    pub fn set_flags(&mut self, flags: u64) -> &mut Self {
        // Note: ORs `flags` into the entry; existing flag bits are preserved.
        let mask = 0x3ff;
        self.entry |= flags & mask;
        self
    }

    pub fn clear_flags(&mut self) -> &mut Self {
        // Only clear the permission bits (R, W, X, U, G); keep V, A, D and the PPN
        self.entry &= !0x3E; // Clear bits 1-5 (R, W, X, U, G)
        self
    }

    pub fn clear_all(&mut self) -> &mut Self {
        self.entry = 0;
        self
    }

    pub fn writable(&mut self) -> &mut Self {
        self.entry |= 0x4; // W bit
        self
    }

    pub fn readable(&mut self) -> &mut Self {
        self.entry |= 0x2; // R bit
        self
    }

    pub fn executable(&mut self) -> &mut Self {
        self.entry |= 0x8; // X bit
        self
    }

    pub fn accessible_from_user(&mut self) -> &mut Self {
        self.entry |= 0x10; // U bit
        self
    }

    pub fn accessed(&mut self) -> &mut Self {
        self.entry |= 0x40; // A bit
        self
    }

    pub fn dirty(&mut self) -> &mut Self {
        self.entry |= 0x80; // D bit
        self
    }
}

#[repr(align(4096))]
#[derive(Debug)]
pub struct PageTable {
    pub entries: [PageTableEntry; 512],
}

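/// Returns `true` if `vaddr` is canonical for Sv48: bits 63..=48 must all
/// equal bit 47. Factored out of the repeated checks in `map`, `walk`,
/// and `unmap` below.
fn is_canonical(vaddr: usize) -> bool {
    let upper = vaddr >> 47; // Bit 47 and everything above it
    upper == 0 || upper == 0x1ffff
}
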
impl PageTable {
    /// Create a new page table with all entries initialized to zero.
    pub fn new() -> Self {
        PageTable {
            entries: [PageTableEntry::new(); 512],
        }
    }

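    /// Load this table into `satp` with the given ASID and flush the TLB.
    ///
    /// # Example
    ///
    /// A minimal sketch; assumes the code running after the switch is mapped
    /// at the same virtual addresses in the new table, otherwise the next
    /// instruction fetch will fault.
    ///
    /// ```ignore
    /// root_table.switch(asid);
    /// ```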
    pub fn switch(&self, asid: u16) {
        let satp = self.get_val_for_satp(asid);
        unsafe {
            asm!(
                "csrw satp, {0}",
                "sfence.vma zero, zero",
                in(reg) satp,
            );
        }
    }

    /// Get the value for the satp register.
    ///
    /// # Note
    ///
    /// Only for RISC-V (Sv48).
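    ///
    /// satp layout for Sv48 (RV64): MODE (9 = Sv48) in bits 63..=60, ASID in
    /// bits 59..=44, and the root-table PPN in bits 43..=0.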
    pub fn get_val_for_satp(&self, asid: u16) -> u64 {
        let asid = asid as usize;
        let mode = 9; // Sv48 translation mode
        let ppn = self as *const _ as usize >> 12;
        (mode << 60 | asid << 44 | ppn) as u64
    }

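    /// Map the physical range described by `mmap` into this address space,
    /// one 4 KiB page at a time.
    ///
    /// # Example
    ///
    /// A minimal sketch; assumes an already-constructed, page-aligned
    /// `VirtualMemoryMap`:
    ///
    /// ```ignore
    /// root_table.map_memory_area(asid, mmap, true, true)?;
    /// ```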
    pub fn map_memory_area(
        &mut self,
        asid: u16,
        mmap: VirtualMemoryMap,
        accessed: bool,
        dirty: bool,
    ) -> Result<(), &'static str> {
        // Check that the addresses and sizes are aligned to PAGE_SIZE
        if mmap.vmarea.start % PAGE_SIZE != 0
            || mmap.pmarea.start % PAGE_SIZE != 0
            || mmap.vmarea.size() % PAGE_SIZE != 0
            || mmap.pmarea.size() % PAGE_SIZE != 0
        {
            return Err("Address is not aligned to PAGE_SIZE");
        }

        let mut vaddr = mmap.vmarea.start;
        let mut paddr = mmap.pmarea.start;
        // `vmarea.end` is inclusive: map every page that fits entirely in the
        // area, using checked arithmetic so addresses near usize::MAX cannot
        // overflow.
        while let Some(page_end) = vaddr.checked_add(PAGE_SIZE - 1) {
            if page_end > mmap.vmarea.end {
                break;
            }
            self.map(asid, vaddr, paddr, mmap.permissions, accessed, dirty);
            match vaddr.checked_add(PAGE_SIZE) {
                Some(addr) => vaddr = addr,
                None => break,
            }
            match paddr.checked_add(PAGE_SIZE) {
                Some(addr) => paddr = addr,
                None => break,
            }
        }

        Ok(())
    }

    /// Install a single 4 KiB leaf mapping from `vaddr` to `paddr`.
    /// Only valid on the root page table.
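    ///
    /// # Example
    ///
    /// A minimal sketch; `permissions` is a bit set tested with
    /// `VirtualMemoryPermission::contained_in`, as in the body below:
    ///
    /// ```ignore
    /// root_table.map(asid, vaddr, paddr, permissions, true, true);
    /// ```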
    pub fn map(
        &mut self,
        asid: u16,
        vaddr: usize,
        paddr: usize,
        permissions: usize,
        accessed: bool,
        dirty: bool,
    ) {
        // Reject virtual addresses that are not canonical for Sv48
        if !is_canonical(vaddr) {
            panic!("Non-canonical virtual address: {:#x}", vaddr);
        }

        let vaddr = vaddr & 0xffff_ffff_ffff_f000; // Page align
        let paddr = paddr & 0xffff_ffff_ffff_f000;

        let pte = match self.walk(vaddr, true, asid) {
            Some(pte) => pte,
            None => panic!("map: walk() couldn't allocate a needed page-table page"),
        };

        // Remapping is allowed: the existing entry is simply rebuilt
        let ppn = (paddr >> 12) & 0xfff_ffff_ffff;

        // Clear the existing entry before setting the new flags
        pte.clear_all();

        if VirtualMemoryPermission::Read.contained_in(permissions) {
            pte.readable();
        }
        if VirtualMemoryPermission::Write.contained_in(permissions) {
            pte.writable();
        }
        if VirtualMemoryPermission::Execute.contained_in(permissions) {
            pte.executable();
        }
        if VirtualMemoryPermission::User.contained_in(permissions) {
            pte.accessible_from_user();
        }
        if accessed {
            pte.accessed();
        }
        if dirty {
            pte.dirty();
        }

        pte.set_ppn(ppn);
        pte.validate();
        unsafe { asm!("sfence.vma zero, zero") };
    }

    /// Find the PTE in this page table that corresponds to virtual address `vaddr`.
    /// If `alloc == true`, create any required page-table pages.
    /// Returns `None` if a needed page-table page couldn't be allocated, if
    /// `vaddr` is not canonical, or if an intermediate entry is an unexpected leaf.
    ///
    /// The RISC-V Sv48 scheme has four levels of page-table pages.
    /// A page-table page contains 512 64-bit PTEs.
    /// A 48-bit virtual address is split into five fields
    /// (bits 63..48 must equal bit 47, i.e. the address must be canonical):
    ///   47..39 -- 9 bits of level-3 index.
    ///   38..30 -- 9 bits of level-2 index.
    ///   29..21 -- 9 bits of level-1 index.
    ///   20..12 -- 9 bits of level-0 index.
    ///   11..0  -- 12 bits of byte offset within the page.
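    ///
    /// Worked example: vaddr 0x0080_8060_4005 decodes to VPN[3] = 1,
    /// VPN[2] = 2, VPN[1] = 3, VPN[0] = 4, byte offset = 0x005.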
    pub fn walk(&mut self, vaddr: usize, alloc: bool, asid: u16) -> Option<&mut PageTableEntry> {
        let mut pagetable = self as *mut PageTable;

        // Reject virtual addresses that are not canonical for Sv48
        if !is_canonical(vaddr) {
            return None;
        }

        unsafe {
            // Walk down through levels 3, 2, 1
            for level in (1..=MAX_PAGING_LEVEL).rev() {
                let vpn = (vaddr >> (12 + 9 * level)) & 0x1ff;
                let pte = &mut (*pagetable).entries[vpn];

                if pte.is_valid() {
                    // At an intermediate level, a PTE must not be a leaf
                    // (huge pages are not supported).
                    if pte.is_leaf() {
                        return None; // Fail: this is an invalid state.
                    }
                    // Not a leaf, so it points to the next-level table.
                    // Note: the physical address is used directly as a pointer,
                    // which assumes page tables are identity-mapped.
                    pagetable = (pte.get_ppn() << 12) as *mut PageTable;
                } else {
                    if !alloc {
                        return None;
                    }
                    // Allocate a new page-table page
                    let new_table = new_raw_pagetable(asid);
                    if new_table.is_null() {
                        return None;
                    }
                    pte.clear_all(); // Clear the entry
                    pte.set_ppn(new_table as usize >> 12);
                    pte.validate();
                    pagetable = new_table;
                }
            }

            // Return the PTE at level 0
            let vpn = (vaddr >> 12) & 0x1ff;
            Some(&mut (*pagetable).entries[vpn])
        }
    }

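    /// Remove the leaf mapping for `vaddr`, if present, and flush the TLB.
    ///
    /// # Example
    ///
    /// A minimal sketch (the ASID argument is currently unused):
    ///
    /// ```ignore
    /// root_table.unmap(asid, vaddr);
    /// ```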
    pub fn unmap(&mut self, _asid: u16, vaddr: usize) {
        // Reject virtual addresses that are not canonical for Sv48
        if !is_canonical(vaddr) {
            panic!("Non-canonical virtual address: {:#x}", vaddr);
        }

        let vaddr = vaddr & 0xffff_ffff_ffff_f000; // Page align

        match self.walk(vaddr, false, 0) {
            Some(pte) => {
                if pte.is_valid() {
                    pte.clear_all();
                    unsafe { asm!("sfence.vma zero, zero") };
                }
            }
            None => {
                // Mapping doesn't exist; nothing to unmap
            }
        }
    }

    pub fn unmap_all(&mut self) {
        for entry in self.entries.iter_mut() {
            entry.clear_all();
        }
        // Flush the TLB after clearing every entry.
        unsafe { asm!("sfence.vma zero, zero") };
    }
}