kernel/arch/riscv64/vm/
mod.rs

//! Virtual memory module for the RISC-V (riscv64) architecture.
//!
//! This module is responsible for managing the system's virtual memory. It
//! provides functions to initialize the virtual memory system, map physical
//! memory into virtual address spaces, and switch page tables.

8pub mod mmu;
9
10extern crate alloc;
11
12use alloc::vec::Vec;
13use alloc::{boxed::Box, vec};
14use hashbrown::HashMap;
15use mmu::PageTable;
16use spin::Once;
17use spin::RwLock;
18
19use crate::mem::page::allocate_raw_pages;
20
21use crate::arch::Arch;
22use crate::arch::get_cpu;
23use crate::arch::get_user_trapvector_paddr;
24use crate::early_println;
25use crate::environment::{KERNEL_KSTACK_REGION_END, KERNEL_KSTACK_REGION_START, TRAMPOLINE_VA_END};
26use crate::vm::manager::VirtualMemoryManager;
27use crate::vm::vmem::{MemoryArea, VirtualMemoryMap, VirtualMemoryPermission};
28
// Linker-provided symbols marking the extent of the trampoline section.
// Their *addresses* (not their values) are the section boundaries.
unsafe extern "C" {
    static __TRAMPOLINE_START: usize;
    static __TRAMPOLINE_END: usize;
}

// Total number of ASIDs: the full range of a 16-bit ASID field.
const NUM_OF_ASID: usize = u16::MAX as usize + 1; // Maximum ASID value
// Bitmap of in-use ASIDs, one bit per ASID, packed into u64 words.
// Lazily initialized via get_asid_tables(); bit 0 (ASID 0) is reserved.
static ASID_BITMAP_TABLES: Once<RwLock<Box<[u64]>>> = Once::new();
36
37fn get_asid_tables() -> &'static RwLock<Box<[u64]>> {
38    ASID_BITMAP_TABLES.call_once(|| {
39        // Directly allocate on heap to avoid stack overflow
40        let mut tables = alloc::vec![0u64; NUM_OF_ASID / 64].into_boxed_slice();
41        tables[0] = 1; // Mark the first ASID as used to avoid returning 0, which is reserved
42        RwLock::new(tables)
43    })
44}
// Per-ASID owned page tables; index 0 of each Vec is the ASID's root table.
// (Replaces an earlier `static mut` + `Lazy<RwLock<HashMap<..>>>` design.)
static PAGE_TABLES: Once<RwLock<HashMap<u16, Vec<Box<PageTable>>>>> = Once::new();
47
48fn get_page_tables() -> &'static RwLock<HashMap<u16, Vec<Box<PageTable>>>> {
49    PAGE_TABLES.call_once(|| RwLock::new(HashMap::new()))
50}
51
52pub fn get_pagetable(ptr: *mut PageTable) -> Option<&'static mut PageTable> {
53    unsafe {
54        if ptr.is_null() {
55            return None;
56        }
57        Some(&mut *ptr)
58    }
59}
60
/// Allocates one physical page and takes ownership of it as a `Box<PageTable>`.
///
/// # Panics
/// Panics if the page allocator returns a null pointer.
fn new_boxed_pagetable() -> Box<PageTable> {
    let ptr = allocate_raw_pages(1) as *mut PageTable;
    if ptr.is_null() {
        panic!("Failed to allocate a new page table");
    }
    // NOTE(review): Box::from_raw ties this page to the global allocator on
    // drop — assumes allocate_raw_pages memory is compatible with it, and that
    // the returned page is zero-initialized (a PageTable of garbage entries
    // would be unsound). TODO: confirm both against the page allocator.
    unsafe { Box::from_raw(ptr) }
}
68
69/// Allocates a new raw page table for the given ASID.
70///
71/// # Arguments
72/// * `asid` - The Address Space ID (ASID) for which the page table is allocated.
73///
74/// # Returns
75/// A raw pointer to the newly allocated page table.
76///
77/// # Safety
78/// This function is unsafe because it dereferences a raw pointer, which can lead to undefined behavior
79/// if the pointer is null or invalid.
80///
81#[allow(static_mut_refs)]
82pub unsafe fn new_raw_pagetable(asid: u16) -> *mut PageTable {
83    let boxed_pagetable = new_boxed_pagetable();
84    let ptr = boxed_pagetable.as_ref() as *const PageTable as *mut PageTable;
85
86    // Store the boxed page table in HashMap for proper lifecycle management
87    let mut page_tables = get_page_tables().write();
88    match page_tables.get_mut(&asid) {
89        Some(vec) => vec.push(boxed_pagetable),
90        None => {
91            // This should not happen if ASID allocation is correct
92            panic!("ASID {} not found in page tables", asid);
93        }
94    }
95
96    ptr
97}
98
99pub fn alloc_virtual_address_space() -> u16 {
100    let mut asid_table = get_asid_tables().write();
101    for word_idx in 0..(NUM_OF_ASID / 64) {
102        let word = asid_table[word_idx];
103        if word != u64::MAX {
104            // Check if there is a free ASID in this word
105            let bit_pos = (!word).trailing_zeros() as usize; // Find the first free bit (Must be < 64)
106            asid_table[word_idx] |= 1 << bit_pos; // Mark this ASID as used
107            let asid = (word_idx * 64 + bit_pos) as u16; // Calculate the ASID
108            let root_pagetable_ptr = Box::into_raw(new_boxed_pagetable());
109            let mut page_tables = get_page_tables().write();
110            // Insert the new root page table into the HashMap
111            unsafe {
112                page_tables.insert(asid, vec![Box::from_raw(root_pagetable_ptr)]);
113            }
114
115            if root_pagetable_ptr.is_null() {
116                panic!("Failed to allocate a new root page table");
117            }
118
119            return asid; // Return the allocated ASID
120        }
121    }
122    panic!("No available root page table");
123}
124
125pub fn free_virtual_address_space(asid: u16) {
126    let asid = asid as usize;
127    if asid < NUM_OF_ASID {
128        let bit_pos = asid % 64;
129        let word_idx = asid / 64;
130        let mut asid_table = get_asid_tables().write();
131        if asid_table[word_idx] & (1 << bit_pos) == 0 {
132            panic!("ASID {} is already free", asid);
133        }
134        let mut page_tables = get_page_tables().write();
135        page_tables.remove(&(asid as u16)); // Remove the page table associated with this ASID
136        asid_table[word_idx] &= !(1 << bit_pos); // Mark this ASID as free
137    } else {
138        panic!("Invalid ASID: {}", asid);
139    }
140}
141
142pub fn is_asid_used(asid: u16) -> bool {
143    let asid = asid as usize;
144    if asid < NUM_OF_ASID {
145        let word_idx = asid / 64;
146        let bit_pos = asid % 64;
147        let asid_table = get_asid_tables().read();
148        (asid_table[word_idx] & (1 << bit_pos)) != 0
149    } else {
150        false
151    }
152}
153
154pub fn get_root_pagetable_ptr(asid: u16) -> Option<*mut PageTable> {
155    if is_asid_used(asid) {
156        let page_tabels = get_page_tables().read();
157        // Root page table is always at index 0 for each ASID
158        let root_page_table = page_tabels.get(&asid)?[0].as_ref();
159        Some(root_page_table as *const PageTable as *mut PageTable)
160    } else {
161        None
162    }
163}
164
165pub fn get_root_pagetable(asid: u16) -> Option<&'static mut PageTable> {
166    let addr = get_root_pagetable_ptr(asid)?;
167    unsafe {
168        if addr.is_null() {
169            None
170        } else {
171            Some(&mut *addr)
172        }
173    }
174}
175
/// Maps the trampoline section into `manager`'s address space so that it ends
/// at `trampoline_vaddr_end`, then publishes the resulting virtual addresses
/// of the user trap vector and this CPU's `Arch` block.
///
/// # Panics
/// Panics if the trampoline memory map cannot be added to the manager or
/// mapped into its root page table.
fn setup_trampoline_at_end(manager: &VirtualMemoryManager, trampoline_vaddr_end: usize) {
    // Physical extent of the trampoline from the linker symbols; `end` is the
    // address of the last byte (inclusive), hence the -1.
    let trampoline_start = unsafe { &__TRAMPOLINE_START as *const usize as usize };
    let trampoline_end = unsafe { &__TRAMPOLINE_END as *const usize as usize } - 1;
    let trampoline_size = trampoline_end - trampoline_start;

    let arch = get_cpu().as_paddr_cpu();
    let trampoline_vaddr_start = trampoline_vaddr_end - trampoline_size;

    // Translate the trap vector and Arch struct into the virtual window by
    // carrying over their offsets from the trampoline's physical start.
    let trap_entry_paddr = get_user_trapvector_paddr();
    let arch_paddr = arch as *const Arch as usize;
    let trap_entry_offset = trap_entry_paddr - trampoline_start;
    let arch_offset = arch_paddr - trampoline_start;

    let trap_entry_vaddr = trampoline_vaddr_start + trap_entry_offset;
    let arch_vaddr = trampoline_vaddr_start + arch_offset;

    // Debug/test builds: report the planned layout before mapping.
    #[cfg(any(debug_assertions, test))]
    {
        early_println!(
            "Trampoline space planned  : {:#x} - {:#x}",
            trampoline_vaddr_start,
            trampoline_vaddr_end
        );
        early_println!(
            "  Trampoline paddr        : {:#x} - {:#x}",
            trampoline_start,
            trampoline_end
        );
        early_println!("  Trap entry paddr        : {:#x}", trap_entry_paddr);
        early_println!("  Arch paddr              : {:#x}", arch_paddr);
        early_println!("  Trap entry vaddr        : {:#x}", trap_entry_vaddr);
        early_println!("  Arch vaddr              : {:#x}", arch_vaddr);
    }

    // Shared R/W/X mapping with no owner (is_shared + owner: None as written).
    let trampoline_map = VirtualMemoryMap {
        vmarea: MemoryArea {
            start: trampoline_vaddr_start,
            end: trampoline_vaddr_end,
        },
        pmarea: MemoryArea {
            start: trampoline_start,
            end: trampoline_end,
        },
        permissions: VirtualMemoryPermission::Read as usize
            | VirtualMemoryPermission::Write as usize
            | VirtualMemoryPermission::Execute as usize,
        is_shared: true,
        owner: None,
    };

    if let Err(e) = manager.add_memory_map(trampoline_map.clone()) {
        // On failure, dump any VMAs overlapping the planned window (and the
        // full VMA list) to make the conflict diagnosable, then panic.
        #[cfg(any(debug_assertions, test))]
        {
            early_println!("[vm] add trampoline map failed: {}", e);
            if let Some(m) = manager.search_memory_map(trampoline_vaddr_start) {
                early_println!(
                    "[vm] map@trampoline_start: {:#x}-{:#x}",
                    m.vmarea.start,
                    m.vmarea.end
                );
            } else {
                early_println!("[vm] map@trampoline_start: <none>");
            }
            if let Some(m) = manager.search_memory_map(trampoline_vaddr_end) {
                early_println!(
                    "[vm] map@trampoline_end  : {:#x}-{:#x}",
                    m.vmarea.start,
                    m.vmarea.end
                );
            } else {
                early_println!("[vm] map@trampoline_end  : <none>");
            }
            manager.with_memmaps(|mm| {
                early_println!("[vm] current VMA count   : {}", mm.len());
                for (_k, m) in mm.iter() {
                    early_println!("[vm]   VMA {:#x}-{:#x}", m.vmarea.start, m.vmarea.end);
                }
            });
        }
        panic!("Failed to add trampoline memory map: {}", e);
    }

    // Install the mapping into the root page table for this address space.
    manager
        .get_root_page_table()
        .unwrap()
        .map_memory_area(manager.get_asid(), trampoline_map, true, true)
        .map_err(|e| panic!("Failed to map trampoline memory area: {}", e))
        .unwrap();

    // Publish the computed virtual addresses for the trap path to use.
    crate::vm::set_trampoline_trap_vector(trap_entry_vaddr);
    crate::vm::set_trampoline_arch(arch.get_cpuid(), arch_vaddr);
}
268
269pub fn setup_trampoline_for_kernel(manager: &VirtualMemoryManager) {
270    setup_trampoline_at_end(manager, TRAMPOLINE_VA_END);
271
272    #[cfg(any(debug_assertions, test))]
273    {
274        crate::early_println!(
275            "[vm] riscv64 high-va(kstack) region: {:#x}-{:#x}",
276            KERNEL_KSTACK_REGION_START,
277            KERNEL_KSTACK_REGION_END
278        );
279        debug_assert!(KERNEL_KSTACK_REGION_START <= KERNEL_KSTACK_REGION_END);
280        debug_assert!(KERNEL_KSTACK_REGION_END < TRAMPOLINE_VA_END);
281    }
282}
283
/// Installs the trampoline mapping into a user address space at the same
/// fixed end address (`TRAMPOLINE_VA_END`) used for the kernel mapping.
pub fn setup_trampoline_for_user(manager: &VirtualMemoryManager) {
    setup_trampoline_at_end(manager, TRAMPOLINE_VA_END);
}
287
#[cfg(test)]
mod tests {
    use super::*;

    // Allocate an ASID, grow its table list, and resolve the raw pointer back
    // to a reference; the ASID is released at the end.
    #[test_case]
    fn test_get_page_table() {
        let asid = alloc_virtual_address_space();
        let ptr = unsafe { new_raw_pagetable(asid) };
        let page_table = get_pagetable(ptr);
        assert!(page_table.is_some());
        free_virtual_address_space(asid);
    }

    // A freshly allocated ASID must expose a root page table.
    #[test_case]
    fn test_get_root_page_table_idx() {
        // `asid` is already u16; the previous `as u16` cast was redundant.
        let asid = alloc_virtual_address_space();
        let root_page_table = get_root_pagetable(asid);
        assert!(root_page_table.is_some());
        // Release the ASID so this test does not leak an address space.
        free_virtual_address_space(asid);
    }

    // Bitmap allocation is sequential; freeing clears the used bit.
    #[test_case]
    fn test_alloc_virtual_address_space() {
        let asid_0 = alloc_virtual_address_space();
        crate::early_println!("Allocated ASID: {}", asid_0);
        assert!(is_asid_used(asid_0));
        let asid_1 = alloc_virtual_address_space();
        crate::early_println!("Allocated ASID: {}", asid_1);
        assert_eq!(asid_1, asid_0 + 1);
        assert!(is_asid_used(asid_1));
        free_virtual_address_space(asid_1);
        assert!(!is_asid_used(asid_1));

        free_virtual_address_space(asid_0);
        assert!(!is_asid_used(asid_0));
    }
}