kernel/vm/
mod.rs

1//! Virtual memory module.
2//!
3//! This module provides the virtual memory abstraction for the kernel. It
4//! includes functions for managing virtual address spaces.
5
6use manager::VirtualMemoryManager;
7use vmem::MemoryArea;
8use vmem::VirtualMemoryMap;
9use vmem::VirtualMemoryPermission;
10
11use crate::arch::Arch;
12use crate::arch::get_device_memory_areas;
13use crate::arch::get_kernel_trapvector_paddr;
14use crate::arch::set_trapvector;
15use crate::arch::vm::alloc_virtual_address_space;
16use crate::arch::vm::get_root_pagetable;
17use crate::early_println;
18use crate::environment::KERNEL_VM_STACK_SIZE;
19use crate::environment::KERNEL_VM_STACK_START;
20use crate::environment::MAX_NUM_CPUS;
21use crate::environment::PAGE_SIZE;
22use crate::environment::USER_STACK_END;
23use crate::environment::{
24    KERNEL_KSTACK_REGION_END, KERNEL_KSTACK_REGION_START, KERNEL_KSTACK_SLOT_SIZE,
25    KERNEL_KSTACK_SLOTS, TASK_KERNEL_STACK_SIZE,
26};
27use crate::sched::scheduler::get_scheduler;
28use crate::task::Task;
29use core::sync::atomic::Ordering;
30use spin::{Mutex, Once};
31
32extern crate alloc;
33
34pub mod manager;
35pub mod vmem;
36
37static KERNEL_VM_MANAGER: Once<VirtualMemoryManager> = Once::new();
38
39pub fn get_kernel_vm_manager() -> &'static VirtualMemoryManager {
40    KERNEL_VM_MANAGER.call_once(|| VirtualMemoryManager::new())
41}
42
43static KERNEL_AREA: Once<MemoryArea> = Once::new();
44/* Initialize MMU and enable paging */
45#[allow(static_mut_refs)]
46pub fn kernel_vm_init(kernel_area: MemoryArea) {
47    let manager = get_kernel_vm_manager();
48
49    #[cfg(any(debug_assertions, test))]
50    early_println!("[vm] kernel_vm_init: start");
51
52    let asid = alloc_virtual_address_space(); /* Kernel ASID */
53    let root_page_table = get_root_pagetable(asid).unwrap();
54
55    manager.set_asid(asid);
56
57    /* Map kernel space */
58    let kernel_start = kernel_area.start;
59    let kernel_end = kernel_area.end;
60
61    let kernel_area = MemoryArea {
62        start: kernel_start,
63        end: kernel_end,
64    };
65    KERNEL_AREA.call_once(|| kernel_area);
66
67    let kernel_map = VirtualMemoryMap {
68        vmarea: kernel_area,
69        pmarea: kernel_area,
70        permissions: VirtualMemoryPermission::Read as usize
71            | VirtualMemoryPermission::Write as usize
72            | VirtualMemoryPermission::Execute as usize,
73        is_shared: true, // Kernel memory should be shared across all processes
74        owner: None,
75    };
76    get_kernel_vm_manager()
77        .add_memory_map(kernel_map.clone())
78        .map_err(|e| panic!("Failed to add kernel memory map: {}", e))
79        .unwrap();
80    /* Pre-map the kernel space */
81    root_page_table
82        .map_memory_area(asid, kernel_map, true, true)
83        .map_err(|e| panic!("Failed to map kernel memory area: {}", e))
84        .unwrap();
85
86    // Map device memory areas (architecture-specific)
87    for dev_area in get_device_memory_areas() {
88        let dev_map = VirtualMemoryMap {
89            vmarea: dev_area,
90            pmarea: dev_area,
91            permissions: VirtualMemoryPermission::Read as usize
92                | VirtualMemoryPermission::Write as usize,
93            is_shared: true, // Device memory should be shared
94            owner: None,
95        };
96        get_kernel_vm_manager()
97            .add_memory_map(dev_map.clone())
98            .map_err(|e| panic!("Failed to add device memory map: {}", e))
99            .unwrap();
100        root_page_table
101            .map_memory_area(asid, dev_map.clone(), true, true)
102            .map_err(|e| panic!("Failed to map device memory area: {}", e))
103            .unwrap();
104    }
105
106    early_println!(
107        "Kernel space mapped       : {:#018x} - {:#018x}",
108        kernel_area.start,
109        kernel_area.end
110    );
111    for dev_area in get_device_memory_areas() {
112        early_println!(
113            "Device space mapped       : {:#018x} - {:#018x}",
114            dev_area.start,
115            dev_area.end
116        );
117    }
118
119    #[cfg(any(debug_assertions, test))]
120    early_println!("[vm] kernel_vm_init: setup_trampoline_for_kernel...");
121
122    crate::arch::vm::setup_trampoline_for_kernel(get_kernel_vm_manager());
123
124    #[cfg(any(debug_assertions, test))]
125    early_println!("[vm] kernel_vm_init: trampoline ok");
126
127    #[cfg(any(debug_assertions, test))]
128    early_println!("[vm] kernel_vm_init: switch (ttbr0/arch-dependent)...");
129    root_page_table.switch(manager.get_asid());
130
131    #[cfg(any(debug_assertions, test))]
132    early_println!("[vm] kernel_vm_init: done");
133}
134
135pub fn user_vm_init(task: &Task) {
136    let asid = alloc_virtual_address_space();
137    task.vm_manager.set_asid(asid);
138
139    /* User stack page */
140    let num_of_stack_page = 256; // 1MB user stack (4KB pages)
141    let stack_start = USER_STACK_END - num_of_stack_page * PAGE_SIZE;
142    task.allocate_stack_pages(stack_start, num_of_stack_page)
143        .map_err(|e| panic!("Failed to allocate user stack pages: {}", e))
144        .unwrap();
145
146    /* Guard page */
147    task.allocate_guard_pages(stack_start - PAGE_SIZE, 1)
148        .map_err(|e| panic!("Failed to allocate guard page: {}", e))
149        .unwrap();
150
151    crate::arch::vm::setup_trampoline_for_user(&task.vm_manager);
152
153    // Trampoline-managed high-VA infrastructure also includes per-task kstack windows.
154    // Keep this in the VM init flow so callers don't need a separate map_* step.
155    setup_trampoline_for_task_kstack_window(task)
156        .map_err(|e| panic!("Failed to setup task kstack window: {}", e))
157        .unwrap();
158}
159
/// Initialize a task address space that includes the full kernel image.
///
/// Allocates a fresh ASID, maps the kernel image (RWX) recorded by
/// `kernel_vm_init`, allocates a kernel-VM stack, registers device areas,
/// and installs the trampoline plus the per-task kstack window.
///
/// # Panics
/// Panics if `kernel_vm_init` has not populated `KERNEL_AREA` yet, or if any
/// mapping/allocation step fails.
pub fn user_kernel_vm_init(task: &Task) {
    let asid = alloc_virtual_address_space();
    let root_page_table = get_root_pagetable(asid).unwrap();
    task.vm_manager.set_asid(asid);

    // Kernel image area recorded once during `kernel_vm_init`.
    let kernel_area = *KERNEL_AREA.get().expect("KERNEL_AREA not initialized");

    let kernel_map = VirtualMemoryMap {
        vmarea: kernel_area,
        pmarea: kernel_area,
        permissions: VirtualMemoryPermission::Read as usize
            | VirtualMemoryPermission::Write as usize
            | VirtualMemoryPermission::Execute as usize,
        is_shared: true, // Kernel memory should be shared across all processes
        owner: None,
    };
    task.vm_manager
        .add_memory_map(kernel_map.clone())
        .map_err(|e| {
            panic!("Failed to add kernel memory map: {}", e);
        })
        .unwrap();
    /* Pre-map the kernel space */
    root_page_table
        .map_memory_area(asid, kernel_map, true, true)
        .map_err(|e| {
            panic!("Failed to map kernel memory area: {}", e);
        })
        .unwrap();
    // Start the task's data size just past the kernel image end.
    task.data_size.store(kernel_area.end + 1, Ordering::SeqCst);

    /* Stack page */
    task.allocate_stack_pages(KERNEL_VM_STACK_START, KERNEL_VM_STACK_SIZE / PAGE_SIZE)
        .map_err(|e| panic!("Failed to allocate kernel stack pages: {}", e))
        .unwrap();

    // Map device memory areas (architecture-specific)
    // NOTE(review): unlike `kernel_vm_init`, device maps are only registered
    // with the VM manager here and not pre-mapped into the page table —
    // presumably mapped on demand; confirm this asymmetry is intentional.
    for dev_area in get_device_memory_areas() {
        let dev_map = VirtualMemoryMap {
            vmarea: dev_area,
            pmarea: dev_area,
            permissions: VirtualMemoryPermission::Read as usize
                | VirtualMemoryPermission::Write as usize,
            is_shared: true, // Device memory should be shared
            owner: None,
        };
        task.vm_manager
            .add_memory_map(dev_map)
            .map_err(|e| panic!("Failed to add device memory map: {}", e))
            .unwrap();
    }

    // High-VA infrastructure: trampoline, then the task's kstack window.
    crate::arch::vm::setup_trampoline_for_user(&task.vm_manager);

    setup_trampoline_for_task_kstack_window(task)
        .map_err(|e| panic!("Failed to setup task kstack window: {}", e))
        .unwrap();
}
218
219// --------------------
220// Kernel stack window allocator (shared kernel PT)
221// --------------------
222
/// Slot allocator for fixed-size kernel-stack windows in the high-VA
/// kstack region (`KERNEL_KSTACK_REGION_START` onward, one slot every
/// `KERNEL_KSTACK_SLOT_SIZE` bytes).
struct KernelKstackAllocator {
    // `true` at index `i` means slot `i` is currently in use.
    slots: alloc::vec::Vec<bool>,
}
226
227impl KernelKstackAllocator {
228    fn new() -> Self {
229        Self {
230            slots: alloc::vec![false; KERNEL_KSTACK_SLOTS],
231        }
232    }
233
234    fn alloc_slot(&mut self) -> Option<(usize, usize, usize)> {
235        for (idx, used) in self.slots.iter_mut().enumerate() {
236            if !*used {
237                *used = true;
238                let base = KERNEL_KSTACK_REGION_START + idx * KERNEL_KSTACK_SLOT_SIZE;
239                let top = base + KERNEL_KSTACK_SLOT_SIZE; // exclusive top
240                return Some((idx, base, top));
241            }
242        }
243        None
244    }
245
246    fn free_slot(&mut self, idx: usize) {
247        if idx < self.slots.len() {
248            self.slots[idx] = false;
249        }
250    }
251
252    fn slot_index_for_base(&self, base: usize) -> Option<usize> {
253        if base < KERNEL_KSTACK_REGION_START || base > KERNEL_KSTACK_REGION_END {
254            return None;
255        }
256        let off = base - KERNEL_KSTACK_REGION_START;
257        if off % KERNEL_KSTACK_SLOT_SIZE != 0 {
258            return None;
259        }
260        Some(off / KERNEL_KSTACK_SLOT_SIZE)
261    }
262}
263
264static KSTACK_ALLOC_ONCE: Once<Mutex<KernelKstackAllocator>> = Once::new();
265
/// Lazily-initialized global slot allocator for kernel stack windows.
/// The `Mutex` serializes slot allocation/free across CPUs.
fn kstack_alloc() -> &'static Mutex<KernelKstackAllocator> {
    KSTACK_ALLOC_ONCE.call_once(|| Mutex::new(KernelKstackAllocator::new()))
}
269
/// Map the task's kernel stack physical pages into the shared kernel PT at a unique high VA window.
/// Adds an unmapped guard page at the bottom of the window.
///
/// Side effects: records `(slot_idx, base)` on the task for later SP math
/// and teardown, and rewrites the task's saved kernel-context SP to point
/// just below the window top (trapframe space reserved, alignment applied).
///
/// # Errors
/// Returns an error when no window slot is free, when the mapping cannot be
/// registered or installed, or when the kernel root page table is unset.
#[allow(static_mut_refs)]
pub fn setup_trampoline_for_task_kstack_window(task: &Task) -> Result<(), &'static str> {
    // Allocate a window slot
    let (slot_idx, base, _top) = kstack_alloc()
        .lock()
        .alloc_slot()
        .ok_or("No free kernel stack window slots")?;

    // Physical (identity) address range of the task's kernel stack
    let km_area = task.get_kernel_stack_memory_area_paddr();
    let paddr_start = km_area.start;
    let paddr_end = paddr_start + TASK_KERNEL_STACK_SIZE - 1; // inclusive end

    // Virtual window (skip guard page at the bottom)
    let vaddr_start = base + crate::environment::PAGE_SIZE;
    let vaddr_end = vaddr_start + TASK_KERNEL_STACK_SIZE - 1; // inclusive end

    // Map into shared kernel PT
    let kman = get_kernel_vm_manager();
    let mmap = VirtualMemoryMap {
        vmarea: MemoryArea {
            start: vaddr_start,
            end: vaddr_end,
        },
        pmarea: MemoryArea {
            start: paddr_start,
            end: paddr_end,
        },
        permissions: VirtualMemoryPermission::Read as usize
            | VirtualMemoryPermission::Write as usize,
        is_shared: true,
        owner: None,
    };

    kman.add_memory_map(mmap.clone())
        .map_err(|_| "Failed to add kernel stack mmap")?;
    let root = kman
        .get_root_page_table()
        .ok_or("Kernel root page table not set")?;
    root.map_memory_area(kman.get_asid(), mmap, true, true)
        .map_err(|_| "Failed to map kernel stack window")?;

    // Record base for later SP and teardown
    task.set_kernel_stack_window_base(Some((slot_idx, base)));

    // Update the task's kernel context SP to point into the high-VA window.
    // After boot, tasks are scheduled via `switch_to` which restores KernelContext.sp.
    // If we keep SP pointing to the raw allocated stack pointer, AArch64 can fault at
    // exception entry (SP_EL1) and the kernel may also miss the intended trampoline-managed
    // stack window. The window top is page-aligned, so stack alignment is also guaranteed.
    let stack_top = (base + crate::environment::PAGE_SIZE + TASK_KERNEL_STACK_SIZE) as u64;
    let tf_size = core::mem::size_of::<crate::arch::Trapframe>() as u64;
    let tf_align = core::mem::align_of::<crate::arch::Trapframe>() as u64;
    debug_assert!(tf_align.is_power_of_two());
    // Reserve trapframe space at the top, then round down to its alignment.
    let sp = (stack_top - tf_size) & !(tf_align - 1);
    task.with_kernel_context(|kctx| {
        kctx.set_sp(sp);
    });

    #[cfg(any(debug_assertions, test))]
    crate::early_println!(
        "Mapped kernel stack window for Task (allocating): slot {} {:#x} - {:#x}",
        slot_idx,
        base,
        base + KERNEL_KSTACK_SLOT_SIZE - 1
    );

    // NOTE: vcpu.sp (user sp) is set separately; this is kernel SP for `switch_to`/traps.

    // Debug verification in test / debug builds: ensure guard page is unmapped
    #[cfg(any(debug_assertions, test))]
    {
        if verify_task_kernel_stack_guard(task) {
            early_println!("Kernel stack guard OK (slot {})", slot_idx);
        } else {
            early_println!(
                "WARN: Kernel stack guard mapping anomaly (slot {})",
                slot_idx
            );
        }
    }
    Ok(())
}
355
356/// Unmap and free the task's kernel stack window from the shared kernel PT.
357#[allow(static_mut_refs)]
358pub fn teardown_trampoline_for_task_kstack_window(task: &mut Task) {
359    if let Some((slot_idx, base)) = task.get_kernel_stack_window_base() {
360        let vstart = base + crate::environment::PAGE_SIZE;
361        let vend = vstart + TASK_KERNEL_STACK_SIZE - 1;
362
363        // Remove from kernel VM manager and unmap pages
364        let kman = get_kernel_vm_manager();
365        let asid = kman.get_asid();
366        if let Some(root) = kman.get_root_page_table() {
367            let mut v = vstart;
368            while v <= vend {
369                root.unmap(asid, v);
370                v += PAGE_SIZE;
371            }
372        }
373        // Best-effort remove VMA entries
374        let mut v = vstart;
375        while v <= vend {
376            let _ = kman.remove_memory_map_by_addr(v);
377            v += PAGE_SIZE;
378        }
379        // Free slot
380        kstack_alloc().lock().free_slot(slot_idx);
381        task.set_kernel_stack_window_base(None);
382    }
383}
384
385/// Verify that a task's kernel stack guard page is unmapped and stack pages are mapped.
386/// Returns true if the guard page has no associated memory map and a sample stack address is mapped.
387pub fn verify_task_kernel_stack_guard(task: &Task) -> bool {
388    let (slot_idx, base) = match task.get_kernel_stack_window_base() {
389        Some(v) => v,
390        None => return false,
391    };
392    let guard_start = base;
393    let guard_sample = guard_start; // Any address in guard page
394    let stack_first = base + PAGE_SIZE; // First mapped byte of stack window
395    let stack_sample = stack_first + (PAGE_SIZE / 2); // Sample inside first page
396
397    let kman = get_kernel_vm_manager();
398    let guard_map = kman.search_memory_map(guard_sample);
399    let stack_map = kman.search_memory_map(stack_sample);
400
401    let guard_ok = guard_map.is_none();
402    let stack_ok = stack_map
403        .map(|m| m.vmarea.start <= stack_sample && stack_sample <= m.vmarea.end)
404        .unwrap_or(false);
405
406    if !(guard_ok && stack_ok) {
407        early_println!(
408            "[verify_kstack_guard] slot {} guard_ok={} stack_ok={} guard_map_start={:?}",
409            slot_idx,
410            guard_ok,
411            stack_ok,
412            guard_map.map(|m| m.vmarea.start)
413        );
414    }
415    guard_ok && stack_ok
416}
417
418pub fn setup_user_stack(task: &Task) -> (usize, usize) {
419    /* User stack page */
420    let num_of_stack_page = 256; // 1MB user stack (4KB pages)
421    let stack_base = USER_STACK_END - num_of_stack_page * PAGE_SIZE;
422    task.allocate_stack_pages(stack_base, num_of_stack_page)
423        .map_err(|e| panic!("Failed to allocate user stack pages: {}", e))
424        .unwrap();
425    /* Guard page */
426    task.allocate_guard_pages(stack_base - PAGE_SIZE, 1)
427        .map_err(|e| panic!("Failed to allocate guard page: {}", e))
428        .unwrap();
429
430    (stack_base, USER_STACK_END)
431}
432
433static TRAMPOLINE_TRAP_VECTOR: Once<usize> = Once::new();
434static TRAMPOLINE_ARCH: Mutex<[Option<usize>; MAX_NUM_CPUS]> = Mutex::new([None; MAX_NUM_CPUS]);
435
/// Record the trampoline trap vector address.
///
/// Backed by `Once`: only the first call stores a value; later calls are
/// silently ignored, so the vector cannot be changed once set.
pub fn set_trampoline_trap_vector(trap_vector: usize) {
    TRAMPOLINE_TRAP_VECTOR.call_once(|| trap_vector);
}
439
/// Return the trampoline trap vector address.
///
/// # Panics
/// Panics if `set_trampoline_trap_vector` has not been called yet.
pub fn get_trampoline_trap_vector() -> usize {
    *TRAMPOLINE_TRAP_VECTOR
        .get()
        .expect("Trampoline is not initialized")
}
445
/// Record the per-CPU trampoline value (arch-specific, stored opaquely as
/// `usize`) for `cpu_id`.
///
/// # Panics
/// Panics via index-out-of-bounds if `cpu_id >= MAX_NUM_CPUS`.
pub fn set_trampoline_arch(cpu_id: usize, arch: usize) {
    let mut trampolines = TRAMPOLINE_ARCH.lock();
    trampolines[cpu_id] = Some(arch);
}
450
/// Return the per-CPU trampoline value previously stored with
/// `set_trampoline_arch`.
///
/// # Panics
/// Panics if no value was set for `cpu_id`, or via index-out-of-bounds if
/// `cpu_id >= MAX_NUM_CPUS`.
pub fn get_trampoline_arch(cpu_id: usize) -> usize {
    let trampolines = TRAMPOLINE_ARCH.lock();
    trampolines[cpu_id].expect("Trampoline is not initialized")
}
455
/// Switch the current CPU onto the shared kernel address space.
///
/// The trap vector is pointed at the kernel trap handler (physical address)
/// before the page-table switch — NOTE(review): ordering appears deliberate
/// so traps around the switch use the kernel vector; confirm against the
/// arch trap-entry code.
///
/// # Panics
/// Panics if the kernel root page table has not been set up yet.
pub fn switch_to_kernel_vm() {
    let manager = get_kernel_vm_manager();
    let root_page_table = manager
        .get_root_page_table()
        .expect("Root page table is not set");
    set_trapvector(get_kernel_trapvector_paddr());
    root_page_table.switch(manager.get_asid());
}
464
/// Switch the current CPU onto the address space of its scheduler-current
/// task, installing the trampoline trap vector first.
///
/// # Panics
/// Panics if the scheduler has no current task for this CPU, if the task's
/// root page table is unset, or if the trampoline vector was never set.
pub fn switch_to_user_vm(cpu: &mut Arch) {
    let cpu_id = cpu.get_cpuid();
    let task = get_scheduler()
        .get_current_task(cpu_id)
        .expect("No current task found");
    let manager = &task.vm_manager;
    let root_page_table = manager
        .get_root_page_table()
        .expect("Root page table is not set");
    set_trapvector(get_trampoline_trap_vector());
    root_page_table.switch(manager.get_asid());
}