// kernel/vm/manager.rs
1//! Virtual Memory Manager module.
2//!
3//! This module provides the core functionality for managing virtual memory in the kernel.
4//! It handles address space management, memory mappings, and page table operations.
5//!
6//! # Key Components
7//!
8//! - `VirtualMemoryManager`: Main structure for managing virtual memory mappings and address spaces
9//! - Memory maps: Track mappings between virtual and physical memory areas
10//! - ASID (Address Space ID): Identifies different address spaces
11//!
12//! # Functionality
13//!
14//! The manager enables:
15//! - Creating and tracking virtual to physical memory mappings
16//! - Managing different address spaces via ASIDs
17//! - Searching for memory mappings by virtual address
18//! - Accessing the root page table for the current address space
19//!
20//! # Examples
21//!
//! ```rust,ignore
23//! let mut manager = VirtualMemoryManager::new();
24//! manager.set_asid(42);
25//!
26//! // Add a memory mapping
//! let vm_area = MemoryArea { start: 0x0, end: 0xfff };
//! let pm_area = MemoryArea { start: 0x80000000, end: 0x80000fff };
29//! let map = VirtualMemoryMap { vmarea: vm_area, pmarea: pm_area };
30//! manager.add_memory_map(map);
31//!
32//! // Search for a memory mapping
33//! if let Some(found_map) = manager.search_memory_map(0x500) {
34//!     // Found the mapping
35//! }
//! ```
38extern crate alloc;
39use alloc::collections::btree_map::Values;
40use alloc::{collections::BTreeMap, sync::Arc, vec::Vec};
41use spin::RwLock;
42
43use crate::object::capability::memory_mapping::AccessOp;
44use crate::{
45    arch::vm::{free_virtual_address_space, get_root_pagetable, is_asid_used, mmu::PageTable},
46    environment::PAGE_SIZE,
47};
48
49use super::vmem::{MemoryArea, VirtualMemoryMap};
50
/// Handle to an address space's virtual memory state.
///
/// Cloning this struct clones the inner `Arc`, so all clones share the same
/// `InnerVmm` and observe each other's updates; synchronization is provided
/// by the interior `RwLock`.
#[derive(Debug, Clone)]
pub struct VirtualMemoryManager {
    inner: Arc<RwLock<InnerVmm>>, // shared, internally synchronized
}
55
/// Interior state of `VirtualMemoryManager`, guarded by its `RwLock`.
#[derive(Debug, Clone)]
struct InnerVmm {
    /// Memory maps keyed by `vmarea.start` (see `add_memory_map`).
    memmap: BTreeMap<usize, VirtualMemoryMap>,
    /// Current address space ID; `set_asid` treats 0 as "unassigned".
    asid: u16,
    /// Base address used by `find_unmapped_area` when searching free space.
    mmap_base: usize,
    /// Page tables registered via `add_page_table`. NOTE(review): they are
    /// only ever pushed here in this file — presumably held to keep the
    /// tables alive for the address space's lifetime; confirm elsewhere.
    page_tables: Vec<Arc<PageTable>>,
    /// Single-entry search cache: `(vmarea.start, vmarea.end, memmap key)`
    /// of the most recently found mapping (see `search_memory_map`).
    last_search_cache: Option<(usize, usize, usize)>,
}
64
65impl VirtualMemoryManager {
66    /// Creates a new virtual memory manager.
67    ///
68    /// # Returns
69    /// A new virtual memory manager with default values.
70    pub fn new() -> Self {
71        let inner = InnerVmm {
72            memmap: BTreeMap::new(),
73            asid: 0,
74            mmap_base: 0x40000000, // 1 GB base address for mmap (Default)
75            page_tables: Vec::new(),
76            last_search_cache: None,
77        };
78        VirtualMemoryManager {
79            inner: Arc::new(RwLock::new(inner)),
80        }
81    }
82
83    /// Sets the ASID (Address Space ID) for the virtual memory manager.
84    ///
85    /// # Arguments
86    /// * `asid` - The ASID to set
87    pub fn set_asid(&self, asid: u16) {
88        let mut g = self.inner.write();
89        if g.asid == asid {
90            return;
91        }
92        if g.asid != 0 && is_asid_used(g.asid) {
93            free_virtual_address_space(g.asid);
94        }
95        g.asid = asid;
96    }
97
98    /// Returns the ASID (Address Space ID) for the virtual memory manager.
99    ///
100    /// # Returns
101    /// The ASID for the virtual memory manager.
102    pub fn get_asid(&self) -> u16 {
103        self.inner.read().asid
104    }
105
106    /// Returns a mutable iterator over all memory maps.
107    ///
108    /// # Returns
109    /// A mutable iterator over references to all memory maps.
110    // Mutable iterator is removed in favor of snapshot-based API.
111
112    /// Returns the number of memory maps.
113    ///
114    /// # Returns
115    /// The number of memory maps.
116    pub fn memmap_len(&self) -> usize {
117        self.inner.read().memmap.len()
118    }
119
120    /// Returns true if there are no memory maps.
121    ///
122    /// # Returns
123    /// True if there are no memory maps.
124    pub fn memmap_is_empty(&self) -> bool {
125        self.inner.read().memmap.is_empty()
126    }
127
128    /// Execute a read-only operation while holding a read lock on memmaps.
129    /// This avoids cloning and provides high-performance access.
130    pub fn with_memmaps<R>(&self, f: impl FnOnce(&BTreeMap<usize, VirtualMemoryMap>) -> R) -> R {
131        let g = self.inner.read();
132        f(&g.memmap)
133    }
134
135    /// Execute a mutable operation while holding a write lock on memmaps.
136    /// Prefer using provided APIs; expose for advanced use-cases.
137    pub fn with_memmaps_mut<R>(
138        &self,
139        f: impl FnOnce(&mut BTreeMap<usize, VirtualMemoryMap>) -> R,
140    ) -> R {
141        let mut g = self.inner.write();
142        f(&mut g.memmap)
143    }
144
145    /// Execute a read-only iteration over memory maps while holding the lock.
146    /// This returns an iterator of `&VirtualMemoryMap` valid only inside the closure.
147    pub fn memmaps_iter_with<R, F>(&self, f: F) -> R
148    where
149        F: for<'a> FnOnce(Values<'a, usize, VirtualMemoryMap>) -> R,
150    {
151        let g = self.inner.read();
152        let iter = g.memmap.values();
153        f(iter)
154    }
155
156    /// Gets a memory map by its start address.
157    ///
158    /// # Arguments
159    /// * `start_addr` - The start address of the memory map
160    ///
161    /// # Returns
162    /// The memory map with the given start address, if it exists.
163    pub fn get_memory_map_by_addr(&self, start_addr: usize) -> Option<VirtualMemoryMap> {
164        self.inner.read().memmap.get(&start_addr).cloned()
165    }
166
167    /// Gets a mutable memory map by its start address.
168    ///
169    /// # Arguments
170    /// * `start_addr` - The start address of the memory map
171    ///
172    /// # Returns
173    /// The mutable memory map with the given start address, if it exists.
174    // Removed: use snapshot + fixed update methods instead
175
176    /// Adds a memory map to the virtual memory manager with overlap checking.
177    ///
178    /// This method performs overlap detection before adding the mapping.
179    /// Use this for:
180    /// - User-initiated memory allocation (mmap, malloc, etc.)
181    /// - Dynamic memory allocation where overlap is possible
182    /// - Any case where memory range conflicts are uncertain
183    ///
184    /// This method uses efficient overlap detection with ordered data structures.
185    ///
186    /// # Arguments
187    /// * `map` - The memory map to add
188    ///
189    /// # Returns
190    /// A result indicating success or failure.
191    ///
192    pub fn add_memory_map(&self, map: VirtualMemoryMap) -> Result<(), &'static str> {
193        // Check if the address and size is aligned
194        if map.vmarea.start % PAGE_SIZE != 0
195            || map.pmarea.start % PAGE_SIZE != 0
196            || map.vmarea.size() % PAGE_SIZE != 0
197            || map.pmarea.size() % PAGE_SIZE != 0
198        {
199            return Err("Address or size is not aligned to PAGE_SIZE");
200        }
201
202        let mut g = self.inner.write();
203        // 1. prev adjacency check
204        if let Some((_, prev_map)) = g.memmap.range(..map.vmarea.start).next_back() {
205            if prev_map.vmarea.end > map.vmarea.start {
206                return Err("Memory mapping overlaps with a preceding map");
207            }
208        }
209        // 2. next adjacency check
210        if let Some((_, next_map)) = g.memmap.range(map.vmarea.start..).next() {
211            if next_map.vmarea.start < map.vmarea.end {
212                return Err("Memory mapping overlaps with a succeeding map");
213            }
214        }
215
216        g.last_search_cache = None;
217        g.memmap.insert(map.vmarea.start, map);
218        Ok(())
219    }
220
221    /// Removes the memory map containing the given virtual address.
222    ///
223    /// This method uses efficient search with caching to locate the target mapping.
224    ///
225    /// # Arguments
226    /// * `vaddr` - The virtual address contained in the memory map to remove
227    ///
228    /// # Returns
229    /// The removed memory map, if it exists.
230    pub fn remove_memory_map_by_addr(&self, vaddr: usize) -> Option<VirtualMemoryMap> {
231        let mut g = self.inner.write();
232        let start_addr = find_memory_map_key_with_cache_update(&mut *g, vaddr)?;
233        if let Some((_, _, cache_key)) = g.last_search_cache {
234            if cache_key == start_addr {
235                g.last_search_cache = None;
236            }
237        }
238        let removed_map = g.memmap.remove(&start_addr);
239        drop(g);
240        if let Some(m) = removed_map {
241            self.unmap_range_from_mmu(m.vmarea.start, m.vmarea.end);
242            Some(m)
243        } else {
244            None
245        }
246    }
247
248    /// Removes all memory maps.
249    ///
250    /// # Returns
251    /// The removed memory maps.
252    ///
253    /// # Note
254    /// This method returns an iterator instead of a cloned Vec for efficiency.
255    pub fn remove_all_memory_maps(&self) -> impl Iterator<Item = VirtualMemoryMap> {
256        let mut g = self.inner.write();
257        g.last_search_cache = None;
258        let memmap = core::mem::take(&mut g.memmap);
259        memmap.into_values()
260    }
261
262    /// Restores the memory maps from a given iterator.
263    ///
264    /// # Arguments
265    /// * `maps` - The iterator of memory maps to restore
266    ///
267    /// # Returns
268    /// A result indicating success or failure.
269    ///
270    pub fn restore_memory_maps<I>(&self, maps: I) -> Result<(), &'static str>
271    where
272        I: IntoIterator<Item = VirtualMemoryMap>,
273    {
274        for map in maps {
275            if let Err(e) = self.add_memory_map(map) {
276                return Err(e);
277            }
278        }
279        Ok(())
280    }
281
282    /// Searches for a memory map containing the given virtual address.
283    /// Implements caching for efficient range search in memory mappings.
284    ///
285    /// # Arguments
286    /// * `vaddr` - The virtual address to search for
287    ///
288    /// # Returns
289    /// The memory map containing the given virtual address, if it exists.
290    pub fn search_memory_map(&self, vaddr: usize) -> Option<VirtualMemoryMap> {
291        let mut g = self.inner.write();
292        if let Some((cache_start, cache_end, cache_key)) = g.last_search_cache {
293            if cache_start <= vaddr && vaddr <= cache_end {
294                return g.memmap.get(&cache_key).cloned();
295            }
296        }
297        if let Some((_k, map)) = g.memmap.range(..=vaddr).next_back() {
298            if vaddr <= map.vmarea.end {
299                let start = map.vmarea.start;
300                let end = map.vmarea.end;
301                let out = map.clone();
302                g.last_search_cache = Some((start, end, start));
303                return Some(out);
304            }
305        }
306        None
307    }
308
309    /// Efficient memory map search using BTreeMap's ordered nature
310    ///
311    /// This method uses the ordered property of BTreeMap to efficiently find
312    /// the memory mapping containing the given address.
313    ///
314    /// # Arguments
315    /// * `vaddr` - Virtual address to search for
316    ///
317    /// # Returns
318    /// The memory map containing the address, if found
319    // Removed: replaced by search_memory_map with caching in write lock
320
321    /// Searches for a memory map containing the given virtual address (mutable version).
322    ///
323    /// This version allows mutable access and updates the search cache.
324    ///
325    /// # Arguments
326    /// * `vaddr` - The virtual address to search for
327    ///
328    /// # Returns
329    /// Mutable reference to the memory map containing the given virtual address, if it exists.
330    // Removed mutable accessor; use fixed operations instead
331
332    /// Helper method that finds memory map and updates cache
333    ///
334    /// # Arguments
335    /// * `vaddr` - Virtual address to search for
336    ///
337    /// # Returns
338    /// The start address key of the found memory map, if any
339    // Helper moved out to work with inner lock directly
340
341    /// Adds a page table to the virtual memory manager.
342    pub fn add_page_table(&self, page_table: Arc<PageTable>) {
343        self.inner.write().page_tables.push(page_table);
344    }
345
    /// Returns the root page table for the current address space.
    ///
    /// Delegates to the architecture layer using this manager's current ASID.
    ///
    /// # Returns
    /// The root page table for the current address space, if it exists.
    ///
    /// NOTE(review): this hands out `&mut PageTable` from `&self`, so the
    /// aliasing/synchronization guarantees rest entirely on the arch-level
    /// `get_root_pagetable` — confirm its contract before relying on this.
    pub fn get_root_page_table(&self) -> Option<&mut PageTable> {
        get_root_pagetable(self.get_asid())
    }
353
354    /// Lazy map a virtual address to MMU on demand (called from page fault handler)
355    ///
356    /// This method finds the memory mapping for the given virtual address and
357    /// maps only the specific page to the MMU on demand.
358    ///
359    /// # Arguments
360    /// * `vaddr` - The virtual address that caused the page fault
361    ///
362    /// # Returns
363    /// * `Ok(())` - Successfully mapped the page
364    /// * `Err(&'static str)` - Failed to map (no mapping found or MMU error)
365    pub fn lazy_map_page(&self, vaddr: usize) -> Result<(), &'static str> {
366        // Backward-compat shim: default to Load with unknown size
367        let access = crate::object::capability::memory_mapping::AccessKind {
368            op: crate::object::capability::memory_mapping::AccessOp::Load,
369            vaddr,
370            size: None,
371        };
372        self.lazy_map_page_with(access)
373    }
374
375    /// Lazy map with access context (instruction/load/store and optional size)
376    pub fn lazy_map_page_with(
377        &self,
378        access: crate::object::capability::memory_mapping::AccessKind,
379    ) -> Result<(), &'static str> {
380        let vaddr = access.vaddr;
381        // Find the memory mapping for this virtual address
382        let memory_map = match self.search_memory_map(vaddr) {
383            Some(map) => map,
384            None => {
385                // Try to find a mapping that contains an address just before this one
386                // which might have an owner that supports dynamic extension
387                return self.try_extend_mapping_for_access(&access);
388            }
389        };
390
391        // Calculate the page-aligned virtual and physical addresses
392        let page_vaddr = vaddr & !(PAGE_SIZE - 1);
393        let offset_in_mapping = page_vaddr - memory_map.vmarea.start;
394        let mut page_paddr = memory_map.pmarea.start + offset_in_mapping;
395        let mut perms = memory_map.permissions;
396
397        // If there is an owner, allow it to adjust mapping and tell if this is a tail page
398        if let Some(owner_weak) = &memory_map.owner {
399            if let Some(owner) = owner_weak.upgrade() {
400                let owner_name = owner.mmap_owner_name();
401                let _should_log = owner_name.contains("xkb");
402                match owner.resolve_fault(&access, &memory_map) {
403                    Ok(res) => {
404                        page_paddr = res.paddr_page_base;
405                        if res.is_tail {
406                            // Drop Read and Write for tail page
407                            perms &= !0x1; // Read
408                            perms &= !0x2; // Write
409                        }
410                    }
411                    Err(_e) => {
412                        return Err("Owner failed to resolve fault");
413                    }
414                }
415            }
416        }
417
418        // Map this single page to the MMU (not device memory)
419        if let Some(root_pagetable) = self.get_root_page_table() {
420            root_pagetable.map(
421                self.get_asid(),
422                page_vaddr,
423                page_paddr,
424                perms,
425                true,
426                access.op == AccessOp::Store,
427            );
428            Ok(())
429        } else {
430            Err("No root page table available")
431        }
432    }
433
    /// Try to extend a mapping for an access that falls outside the current vmarea
    ///
    /// This handles the case where a SharedMemory has been resized via ftruncate
    /// but the VirtualMemoryMap.vmarea.end hasn't been updated.
    ///
    /// On success the chosen mapping's `vmarea.end`/`pmarea.end` are grown in
    /// place under the write lock, and the faulting page is then mapped into
    /// the MMU after the lock is released.
    fn try_extend_mapping_for_access(
        &self,
        access: &crate::object::capability::memory_mapping::AccessKind,
    ) -> Result<(), &'static str> {
        let vaddr = access.vaddr;
        let page_vaddr = vaddr & !(PAGE_SIZE - 1);

        // Result of successful extend: (paddr_page_base, permissions)
        let extend_result: Option<(usize, usize)>;

        {
            // Lock scope
            let mut g = self.inner.write();

            // Find a mapping whose vmarea.end < vaddr but might have an owner that has grown
            // NOTE(review): this condition accepts *any* mapping ending below
            // vaddr, not only the nearest one; the first owner (in key order)
            // whose resolve_fault succeeds wins — confirm that is intended.
            let mut found = None;
            for (_, map) in g.memmap.iter_mut() {
                // Check if vaddr is just past this mapping's end
                if map.vmarea.end < vaddr {
                    // Check if there's an owner that might support extended access
                    if let Some(owner_weak) = &map.owner {
                        if let Some(owner) = owner_weak.upgrade() {
                            // Try resolve_fault to see if owner supports this offset
                            let test_access =
                                crate::object::capability::memory_mapping::AccessKind {
                                    vaddr: page_vaddr,
                                    op: access.op,
                                    size: access.size,
                                };

                            match owner.resolve_fault(&test_access, map) {
                                Ok(res) => {
                                    // Owner says this offset is valid - extend vmarea.end
                                    // (end is inclusive, hence the `- 1`).
                                    let new_end = page_vaddr + PAGE_SIZE - 1;
                                    crate::println!(
                                        "[VmManager] Extending mapping vmarea.end from {:#x} to {:#x} for owner={}",
                                        map.vmarea.end,
                                        new_end,
                                        owner.mmap_owner_name()
                                    );
                                    map.vmarea.end = new_end;

                                    // Also extend pmarea proportionally:
                                    // growth = new virtual span - old physical
                                    // span (both end-inclusive deltas).
                                    // NOTE(review): assumes vmarea and pmarea
                                    // were equally sized before the grow —
                                    // confirm against the mapping invariants.
                                    let pmarea_growth = new_end
                                        - map.vmarea.start
                                        - (map.pmarea.end - map.pmarea.start);
                                    map.pmarea.end += pmarea_growth;

                                    found = Some((res.paddr_page_base, map.permissions));
                                    break;
                                }
                                Err(_) => {
                                    // Owner doesn't support this offset, continue searching
                                }
                            }
                        }
                    }
                }
            }
            extend_result = found;
        } // Lock released here

        // Now map the page outside the lock
        if let Some((paddr_page_base, permissions)) = extend_result {
            if let Some(root_pagetable) = self.get_root_page_table() {
                root_pagetable.map(
                    self.get_asid(),
                    page_vaddr,
                    paddr_page_base,
                    permissions,
                    true,
                    access.op == AccessOp::Store,
                );
                return Ok(());
            } else {
                return Err("No root page table available");
            }
        }

        Err("No extendable memory mapping found for virtual address")
    }
519
520    /// Unmap a virtual address range from MMU
521    ///
522    /// This method unmaps the specified virtual address range from the MMU.
523    /// Used when memory mappings are removed.
524    ///
525    /// # Arguments
526    /// * `vaddr_start` - Start of virtual address range
527    /// * `vaddr_end` - End of virtual address range (inclusive)
528    pub fn unmap_range_from_mmu(&self, vaddr_start: usize, vaddr_end: usize) {
529        if let Some(root_pagetable) = self.get_root_page_table() {
530            let num_pages = (vaddr_end - vaddr_start + 1 + PAGE_SIZE - 1) / PAGE_SIZE;
531
532            for i in 0..num_pages {
533                let page_vaddr = (vaddr_start & !(PAGE_SIZE - 1)) + i * PAGE_SIZE;
534                if page_vaddr <= vaddr_end {
535                    root_pagetable.unmap(self.get_asid(), page_vaddr);
536                }
537            }
538        }
539    }
540
541    /// Translate a virtual address to physical address
542    ///
543    /// This method uses efficient search with caching for optimal performance.
544    ///
545    /// # Arguments
546    ///
547    /// * `vaddr` - The virtual address to translate
548    ///
549    /// # Returns
550    ///
551    /// The translated physical address. Returns None if no mapping exists for the address
552    pub fn translate_vaddr(&self, vaddr: usize) -> Option<usize> {
553        if let Some(map) = self.search_memory_map(vaddr) {
554            // Calculate offset within the memory area
555            let offset = vaddr - map.vmarea.start;
556            // Calculate and return physical address
557            Some(map.pmarea.start + offset)
558        } else {
559            None
560        }
561    }
562
563    /// Gets the mmap base address
564    ///
565    /// # Returns
566    /// The base address for mmap operations
567    pub fn get_mmap_base(&self) -> usize {
568        self.inner.read().mmap_base
569    }
570
571    /// Sets the mmap base address
572    /// This allows dynamic adjustment of the mmap region
573    ///
574    /// # Arguments
575    /// * `base` - New base address for mmap operations
576    pub fn set_mmap_base(&self, base: usize) {
577        self.inner.write().mmap_base = base;
578    }
579
580    /// Find a suitable address for new memory mapping
581    ///
582    /// # Arguments
583    /// * `size` - Size of the mapping needed
584    /// * `alignment` - Required alignment (typically PAGE_SIZE)
585    ///
586    /// # Returns
587    /// A suitable virtual address for the new mapping, or None if no space available
588    pub fn find_unmapped_area(&self, size: usize, alignment: usize) -> Option<usize> {
589        let aligned_size = (size + alignment - 1) & !(alignment - 1);
590        let g = self.inner.read();
591        let mut search_addr = (g.mmap_base + alignment - 1) & !(alignment - 1);
592
593        // If there is a mapping that starts before (or at) search_addr but still covers it,
594        // we must skip past it. This prevents returning an address inside an existing map.
595        if let Some((_, prev_map)) = g.memmap.range(..=search_addr).next_back() {
596            if prev_map.vmarea.end >= search_addr {
597                search_addr = prev_map.vmarea.end + 1;
598                search_addr = (search_addr + alignment - 1) & !(alignment - 1);
599            }
600        }
601
602        // Simple first-fit algorithm from the adjusted search address
603        for (_start, memory_map) in g.memmap.range(search_addr..) {
604            // Check if there's enough space before this memory map
605            if search_addr + aligned_size <= memory_map.vmarea.start {
606                return Some(search_addr);
607            }
608
609            // Move search point past this memory map
610            if memory_map.vmarea.end >= search_addr {
611                search_addr = memory_map.vmarea.end + 1;
612                search_addr = (search_addr + alignment - 1) & !(alignment - 1);
613            }
614        }
615        drop(g);
616        // Check if there's space after the last memory map
617        // For simplicity, we assume a reasonable upper limit for the address space
618        const MAX_USER_ADDR: usize = 0x80000000; // 2GB limit for user space
619        if search_addr + aligned_size <= MAX_USER_ADDR {
620            Some(search_addr)
621        } else {
622            None
623        }
624    }
625
626    /// Add a memory map at a fixed address, handling overlapping mappings by splitting them
627    ///
628    /// This method is designed for FIXED memory mappings where the caller wants to map
629    /// at a specific virtual address, potentially overwriting existing mappings.
630    /// Any existing mappings that overlap with the new mapping will be properly split
631    /// or removed to make room for the new mapping.
632    ///
633    /// # Arguments
634    /// * `map` - The memory map to add at a fixed location
635    ///
636    /// # Returns
637    /// * `Ok(Vec<VirtualMemoryMap>)` - Returns a vector of overwritten (intersected) memory regions that were replaced by the new mapping.
638    /// * `Err(&'static str)` - Error message if the operation failed
639    ///
640    /// # Design
641    /// For each existing mapping that overlaps with the new mapping:
642    /// - The function calculates the intersection (overwritten region) between the new mapping and each overlapping existing mapping.
643    /// - Only the intersection (overwritten part) is returned for each overlap.
644    /// - If the new mapping completely contains the existing mapping, the entire existing mapping is returned as the intersection.
645    /// - If the new mapping partially overlaps, only the overlapped region is returned.
646    /// - Non-overlapping parts of existing mappings are preserved (split and kept).
647    ///
648    /// The caller is responsible for handling any managed pages associated with the overwritten mappings.
649    pub fn add_memory_map_fixed(
650        &self,
651        map: VirtualMemoryMap,
652    ) -> Result<Vec<VirtualMemoryMap>, &'static str> {
653        // Validate alignment like the regular add_memory_map
654        if map.vmarea.start % PAGE_SIZE != 0
655            || map.pmarea.start % PAGE_SIZE != 0
656            || map.vmarea.size() % PAGE_SIZE != 0
657            || map.pmarea.size() % PAGE_SIZE != 0
658        {
659            return Err("Address or size is not aligned to PAGE_SIZE");
660        }
661
662        let new_start = map.vmarea.start;
663        let new_end = map.vmarea.end;
664        let mut overwritten_mappings = Vec::new();
665        let mut mappings_to_add = Vec::new();
666
667        let mut g = self.inner.write();
668        let overlapping_keys: alloc::vec::Vec<usize> = g
669            .memmap
670            .range(..)
671            .filter_map(|(start_addr, existing_map)| {
672                let existing_start = existing_map.vmarea.start;
673                let existing_end = existing_map.vmarea.end;
674                if new_start <= existing_end && new_end >= existing_start {
675                    Some(*start_addr)
676                } else {
677                    None
678                }
679            })
680            .collect();
681
682        for key in overlapping_keys {
683            if let Some(existing_map) = g.memmap.remove(&key) {
684                let existing_start = existing_map.vmarea.start;
685                let existing_end = existing_map.vmarea.end;
686
687                // Calculate the overwritten (intersection) part
688                let overlap_start = core::cmp::max(new_start, existing_start);
689                let overlap_end = core::cmp::min(new_end, existing_end);
690                if overlap_start <= overlap_end {
691                    // Cut out the pmarea at the same offset as the intersection
692                    let pm_offset = overlap_start - existing_start;
693                    let overwritten_map = VirtualMemoryMap {
694                        vmarea: MemoryArea {
695                            start: overlap_start,
696                            end: overlap_end,
697                        },
698                        pmarea: MemoryArea {
699                            start: existing_map.pmarea.start + pm_offset,
700                            end: existing_map.pmarea.start
701                                + pm_offset
702                                + (overlap_end - overlap_start),
703                        },
704                        permissions: existing_map.permissions,
705                        is_shared: existing_map.is_shared,
706                        owner: existing_map.owner.clone(),
707                    };
708                    overwritten_mappings.push(overwritten_map);
709                }
710
711                // Case 1: New mapping completely contains the existing mapping
712                if new_start <= existing_start && new_end >= existing_end {
713                    // Remove entire existing mapping
714                    continue;
715                }
716
717                // Case 2: Partial overlap - need to split
718                // Keep the part before the new mapping (if any)
719                if existing_start < new_start {
720                    let before_map = VirtualMemoryMap {
721                        vmarea: MemoryArea {
722                            start: existing_start,
723                            end: new_start - 1,
724                        },
725                        pmarea: MemoryArea {
726                            start: existing_map.pmarea.start,
727                            end: existing_map.pmarea.start + (new_start - existing_start) - 1,
728                        },
729                        permissions: existing_map.permissions,
730                        is_shared: existing_map.is_shared,
731                        owner: existing_map.owner.clone(),
732                    };
733                    mappings_to_add.push(before_map);
734                }
735
736                // Keep the part after the new mapping (if any)
737                if existing_end > new_end {
738                    let after_offset = (new_end + 1) - existing_start;
739                    let after_map = VirtualMemoryMap {
740                        vmarea: MemoryArea {
741                            start: new_end + 1,
742                            end: existing_end,
743                        },
744                        pmarea: MemoryArea {
745                            start: existing_map.pmarea.start + after_offset,
746                            end: existing_map.pmarea.end,
747                        },
748                        permissions: existing_map.permissions,
749                        is_shared: existing_map.is_shared,
750                        owner: existing_map.owner.clone(),
751                    };
752                    mappings_to_add.push(after_map);
753                }
754            }
755        }
756
757        // Clear cache since we've modified the memory layout
758        g.last_search_cache = None;
759
760        // Remove overlapping mappings from MMU (page table) after releasing lock
761        let split_vec = mappings_to_add.clone();
762        for split_map in split_vec {
763            g.memmap.insert(split_map.vmarea.start, split_map);
764        }
765        g.memmap.insert(map.vmarea.start, map);
766        drop(g);
767        for overwritten_map in &overwritten_mappings {
768            self.unmap_range_from_mmu(overwritten_map.vmarea.start, overwritten_map.vmarea.end);
769        }
770
771        Ok(overwritten_mappings)
772    }
773
774    /// Get memory statistics and usage information
775    /// This provides detailed information about memory usage patterns
776    ///
777    /// # Returns
778    /// A tuple containing (total_maps, total_virtual_size, fragmentation_info)
779    pub fn get_memory_stats(&self) -> (usize, usize, usize) {
780        let g = self.inner.read();
781        let total_maps = g.memmap.len();
782        let total_virtual_size: usize = g
783            .memmap
784            .values()
785            .map(|memory_map| memory_map.vmarea.end - memory_map.vmarea.start + 1)
786            .sum();
787
788        // Calculate fragmentation by finding gaps between memory maps
789        let mut gaps = 0;
790        let mut prev_end = None;
791
792        for memory_map in g.memmap.values() {
793            if let Some(prev) = prev_end {
794                if memory_map.vmarea.start > prev + 1 {
795                    gaps += 1;
796                }
797            }
798            prev_end = Some(memory_map.vmarea.end);
799        }
800
801        (total_maps, total_virtual_size, gaps)
802    }
803
804    /// Perform memory map coalescing optimization
805    /// This attempts to merge adjacent memory maps with compatible properties
806    ///
807    /// # Returns
808    /// Number of memory maps that were successfully coalesced
809    pub fn coalesce_memory_maps(&self) -> usize {
810        let mut coalesced_count = 0;
811        let mut to_remove = Vec::new();
812        let mut to_add = Vec::new();
813        let mut prev_start: Option<usize> = None;
814        let mut prev_map: Option<VirtualMemoryMap> = None;
815        let mut g = self.inner.write();
816        for (&start, memory_map) in &g.memmap {
817            if let (Some(prev_s), Some(prev_memory_map)) = (prev_start, &prev_map) {
818                // Check if memory maps are adjacent and can be merged
819                if prev_memory_map.vmarea.end + 1 == memory_map.vmarea.start
820                    && Self::can_merge_memory_maps(prev_memory_map, memory_map)
821                {
822                    // Create merged memory map
823                    let merged_map = VirtualMemoryMap {
824                        vmarea: super::vmem::MemoryArea {
825                            start: prev_memory_map.vmarea.start,
826                            end: memory_map.vmarea.end,
827                        },
828                        pmarea: super::vmem::MemoryArea {
829                            start: prev_memory_map.pmarea.start,
830                            end: memory_map.pmarea.end,
831                        },
832                        permissions: prev_memory_map.permissions, // Use permissions from first map
833                        is_shared: prev_memory_map.is_shared,
834                        owner: prev_memory_map.owner.clone(),
835                    };
836
837                    // Mark old memory maps for removal and add merged map
838                    to_remove.push(prev_s);
839                    to_remove.push(start);
840                    to_add.push(merged_map);
841                    coalesced_count += 1;
842
843                    // Skip setting prev for next iteration since we merged
844                    prev_start = None;
845                    prev_map = None;
846                    continue;
847                }
848            }
849
850            prev_start = Some(start);
851            prev_map = Some(memory_map.clone());
852        }
853
854        // Apply changes
855        for start in to_remove {
856            g.memmap.remove(&start);
857        }
858        for memory_map in to_add {
859            g.memmap.insert(memory_map.vmarea.start, memory_map);
860        }
861
862        // Clear cache after coalescing
863        if coalesced_count > 0 {
864            g.last_search_cache = None;
865        }
866
867        coalesced_count
868    }
869
870    /// Check if two memory maps can be merged
871    ///
872    /// # Arguments
873    /// * `map1` - First memory map
874    /// * `map2` - Second memory map
875    ///
876    /// # Returns
877    /// true if memory maps can be safely merged
878    fn can_merge_memory_maps(map1: &VirtualMemoryMap, map2: &VirtualMemoryMap) -> bool {
879        // Memory maps can be merged if:
880        // 1. They have the same permissions
881        // 2. They have the same sharing status
882        // 3. Physical addresses are also contiguous
883        map1.permissions == map2.permissions
884            && map1.is_shared == map2.is_shared
885            && map1.pmarea.end + 1 == map2.pmarea.start
886    }
887}
888
889impl Drop for VirtualMemoryManager {
890    /// Drops the virtual memory manager, freeing the address space if it is still in use.
891    fn drop(&mut self) {
892        let asid = self.get_asid();
893        if asid != 0 && is_asid_used(asid) {
894            free_virtual_address_space(asid);
895        }
896    }
897}
898
899fn find_memory_map_key_with_cache_update(inner: &mut InnerVmm, vaddr: usize) -> Option<usize> {
900    if let Some((cache_start, cache_end, cache_key)) = inner.last_search_cache {
901        if cache_start <= vaddr && vaddr <= cache_end {
902            return Some(cache_key);
903        }
904    }
905    if let Some((start_addr, map)) = inner.memmap.range(..=vaddr).next_back() {
906        if map.vmarea.start <= vaddr && vaddr <= map.vmarea.end {
907            inner.last_search_cache = Some((map.vmarea.start, map.vmarea.end, *start_addr));
908            return Some(*start_addr);
909        }
910    }
911    None
912}
913
914#[cfg(test)]
915mod tests {
916    use crate::arch::vm::alloc_virtual_address_space;
917    use crate::environment::PAGE_SIZE;
918    use crate::vm::VirtualMemoryMap;
919    use crate::vm::{manager::VirtualMemoryManager, vmem::MemoryArea};
920
921    #[test_case]
922    fn test_new_virtual_memory_manager() {
923        let vmm = VirtualMemoryManager::new();
924        assert_eq!(vmm.get_asid(), 0);
925    }
926
927    #[test_case]
928    fn test_set_and_get_asid() {
929        let vmm = VirtualMemoryManager::new();
930        vmm.set_asid(42);
931        assert_eq!(vmm.get_asid(), 42);
932    }
933
934    #[test_case]
935    fn test_add_and_get_memory_map() {
936        let vmm = VirtualMemoryManager::new();
937        let vma = MemoryArea {
938            start: 0x1000,
939            end: 0x1fff,
940        };
941        let map = VirtualMemoryMap {
942            vmarea: vma,
943            pmarea: vma,
944            permissions: 0,
945            is_shared: false,
946            owner: None,
947        };
948        vmm.add_memory_map(map).unwrap();
949
950        // Use non-cloning with_memmaps API for performance
951        assert_eq!(vmm.memmap_len(), 1);
952        let first_map_start = vmm.with_memmaps(|m| m.values().next().unwrap().vmarea.start);
953        assert_eq!(first_map_start, 0x1000);
954
955        // Test direct address-based access
956        assert!(vmm.get_memory_map_by_addr(0x1000).is_some());
957        assert_eq!(
958            vmm.get_memory_map_by_addr(0x1000).unwrap().vmarea.start,
959            0x1000
960        );
961    }
962
963    #[test_case]
964    fn test_remove_memory_map() {
965        let vmm = VirtualMemoryManager::new();
966        let vma = MemoryArea {
967            start: 0x1000,
968            end: 0x1fff,
969        };
970        let map = VirtualMemoryMap {
971            vmarea: vma,
972            pmarea: vma,
973            permissions: 0,
974            is_shared: false,
975            owner: None,
976        };
977        vmm.add_memory_map(map).unwrap();
978
979        // Use address-based removal instead of index-based
980        let removed_map = vmm.remove_memory_map_by_addr(0x1000).unwrap();
981        assert_eq!(removed_map.vmarea.start, 0x1000);
982
983        // Verify removal using efficient API
984        assert!(vmm.memmap_is_empty());
985        assert_eq!(vmm.memmap_len(), 0);
986        assert!(vmm.get_memory_map_by_addr(0x1000).is_none());
987    }
988
989    #[test_case]
990    fn test_search_memory_map() {
991        let vmm = VirtualMemoryManager::new();
992        let vma1 = MemoryArea {
993            start: 0x1000,
994            end: 0x1fff,
995        };
996        let map1 = VirtualMemoryMap {
997            vmarea: vma1,
998            pmarea: vma1,
999            permissions: 0,
1000            is_shared: false,
1001            owner: None,
1002        };
1003        let vma2 = MemoryArea {
1004            start: 0x3000,
1005            end: 0x3fff,
1006        };
1007        let map2 = VirtualMemoryMap {
1008            vmarea: vma2,
1009            pmarea: vma2,
1010            permissions: 0,
1011            is_shared: false,
1012            owner: None,
1013        };
1014        vmm.add_memory_map(map1).unwrap();
1015        vmm.add_memory_map(map2).unwrap();
1016        let found_map = vmm.search_memory_map(0x3500).unwrap();
1017        assert_eq!(found_map.vmarea.start, 0x3000);
1018    }
1019
1020    #[test_case]
1021    fn test_get_root_page_table() {
1022        let vmm = VirtualMemoryManager::new();
1023        let asid = alloc_virtual_address_space();
1024        vmm.set_asid(asid);
1025        let page_table = vmm.get_root_page_table();
1026        assert!(page_table.is_some());
1027    }
1028
1029    #[test_case]
1030    fn test_memory_optimization_features() {
1031        use crate::environment::PAGE_SIZE;
1032
1033        // Test memory optimization features
1034        let manager = VirtualMemoryManager::new();
1035
1036        // Test mmap_base functionality
1037        assert_eq!(manager.get_mmap_base(), 0x40000000);
1038        manager.set_mmap_base(0x50000000);
1039        assert_eq!(manager.get_mmap_base(), 0x50000000);
1040
1041        // Test find_unmapped_area
1042        let alignment = PAGE_SIZE;
1043        let size = PAGE_SIZE;
1044
1045        // Should find space at mmap_base when empty
1046        let addr = manager.find_unmapped_area(size, alignment);
1047        assert!(addr.is_some());
1048        assert_eq!(addr.unwrap(), 0x50000000);
1049
1050        // Add some memory maps to test collision avoidance
1051        let map1 = VirtualMemoryMap::new(
1052            crate::vm::vmem::MemoryArea {
1053                start: 0x80000000,
1054                end: 0x80000fff,
1055            }, // pmarea
1056            crate::vm::vmem::MemoryArea {
1057                start: 0x50000000,
1058                end: 0x50000fff,
1059            }, // vmarea
1060            0o644,
1061            false,
1062            None,
1063        );
1064        manager.add_memory_map(map1).unwrap();
1065
1066        // Should find space after the first mapping
1067        let addr2 = manager.find_unmapped_area(size, alignment);
1068        assert!(addr2.is_some());
1069        assert!(addr2.unwrap() > 0x50000fff);
1070
1071        // Regression: if a mapping starts before mmap_base but overlaps it,
1072        // find_unmapped_area must not return an address inside that mapping.
1073        manager.set_mmap_base(0x60000000);
1074        let overlapping_base_map = VirtualMemoryMap::new(
1075            crate::vm::vmem::MemoryArea {
1076                start: 0x90000000,
1077                end: 0x9001ffff,
1078            },
1079            crate::vm::vmem::MemoryArea {
1080                start: 0x5fff0000,
1081                end: 0x6000ffff,
1082            },
1083            0o644,
1084            false,
1085            None,
1086        );
1087        manager.add_memory_map(overlapping_base_map).unwrap();
1088        let addr3 = manager.find_unmapped_area(size, alignment).unwrap();
1089        assert!(addr3 >= 0x60010000);
1090
1091        // Test memory statistics
1092        let (total_maps, total_size, gaps) = manager.get_memory_stats();
1093        assert_eq!(total_maps, 2);
1094        // map1: 1 page (0x50000000-0x50000fff)
1095        // overlapping_base_map: 32 pages (0x5fff0000-0x6000ffff)
1096        // Total: 33 pages
1097        assert_eq!(total_size, PAGE_SIZE * 33);
1098        // These two maps are not adjacent, so there is 1 gap between them
1099        assert_eq!(gaps, 1);
1100
1101        // Add another non-adjacent map to create another gap
1102        let map2 = VirtualMemoryMap::new(
1103            crate::vm::vmem::MemoryArea {
1104                start: 0x80002000,
1105                end: 0x80002fff,
1106            }, // pmarea
1107            crate::vm::vmem::MemoryArea {
1108                start: 0x50002000,
1109                end: 0x50002fff,
1110            }, // vmarea
1111            0o644,
1112            false,
1113            None,
1114        );
1115        manager.add_memory_map(map2).unwrap();
1116
1117        let (total_maps, total_size, gaps) = manager.get_memory_stats();
1118        assert_eq!(total_maps, 3);
1119        // map1: 1 page, map2: 1 page, overlapping_base_map: 32 pages = 34 pages total
1120        assert_eq!(total_size, PAGE_SIZE * 34);
1121        // Gaps: between map1 and map2 (1), between map2 and overlapping_base_map (2)
1122        assert_eq!(gaps, 2);
1123
1124        // Test memory map coalescing (should fail due to non-adjacent physical addresses)
1125        let coalesced = manager.coalesce_memory_maps();
1126        assert_eq!(coalesced, 0); // No coalescing possible due to gap
1127    }
1128
1129    #[test_case]
1130    fn test_memory_map_coalescing() {
1131        use crate::environment::PAGE_SIZE;
1132
1133        // Test memory map coalescing with adjacent compatible maps
1134        let manager = VirtualMemoryManager::new();
1135
1136        // Add two adjacent memory maps that can be merged
1137        let map1 = VirtualMemoryMap::new(
1138            crate::vm::vmem::MemoryArea {
1139                start: 0x80000000,
1140                end: 0x80000fff,
1141            },
1142            crate::vm::vmem::MemoryArea {
1143                start: 0x10000000,
1144                end: 0x10000fff,
1145            },
1146            0o644,
1147            false,
1148            None,
1149        );
1150        let map2 = VirtualMemoryMap::new(
1151            crate::vm::vmem::MemoryArea {
1152                start: 0x80001000,
1153                end: 0x80001fff,
1154            },
1155            crate::vm::vmem::MemoryArea {
1156                start: 0x10001000,
1157                end: 0x10001fff,
1158            },
1159            0o644, // Same permissions
1160            false, // Same sharing status
1161            None,
1162        );
1163
1164        manager.add_memory_map(map1).unwrap();
1165        manager.add_memory_map(map2).unwrap();
1166
1167        // Before coalescing
1168        let (total_maps_before, _, _) = manager.get_memory_stats();
1169        assert_eq!(total_maps_before, 2);
1170
1171        // Perform coalescing
1172        let coalesced = manager.coalesce_memory_maps();
1173        assert_eq!(coalesced, 1); // Should merge one pair
1174
1175        // After coalescing
1176        let (total_maps_after, total_size, gaps) = manager.get_memory_stats();
1177        assert_eq!(total_maps_after, 1); // Should be merged into single map
1178        assert_eq!(total_size, PAGE_SIZE * 2); // Total size should remain same
1179        assert_eq!(gaps, 0); // No gaps after merging
1180
1181        // Verify the merged map covers the entire range
1182        let merged_map = manager.search_memory_map(0x10000000).unwrap();
1183        assert_eq!(merged_map.vmarea.start, 0x10000000);
1184        assert_eq!(merged_map.vmarea.end, 0x10001fff);
1185    }
1186
1187    #[test_case]
1188    fn test_complex_overlap_detection() {
1189        let manager = VirtualMemoryManager::new();
1190
1191        // Set up existing memory maps for comprehensive overlap testing
1192        // Map 1: [0x1000, 0x2000)
1193        let map1 = VirtualMemoryMap::new(
1194            crate::vm::vmem::MemoryArea {
1195                start: 0x10000000,
1196                end: 0x10000fff,
1197            }, // pmarea
1198            crate::vm::vmem::MemoryArea {
1199                start: 0x1000,
1200                end: 0x1fff,
1201            }, // vmarea
1202            0o644,
1203            false,
1204            None,
1205        );
1206        manager.add_memory_map(map1).unwrap();
1207
1208        // Map 2: [0x4000, 0x5000)
1209        let map2 = VirtualMemoryMap::new(
1210            crate::vm::vmem::MemoryArea {
1211                start: 0x20000000,
1212                end: 0x20000fff,
1213            }, // pmarea
1214            crate::vm::vmem::MemoryArea {
1215                start: 0x4000,
1216                end: 0x4fff,
1217            }, // vmarea
1218            0o644,
1219            false,
1220            None,
1221        );
1222        manager.add_memory_map(map2).unwrap();
1223
1224        // Map 3: [0x7000, 0x8000)
1225        let map3 = VirtualMemoryMap::new(
1226            crate::vm::vmem::MemoryArea {
1227                start: 0x30000000,
1228                end: 0x30000fff,
1229            }, // pmarea
1230            crate::vm::vmem::MemoryArea {
1231                start: 0x7000,
1232                end: 0x7fff,
1233            }, // vmarea
1234            0o644,
1235            false,
1236            None,
1237        );
1238        manager.add_memory_map(map3).unwrap();
1239
1240        // Test Case 1: Overlap with previous map (end boundary)
1241        // Try to add [0x1800, 0x2800) - overlaps with map1's end
1242        let overlap_with_prev = VirtualMemoryMap::new(
1243            crate::vm::vmem::MemoryArea {
1244                start: 0x40000000,
1245                end: 0x40000fff,
1246            }, // pmarea
1247            crate::vm::vmem::MemoryArea {
1248                start: 0x1800,
1249                end: 0x27ff,
1250            }, // vmarea
1251            0o644,
1252            false,
1253            None,
1254        );
1255        assert!(manager.add_memory_map(overlap_with_prev).is_err());
1256
1257        // Test Case 2: Overlap with next map (start boundary)
1258        // Try to add [0x3800, 0x4800) - overlaps with map2's start
1259        let overlap_with_next = VirtualMemoryMap::new(
1260            crate::vm::vmem::MemoryArea {
1261                start: 0x50000000,
1262                end: 0x50000fff,
1263            }, // pmarea
1264            crate::vm::vmem::MemoryArea {
1265                start: 0x3800,
1266                end: 0x47ff,
1267            }, // vmarea
1268            0o644,
1269            false,
1270            None,
1271        );
1272        assert!(manager.add_memory_map(overlap_with_next).is_err());
1273
1274        // Test Case 3: Complete containment by existing map
1275        // Try to add [0x1200, 0x1800) - completely inside map1
1276        let contained_map = VirtualMemoryMap::new(
1277            crate::vm::vmem::MemoryArea {
1278                start: 0x60000000,
1279                end: 0x600005ff,
1280            }, // pmarea
1281            crate::vm::vmem::MemoryArea {
1282                start: 0x1200,
1283                end: 0x17ff,
1284            }, // vmarea
1285            0o644,
1286            false,
1287            None,
1288        );
1289        assert!(manager.add_memory_map(contained_map).is_err());
1290
1291        // Test Case 4: Containing an existing map
1292        // Try to add [0x800, 0x2800) - contains map1 completely
1293        let containing_map = VirtualMemoryMap::new(
1294            crate::vm::vmem::MemoryArea {
1295                start: 0x70000000,
1296                end: 0x70001fff,
1297            }, // pmarea
1298            crate::vm::vmem::MemoryArea {
1299                start: 0x800,
1300                end: 0x27ff,
1301            }, // vmarea
1302            0o644,
1303            false,
1304            None,
1305        );
1306        assert!(manager.add_memory_map(containing_map).is_err());
1307
1308        // Test Case 5: Exact boundary collision (touching exactly)
1309        // Try to add [0x2000, 0x3000) - starts exactly where map1 ends
1310        let exact_boundary = VirtualMemoryMap::new(
1311            crate::vm::vmem::MemoryArea {
1312                start: 0x80000000,
1313                end: 0x80000fff,
1314            }, // pmarea
1315            crate::vm::vmem::MemoryArea {
1316                start: 0x2000,
1317                end: 0x2fff,
1318            }, // vmarea
1319            0o644,
1320            false,
1321            None,
1322        );
1323        assert!(manager.add_memory_map(exact_boundary).is_ok()); // Should succeed (touching but not overlapping)
1324
1325        // Test Case 6: Valid gap insertion
1326        // Add [0x5000, 0x6000) - fits perfectly between map2 and map3
1327        let gap_insertion = VirtualMemoryMap::new(
1328            crate::vm::vmem::MemoryArea {
1329                start: 0x90000000,
1330                end: 0x90000fff,
1331            }, // pmarea
1332            crate::vm::vmem::MemoryArea {
1333                start: 0x5000,
1334                end: 0x5fff,
1335            }, // vmarea
1336            0o644,
1337            false,
1338            None,
1339        );
1340        assert!(manager.add_memory_map(gap_insertion).is_ok());
1341
1342        // Test Case 7: Edge case - inserting at the very beginning
1343        // Add [0x0, 0x1000) - before all existing maps
1344        let beginning_map = VirtualMemoryMap::new(
1345            crate::vm::vmem::MemoryArea {
1346                start: 0xa0000000,
1347                end: 0xa0000fff,
1348            }, // pmarea
1349            crate::vm::vmem::MemoryArea {
1350                start: 0x0,
1351                end: 0xfff,
1352            }, // vmarea
1353            0o644,
1354            false,
1355            None,
1356        );
1357        assert!(manager.add_memory_map(beginning_map).is_ok());
1358
1359        // Test Case 8: Edge case - inserting at the very end
1360        // Add [0x8000, 0x9000) - after all existing maps
1361        let end_map = VirtualMemoryMap::new(
1362            crate::vm::vmem::MemoryArea {
1363                start: 0xb0000000,
1364                end: 0xb0000fff,
1365            }, // pmarea
1366            crate::vm::vmem::MemoryArea {
1367                start: 0x8000,
1368                end: 0x8fff,
1369            }, // vmarea
1370            0o644,
1371            false,
1372            None,
1373        );
1374        assert!(manager.add_memory_map(end_map).is_ok());
1375
1376        // Verify final state: should have 7 maps total
1377        assert_eq!(manager.memmap_len(), 7);
1378
1379        // Verify all maps are accessible and correctly ordered
1380        let starts: [usize; 7] = [0x0, 0x1000, 0x2000, 0x4000, 0x5000, 0x7000, 0x8000];
1381        let mut i = 0;
1382        manager.with_memmaps(|mm| {
1383            for map in mm.values() {
1384                assert_eq!(map.vmarea.start, starts[i]);
1385                i += 1;
1386            }
1387        });
1388        assert_eq!(i, 7);
1389    }
1390
1391    #[test_case]
1392    fn test_alignment_and_edge_cases() {
1393        let manager = VirtualMemoryManager::new();
1394
1395        // Test Case 1: Non-aligned virtual address (should fail)
1396        let misaligned_virtual = VirtualMemoryMap::new(
1397            crate::vm::vmem::MemoryArea {
1398                start: 0x10000000,
1399                end: 0x10000fff,
1400            }, // pmarea
1401            crate::vm::vmem::MemoryArea {
1402                start: 0x1001,
1403                end: 0x2000,
1404            }, // vmarea - Not PAGE_SIZE aligned
1405            0o644,
1406            false,
1407            None,
1408        );
1409        assert!(manager.add_memory_map(misaligned_virtual).is_err());
1410
1411        // Test Case 2: Non-aligned physical address (should fail)
1412        let misaligned_physical = VirtualMemoryMap::new(
1413            crate::vm::vmem::MemoryArea {
1414                start: 0x10000001,
1415                end: 0x10001000,
1416            }, // pmarea - Not PAGE_SIZE aligned
1417            crate::vm::vmem::MemoryArea {
1418                start: 0x1000,
1419                end: 0x1fff,
1420            }, // vmarea
1421            0o644,
1422            false,
1423            None,
1424        );
1425        assert!(manager.add_memory_map(misaligned_physical).is_err());
1426
1427        // Test Case 3: Non-aligned size (should fail)
1428        let misaligned_size = VirtualMemoryMap::new(
1429            crate::vm::vmem::MemoryArea {
1430                start: 0x10000000,
1431                end: 0x10000800,
1432            }, // pmarea
1433            crate::vm::vmem::MemoryArea {
1434                start: 0x1000,
1435                end: 0x1800,
1436            }, // vmarea - Size is not PAGE_SIZE multiple
1437            0o644,
1438            false,
1439            None,
1440        );
1441        assert!(manager.add_memory_map(misaligned_size).is_err());
1442
1443        // Test Case 4: Zero-size mapping (should fail)
1444        let zero_size = VirtualMemoryMap::new(
1445            crate::vm::vmem::MemoryArea {
1446                start: 0x10000000,
1447                end: 0x10000000,
1448            }, // pmarea - Start == End
1449            crate::vm::vmem::MemoryArea {
1450                start: 0x1000,
1451                end: 0x1000,
1452            }, // vmarea - Start == End
1453            0o644,
1454            false,
1455            None,
1456        );
1457        assert!(manager.add_memory_map(zero_size).is_err());
1458
1459        // Test Case 5: Single page mapping (should succeed)
1460        let single_page = VirtualMemoryMap::new(
1461            crate::vm::vmem::MemoryArea {
1462                start: 0x10000000,
1463                end: 0x10000fff,
1464            }, // pmarea
1465            crate::vm::vmem::MemoryArea {
1466                start: 0x1000,
1467                end: 0x1fff,
1468            }, // vmarea
1469            0o644,
1470            false,
1471            None,
1472        );
1473        assert!(manager.add_memory_map(single_page).is_ok());
1474
1475        // Test Case 6: Large mapping (multiple pages)
1476        let large_mapping = VirtualMemoryMap::new(
1477            crate::vm::vmem::MemoryArea {
1478                start: 0x20000000,
1479                end: 0x2000ffff,
1480            }, // pmarea - 64KB
1481            crate::vm::vmem::MemoryArea {
1482                start: 0x10000,
1483                end: 0x1ffff,
1484            }, // vmarea - 64KB
1485            0o644,
1486            false,
1487            None,
1488        );
1489        assert!(manager.add_memory_map(large_mapping).is_ok());
1490
1491        assert_eq!(manager.memmap_len(), 2);
1492    }
1493
1494    #[test_case]
1495    fn test_cache_invalidation_on_add() {
1496        let manager = VirtualMemoryManager::new();
1497
1498        // Add initial mapping
1499        let map1 = VirtualMemoryMap::new(
1500            crate::vm::vmem::MemoryArea {
1501                start: 0x10000000,
1502                end: 0x10000fff,
1503            }, // pmarea
1504            crate::vm::vmem::MemoryArea {
1505                start: 0x1000,
1506                end: 0x1fff,
1507            }, // vmarea
1508            0o644,
1509            false,
1510            None,
1511        );
1512        manager.add_memory_map(map1).unwrap();
1513
1514        // Search to populate cache
1515        let found = manager.search_memory_map(0x1500);
1516        assert!(found.is_some());
1517
1518        // Verify cache is populated (indirect test through repeated search performance)
1519        let found_again = manager.search_memory_map(0x1500);
1520        assert!(found_again.is_some());
1521
1522        // Add another mapping, which should invalidate cache
1523        let map2 = VirtualMemoryMap::new(
1524            crate::vm::vmem::MemoryArea {
1525                start: 0x20000000,
1526                end: 0x20000fff,
1527            }, // pmarea
1528            crate::vm::vmem::MemoryArea {
1529                start: 0x3000,
1530                end: 0x3fff,
1531            }, // vmarea
1532            0o644,
1533            false,
1534            None,
1535        );
1536        manager.add_memory_map(map2).unwrap();
1537
1538        // Search should still work correctly after cache invalidation
1539        let found_after_invalidation = manager.search_memory_map(0x1500);
1540        assert!(found_after_invalidation.is_some());
1541        assert_eq!(found_after_invalidation.unwrap().vmarea.start, 0x1000);
1542
1543        let found_new = manager.search_memory_map(0x3500);
1544        assert!(found_new.is_some());
1545        assert_eq!(found_new.unwrap().vmarea.start, 0x3000);
1546    }
1547
1548    #[test_case]
1549    fn test_add_memory_map_fixed_complete_overlap() {
1550        let manager = VirtualMemoryManager::new();
1551
1552        // Add initial mapping at [0x2000, 0x3000)
1553        let initial_map = VirtualMemoryMap::new(
1554            crate::vm::vmem::MemoryArea {
1555                start: 0x10000000,
1556                end: 0x10000fff,
1557            }, // pmarea
1558            crate::vm::vmem::MemoryArea {
1559                start: 0x2000,
1560                end: 0x2fff,
1561            }, // vmarea
1562            0o644,
1563            false,
1564            None,
1565        );
1566        manager.add_memory_map(initial_map).unwrap();
1567        assert_eq!(manager.memmap_len(), 1);
1568
1569        // Add fixed mapping that completely contains the existing mapping [0x1000, 0x4000)
1570        let fixed_map = VirtualMemoryMap::new(
1571            crate::vm::vmem::MemoryArea {
1572                start: 0x20000000,
1573                end: 0x20002fff,
1574            }, // pmarea - 3 pages
1575            crate::vm::vmem::MemoryArea {
1576                start: 0x1000,
1577                end: 0x3fff,
1578            }, // vmarea - 3 pages
1579            0o755,
1580            true,
1581            None,
1582        );
1583
1584        let result = manager.add_memory_map_fixed(fixed_map);
1585        assert!(result.is_ok());
1586
1587        let overwritten_mappings = result.unwrap();
1588        assert_eq!(overwritten_mappings.len(), 1); // Should have removed one mapping
1589        assert_eq!(overwritten_mappings[0].vmarea.start, 0x2000);
1590
1591        // Should now have only the new fixed mapping
1592        assert_eq!(manager.memmap_len(), 1);
1593        let remaining_map = manager.search_memory_map(0x2000).unwrap();
1594        assert_eq!(remaining_map.vmarea.start, 0x1000);
1595        assert_eq!(remaining_map.vmarea.end, 0x3fff);
1596        assert_eq!(remaining_map.permissions, 0o755);
1597        assert_eq!(remaining_map.is_shared, true);
1598    }
1599
1600    #[test_case]
1601    fn test_add_memory_map_fixed_partial_overlap() {
1602        let manager = VirtualMemoryManager::new();
1603
1604        // Add initial mapping at [0x1000, 0x3000) - 2 pages
1605        let initial_map = VirtualMemoryMap::new(
1606            crate::vm::vmem::MemoryArea {
1607                start: 0x10000000,
1608                end: 0x10001fff,
1609            }, // pmarea - 2 pages
1610            crate::vm::vmem::MemoryArea {
1611                start: 0x1000,
1612                end: 0x2fff,
1613            }, // vmarea - 2 pages
1614            0o644,
1615            false,
1616            None,
1617        );
1618        manager.add_memory_map(initial_map).unwrap();
1619        assert_eq!(manager.memmap_len(), 1);
1620
1621        // Add fixed mapping that overlaps from middle: [0x2000, 0x4000) - 2 pages
1622        let fixed_map = VirtualMemoryMap::new(
1623            crate::vm::vmem::MemoryArea {
1624                start: 0x20000000,
1625                end: 0x20001fff,
1626            }, // pmarea - 2 pages
1627            crate::vm::vmem::MemoryArea {
1628                start: 0x2000,
1629                end: 0x3fff,
1630            }, // vmarea - 2 pages
1631            0o755,
1632            true,
1633            None,
1634        );
1635
1636        let result = manager.add_memory_map_fixed(fixed_map);
1637        assert!(result.is_ok());
1638
1639        let overwritten_mappings = result.unwrap();
1640        assert_eq!(overwritten_mappings.len(), 1); // Should have removed the original mapping
1641
1642        // Should now have 2 mappings: the split part [0x1000, 0x2000) and the new fixed [0x2000, 0x4000)
1643        assert_eq!(manager.memmap_len(), 2);
1644
1645        // Check the remaining part of the original mapping
1646        let remaining_original = manager.search_memory_map(0x1500).unwrap();
1647        assert_eq!(remaining_original.vmarea.start, 0x1000);
1648        assert_eq!(remaining_original.vmarea.end, 0x1fff);
1649        assert_eq!(remaining_original.permissions, 0o644);
1650
1651        // Check the new fixed mapping
1652        let new_fixed = manager.search_memory_map(0x3000).unwrap();
1653        assert_eq!(new_fixed.vmarea.start, 0x2000);
1654        assert_eq!(new_fixed.vmarea.end, 0x3fff);
1655        assert_eq!(new_fixed.permissions, 0o755);
1656        assert_eq!(new_fixed.is_shared, true);
1657    }
1658
1659    #[test_case]
1660    fn test_add_memory_map_fixed_split_both_ends() {
1661        let manager = VirtualMemoryManager::new();
1662
1663        // Add initial mapping at [0x1000, 0x5000) - 4 pages
1664        let initial_map = VirtualMemoryMap::new(
1665            crate::vm::vmem::MemoryArea {
1666                start: 0x10000000,
1667                end: 0x10003fff,
1668            }, // pmarea - 4 pages
1669            crate::vm::vmem::MemoryArea {
1670                start: 0x1000,
1671                end: 0x4fff,
1672            }, // vmarea - 4 pages
1673            0o644,
1674            false,
1675            None,
1676        );
1677        manager.add_memory_map(initial_map).unwrap();
1678        assert_eq!(manager.memmap_len(), 1);
1679
1680        // Add fixed mapping in the middle: [0x2000, 0x4000) - 2 pages
1681        let fixed_map = VirtualMemoryMap::new(
1682            crate::vm::vmem::MemoryArea {
1683                start: 0x20000000,
1684                end: 0x20001fff,
1685            }, // pmarea - 2 pages
1686            crate::vm::vmem::MemoryArea {
1687                start: 0x2000,
1688                end: 0x3fff,
1689            }, // vmarea - 2 pages
1690            0o755,
1691            true,
1692            None,
1693        );
1694
1695        let result = manager.add_memory_map_fixed(fixed_map);
1696        assert!(result.is_ok());
1697
1698        let overwritten_mappings = result.unwrap();
1699        assert_eq!(overwritten_mappings.len(), 1); // Should have removed the original mapping
1700
1701        // Should now have 3 mappings: before [0x1000, 0x2000), fixed [0x2000, 0x4000), after [0x4000, 0x5000)
1702        assert_eq!(manager.memmap_len(), 3);
1703
1704        // Check the part before the fixed mapping
1705        let before_part = manager.search_memory_map(0x1500).unwrap();
1706        assert_eq!(before_part.vmarea.start, 0x1000);
1707        assert_eq!(before_part.vmarea.end, 0x1fff);
1708        assert_eq!(before_part.permissions, 0o644);
1709
1710        // Check the new fixed mapping
1711        let fixed_part = manager.search_memory_map(0x3000).unwrap();
1712        assert_eq!(fixed_part.vmarea.start, 0x2000);
1713        assert_eq!(fixed_part.vmarea.end, 0x3fff);
1714        assert_eq!(fixed_part.permissions, 0o755);
1715        assert_eq!(fixed_part.is_shared, true);
1716
1717        // Check the part after the fixed mapping
1718        let after_part = manager.search_memory_map(0x4500).unwrap();
1719        assert_eq!(after_part.vmarea.start, 0x4000);
1720        assert_eq!(after_part.vmarea.end, 0x4fff);
1721        assert_eq!(after_part.permissions, 0o644);
1722    }
1723
1724    #[test_case]
1725    fn test_add_memory_map_fixed_no_overlap() {
1726        let manager = VirtualMemoryManager::new();
1727
1728        // Add initial mapping at [0x1000, 0x2000)
1729        let initial_map = VirtualMemoryMap::new(
1730            crate::vm::vmem::MemoryArea {
1731                start: 0x10000000,
1732                end: 0x10000fff,
1733            }, // pmarea
1734            crate::vm::vmem::MemoryArea {
1735                start: 0x1000,
1736                end: 0x1fff,
1737            }, // vmarea
1738            0o644,
1739            false,
1740            None,
1741        );
1742        manager.add_memory_map(initial_map).unwrap();
1743
1744        // Add fixed mapping with no overlap at [0x3000, 0x4000)
1745        let fixed_map = VirtualMemoryMap::new(
1746            crate::vm::vmem::MemoryArea {
1747                start: 0x20000000,
1748                end: 0x20000fff,
1749            }, // pmarea
1750            crate::vm::vmem::MemoryArea {
1751                start: 0x3000,
1752                end: 0x3fff,
1753            }, // vmarea
1754            0o755,
1755            true,
1756            None,
1757        );
1758
1759        let result = manager.add_memory_map_fixed(fixed_map);
1760        assert!(result.is_ok());
1761
1762        let overwritten_mappings = result.unwrap();
1763        assert_eq!(overwritten_mappings.len(), 0); // No mappings should be removed
1764
1765        // Should now have 2 mappings
1766        assert_eq!(manager.memmap_len(), 2);
1767
1768        // Both mappings should be intact
1769        let first_map = manager.search_memory_map(0x1500);
1770        assert!(first_map.is_some());
1771        assert_eq!(first_map.unwrap().vmarea.start, 0x1000);
1772
1773        let second_map = manager.search_memory_map(0x3500);
1774        assert!(second_map.is_some());
1775        assert_eq!(second_map.unwrap().vmarea.start, 0x3000);
1776    }
1777
1778    #[test_case]
1779    fn test_lazy_mapping_and_unmapping() {
1780        let manager = VirtualMemoryManager::new();
1781        let vma = MemoryArea {
1782            start: 0x1000,
1783            end: 0x1fff,
1784        };
1785        let map = VirtualMemoryMap {
1786            vmarea: vma,
1787            pmarea: vma,
1788            permissions: 0o644,
1789            is_shared: false,
1790            owner: None,
1791        };
1792        let asid = alloc_virtual_address_space();
1793        manager.set_asid(asid);
1794        manager.add_memory_map(map).unwrap();
1795
1796        // Trigger lazy mapping by simulating a page fault at virtual address 0x1500
1797        assert!(manager.lazy_map_page(0x1500).is_ok());
1798
1799        // The page should now be mapped in the MMU
1800        // For testing, we can't directly check MMU state, so we verify by translating the address
1801        let translated_addr = manager.translate_vaddr(0x1500);
1802        assert!(translated_addr.is_some());
1803        assert_eq!(translated_addr.unwrap() & !(PAGE_SIZE - 1), 0x1000); // Should be page-aligned
1804
1805        // Test unmapping functionality by removing the memory map
1806        // This also unmaps from MMU due to our implementation
1807        manager.remove_memory_map_by_addr(0x1500);
1808
1809        // Translation should now fail as the memory map is removed
1810        let translated_addr_after_unmap = manager.translate_vaddr(0x1500);
1811        assert!(translated_addr_after_unmap.is_none());
1812    }
1813}