1extern crate alloc;
39use alloc::collections::btree_map::Values;
40use alloc::{collections::BTreeMap, sync::Arc, vec::Vec};
41use spin::RwLock;
42
43use crate::object::capability::memory_mapping::AccessOp;
44use crate::{
45 arch::vm::{free_virtual_address_space, get_root_pagetable, is_asid_used, mmu::PageTable},
46 environment::PAGE_SIZE,
47};
48
49use super::vmem::{MemoryArea, VirtualMemoryMap};
50
/// Thread-safe handle to a per-address-space virtual memory manager.
///
/// Cloning is cheap: all clones share the same `InnerVmm` behind an
/// `Arc<RwLock<..>>`, so every clone observes and mutates the same state.
#[derive(Debug, Clone)]
pub struct VirtualMemoryManager {
    // Shared, lock-protected state; see `InnerVmm` for the fields.
    inner: Arc<RwLock<InnerVmm>>,
}
55
/// Mutable state of a `VirtualMemoryManager`, guarded by the outer `RwLock`.
#[derive(Debug, Clone)]
struct InnerVmm {
    // Mappings keyed by the mapping's `vmarea.start` (page-aligned).
    memmap: BTreeMap<usize, VirtualMemoryMap>,
    // Address-space identifier; 0 means "no ASID assigned yet".
    asid: u16,
    // Lowest address considered by `find_unmapped_area`.
    mmap_base: usize,
    // Page tables kept alive for the lifetime of this address space.
    page_tables: Vec<Arc<PageTable>>,
    // Last successful lookup: (vmarea.start, vmarea.end, map key).
    last_search_cache: Option<(usize, usize, usize)>,
}
64
65impl VirtualMemoryManager {
66 pub fn new() -> Self {
71 let inner = InnerVmm {
72 memmap: BTreeMap::new(),
73 asid: 0,
74 mmap_base: 0x40000000, page_tables: Vec::new(),
76 last_search_cache: None,
77 };
78 VirtualMemoryManager {
79 inner: Arc::new(RwLock::new(inner)),
80 }
81 }
82
83 pub fn set_asid(&self, asid: u16) {
88 let mut g = self.inner.write();
89 if g.asid == asid {
90 return;
91 }
92 if g.asid != 0 && is_asid_used(g.asid) {
93 free_virtual_address_space(g.asid);
94 }
95 g.asid = asid;
96 }
97
98 pub fn get_asid(&self) -> u16 {
103 self.inner.read().asid
104 }
105
106 pub fn memmap_len(&self) -> usize {
117 self.inner.read().memmap.len()
118 }
119
120 pub fn memmap_is_empty(&self) -> bool {
125 self.inner.read().memmap.is_empty()
126 }
127
128 pub fn with_memmaps<R>(&self, f: impl FnOnce(&BTreeMap<usize, VirtualMemoryMap>) -> R) -> R {
131 let g = self.inner.read();
132 f(&g.memmap)
133 }
134
135 pub fn with_memmaps_mut<R>(
138 &self,
139 f: impl FnOnce(&mut BTreeMap<usize, VirtualMemoryMap>) -> R,
140 ) -> R {
141 let mut g = self.inner.write();
142 f(&mut g.memmap)
143 }
144
145 pub fn memmaps_iter_with<R, F>(&self, f: F) -> R
148 where
149 F: for<'a> FnOnce(Values<'a, usize, VirtualMemoryMap>) -> R,
150 {
151 let g = self.inner.read();
152 let iter = g.memmap.values();
153 f(iter)
154 }
155
156 pub fn get_memory_map_by_addr(&self, start_addr: usize) -> Option<VirtualMemoryMap> {
164 self.inner.read().memmap.get(&start_addr).cloned()
165 }
166
167 pub fn add_memory_map(&self, map: VirtualMemoryMap) -> Result<(), &'static str> {
193 if map.vmarea.start % PAGE_SIZE != 0
195 || map.pmarea.start % PAGE_SIZE != 0
196 || map.vmarea.size() % PAGE_SIZE != 0
197 || map.pmarea.size() % PAGE_SIZE != 0
198 {
199 return Err("Address or size is not aligned to PAGE_SIZE");
200 }
201
202 let mut g = self.inner.write();
203 if let Some((_, prev_map)) = g.memmap.range(..map.vmarea.start).next_back() {
205 if prev_map.vmarea.end > map.vmarea.start {
206 return Err("Memory mapping overlaps with a preceding map");
207 }
208 }
209 if let Some((_, next_map)) = g.memmap.range(map.vmarea.start..).next() {
211 if next_map.vmarea.start < map.vmarea.end {
212 return Err("Memory mapping overlaps with a succeeding map");
213 }
214 }
215
216 g.last_search_cache = None;
217 g.memmap.insert(map.vmarea.start, map);
218 Ok(())
219 }
220
221 pub fn remove_memory_map_by_addr(&self, vaddr: usize) -> Option<VirtualMemoryMap> {
231 let mut g = self.inner.write();
232 let start_addr = find_memory_map_key_with_cache_update(&mut *g, vaddr)?;
233 if let Some((_, _, cache_key)) = g.last_search_cache {
234 if cache_key == start_addr {
235 g.last_search_cache = None;
236 }
237 }
238 let removed_map = g.memmap.remove(&start_addr);
239 drop(g);
240 if let Some(m) = removed_map {
241 self.unmap_range_from_mmu(m.vmarea.start, m.vmarea.end);
242 Some(m)
243 } else {
244 None
245 }
246 }
247
248 pub fn remove_all_memory_maps(&self) -> impl Iterator<Item = VirtualMemoryMap> {
256 let mut g = self.inner.write();
257 g.last_search_cache = None;
258 let memmap = core::mem::take(&mut g.memmap);
259 memmap.into_values()
260 }
261
262 pub fn restore_memory_maps<I>(&self, maps: I) -> Result<(), &'static str>
271 where
272 I: IntoIterator<Item = VirtualMemoryMap>,
273 {
274 for map in maps {
275 if let Err(e) = self.add_memory_map(map) {
276 return Err(e);
277 }
278 }
279 Ok(())
280 }
281
282 pub fn search_memory_map(&self, vaddr: usize) -> Option<VirtualMemoryMap> {
291 let mut g = self.inner.write();
292 if let Some((cache_start, cache_end, cache_key)) = g.last_search_cache {
293 if cache_start <= vaddr && vaddr <= cache_end {
294 return g.memmap.get(&cache_key).cloned();
295 }
296 }
297 if let Some((_k, map)) = g.memmap.range(..=vaddr).next_back() {
298 if vaddr <= map.vmarea.end {
299 let start = map.vmarea.start;
300 let end = map.vmarea.end;
301 let out = map.clone();
302 g.last_search_cache = Some((start, end, start));
303 return Some(out);
304 }
305 }
306 None
307 }
308
309 pub fn add_page_table(&self, page_table: Arc<PageTable>) {
343 self.inner.write().page_tables.push(page_table);
344 }
345
    /// Returns the root page table registered for this manager's ASID.
    ///
    /// NOTE(review): the returned `&mut` is not tied to any lock on `self`;
    /// aliasing soundness rests entirely on `get_root_pagetable`'s own
    /// guarantees — confirm in `arch::vm`.
    pub fn get_root_page_table(&self) -> Option<&mut PageTable> {
        get_root_pagetable(self.get_asid())
    }
353
354 pub fn lazy_map_page(&self, vaddr: usize) -> Result<(), &'static str> {
366 let access = crate::object::capability::memory_mapping::AccessKind {
368 op: crate::object::capability::memory_mapping::AccessOp::Load,
369 vaddr,
370 size: None,
371 };
372 self.lazy_map_page_with(access)
373 }
374
375 pub fn lazy_map_page_with(
377 &self,
378 access: crate::object::capability::memory_mapping::AccessKind,
379 ) -> Result<(), &'static str> {
380 let vaddr = access.vaddr;
381 let memory_map = match self.search_memory_map(vaddr) {
383 Some(map) => map,
384 None => {
385 return self.try_extend_mapping_for_access(&access);
388 }
389 };
390
391 let page_vaddr = vaddr & !(PAGE_SIZE - 1);
393 let offset_in_mapping = page_vaddr - memory_map.vmarea.start;
394 let mut page_paddr = memory_map.pmarea.start + offset_in_mapping;
395 let mut perms = memory_map.permissions;
396
397 if let Some(owner_weak) = &memory_map.owner {
399 if let Some(owner) = owner_weak.upgrade() {
400 let owner_name = owner.mmap_owner_name();
401 let _should_log = owner_name.contains("xkb");
402 match owner.resolve_fault(&access, &memory_map) {
403 Ok(res) => {
404 page_paddr = res.paddr_page_base;
405 if res.is_tail {
406 perms &= !0x1; perms &= !0x2; }
410 }
411 Err(_e) => {
412 return Err("Owner failed to resolve fault");
413 }
414 }
415 }
416 }
417
418 if let Some(root_pagetable) = self.get_root_page_table() {
420 root_pagetable.map(
421 self.get_asid(),
422 page_vaddr,
423 page_paddr,
424 perms,
425 true,
426 access.op == AccessOp::Store,
427 );
428 Ok(())
429 } else {
430 Err("No root page table available")
431 }
432 }
433
    /// Fallback for faults with no covering mapping: scans for a mapping
    /// that ends below the faulting address and asks its owner to resolve
    /// the fault. On success the mapping is grown up to (and including) the
    /// faulting page and that page is installed in the MMU.
    fn try_extend_mapping_for_access(
        &self,
        access: &crate::object::capability::memory_mapping::AccessKind,
    ) -> Result<(), &'static str> {
        let vaddr = access.vaddr;
        let page_vaddr = vaddr & !(PAGE_SIZE - 1);

        // (physical page base, permissions) of the grown mapping, if any.
        let extend_result: Option<(usize, usize)>;

        {
            let mut g = self.inner.write();

            let mut found = None;
            // NOTE(review): this considers EVERY mapping ending below vaddr,
            // not just the closest one; the first owner that accepts wins.
            for (_, map) in g.memmap.iter_mut() {
                if map.vmarea.end < vaddr {
                    if let Some(owner_weak) = &map.owner {
                        if let Some(owner) = owner_weak.upgrade() {
                            // Probe the owner with the page-aligned address.
                            let test_access =
                                crate::object::capability::memory_mapping::AccessKind {
                                    vaddr: page_vaddr,
                                    op: access.op,
                                    size: access.size,
                                };

                            match owner.resolve_fault(&test_access, map) {
                                Ok(res) => {
                                    // Grow the virtual area to cover the
                                    // faulting page (inclusive end).
                                    let new_end = page_vaddr + PAGE_SIZE - 1;
                                    crate::println!(
                                        "[VmManager] Extending mapping vmarea.end from {:#x} to {:#x} for owner={}",
                                        map.vmarea.end,
                                        new_end,
                                        owner.mmap_owner_name()
                                    );
                                    map.vmarea.end = new_end;

                                    // Grow pmarea by the same amount so both
                                    // areas keep equal (inclusive) sizes.
                                    let pmarea_growth = new_end
                                        - map.vmarea.start
                                        - (map.pmarea.end - map.pmarea.start);
                                    map.pmarea.end += pmarea_growth;

                                    found = Some((res.paddr_page_base, map.permissions));
                                    break;
                                }
                                Err(_) => {
                                    // This owner declined; keep scanning.
                                }
                            }
                        }
                    }
                }
            }
            extend_result = found;
        } // write lock released before touching the page table

        if let Some((paddr_page_base, permissions)) = extend_result {
            if let Some(root_pagetable) = self.get_root_page_table() {
                root_pagetable.map(
                    self.get_asid(),
                    page_vaddr,
                    paddr_page_base,
                    permissions,
                    true,
                    access.op == AccessOp::Store,
                );
                return Ok(());
            } else {
                return Err("No root page table available");
            }
        }

        Err("No extendable memory mapping found for virtual address")
    }
519
520 pub fn unmap_range_from_mmu(&self, vaddr_start: usize, vaddr_end: usize) {
529 if let Some(root_pagetable) = self.get_root_page_table() {
530 let num_pages = (vaddr_end - vaddr_start + 1 + PAGE_SIZE - 1) / PAGE_SIZE;
531
532 for i in 0..num_pages {
533 let page_vaddr = (vaddr_start & !(PAGE_SIZE - 1)) + i * PAGE_SIZE;
534 if page_vaddr <= vaddr_end {
535 root_pagetable.unmap(self.get_asid(), page_vaddr);
536 }
537 }
538 }
539 }
540
541 pub fn translate_vaddr(&self, vaddr: usize) -> Option<usize> {
553 if let Some(map) = self.search_memory_map(vaddr) {
554 let offset = vaddr - map.vmarea.start;
556 Some(map.pmarea.start + offset)
558 } else {
559 None
560 }
561 }
562
563 pub fn get_mmap_base(&self) -> usize {
568 self.inner.read().mmap_base
569 }
570
571 pub fn set_mmap_base(&self, base: usize) {
577 self.inner.write().mmap_base = base;
578 }
579
    /// Searches for a free, `alignment`-aligned gap of at least `size` bytes
    /// at or above `mmap_base`. Returns the aligned start address, or `None`
    /// when nothing fits below `MAX_USER_ADDR`.
    ///
    /// NOTE(review): assumes `alignment` is a nonzero power of two — the
    /// round-up masks below are wrong otherwise; confirm with callers.
    pub fn find_unmapped_area(&self, size: usize, alignment: usize) -> Option<usize> {
        // Round the requested size up to a multiple of the alignment.
        let aligned_size = (size + alignment - 1) & !(alignment - 1);
        let g = self.inner.read();
        let mut search_addr = (g.mmap_base + alignment - 1) & !(alignment - 1);

        // If a mapping at/below the starting point reaches it, skip past it
        // (ends are inclusive, hence the +1) and re-align.
        if let Some((_, prev_map)) = g.memmap.range(..=search_addr).next_back() {
            if prev_map.vmarea.end >= search_addr {
                search_addr = prev_map.vmarea.end + 1;
                search_addr = (search_addr + alignment - 1) & !(alignment - 1);
            }
        }

        // Walk the remaining mappings in address order; return the first gap
        // large enough to hold the request.
        for (_start, memory_map) in g.memmap.range(search_addr..) {
            if search_addr + aligned_size <= memory_map.vmarea.start {
                return Some(search_addr);
            }

            if memory_map.vmarea.end >= search_addr {
                search_addr = memory_map.vmarea.end + 1;
                search_addr = (search_addr + alignment - 1) & !(alignment - 1);
            }
        }
        drop(g);
        // Past the last mapping: anything up to the user-space limit works.
        const MAX_USER_ADDR: usize = 0x80000000;
        if search_addr + aligned_size <= MAX_USER_ADDR {
            Some(search_addr)
        } else {
            None
        }
    }
625
626 pub fn add_memory_map_fixed(
650 &self,
651 map: VirtualMemoryMap,
652 ) -> Result<Vec<VirtualMemoryMap>, &'static str> {
653 if map.vmarea.start % PAGE_SIZE != 0
655 || map.pmarea.start % PAGE_SIZE != 0
656 || map.vmarea.size() % PAGE_SIZE != 0
657 || map.pmarea.size() % PAGE_SIZE != 0
658 {
659 return Err("Address or size is not aligned to PAGE_SIZE");
660 }
661
662 let new_start = map.vmarea.start;
663 let new_end = map.vmarea.end;
664 let mut overwritten_mappings = Vec::new();
665 let mut mappings_to_add = Vec::new();
666
667 let mut g = self.inner.write();
668 let overlapping_keys: alloc::vec::Vec<usize> = g
669 .memmap
670 .range(..)
671 .filter_map(|(start_addr, existing_map)| {
672 let existing_start = existing_map.vmarea.start;
673 let existing_end = existing_map.vmarea.end;
674 if new_start <= existing_end && new_end >= existing_start {
675 Some(*start_addr)
676 } else {
677 None
678 }
679 })
680 .collect();
681
682 for key in overlapping_keys {
683 if let Some(existing_map) = g.memmap.remove(&key) {
684 let existing_start = existing_map.vmarea.start;
685 let existing_end = existing_map.vmarea.end;
686
687 let overlap_start = core::cmp::max(new_start, existing_start);
689 let overlap_end = core::cmp::min(new_end, existing_end);
690 if overlap_start <= overlap_end {
691 let pm_offset = overlap_start - existing_start;
693 let overwritten_map = VirtualMemoryMap {
694 vmarea: MemoryArea {
695 start: overlap_start,
696 end: overlap_end,
697 },
698 pmarea: MemoryArea {
699 start: existing_map.pmarea.start + pm_offset,
700 end: existing_map.pmarea.start
701 + pm_offset
702 + (overlap_end - overlap_start),
703 },
704 permissions: existing_map.permissions,
705 is_shared: existing_map.is_shared,
706 owner: existing_map.owner.clone(),
707 };
708 overwritten_mappings.push(overwritten_map);
709 }
710
711 if new_start <= existing_start && new_end >= existing_end {
713 continue;
715 }
716
717 if existing_start < new_start {
720 let before_map = VirtualMemoryMap {
721 vmarea: MemoryArea {
722 start: existing_start,
723 end: new_start - 1,
724 },
725 pmarea: MemoryArea {
726 start: existing_map.pmarea.start,
727 end: existing_map.pmarea.start + (new_start - existing_start) - 1,
728 },
729 permissions: existing_map.permissions,
730 is_shared: existing_map.is_shared,
731 owner: existing_map.owner.clone(),
732 };
733 mappings_to_add.push(before_map);
734 }
735
736 if existing_end > new_end {
738 let after_offset = (new_end + 1) - existing_start;
739 let after_map = VirtualMemoryMap {
740 vmarea: MemoryArea {
741 start: new_end + 1,
742 end: existing_end,
743 },
744 pmarea: MemoryArea {
745 start: existing_map.pmarea.start + after_offset,
746 end: existing_map.pmarea.end,
747 },
748 permissions: existing_map.permissions,
749 is_shared: existing_map.is_shared,
750 owner: existing_map.owner.clone(),
751 };
752 mappings_to_add.push(after_map);
753 }
754 }
755 }
756
757 g.last_search_cache = None;
759
760 let split_vec = mappings_to_add.clone();
762 for split_map in split_vec {
763 g.memmap.insert(split_map.vmarea.start, split_map);
764 }
765 g.memmap.insert(map.vmarea.start, map);
766 drop(g);
767 for overwritten_map in &overwritten_mappings {
768 self.unmap_range_from_mmu(overwritten_map.vmarea.start, overwritten_map.vmarea.end);
769 }
770
771 Ok(overwritten_mappings)
772 }
773
774 pub fn get_memory_stats(&self) -> (usize, usize, usize) {
780 let g = self.inner.read();
781 let total_maps = g.memmap.len();
782 let total_virtual_size: usize = g
783 .memmap
784 .values()
785 .map(|memory_map| memory_map.vmarea.end - memory_map.vmarea.start + 1)
786 .sum();
787
788 let mut gaps = 0;
790 let mut prev_end = None;
791
792 for memory_map in g.memmap.values() {
793 if let Some(prev) = prev_end {
794 if memory_map.vmarea.start > prev + 1 {
795 gaps += 1;
796 }
797 }
798 prev_end = Some(memory_map.vmarea.end);
799 }
800
801 (total_maps, total_virtual_size, gaps)
802 }
803
    /// Merges pairs of mappings that are adjacent in both virtual and
    /// physical space and share permissions/sharing flags. Returns how many
    /// merges were performed.
    ///
    /// Each pass merges disjoint pairs only (after a merge, pairing restarts
    /// from the following map), so callers wanting maximal coalescing may
    /// need to call this repeatedly until it returns 0.
    pub fn coalesce_memory_maps(&self) -> usize {
        let mut coalesced_count = 0;
        let mut to_remove = Vec::new();
        let mut to_add = Vec::new();
        let mut prev_start: Option<usize> = None;
        let mut prev_map: Option<VirtualMemoryMap> = None;
        let mut g = self.inner.write();
        for (&start, memory_map) in &g.memmap {
            if let (Some(prev_s), Some(prev_memory_map)) = (prev_start, &prev_map) {
                // Virtually adjacent (inclusive end + 1 == next start) and
                // otherwise compatible?
                if prev_memory_map.vmarea.end + 1 == memory_map.vmarea.start
                    && Self::can_merge_memory_maps(prev_memory_map, memory_map)
                {
                    // The merged map spans both virtual/physical ranges.
                    let merged_map = VirtualMemoryMap {
                        vmarea: super::vmem::MemoryArea {
                            start: prev_memory_map.vmarea.start,
                            end: memory_map.vmarea.end,
                        },
                        pmarea: super::vmem::MemoryArea {
                            start: prev_memory_map.pmarea.start,
                            end: memory_map.pmarea.end,
                        },
                        permissions: prev_memory_map.permissions,
                        is_shared: prev_memory_map.is_shared,
                        owner: prev_memory_map.owner.clone(),
                    };

                    to_remove.push(prev_s);
                    to_remove.push(start);
                    to_add.push(merged_map);
                    coalesced_count += 1;

                    // Don't chain the freshly merged map into the next
                    // comparison within this pass.
                    prev_start = None;
                    prev_map = None;
                    continue;
                }
            }

            prev_start = Some(start);
            prev_map = Some(memory_map.clone());
        }

        // Apply removals first, then insert the merged replacements.
        for start in to_remove {
            g.memmap.remove(&start);
        }
        for memory_map in to_add {
            g.memmap.insert(memory_map.vmarea.start, memory_map);
        }

        if coalesced_count > 0 {
            // Merged entries invalidate any cached lookup.
            g.last_search_cache = None;
        }

        coalesced_count
    }
869
870 fn can_merge_memory_maps(map1: &VirtualMemoryMap, map2: &VirtualMemoryMap) -> bool {
879 map1.permissions == map2.permissions
884 && map1.is_shared == map2.is_shared
885 && map1.pmarea.end + 1 == map2.pmarea.start
886 }
887}
888
889impl Drop for VirtualMemoryManager {
890 fn drop(&mut self) {
892 let asid = self.get_asid();
893 if asid != 0 && is_asid_used(asid) {
894 free_virtual_address_space(asid);
895 }
896 }
897}
898
899fn find_memory_map_key_with_cache_update(inner: &mut InnerVmm, vaddr: usize) -> Option<usize> {
900 if let Some((cache_start, cache_end, cache_key)) = inner.last_search_cache {
901 if cache_start <= vaddr && vaddr <= cache_end {
902 return Some(cache_key);
903 }
904 }
905 if let Some((start_addr, map)) = inner.memmap.range(..=vaddr).next_back() {
906 if map.vmarea.start <= vaddr && vaddr <= map.vmarea.end {
907 inner.last_search_cache = Some((map.vmarea.start, map.vmarea.end, *start_addr));
908 return Some(*start_addr);
909 }
910 }
911 None
912}
913
914#[cfg(test)]
915mod tests {
916 use crate::arch::vm::alloc_virtual_address_space;
917 use crate::environment::PAGE_SIZE;
918 use crate::vm::VirtualMemoryMap;
919 use crate::vm::{manager::VirtualMemoryManager, vmem::MemoryArea};
920
    #[test_case]
    fn test_new_virtual_memory_manager() {
        // A fresh manager starts with no ASID assigned (0).
        let vmm = VirtualMemoryManager::new();
        assert_eq!(vmm.get_asid(), 0);
    }
926
    #[test_case]
    fn test_set_and_get_asid() {
        // set_asid stores the value; get_asid reads it back.
        let vmm = VirtualMemoryManager::new();
        vmm.set_asid(42);
        assert_eq!(vmm.get_asid(), 42);
    }
933
    #[test_case]
    fn test_add_and_get_memory_map() {
        // Insert one page-sized mapping and read it back by start address.
        let vmm = VirtualMemoryManager::new();
        let vma = MemoryArea {
            start: 0x1000,
            end: 0x1fff,
        };
        let map = VirtualMemoryMap {
            vmarea: vma,
            pmarea: vma,
            permissions: 0,
            is_shared: false,
            owner: None,
        };
        vmm.add_memory_map(map).unwrap();

        assert_eq!(vmm.memmap_len(), 1);
        let first_map_start = vmm.with_memmaps(|m| m.values().next().unwrap().vmarea.start);
        assert_eq!(first_map_start, 0x1000);

        assert!(vmm.get_memory_map_by_addr(0x1000).is_some());
        assert_eq!(
            vmm.get_memory_map_by_addr(0x1000).unwrap().vmarea.start,
            0x1000
        );
    }
962
    #[test_case]
    fn test_remove_memory_map() {
        // Removing by any address inside the mapping yields it and empties
        // the table.
        let vmm = VirtualMemoryManager::new();
        let vma = MemoryArea {
            start: 0x1000,
            end: 0x1fff,
        };
        let map = VirtualMemoryMap {
            vmarea: vma,
            pmarea: vma,
            permissions: 0,
            is_shared: false,
            owner: None,
        };
        vmm.add_memory_map(map).unwrap();

        let removed_map = vmm.remove_memory_map_by_addr(0x1000).unwrap();
        assert_eq!(removed_map.vmarea.start, 0x1000);

        assert!(vmm.memmap_is_empty());
        assert_eq!(vmm.memmap_len(), 0);
        assert!(vmm.get_memory_map_by_addr(0x1000).is_none());
    }
988
    #[test_case]
    fn test_search_memory_map() {
        // search_memory_map finds the mapping containing an interior address.
        let vmm = VirtualMemoryManager::new();
        let vma1 = MemoryArea {
            start: 0x1000,
            end: 0x1fff,
        };
        let map1 = VirtualMemoryMap {
            vmarea: vma1,
            pmarea: vma1,
            permissions: 0,
            is_shared: false,
            owner: None,
        };
        let vma2 = MemoryArea {
            start: 0x3000,
            end: 0x3fff,
        };
        let map2 = VirtualMemoryMap {
            vmarea: vma2,
            pmarea: vma2,
            permissions: 0,
            is_shared: false,
            owner: None,
        };
        vmm.add_memory_map(map1).unwrap();
        vmm.add_memory_map(map2).unwrap();
        let found_map = vmm.search_memory_map(0x3500).unwrap();
        assert_eq!(found_map.vmarea.start, 0x3000);
    }
1019
    #[test_case]
    fn test_get_root_page_table() {
        // A freshly allocated address space exposes a root page table.
        let vmm = VirtualMemoryManager::new();
        let asid = alloc_virtual_address_space();
        vmm.set_asid(asid);
        let page_table = vmm.get_root_page_table();
        assert!(page_table.is_some());
    }
1028
    #[test_case]
    fn test_memory_optimization_features() {
        use crate::environment::PAGE_SIZE;

        let manager = VirtualMemoryManager::new();

        // mmap base is configurable and drives find_unmapped_area's start.
        assert_eq!(manager.get_mmap_base(), 0x40000000);
        manager.set_mmap_base(0x50000000);
        assert_eq!(manager.get_mmap_base(), 0x50000000);

        let alignment = PAGE_SIZE;
        let size = PAGE_SIZE;

        let addr = manager.find_unmapped_area(size, alignment);
        assert!(addr.is_some());
        assert_eq!(addr.unwrap(), 0x50000000);

        // Occupy the base; the next search must land above it.
        let map1 = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x80000000,
                end: 0x80000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x50000000,
                end: 0x50000fff,
            },
            0o644,
            false,
            None,
        );
        manager.add_memory_map(map1).unwrap();

        let addr2 = manager.find_unmapped_area(size, alignment);
        assert!(addr2.is_some());
        assert!(addr2.unwrap() > 0x50000fff);

        manager.set_mmap_base(0x60000000);
        // A mapping straddling the new base forces the search past its end.
        let overlapping_base_map = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x90000000,
                end: 0x9001ffff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x5fff0000,
                end: 0x6000ffff,
            },
            0o644,
            false,
            None,
        );
        manager.add_memory_map(overlapping_base_map).unwrap();
        let addr3 = manager.find_unmapped_area(size, alignment).unwrap();
        assert!(addr3 >= 0x60010000);

        // Stats: 1 + 32 pages mapped, one hole between the two mappings.
        let (total_maps, total_size, gaps) = manager.get_memory_stats();
        assert_eq!(total_maps, 2);
        assert_eq!(total_size, PAGE_SIZE * 33);
        assert_eq!(gaps, 1);

        let map2 = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x80002000,
                end: 0x80002fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x50002000,
                end: 0x50002fff,
            },
            0o644,
            false,
            None,
        );
        manager.add_memory_map(map2).unwrap();

        let (total_maps, total_size, gaps) = manager.get_memory_stats();
        assert_eq!(total_maps, 3);
        assert_eq!(total_size, PAGE_SIZE * 34);
        assert_eq!(gaps, 2);

        // Nothing here is adjacent in both spaces, so nothing coalesces.
        let coalesced = manager.coalesce_memory_maps();
        assert_eq!(coalesced, 0);
    }
1128
    #[test_case]
    fn test_memory_map_coalescing() {
        use crate::environment::PAGE_SIZE;

        let manager = VirtualMemoryManager::new();

        // Two mappings adjacent in BOTH address spaces with equal flags.
        let map1 = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x80000000,
                end: 0x80000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x10000000,
                end: 0x10000fff,
            },
            0o644,
            false,
            None,
        );
        let map2 = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x80001000,
                end: 0x80001fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x10001000,
                end: 0x10001fff,
            },
            0o644,
            false,
            None,
        );

        manager.add_memory_map(map1).unwrap();
        manager.add_memory_map(map2).unwrap();

        let (total_maps_before, _, _) = manager.get_memory_stats();
        assert_eq!(total_maps_before, 2);

        // One merge: the pair collapses into a single 2-page mapping.
        let coalesced = manager.coalesce_memory_maps();
        assert_eq!(coalesced, 1);
        let (total_maps_after, total_size, gaps) = manager.get_memory_stats();
        assert_eq!(total_maps_after, 1);
        assert_eq!(total_size, PAGE_SIZE * 2);
        assert_eq!(gaps, 0);
        let merged_map = manager.search_memory_map(0x10000000).unwrap();
        assert_eq!(merged_map.vmarea.start, 0x10000000);
        assert_eq!(merged_map.vmarea.end, 0x10001fff);
    }
1186
    #[test_case]
    fn test_complex_overlap_detection() {
        // Exercises add_memory_map's overlap rejection against three
        // non-contiguous baseline mappings, then boundary/gap insertions.
        let manager = VirtualMemoryManager::new();

        let map1 = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x10000000,
                end: 0x10000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x1000,
                end: 0x1fff,
            },
            0o644,
            false,
            None,
        );
        manager.add_memory_map(map1).unwrap();

        let map2 = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x20000000,
                end: 0x20000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x4000,
                end: 0x4fff,
            },
            0o644,
            false,
            None,
        );
        manager.add_memory_map(map2).unwrap();

        let map3 = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x30000000,
                end: 0x30000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x7000,
                end: 0x7fff,
            },
            0o644,
            false,
            None,
        );
        manager.add_memory_map(map3).unwrap();

        // Overlaps the tail of the 0x1000-0x1fff mapping -> rejected.
        let overlap_with_prev = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x40000000,
                end: 0x40000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x1800,
                end: 0x27ff,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(overlap_with_prev).is_err());

        // Overlaps the head of the 0x4000-0x4fff mapping -> rejected.
        let overlap_with_next = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x50000000,
                end: 0x50000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x3800,
                end: 0x47ff,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(overlap_with_next).is_err());

        // Entirely inside an existing mapping -> rejected.
        let contained_map = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x60000000,
                end: 0x600005ff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x1200,
                end: 0x17ff,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(contained_map).is_err());

        // Fully contains an existing mapping -> rejected.
        let containing_map = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x70000000,
                end: 0x70001fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x800,
                end: 0x27ff,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(containing_map).is_err());

        // Starts exactly one past an existing end -> accepted.
        let exact_boundary = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x80000000,
                end: 0x80000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x2000,
                end: 0x2fff,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(exact_boundary).is_ok());
        // Fits in the hole between existing mappings -> accepted.
        let gap_insertion = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x90000000,
                end: 0x90000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x5000,
                end: 0x5fff,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(gap_insertion).is_ok());

        // Below everything -> accepted.
        let beginning_map = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0xa0000000,
                end: 0xa0000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x0,
                end: 0xfff,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(beginning_map).is_ok());

        // Above everything -> accepted.
        let end_map = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0xb0000000,
                end: 0xb0000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x8000,
                end: 0x8fff,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(end_map).is_ok());

        assert_eq!(manager.memmap_len(), 7);

        // The tree iterates in ascending start order.
        let starts: [usize; 7] = [0x0, 0x1000, 0x2000, 0x4000, 0x5000, 0x7000, 0x8000];
        let mut i = 0;
        manager.with_memmaps(|mm| {
            for map in mm.values() {
                assert_eq!(map.vmarea.start, starts[i]);
                i += 1;
            }
        });
        assert_eq!(i, 7);
    }
1390
    #[test_case]
    fn test_alignment_and_edge_cases() {
        // add_memory_map rejects anything not page-aligned in address or
        // size, and accepts well-formed single-/multi-page mappings.
        let manager = VirtualMemoryManager::new();

        // Misaligned virtual start -> rejected.
        let misaligned_virtual = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x10000000,
                end: 0x10000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x1001,
                end: 0x2000,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(misaligned_virtual).is_err());

        // Misaligned physical start -> rejected.
        let misaligned_physical = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x10000001,
                end: 0x10001000,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x1000,
                end: 0x1fff,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(misaligned_physical).is_err());

        // Size not a page multiple -> rejected.
        let misaligned_size = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x10000000,
                end: 0x10000800,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x1000,
                end: 0x1800,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(misaligned_size).is_err());

        // Degenerate one-byte area -> rejected.
        let zero_size = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x10000000,
                end: 0x10000000,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x1000,
                end: 0x1000,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(zero_size).is_err());

        // Exactly one page -> accepted.
        let single_page = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x10000000,
                end: 0x10000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x1000,
                end: 0x1fff,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(single_page).is_ok());

        // 16 pages -> accepted.
        let large_mapping = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x20000000,
                end: 0x2000ffff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x10000,
                end: 0x1ffff,
            },
            0o644,
            false,
            None,
        );
        assert!(manager.add_memory_map(large_mapping).is_ok());

        assert_eq!(manager.memmap_len(), 2);
    }
1493
    #[test_case]
    fn test_cache_invalidation_on_add() {
        // Searches remain correct before and after an insert invalidates
        // the single-entry search cache.
        let manager = VirtualMemoryManager::new();

        let map1 = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x10000000,
                end: 0x10000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x1000,
                end: 0x1fff,
            },
            0o644,
            false,
            None,
        );
        manager.add_memory_map(map1).unwrap();

        // First search populates the cache; second one hits it.
        let found = manager.search_memory_map(0x1500);
        assert!(found.is_some());

        let found_again = manager.search_memory_map(0x1500);
        assert!(found_again.is_some());

        // Adding a mapping clears the cache.
        let map2 = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x20000000,
                end: 0x20000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x3000,
                end: 0x3fff,
            },
            0o644,
            false,
            None,
        );
        manager.add_memory_map(map2).unwrap();

        // Both old and new mappings still resolve correctly afterward.
        let found_after_invalidation = manager.search_memory_map(0x1500);
        assert!(found_after_invalidation.is_some());
        assert_eq!(found_after_invalidation.unwrap().vmarea.start, 0x1000);

        let found_new = manager.search_memory_map(0x3500);
        assert!(found_new.is_some());
        assert_eq!(found_new.unwrap().vmarea.start, 0x3000);
    }
1547
    #[test_case]
    fn test_add_memory_map_fixed_complete_overlap() {
        // A fixed mapping fully covering an existing one replaces it and
        // reports the overlapped slice.
        let manager = VirtualMemoryManager::new();

        let initial_map = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x10000000,
                end: 0x10000fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x2000,
                end: 0x2fff,
            },
            0o644,
            false,
            None,
        );
        manager.add_memory_map(initial_map).unwrap();
        assert_eq!(manager.memmap_len(), 1);

        // New fixed mapping spans 0x1000-0x3fff, engulfing 0x2000-0x2fff.
        let fixed_map = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x20000000,
                end: 0x20002fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x1000,
                end: 0x3fff,
            },
            0o755,
            true,
            None,
        );

        let result = manager.add_memory_map_fixed(fixed_map);
        assert!(result.is_ok());

        let overwritten_mappings = result.unwrap();
        assert_eq!(overwritten_mappings.len(), 1);
        assert_eq!(overwritten_mappings[0].vmarea.start, 0x2000);

        // Only the new mapping survives, with its own flags.
        assert_eq!(manager.memmap_len(), 1);
        let remaining_map = manager.search_memory_map(0x2000).unwrap();
        assert_eq!(remaining_map.vmarea.start, 0x1000);
        assert_eq!(remaining_map.vmarea.end, 0x3fff);
        assert_eq!(remaining_map.permissions, 0o755);
        assert_eq!(remaining_map.is_shared, true);
    }
1599
    #[test_case]
    fn test_add_memory_map_fixed_partial_overlap() {
        // A fixed mapping overlapping the tail of an existing one trims the
        // original and keeps its non-overlapping head.
        let manager = VirtualMemoryManager::new();

        let initial_map = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x10000000,
                end: 0x10001fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x1000,
                end: 0x2fff,
            },
            0o644,
            false,
            None,
        );
        manager.add_memory_map(initial_map).unwrap();
        assert_eq!(manager.memmap_len(), 1);

        // New fixed mapping 0x2000-0x3fff overlaps the original's 2nd page.
        let fixed_map = VirtualMemoryMap::new(
            crate::vm::vmem::MemoryArea {
                start: 0x20000000,
                end: 0x20001fff,
            },
            crate::vm::vmem::MemoryArea {
                start: 0x2000,
                end: 0x3fff,
            },
            0o755,
            true,
            None,
        );

        let result = manager.add_memory_map_fixed(fixed_map);
        assert!(result.is_ok());

        let overwritten_mappings = result.unwrap();
        assert_eq!(overwritten_mappings.len(), 1);
        // Head split of the original + the new mapping = two entries.
        assert_eq!(manager.memmap_len(), 2);

        let remaining_original = manager.search_memory_map(0x1500).unwrap();
        assert_eq!(remaining_original.vmarea.start, 0x1000);
        assert_eq!(remaining_original.vmarea.end, 0x1fff);
        assert_eq!(remaining_original.permissions, 0o644);

        let new_fixed = manager.search_memory_map(0x3000).unwrap();
        assert_eq!(new_fixed.vmarea.start, 0x2000);
        assert_eq!(new_fixed.vmarea.end, 0x3fff);
        assert_eq!(new_fixed.permissions, 0o755);
        assert_eq!(new_fixed.is_shared, true);
    }
1658
1659 #[test_case]
1660 fn test_add_memory_map_fixed_split_both_ends() {
1661 let manager = VirtualMemoryManager::new();
1662
1663 let initial_map = VirtualMemoryMap::new(
1665 crate::vm::vmem::MemoryArea {
1666 start: 0x10000000,
1667 end: 0x10003fff,
1668 }, crate::vm::vmem::MemoryArea {
1670 start: 0x1000,
1671 end: 0x4fff,
1672 }, 0o644,
1674 false,
1675 None,
1676 );
1677 manager.add_memory_map(initial_map).unwrap();
1678 assert_eq!(manager.memmap_len(), 1);
1679
1680 let fixed_map = VirtualMemoryMap::new(
1682 crate::vm::vmem::MemoryArea {
1683 start: 0x20000000,
1684 end: 0x20001fff,
1685 }, crate::vm::vmem::MemoryArea {
1687 start: 0x2000,
1688 end: 0x3fff,
1689 }, 0o755,
1691 true,
1692 None,
1693 );
1694
1695 let result = manager.add_memory_map_fixed(fixed_map);
1696 assert!(result.is_ok());
1697
1698 let overwritten_mappings = result.unwrap();
1699 assert_eq!(overwritten_mappings.len(), 1); assert_eq!(manager.memmap_len(), 3);
1703
1704 let before_part = manager.search_memory_map(0x1500).unwrap();
1706 assert_eq!(before_part.vmarea.start, 0x1000);
1707 assert_eq!(before_part.vmarea.end, 0x1fff);
1708 assert_eq!(before_part.permissions, 0o644);
1709
1710 let fixed_part = manager.search_memory_map(0x3000).unwrap();
1712 assert_eq!(fixed_part.vmarea.start, 0x2000);
1713 assert_eq!(fixed_part.vmarea.end, 0x3fff);
1714 assert_eq!(fixed_part.permissions, 0o755);
1715 assert_eq!(fixed_part.is_shared, true);
1716
1717 let after_part = manager.search_memory_map(0x4500).unwrap();
1719 assert_eq!(after_part.vmarea.start, 0x4000);
1720 assert_eq!(after_part.vmarea.end, 0x4fff);
1721 assert_eq!(after_part.permissions, 0o644);
1722 }
1723
1724 #[test_case]
1725 fn test_add_memory_map_fixed_no_overlap() {
1726 let manager = VirtualMemoryManager::new();
1727
1728 let initial_map = VirtualMemoryMap::new(
1730 crate::vm::vmem::MemoryArea {
1731 start: 0x10000000,
1732 end: 0x10000fff,
1733 }, crate::vm::vmem::MemoryArea {
1735 start: 0x1000,
1736 end: 0x1fff,
1737 }, 0o644,
1739 false,
1740 None,
1741 );
1742 manager.add_memory_map(initial_map).unwrap();
1743
1744 let fixed_map = VirtualMemoryMap::new(
1746 crate::vm::vmem::MemoryArea {
1747 start: 0x20000000,
1748 end: 0x20000fff,
1749 }, crate::vm::vmem::MemoryArea {
1751 start: 0x3000,
1752 end: 0x3fff,
1753 }, 0o755,
1755 true,
1756 None,
1757 );
1758
1759 let result = manager.add_memory_map_fixed(fixed_map);
1760 assert!(result.is_ok());
1761
1762 let overwritten_mappings = result.unwrap();
1763 assert_eq!(overwritten_mappings.len(), 0); assert_eq!(manager.memmap_len(), 2);
1767
1768 let first_map = manager.search_memory_map(0x1500);
1770 assert!(first_map.is_some());
1771 assert_eq!(first_map.unwrap().vmarea.start, 0x1000);
1772
1773 let second_map = manager.search_memory_map(0x3500);
1774 assert!(second_map.is_some());
1775 assert_eq!(second_map.unwrap().vmarea.start, 0x3000);
1776 }
1777
1778 #[test_case]
1779 fn test_lazy_mapping_and_unmapping() {
1780 let manager = VirtualMemoryManager::new();
1781 let vma = MemoryArea {
1782 start: 0x1000,
1783 end: 0x1fff,
1784 };
1785 let map = VirtualMemoryMap {
1786 vmarea: vma,
1787 pmarea: vma,
1788 permissions: 0o644,
1789 is_shared: false,
1790 owner: None,
1791 };
1792 let asid = alloc_virtual_address_space();
1793 manager.set_asid(asid);
1794 manager.add_memory_map(map).unwrap();
1795
1796 assert!(manager.lazy_map_page(0x1500).is_ok());
1798
1799 let translated_addr = manager.translate_vaddr(0x1500);
1802 assert!(translated_addr.is_some());
1803 assert_eq!(translated_addr.unwrap() & !(PAGE_SIZE - 1), 0x1000); manager.remove_memory_map_by_addr(0x1500);
1808
1809 let translated_addr_after_unmap = manager.translate_vaddr(0x1500);
1811 assert!(translated_addr_after_unmap.is_none());
1812 }
1813}