// kernel/abi/linux/riscv64/mm.rs

1use crate::{
2    abi::linux::riscv64::{
3        LinuxRiscv64Abi,
4        errno::{self, to_result},
5    },
6    arch::Trapframe,
7    environment::PAGE_SIZE,
8    mem::page::allocate_raw_pages,
9    task::mytask,
10    vm::vmem::{MemoryArea, VirtualMemoryMap},
11};
12use alloc::boxed::Box;
13
14pub fn sys_mmap(abi: &mut LinuxRiscv64Abi, trapframe: &mut Trapframe) -> usize {
15    // Linux mmap constants
16    const MAP_ANONYMOUS: usize = 0x20;
17    #[allow(dead_code)]
18    const MAP_FIXED: usize = 0x10;
19    #[allow(dead_code)]
20    const MAP_SHARED: usize = 0x01;
21
22    // Linux protection flags
23    const PROT_READ: usize = 0x1;
24    const PROT_WRITE: usize = 0x2;
25    const PROT_EXEC: usize = 0x4;
26
27    let task = match mytask() {
28        Some(task) => task,
29        None => return usize::MAX,
30    };
31
32    let addr = trapframe.get_arg(0);
33    let length = trapframe.get_arg(1);
34    let prot = trapframe.get_arg(2);
35    let flags = trapframe.get_arg(3);
36    let fd = trapframe.get_arg(4) as isize;
37    let offset = trapframe.get_arg(5);
38
39    // crate::println!(
40    //     "linux-riscv64: sys_mmap called: pc={:#x} addr={:#x} length={} prot={:#x} flags={:#x} fd={} offset={:#x}",
41    //     trapframe.epc,
42    //     addr,
43    //     length,
44    //     prot,
45    //     flags,
46    //     fd,
47    //     offset
48    // );
49
50    trapframe.increment_pc_next(task);
51
52    // crate::println!("sys_mmap: Step 1 - PC incremented");
53
54    // Input validation
55    if length == 0 {
56        // crate::println!("linux-riscv64: sys_mmap error: length == 0");
57        return usize::MAX; // -EINVAL
58    }
59
60    // crate::println!("sys_mmap: Step 2 - Length validated");
61
62    // Round up length to page boundary
63    let aligned_length = (length + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
64    let num_pages = aligned_length / PAGE_SIZE;
65
66    // crate::println!(
67    //     "sys_mmap: Step 3 - aligned_length={:#x}, num_pages={}",
68    //     aligned_length,
69    //     num_pages
70    // );
71
72    // Handle ANONYMOUS mappings specially
73    if (flags & MAP_ANONYMOUS) != 0 {
74        // crate::println!(
75        //     "linux-riscv64: sys_mmap - handling anonymous mapping (addr={:#x}, length={})",
76        //     addr,
77        //     aligned_length
78        // );
79        if fd != -1 {
80            // crate::println!("linux-riscv64: sys_mmap error: anonymous mapping with fd != -1 (fd={})", fd);
81            return to_result(errno::EINVAL);
82        }
83        let result = handle_anonymous_mapping(task, addr, aligned_length, num_pages, prot, flags);
84        // crate::println!("sys_mmap: RETURN {:#x} (anonymous mapping)", result);
85        return result;
86    }
87
88    // crate::println!("sys_mmap: Step 5 - Handling file-backed mapping");
89
90    // Handle file-backed mappings
91    if fd == -1 {
92        // crate::println!("sys_mmap: File-backed mapping requires valid file descriptor");
93        return to_result(errno::EINVAL);
94    }
95
96    // crate::println!("sys_mmap: Step 6 - Getting handle for fd={}", fd);
97
98    // Get handle from Linux fd
99    let handle = match abi.get_handle(fd as usize) {
100        Some(h) => {
101            // crate::println!("linux-riscv64: sys_mmap - fd {} -> handle {}", fd, h);
102            h
103        }
104        None => {
105            crate::println!(
106                "linux-riscv64: sys_mmap error - invalid file descriptor {}",
107                fd
108            );
109            return to_result(errno::EBADF);
110        }
111    };
112
113    // crate::println!("sys_mmap: Step 8 - Getting kernel object");
114
115    // Get kernel object from handle
116    let kernel_obj = match task.handle_table.get(handle) {
117        Some(obj) => {
118            // crate::println!("sys_mmap: Step 9 - Got kernel object");
119            obj
120        }
121        None => {
122            // crate::println!("sys_mmap: Invalid handle {}", handle);
123            return to_result(errno::EBADF);
124        }
125    };
126
127    // crate::println!("sys_mmap: Step 10 - Checking if object supports memory mapping");
128
129    // Check if object supports MemoryMappingOps
130    let memory_mappable = match kernel_obj.as_memory_mappable() {
131        Some(mappable) => {
132            // crate::println!("linux-riscv64: sys_mmap - object supports memory mapping");
133            mappable
134        }
135        None => {
136            crate::println!(
137                "linux-riscv64: sys_mmap error - object doesn't support memory mapping"
138            );
139            return to_result(errno::ENODEV);
140        }
141    };
142
143    // crate::println!("sys_mmap: Step 12 - Checking if object supports mmap");
144
145    // Check if the object supports mmap
146    if !memory_mappable.supports_mmap() {
147        crate::println!("sys_mmap: Object doesn't support mmap operation");
148        return to_result(errno::ENODEV);
149    }
150
151    // crate::println!(
152    //     "sys_mmap: Step 13 - Getting mapping info (offset={}, length={})",
153    //     offset,
154    //     length
155    // );
156    // crate::println!(
157    //     "linux-riscv64: sys_mmap - requesting mapping info (offset={:#x}, length={})",
158    //     offset,
159    //     length
160    // );
161
162    // Get mapping information from the object.
163    // Some backends reject length that extends beyond file size. We try to clamp
164    // to the largest mappable length (page down step) to avoid immediate failure.
165    let is_shared = (flags & MAP_SHARED) != 0;
166    let owner_name = memory_mappable.mmap_owner_name();
167    let should_log = owner_name.contains("xkb");
168    let mut ok_len = aligned_length;
169    let (paddr, obj_permissions, _obj_is_shared) = loop {
170        match memory_mappable.get_mapping_info_with(offset, ok_len, is_shared) {
171            Ok(info) => {
172                // crate::println!(
173                //     "linux-riscv64: sys_mmap - get_mapping_info returned paddr={:#x}, obj_perm={:#x}, is_shared={}, ok_len={}",
174                //     info.0, info.1, info.2, ok_len
175                // );
176                break info;
177            }
178            Err(e) => {
179                if ok_len >= PAGE_SIZE {
180                    ok_len -= PAGE_SIZE;
181                } else {
182                    ok_len = 0;
183                }
184                if ok_len == 0 {
185                    // crate::println!(
186                    //     "linux-riscv64: sys_mmap - object rejected requested length (offset={:#x}, length={}), no mappable bytes: {:?}",
187                    //     offset, length, e
188                    // );
189                    break (0, 0, false);
190                }
191            }
192        }
193    };
194
195    // Decide sharing semantics from flags (MAP_SHARED controls sharing)
196    let is_shared = (flags & MAP_SHARED) != 0;
197    // Determine final address
198    let is_fixed = (flags & MAP_FIXED) != 0;
199
200    let final_vaddr = if addr == 0 {
201        match task
202            .vm_manager
203            .find_unmapped_area(aligned_length, PAGE_SIZE)
204        {
205            Some(vaddr) => {
206                // crate::println!("linux-riscv64: sys_mmap - found unmapped area at {:#x}", vaddr);
207                vaddr
208            }
209            None => {
210                crate::println!(
211                    "linux-riscv64: sys_mmap error - no suitable unmapped area for length={}",
212                    aligned_length
213                );
214                return to_result(errno::ENOMEM);
215            }
216        }
217    } else {
218        if addr % PAGE_SIZE != 0 {
219            crate::println!(
220                "linux-riscv64: sys_mmap error - requested addr {:#x} not page aligned",
221                addr
222            );
223            return to_result(errno::EINVAL);
224        }
225
226        if !is_fixed {
227            // addr is a hint, check if it's available
228            let requested_end = addr + aligned_length - 1;
229            let has_overlap = task.vm_manager.with_memmaps(|mm| {
230                mm.values()
231                    .any(|map| !(requested_end < map.vmarea.start || addr > map.vmarea.end))
232            });
233
234            if has_overlap {
235                match task
236                    .vm_manager
237                    .find_unmapped_area(aligned_length, PAGE_SIZE)
238                {
239                    Some(vaddr) => {
240                        // crate::println!(
241                        //     "linux-riscv64: sys_mmap - hint address {:#x} overlaps, alternative {:#x}",
242                        //     addr,
243                        //     vaddr
244                        // );
245                        vaddr
246                    }
247                    None => {
248                        crate::println!(
249                            "linux-riscv64: sys_mmap error - no suitable unmapped area for length={}",
250                            aligned_length
251                        );
252                        return to_result(errno::ENOMEM);
253                    }
254                }
255            } else {
256                // crate::println!(
257                //     "linux-riscv64: sys_mmap - using hint address {:#x} (no overlap)",
258                //     addr
259                // );
260                addr
261            }
262        } else {
263            // crate::println!(
264            //     "linux-riscv64: sys_mmap - using fixed address {:#x} (MAP_FIXED set)",
265            //     addr
266            // );
267            addr
268        }
269    };
270
271    // crate::println!(
272    //     "linux-riscv64: sys_mmap - creating mapping vaddr={:#x} paddr={:#x} length={} perms_req={:#x} obj_perm={:#x} is_shared={}",
273    //     final_vaddr,
274    //     paddr,
275    //     aligned_length,
276    //     prot,
277    //     obj_permissions,
278    //     is_shared
279    // );
280
281    // crate::println!(
282    //     "sys_mmap: Step 19 - Creating memory areas (vaddr={:#x}, paddr={:#x})",
283    //     final_vaddr,
284    //     paddr
285    // );
286
287    // Create memory areas
288    let vmarea = MemoryArea::new(final_vaddr, final_vaddr + aligned_length - 1);
289
290    // crate::println!("sys_mmap: Step 20 - Calculating permissions");
291
292    // Convert protection flags to kernel permissions
293    // For private mappings, we use the requested prot directly
294    // For shared mappings, we need to respect object permissions
295    let mut prot_mask = 0;
296    if (prot & PROT_READ) != 0 {
297        prot_mask |= 0x1;
298    }
299    if (prot & PROT_WRITE) != 0 {
300        prot_mask |= 0x2;
301    }
302    if (prot & PROT_EXEC) != 0 {
303        prot_mask |= 0x4;
304    }
305
306    // For private mappings, use requested permissions directly
307    // For shared mappings, combine with object permissions
308    const MAP_PRIVATE: usize = 0x02;
309    let is_map_private_flag = (flags & MAP_PRIVATE) != 0;
310
311    let mut final_permissions = if is_map_private_flag {
312        // Private mapping: use requested permissions (will copy data)
313        prot_mask
314    } else {
315        // Shared mapping: must respect object permissions
316        obj_permissions & prot_mask
317    };
318
319    if prot != 0 {
320        final_permissions |= 0x08; // Access from user space (only if not PROT_NONE)
321    }
322
323    // Note: Tail-only permission adjustments (e.g., execute-only) are handled separately.
324
325    // crate::println!("sys_mmap: Step 21 - final_permissions={:#x} (prot_mask={:#x}, obj_perm={:#x}, is_private={})",
326    //     final_permissions, prot_mask, obj_permissions, is_map_private_flag);
327
328    // Determine whether the mapping was requested as MAP_PRIVATE
329
330    // crate::println!("sys_mmap: Step 22 - is_map_private_flag={}, is_shared={}", is_map_private_flag, is_shared);
331
332    // If this is a file-backed private mapping, allocate private pages now and copy contents
333    if is_map_private_flag && !is_shared {
334        // crate::println!("sys_mmap: Step 23 - Allocating pages for private mapping");
335        // Allocate pages for the private copy
336        let pages = allocate_raw_pages(num_pages);
337        // crate::println!("sys_mmap: Step 24 - Allocated pages at {:#x}", pages as usize);
338        let pages_ptr = pages as usize;
339        let private_pmarea = MemoryArea::new(pages_ptr, pages_ptr + aligned_length - 1);
340
341        // crate::println!("sys_mmap: Step 25 - Creating VirtualMemoryMap");
342        let vm_map = VirtualMemoryMap::new(private_pmarea, vmarea, final_permissions, false, None);
343
344        // crate::println!("sys_mmap: Step 26 - Adding memory map to VM manager (is_fixed={})", is_fixed);
345
346        // Use add_memory_map_fixed only if MAP_FIXED is set, otherwise use add_memory_map
347        let map_result = if is_fixed {
348            task.vm_manager
349                .add_memory_map_fixed(vm_map)
350                .map(|removed| Some(removed))
351        } else {
352            task.vm_manager.add_memory_map(vm_map).map(|_| None)
353        };
354
355        match map_result {
356            Ok(removed_mappings_opt) => {
357                // let removed_count = removed_mappings_opt.as_ref().map(|r| r.len()).unwrap_or(0);
358                // crate::println!("sys_mmap: Step 27 - Memory map added successfully, processing {} removed mappings", removed_count);
359                // For private mappings we do not notify the original object via on_mapped
360                // because the new mapping uses private physical pages and the object
361                // is not the owner of those pages.
362
363                // Notify owners for any removed mappings (only shared ones)
364                // crate::println!("sys_mmap: Step 28 - Notifying owners of removed mappings");
365                if let Some(removed_mappings) = &removed_mappings_opt {
366                    for removed_map in removed_mappings {
367                        if removed_map.is_shared {
368                            if let Some(owner_weak) = &removed_map.owner {
369                                if let Some(owner) = owner_weak.upgrade() {
370                                    owner.on_unmapped(
371                                        removed_map.vmarea.start,
372                                        removed_map.vmarea.size(),
373                                    );
374                                }
375                            }
376                        }
377                    }
378                }
379
380                // Clean up managed pages from removed mappings
381                // crate::println!("sys_mmap: Step 29 - Cleaning up managed pages from removed mappings");
382                if let Some(removed_mappings) = removed_mappings_opt {
383                    for removed_map in removed_mappings {
384                        if !removed_map.is_shared {
385                            let mapping_start = removed_map.vmarea.start;
386                            let mapping_end = removed_map.vmarea.end;
387                            let num_removed_pages =
388                                (mapping_end - mapping_start + 1 + PAGE_SIZE - 1) / PAGE_SIZE;
389                            for i in 0..num_removed_pages {
390                                let page_vaddr = mapping_start + i * PAGE_SIZE;
391                                if let Some(_managed_page) = task.remove_managed_page(page_vaddr) {
392                                    // freed when dropped
393                                }
394                            }
395                        }
396                    }
397                }
398
399                // Zero-initialize entire region, then copy only the mappable portion (ok_len)
400                unsafe {
401                    core::ptr::write_bytes(pages as *mut u8, 0u8, aligned_length);
402                }
403                if ok_len > 0 {
404                    let copy_len = core::cmp::min(ok_len, aligned_length);
405                    unsafe {
406                        core::ptr::copy_nonoverlapping(
407                            paddr as *const u8,
408                            pages as *mut u8,
409                            copy_len,
410                        );
411                    }
412                }
413
414                // Add managed pages for the task so they are freed on task exit
415                // crate::println!("sys_mmap: Step 31 - Adding {} managed pages", num_pages);
416                for i in 0..num_pages {
417                    let page_vaddr = final_vaddr + i * crate::environment::PAGE_SIZE;
418                    let page_ptr = unsafe { (pages as *mut crate::mem::page::Page).add(i) };
419                    task.add_managed_page(crate::task::ManagedPage {
420                        vaddr: page_vaddr,
421                        page: unsafe { Box::from_raw(page_ptr) },
422                    });
423                }
424
425                // crate::println!("sys_mmap: Step 32 - Private mapping complete, returning {:#x}", final_vaddr);
426
427                // Print current memory map for debugging
428                // crate::println!("=== Memory Map After mmap ===");
429                // for map in task.vm_manager.memmap_iter() {
430                //     crate::println!("  VA: {:#x}-{:#x} -> PA: {:#x}-{:#x} (perm: {:#x}, shared: {})",
431                //         map.vmarea.start, map.vmarea.end,
432                //         map.pmarea.start, map.pmarea.end,
433                //         map.permissions, map.is_shared);
434                // }
435                // crate::println!("=============================");
436
437                // crate::println!("sys_mmap: RETURN {:#x} (private file-backed)", final_vaddr);
438                final_vaddr
439            }
440            Err(_) => {
441                // crate::println!("sys_mmap: Step 33 - Failed to add private mapping");
442                // Free allocated pages to avoid leak
443                crate::mem::page::free_raw_pages(pages, num_pages);
444                // crate::println!("sys_mmap: Failed to add private mapping");
445                to_result(errno::ENOMEM)
446            }
447        }
448    } else {
449        // crate::println!("sys_mmap: Step 33 - Shared or object-backed mapping path");
450        // For MAP_SHARED (or object-backed) mappings, if the backend couldn't provide
451        // the full requested length, only map the largest page-aligned prefix and leave
452        // the tail unmapped so accesses fault (Linux would raise SIGBUS beyond EOF).
453        if paddr == 0 && ok_len == 0 {
454            // Nothing mappable at all (e.g., offset beyond EOF)
455            return to_result(errno::EINVAL);
456        }
457
458        let ok_len_aligned = (ok_len / PAGE_SIZE) * PAGE_SIZE;
459        if ok_len_aligned == 0 {
460            // Partial (< PAGE_SIZE) tail only is not representable as shared mapping safely
461            // without a COW-like helper; reject for now.
462            crate::println!(
463                "linux-riscv64: sys_mmap - only subpage tail available; rejecting shared mapping"
464            );
465            return to_result(errno::EINVAL);
466        }
467
468        // Shrink vm/pm areas to the mappable prefix when necessary
469        let vmarea = MemoryArea::new(final_vaddr, final_vaddr + ok_len_aligned - 1);
470        let pmarea = MemoryArea::new(paddr, paddr + ok_len_aligned - 1);
471
472        // Create virtual memory map with weak reference to the object (shared/object-backed)
473        let owner = kernel_obj.as_memory_mappable_weak();
474        let vm_map = VirtualMemoryMap::new(pmarea, vmarea, final_permissions, is_shared, owner);
475
476        // Add the mapping to VM manager
477        // crate::println!("sys_mmap: Step 35 - Adding memory map to VM manager (is_fixed={})", is_fixed);
478
479        let map_result = if is_fixed {
480            task.vm_manager
481                .add_memory_map_fixed(vm_map)
482                .map(|removed| Some(removed))
483        } else {
484            task.vm_manager.add_memory_map(vm_map).map(|_| None)
485        };
486
487        match map_result {
488            Ok(removed_mappings_opt) => {
489                // Notify the object that mapping was created
490                memory_mappable.on_mapped(final_vaddr, paddr, aligned_length, offset);
491
492                // First, notify object owners about removed mappings
493                if let Some(removed_mappings) = &removed_mappings_opt {
494                    for removed_map in removed_mappings {
495                        if removed_map.is_shared {
496                            if let Some(owner_weak) = &removed_map.owner {
497                                if let Some(owner) = owner_weak.upgrade() {
498                                    owner.on_unmapped(
499                                        removed_map.vmarea.start,
500                                        removed_map.vmarea.size(),
501                                    );
502                                }
503                            }
504                        }
505                    }
506                }
507
508                // Then, handle managed page cleanup (MMU cleanup is already handled by VmManager.add_memory_map_fixed)
509                if let Some(removed_mappings) = removed_mappings_opt {
510                    for removed_map in removed_mappings {
511                        // Remove managed pages only for private mappings
512                        if !removed_map.is_shared {
513                            let mapping_start = removed_map.vmarea.start;
514                            let mapping_end = removed_map.vmarea.end;
515                            let num_removed_pages =
516                                (mapping_end - mapping_start + 1 + PAGE_SIZE - 1) / PAGE_SIZE;
517
518                            for i in 0..num_removed_pages {
519                                let page_vaddr = mapping_start + i * PAGE_SIZE;
520                                if let Some(_managed_page) = task.remove_managed_page(page_vaddr) {
521                                    // The managed page is automatically freed when dropped
522                                }
523                            }
524                        }
525                    }
526                }
527
528                // crate::println!("sys_mmap: RETURN {:#x} (shared/object-backed)", final_vaddr);
529                final_vaddr
530            }
531            Err(_) => {
532                // crate::println!("sys_mmap: Failed to add memory mapping");
533                to_result(errno::ENOMEM)
534            }
535        }
536    }
537}
538
539/// Handle anonymous memory mapping based on scarlet's implementation
540fn handle_anonymous_mapping(
541    task: &crate::task::Task,
542    vaddr: usize,
543    aligned_length: usize,
544    num_pages: usize,
545    prot: usize,
546    flags: usize,
547) -> usize {
548    // Linux protection flags
549    const PROT_READ: usize = 0x1;
550    const PROT_WRITE: usize = 0x2;
551    const PROT_EXEC: usize = 0x4;
552    const MAP_FIXED: usize = 0x10;
553
554    // For anonymous mappings, decide shareable based on flags
555    const MAP_SHARED: usize = 0x01;
556    let is_shared = (flags & MAP_SHARED) != 0;
557
558    // Determine final address - if vaddr is 0, find an unmapped area
559    let final_vaddr = if vaddr == 0 {
560        match task
561            .vm_manager
562            .find_unmapped_area(aligned_length, PAGE_SIZE)
563        {
564            Some(addr) => {
565                // crate::println!("sys_mmap (anonymous): Found unmapped area at {:#x}", addr);
566                addr
567            }
568            None => {
569                // crate::println!("sys_mmap (anonymous): No suitable address found");
570                return to_result(errno::ENOMEM);
571            }
572        }
573    } else {
574        // If vaddr is non-zero and MAP_FIXED is not set, treat it as a hint
575        let is_fixed = (flags & MAP_FIXED) != 0;
576        if !is_fixed {
577            // Check if the requested range is available
578            let requested_end = vaddr + aligned_length - 1;
579            let has_overlap = task.vm_manager.with_memmaps(|mm| {
580                mm.values()
581                    .any(|map| !(requested_end < map.vmarea.start || vaddr > map.vmarea.end))
582            });
583
584            if has_overlap {
585                match task
586                    .vm_manager
587                    .find_unmapped_area(aligned_length, PAGE_SIZE)
588                {
589                    Some(addr) => {
590                        // crate::println!("sys_mmap (anonymous): Hint address {:#x} occupied, using {:#x}", vaddr, addr);
591                        addr
592                    }
593                    None => {
594                        // crate::println!("sys_mmap (anonymous): No suitable address found");
595                        return to_result(errno::ENOMEM);
596                    }
597                }
598            } else {
599                // crate::println!("sys_mmap (anonymous): Using hint address {:#x}", vaddr);
600                vaddr
601            }
602        } else {
603            // crate::println!("sys_mmap (anonymous): Using fixed address {:#x}", vaddr);
604            vaddr
605        }
606    };
607
608    // For anonymous mappings, allocate physical memory directly
609    let pages = allocate_raw_pages(num_pages);
610    let pages_ptr = pages as usize;
611
612    // Convert protection flags to kernel permissions
613    let mut permissions = 0;
614    if prot != 0 {
615        permissions |= 0x08; // Access from user space (only if not PROT_NONE)
616        if (prot & PROT_READ) != 0 {
617            permissions |= 0x1; // Readable
618        }
619        if (prot & PROT_WRITE) != 0 {
620            permissions |= 0x2; // Writable
621        }
622        if (prot & PROT_EXEC) != 0 {
623            permissions |= 0x4; // Executable
624        }
625    }
626
627    // Create memory areas
628    let vmarea = MemoryArea::new(final_vaddr, final_vaddr + aligned_length - 1);
629    let pmarea = MemoryArea::new(pages_ptr, pages_ptr + aligned_length - 1);
630
631    // Create virtual memory map
632    let vm_map = VirtualMemoryMap::new(pmarea, vmarea, permissions, is_shared, None); // Anonymous mappings have no owner
633
634    // Use add_memory_map_fixed for both FIXED and non-FIXED mappings to handle overlaps consistently
635    match task.vm_manager.add_memory_map_fixed(vm_map) {
636        Ok(removed_mappings) => {
637            // First, process notifications for object owners
638            for removed_map in &removed_mappings {
639                if removed_map.is_shared {
640                    if let Some(owner_weak) = &removed_map.owner {
641                        if let Some(owner) = owner_weak.upgrade() {
642                            owner.on_unmapped(removed_map.vmarea.start, removed_map.vmarea.size());
643                        }
644                    }
645                }
646            }
647
648            // Then, handle managed page cleanup (MMU cleanup is already handled by VmManager.add_memory_map_fixed)
649            for removed_map in removed_mappings {
650                // Remove managed pages only for private mappings
651                if !removed_map.is_shared {
652                    let mapping_start = removed_map.vmarea.start;
653                    let mapping_end = removed_map.vmarea.end;
654                    let num_removed_pages =
655                        (mapping_end - mapping_start + 1 + PAGE_SIZE - 1) / PAGE_SIZE;
656
657                    for i in 0..num_removed_pages {
658                        let page_vaddr = mapping_start + i * PAGE_SIZE;
659                        if let Some(_managed_page) = task.remove_managed_page(page_vaddr) {
660                            // The managed page is automatically freed when dropped
661                        }
662                    }
663                }
664            }
665
666            // Add managed pages for the new anonymous mapping
667            for i in 0..num_pages {
668                let page_vaddr = final_vaddr + i * crate::environment::PAGE_SIZE;
669                let page_ptr = unsafe { (pages as *mut crate::mem::page::Page).add(i) };
670                task.add_managed_page(crate::task::ManagedPage {
671                    vaddr: page_vaddr,
672                    page: unsafe { Box::from_raw(page_ptr) },
673                });
674            }
675
676            final_vaddr
677        }
678        Err(_) => {
679            // Free allocated pages on error to avoid leak
680            crate::mem::page::free_raw_pages(pages, num_pages);
681            to_result(errno::ENOMEM)
682        }
683    }
684}
685
686pub fn sys_mprotect(_abi: &mut LinuxRiscv64Abi, trapframe: &mut Trapframe) -> usize {
687    // Linux protection flags
688    const PROT_READ: usize = 0x1;
689    const PROT_WRITE: usize = 0x2;
690    const PROT_EXEC: usize = 0x4;
691
692    let task = match mytask() {
693        Some(task) => task,
694        None => return usize::MAX,
695    };
696
697    let addr = trapframe.get_arg(0);
698    let length = trapframe.get_arg(1);
699    let prot = trapframe.get_arg(2);
700
701    // crate::println!("sys_mprotect: addr={:#x}, length={}, prot={:#x}", addr, length, prot);
702
703    trapframe.increment_pc_next(task);
704    // return 0;
705
706    // Input validation
707    if length == 0 || addr % PAGE_SIZE != 0 {
708        return usize::MAX; // -EINVAL
709    }
710
711    // Round up length to page boundary
712    let aligned_length = (length + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
713    let num_pages = aligned_length / PAGE_SIZE;
714
715    // Check if all pages in the range are mapped
716    for i in 0..num_pages {
717        let page_addr = addr + i * PAGE_SIZE;
718        if task.vm_manager.translate_vaddr(page_addr).is_none() {
719            // crate::println!("sys_mprotect: Unmapped page at address {:#x}", page_addr);
720            return usize::MAX; // -ENOMEM
721        }
722    }
723
724    // Get the original mapping to determine properties
725    let original_mapping = match task.vm_manager.search_memory_map(addr) {
726        Some(map) => map,
727        None => {
728            // crate::println!("sys_mprotect: No memory mapping found at address {:#x}", addr);
729            return usize::MAX; // -ENOMEM
730        }
731    };
732
733    // Convert Linux protection flags to kernel permissions
734    let mut new_permissions = 0;
735    if prot != 0 {
736        new_permissions |= 0x08; // Access from user space (only if not PROT_NONE)
737        if (prot & PROT_READ) != 0 {
738            new_permissions |= 0x1; // Readable
739        }
740        if (prot & PROT_WRITE) != 0 {
741            new_permissions |= 0x2; // Writable
742        }
743        if (prot & PROT_EXEC) != 0 {
744            new_permissions |= 0x4; // Executable
745        }
746    }
747
748    // Note: Do not globally enforce execute-only here.
749
750    // For file-backed mappings, check object permissions
751    if let Some(owner_weak) = &original_mapping.owner {
752        if let Some(owner) = owner_weak.upgrade() {
753            let offset = addr - original_mapping.vmarea.start;
754            if let Ok((_, obj_permissions, _)) = owner.get_mapping_info(offset, aligned_length) {
755                if (new_permissions & obj_permissions) != (new_permissions & 0x7) {
756                    // crate::println!("sys_mprotect: Requested permissions exceed object permissions");
757                    return usize::MAX; // -EACCES
758                }
759            }
760        }
761    }
762
763    // Calculate physical address for the new mapping
764    let offset_in_mapping = addr - original_mapping.vmarea.start;
765    let new_paddr = original_mapping.pmarea.start + offset_in_mapping;
766
767    // Create the new memory mapping with updated permissions
768    let new_map = VirtualMemoryMap::new(
769        MemoryArea::new(new_paddr, new_paddr + aligned_length - 1),
770        MemoryArea::new(addr, addr + aligned_length - 1),
771        new_permissions,
772        original_mapping.is_shared,
773        original_mapping.owner.clone(),
774    );
775
776    // Use add_memory_map_fixed to handle splitting and overlaps automatically
777    match task.vm_manager.add_memory_map_fixed(new_map) {
778        Ok(_removed_mappings) => {
779            // crate::println!("sys_mprotect: Successfully updated permissions for {:#x}-{:#x}",
780            //    addr, addr + aligned_length - 1);
781
782            0 // Success
783        }
784        Err(_) => {
785            // crate::println!("sys_mprotect: Failed to update memory mapping: {}", e);
786            usize::MAX // -EFAULT
787        }
788    }
789}
790
791pub fn sys_munmap(_abi: &mut LinuxRiscv64Abi, trapframe: &mut Trapframe) -> usize {
792    let task = match mytask() {
793        Some(task) => task,
794        None => return usize::MAX,
795    };
796
797    let vaddr = trapframe.get_arg(0);
798    let length = trapframe.get_arg(1);
799
800    trapframe.increment_pc_next(task);
801
802    // Input validation
803    if length == 0 || vaddr % PAGE_SIZE != 0 {
804        return usize::MAX; // -EINVAL
805    }
806
807    if vaddr == 0 {
808        crate::println!("sys_munmap: Cannot unmap null address");
809        return usize::MAX; // -EINVAL
810    }
811
812    // Remove the mapping regardless of whether it's anonymous or object-based
813    if let Some(removed_map) = task.vm_manager.remove_memory_map_by_addr(vaddr) {
814        // Notify the object owner if available (for object-based mappings)
815        if let Some(owner_weak) = &removed_map.owner {
816            if removed_map.is_shared {
817                if let Some(owner) = owner_weak.upgrade() {
818                    owner.on_unmapped(vaddr, length);
819                }
820            }
821        }
822
823        // Remove managed pages only for private mappings
824        // Shared mappings should not have their physical pages freed here
825        // as they might be used by other processes
826        // (MMU cleanup is already handled by VmManager.remove_memory_map_by_addr)
827        if !removed_map.is_shared {
828            let mapping_start = removed_map.vmarea.start;
829            let mapping_end = removed_map.vmarea.end;
830            let num_pages = (mapping_end - mapping_start + 1 + PAGE_SIZE - 1) / PAGE_SIZE;
831
832            for i in 0..num_pages {
833                let page_vaddr = mapping_start + i * PAGE_SIZE;
834                if let Some(_managed_page) = task.remove_managed_page(page_vaddr) {
835                    // The managed page is automatically freed when dropped
836                }
837            }
838        }
839
840        0
841    } else {
842        usize::MAX // No mapping found at this address
843    }
844}
845
846// TODO: Migrate object-backed MAP_PRIVATE mappings to delayed Copy-On-Write (COW).
847// Motivation:
848// - Currently MAP_PRIVATE file-backed mappings in the Linux ABI handler may allocate
849//   private copies eagerly. For large mappings or read-mostly workloads this is
850//   inefficient. Delayed COW copies pages only on first write, saving memory and CPU.
851//
852// High-level plan (implementation checklist):
853// 1) Syscall layer (this file): when a user requests MAP_PRIVATE, mark the new
854//    VirtualMemoryMap with a `cow` flag and avoid performing an immediate copy.
855//    - Install the mapping with write permission cleared so stores will trap.
856//    - Keep the owner reference so reads can still source data from the object.
857//
858// 2) VM representation: ensure VirtualMemoryMap has a boolean `cow` field and
859//    the field is propagated to all mapping creation sites in the kernel.
860//
861// 3) Trap handling: modify the architecture trap/exception handler so that a
862//    store (write) page fault will check the `cow` flag on the mapping and call
863//    a dedicated per-page COW handler instead of the generic lazy mapping path.
864//
865// 4) Per-page COW handler (Task::handle_cow_page): allocate a new physical page,
866//    copy the contents from the original backing paddr into it, replace only the
867//    single faulting page in the mapping (e.g. via vm_manager.add_memory_map_fixed
868//    with a one-page mapping), map it immediately, and register it as a managed
869//    page of the task so it will be freed on exit.
870//
871// 5) Fork/clone semantics: preserve COW semantics across fork/clone so parent and
872//    child share pages until either writes; ensure managed_pages bookkeeping is
873//    adjusted so that only private copies are freed by the owner task.
874//
875// 6) Tests and validation:
876//    - Integration tests for two tasks mapping the same file MAP_PRIVATE and
877//      verifying that a write by one task creates a private copy while the other
878//      retains original contents.
879//    - Tests for fork/clone + MAP_PRIVATE behavior and corner cases (partial-page
880//      writes, overlapping mappings, munmap after COW).
881//
882// 7) Documentation: update rustdoc and design documents describing the `cow`
883//    flag, the runtime behavior, and which object types are eligible for COW.
884//
885// Acceptance criteria:
886// - MAP_PRIVATE mappings are created without eager copying (cow=true and write bit
887//   cleared).
888// - On first write to a page, only that page is copied and the writer receives a
889//   private writable page while others continue to see the original data.
890// - Tests pass and there are no page leaks.
891//
892// Notes:
893// - Some object types (device MMIO, special backing) cannot be COW'ed safely;
894//   in such cases the syscall should either fall back to eager copy, reject the
895//   mapping, or require explicit flags. Document these cases.