kernel/fs/vfs_v2/drivers/fat32/
node.rs

1//! FAT32 VFS Node Implementation
2//!
3//! This module implements the VfsNode trait for FAT32 filesystem nodes.
4//! It provides the interface between the VFS layer and FAT32-specific node data.
5
6use alloc::{
7    boxed::Box,
8    collections::BTreeMap,
9    string::String,
10    sync::{Arc, Weak},
11    vec,
12    vec::Vec,
13};
14use core::{any::Any, fmt::Debug};
15use spin::{Mutex, rwlock::RwLock};
16
17use crate::fs::{
18    FileMetadata, FileObject, FilePermission, FileSystemError, FileSystemErrorKind, FileType,
19    SeekFrom,
20};
21use crate::object::capability::{ControlOps, MemoryMappingOps, StreamError, StreamOps};
22
23use crate::environment::PAGE_SIZE;
24use crate::fs::vfs_v2::cache::PageCacheCapable;
25use crate::fs::vfs_v2::core::{FileSystemOperations, VfsNode};
26use crate::mem::{page::allocate_boxed_pages, page_cache::PageCacheManager};
27
/// FAT32 filesystem node
///
/// This structure represents a file or directory in the FAT32 filesystem.
/// It implements the VfsNode trait to integrate with the VFS v2 architecture.
/// Content is read/written directly from/to the block device, not stored in memory.
///
/// Every field sits behind its own lock so a shared `Arc<Fat32Node>` can be
/// mutated from multiple call sites without a single big lock.
pub struct Fat32Node {
    /// Node name (a single path component, not a full path)
    pub name: RwLock<String>,
    /// File type (file or directory); also duplicated in `metadata.file_type`
    pub file_type: RwLock<FileType>,
    /// File metadata (size, permissions, timestamps, file_id, link count)
    pub metadata: RwLock<FileMetadata>,
    /// Child nodes (for directories) - cached, but loaded from disk on demand
    pub children: RwLock<BTreeMap<String, Arc<dyn VfsNode>>>,
    /// Parent node (weak reference to avoid cycles)
    pub parent: RwLock<Option<Weak<Fat32Node>>>,
    /// Reference to filesystem (weak so nodes do not keep the FS alive)
    pub filesystem: RwLock<Option<Weak<dyn FileSystemOperations>>>,
    /// Starting cluster number in FAT32 (0 is treated elsewhere as
    /// "no cluster allocated yet" — see `Fat32FileObject::sync_to_disk`)
    pub cluster: RwLock<u32>,
    /// Directory entries loaded flag (for directories)
    pub children_loaded: RwLock<bool>,
}
51
52impl Debug for Fat32Node {
53    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
54        f.debug_struct("Fat32Node")
55            .field("name", &self.name.read())
56            .field("file_type", &self.file_type.read())
57            .field("metadata", &self.metadata.read())
58            .field("cluster", &self.cluster.read())
59            .field("children_loaded", &self.children_loaded.read())
60            .field(
61                "parent",
62                &self.parent.read().as_ref().map(|p| p.strong_count()),
63            )
64            .finish()
65    }
66}
67
68impl Fat32Node {
69    /// Create a new regular file node
70    pub fn new_file(name: String, file_id: u64, cluster: u32) -> Self {
71        Self {
72            name: RwLock::new(name),
73            file_type: RwLock::new(FileType::RegularFile),
74            metadata: RwLock::new(FileMetadata {
75                file_type: FileType::RegularFile,
76                size: 0,
77                permissions: FilePermission {
78                    read: true,
79                    write: true,
80                    execute: false,
81                },
82                created_time: 0, // TODO: Convert FAT32 timestamps
83                modified_time: 0,
84                accessed_time: 0,
85                file_id,
86                link_count: 1,
87            }),
88            children: RwLock::new(BTreeMap::new()),
89            parent: RwLock::new(None),
90            filesystem: RwLock::new(None),
91            cluster: RwLock::new(cluster),
92            children_loaded: RwLock::new(false),
93        }
94    }
95
96    /// Create a new directory node
97    pub fn new_directory(name: String, file_id: u64, cluster: u32) -> Self {
98        Self {
99            name: RwLock::new(name),
100            file_type: RwLock::new(FileType::Directory),
101            metadata: RwLock::new(FileMetadata {
102                file_type: FileType::Directory,
103                size: 0,
104                permissions: FilePermission {
105                    read: true,
106                    write: true,
107                    execute: true, // Directories need execute permission for traversal
108                },
109                created_time: 0, // TODO: Convert FAT32 timestamps
110                modified_time: 0,
111                accessed_time: 0,
112                file_id,
113                link_count: 1,
114            }),
115            children: RwLock::new(BTreeMap::new()),
116            parent: RwLock::new(None),
117            filesystem: RwLock::new(None),
118            cluster: RwLock::new(cluster),
119            children_loaded: RwLock::new(false),
120        }
121    }
122
123    /// Set the parent node (weak reference)
124    pub fn set_parent(&self, parent: Option<Weak<Fat32Node>>) {
125        *self.parent.write() = parent;
126    }
127
128    /// Set the filesystem reference
129    pub fn set_filesystem(&self, filesystem: Weak<dyn FileSystemOperations>) {
130        *self.filesystem.write() = Some(filesystem);
131    }
132
133    /// Get the starting cluster number
134    pub fn cluster(&self) -> u32 {
135        *self.cluster.read()
136    }
137
138    /// Set the starting cluster number
139    pub fn set_cluster(&self, cluster: u32) {
140        *self.cluster.write() = cluster;
141    }
142}
143
144impl VfsNode for Fat32Node {
145    fn id(&self) -> u64 {
146        self.metadata.read().file_id
147    }
148
149    fn filesystem(&self) -> Option<Weak<dyn FileSystemOperations>> {
150        self.filesystem.read().clone()
151    }
152
153    fn file_type(&self) -> Result<FileType, FileSystemError> {
154        Ok(self.file_type.read().clone())
155    }
156
157    fn metadata(&self) -> Result<FileMetadata, FileSystemError> {
158        Ok(self.metadata.read().clone())
159    }
160
161    fn as_any(&self) -> &dyn Any {
162        self
163    }
164}
165
166impl Clone for Fat32Node {
167    fn clone(&self) -> Self {
168        Self {
169            name: RwLock::new(self.name.read().clone()),
170            file_type: RwLock::new(self.file_type.read().clone()),
171            metadata: RwLock::new(self.metadata.read().clone()),
172            children: RwLock::new(self.children.read().clone()),
173            parent: RwLock::new(self.parent.read().clone()),
174            filesystem: RwLock::new(self.filesystem.read().clone()),
175            cluster: RwLock::new(*self.cluster.read()),
176            children_loaded: RwLock::new(*self.children_loaded.read()),
177        }
178    }
179}
180
/// FAT32 file object for regular files
///
/// An open-file handle over a [`Fat32Node`]: it carries the stream position,
/// a dirty flag for deferred write-back, and bookkeeping for mmap support.
pub struct Fat32FileObject {
    /// Reference to the FAT32 node
    node: Arc<Fat32Node>,
    /// Current file position (byte offset used by `StreamOps::read`/`write`)
    position: RwLock<usize>,
    /// Parent directory cluster (for directory entry updates; 0 is mapped to
    /// the root cluster in `update_directory_entry`)
    parent_cluster: u32,
    /// File-level dirty flag to avoid unnecessary writeback
    dirty: Mutex<bool>,
    /// Page-aligned backing for mmap operations (lazy initialized)
    mmap_backing: RwLock<Option<Box<[crate::mem::page::Page]>>>,
    /// Byte length of the mmap backing (file size snapshot)
    mmap_backing_len: Mutex<usize>,
    /// Active mmap ranges keyed by starting virtual address
    mmap_ranges: RwLock<BTreeMap<usize, MmapRange>>,
}
198
impl Fat32FileObject {
    /// Create a handle over `node`. `parent_cluster` is the cluster of the
    /// directory that holds this file's directory entry (0 means "root";
    /// see `update_directory_entry`).
    pub fn new(node: Arc<Fat32Node>, parent_cluster: u32) -> Self {
        Self {
            node,
            position: RwLock::new(0),
            parent_cluster,
            dirty: Mutex::new(false),
            mmap_backing: RwLock::new(None),
            mmap_backing_len: Mutex::new(0),
            mmap_ranges: RwLock::new(BTreeMap::new()),
        }
    }

    /// Write back current page-cache-backed content to disk if dirty.
    ///
    /// Non-empty files: assemble the whole file from the page cache (loading
    /// missing pages from disk), rewrite the cluster chain, refresh the
    /// directory entry, invalidate the old cache entry, then clear `dirty`.
    /// Empty files: only the directory entry is refreshed (size 0),
    /// unconditionally — the dirty fast-path below applies to size > 0 only.
    fn sync_to_disk(&self) -> Result<(), StreamError> {
        let size = self.node.metadata.read().size;
        // Fast path: a non-empty, non-dirty file needs no work. The lock
        // guard is scoped to this block so the later `self.dirty.lock()`
        // calls do not deadlock.
        if size > 0 {
            let dirty = self.dirty.lock();
            if !*dirty {
                return Ok(());
            }
        }
        if size == 0 {
            let fs = self
                .node
                .filesystem
                .read()
                .as_ref()
                .and_then(|w| w.upgrade())
                .ok_or(StreamError::Closed)?;
            let fat32_fs = fs
                .as_any()
                .downcast_ref::<crate::fs::vfs_v2::drivers::fat32::Fat32FileSystem>()
                .ok_or(StreamError::NotSupported)?;
            let current_cluster = self.node.cluster();
            // Only files that ever had a cluster need their entry rewritten
            // to record the zero size.
            if current_cluster != 0 {
                self.update_directory_entry(fat32_fs, current_cluster, 0)?;
            }
            *self.dirty.lock() = false;
            return Ok(());
        }

        let fs = self
            .node
            .filesystem
            .read()
            .as_ref()
            .and_then(|weak| weak.upgrade())
            .ok_or(StreamError::Closed)?;
        let fat32_fs = fs
            .as_any()
            .downcast_ref::<crate::fs::vfs_v2::drivers::fat32::Fat32FileSystem>()
            .ok_or(StreamError::NotSupported)?;

        // Gather the full file image page by page from the page cache.
        let mut buffer = Vec::with_capacity(size);
        buffer.resize(size, 0);
        let cache_id = self.cache_id();
        let page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        for page_index in 0..(page_count as u64) {
            let start = page_index as usize * PAGE_SIZE;
            let len = core::cmp::min(PAGE_SIZE, size.saturating_sub(start));
            if len == 0 {
                break;
            }
            // Prefer an already-cached page (it may hold unwritten changes);
            // otherwise load the page from the on-disk cluster chain.
            let pinned = if let Some(p) = PageCacheManager::global().try_pin(cache_id, page_index) {
                p
            } else {
                PageCacheManager::global()
                    .pin_or_load(cache_id, page_index, |paddr| {
                        let current_cluster = self.node.cluster();
                        if current_cluster == 0 {
                            // No cluster allocated yet: the page is all zeros.
                            unsafe {
                                core::ptr::write_bytes(paddr as *mut u8, 0, PAGE_SIZE);
                            }
                            return Ok(());
                        }
                        let data = fat32_fs
                            .read_file_content(current_cluster, size)
                            .map_err(|_| "io error")?;
                        let start = page_index as usize * PAGE_SIZE;
                        let len = core::cmp::min(PAGE_SIZE, data.len().saturating_sub(start));
                        // SAFETY: `paddr` points at one whole cache page; we
                        // zero-fill it first, then copy at most PAGE_SIZE
                        // bytes from `data` starting at this page's offset.
                        unsafe {
                            core::ptr::write_bytes(paddr as *mut u8, 0, PAGE_SIZE);
                            if len > 0 {
                                core::ptr::copy_nonoverlapping(
                                    data.as_ptr().add(start),
                                    paddr as *mut u8,
                                    len,
                                );
                            }
                        }
                        Ok(())
                    })
                    .map_err(|_| StreamError::IoError)?
            };

            // SAFETY: `buffer` is `size` bytes long and `start + len <= size`
            // by construction of `len` above.
            unsafe {
                core::ptr::copy_nonoverlapping(
                    pinned.paddr() as *const u8,
                    buffer.as_mut_ptr().add(start),
                    len,
                );
            }
        }

        // Capture the cache id BEFORE the cluster (and hence file_id) may
        // change, so the stale cache entry can be invalidated afterwards.
        let current_cluster = self.node.cluster();
        let old_cache_id = self.cache_id();
        let new_cluster = if buffer.is_empty() {
            0
        } else {
            fat32_fs
                .write_file_content(current_cluster, &buffer)
                .map_err(|_| StreamError::IoError)?
        };

        // write_file_content may relocate the chain; keep node cluster and
        // file_id in sync with whatever cluster the file now starts at.
        if new_cluster != current_cluster {
            *self.node.cluster.write() = new_cluster;
            {
                let mut meta = self.node.metadata.write();
                if new_cluster != 0 {
                    meta.file_id = new_cluster as u64;
                }
            }
        } else if current_cluster != 0 {
            let mut meta = self.node.metadata.write();
            if meta.file_id != current_cluster as u64 {
                meta.file_id = current_cluster as u64;
            }
        }

        self.update_directory_entry(fat32_fs, new_cluster, buffer.len())?;
        PageCacheManager::global().invalidate(old_cache_id);

        {
            let mut metadata = self.node.metadata.write();
            metadata.size = buffer.len();
        }

        *self.dirty.lock() = false;
        Ok(())
    }

    /// Lazily (re)allocate the page-aligned mmap backing buffer and fill it
    /// from the page cache.
    ///
    /// `file_size` is recorded as the backing length; `required_size` (in
    /// bytes, rounded up to pages) decides how many pages to allocate. Whole
    /// pages are copied, so bytes past EOF in the last page hold whatever the
    /// page loader produced — presumably zero-padded by `read_page_content`,
    /// TODO confirm.
    fn ensure_mmap_backing(
        &self,
        file_size: usize,
        required_size: usize,
    ) -> Result<(), StreamError> {
        if file_size == 0 || required_size == 0 {
            return Err(StreamError::InvalidArgument);
        }

        let num_pages = (required_size + PAGE_SIZE - 1) / PAGE_SIZE;
        let mut backing_guard = self.mmap_backing.write();
        // Reallocate only when there is no backing yet or it is too small.
        let needs_alloc = backing_guard
            .as_ref()
            .map(|buf| buf.len() < num_pages)
            .unwrap_or(true);
        if needs_alloc {
            *backing_guard = Some(allocate_boxed_pages(num_pages));
        }

        let backing = backing_guard.as_mut().expect("mmap backing missing");
        *self.mmap_backing_len.lock() = file_size;

        let fs = self
            .node
            .filesystem
            .read()
            .as_ref()
            .and_then(|weak| weak.upgrade())
            .ok_or(StreamError::Closed)?;
        let fat32_fs = fs
            .as_any()
            .downcast_ref::<crate::fs::vfs_v2::drivers::fat32::Fat32FileSystem>()
            .ok_or(StreamError::NotSupported)?;

        // Copy every page from the page cache (loading from disk on miss)
        // into the private backing buffer.
        let cache_id = self.cache_id();
        let backing_ptr = backing.as_mut_ptr() as *mut u8;
        for page_index in 0..num_pages {
            let pinned = PageCacheManager::global()
                .pin_or_load(cache_id, page_index as u64, |paddr| {
                    fat32_fs
                        .read_page_content(self.node.cluster(), page_index as u64, paddr)
                        .map_err(|_| "fat32: read_page_content failed")
                })
                .map_err(|_| StreamError::IoError)?;
            // SAFETY: the backing holds `num_pages` pages and `page_index`
            // is < num_pages, so the destination span is in bounds.
            unsafe {
                core::ptr::copy_nonoverlapping(
                    pinned.paddr() as *const u8,
                    backing_ptr.add(page_index * PAGE_SIZE),
                    PAGE_SIZE,
                );
            }
        }

        Ok(())
    }

    /// Update the directory entry for this file
    ///
    /// Rewrites the on-disk entry in the parent directory with the given
    /// starting `cluster` and byte `size`. A `parent_cluster` of 0 is
    /// interpreted as "file lives in the root directory".
    fn update_directory_entry(
        &self,
        fat32_fs: &crate::fs::vfs_v2::drivers::fat32::Fat32FileSystem,
        cluster: u32,
        size: usize,
    ) -> Result<(), StreamError> {
        // Determine the actual parent cluster to use
        let actual_parent_cluster = if self.parent_cluster == 0 {
            // For files in root directory, use the root cluster
            fat32_fs.root_cluster
        } else {
            self.parent_cluster
        };

        // Create updated directory entry
        let filename = self.node.name.read().clone();

        let dir_entry =
            crate::fs::vfs_v2::drivers::fat32::structures::Fat32DirectoryEntry::new_file(
                &filename,
                cluster,
                size as u32,
            );

        // Write the updated directory entry; failures are logged and mapped
        // to a generic I/O error for the caller.
        match fat32_fs.update_directory_entry(actual_parent_cluster, &filename, &dir_entry) {
            Ok(()) => {
                Ok(())
            }
            Err(e) => {
                crate::early_println!("[FAT32] Error: Failed to update directory entry: {:?}", e);
                Err(StreamError::IoError)
            }
        }
    }
}
439
/// One active memory mapping of this file, recorded so page faults can be
/// translated back to a file offset (see `resolve_fault`).
#[derive(Clone, Copy, Debug)]
struct MmapRange {
    /// First mapped virtual address (inclusive)
    vaddr_start: usize,
    /// Last mapped virtual address (inclusive)
    vaddr_end: usize,
    /// File offset corresponding to `vaddr_start`
    offset: usize,
}
446
447impl Debug for Fat32FileObject {
448    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
449        f.debug_struct("Fat32FileObject")
450            .field("node", &self.node.name.read())
451            .field("position", &self.position.read())
452            .field("dirty", &self.dirty.lock())
453            .finish()
454    }
455}
456
impl StreamOps for Fat32FileObject {
    /// Read from the current position through the page cache, advancing the
    /// position. Returns the number of bytes read (0 at or past EOF).
    fn read(&self, buffer: &mut [u8]) -> Result<usize, StreamError> {
        let file_size = self.node.metadata.read().size;
        let mut pos = *self.position.read();
        if pos >= file_size {
            return Ok(0);
        }

        let mut total_read = 0usize;
        while total_read < buffer.len() && pos < file_size {
            let page_index = (pos / PAGE_SIZE) as u64;
            let offset_in_page = pos % PAGE_SIZE;
            let cache_id = self.cache_id();
            let start_cluster = self.node.cluster();
            // Pin the page, loading it from the cluster chain on a cache miss.
            let pinned = PageCacheManager::global()
                .pin_or_load(cache_id, page_index, |paddr| {
                    let fs = self
                        .node
                        .filesystem
                        .read()
                        .as_ref()
                        .and_then(|w| w.upgrade())
                        .ok_or("filesystem gone")
                        // NOTE(review): this map_err is redundant — ok_or
                        // already produced the same message.
                        .map_err(|_| "filesystem gone")?;
                    let fat32_fs = fs
                        .as_any()
                        .downcast_ref::<crate::fs::vfs_v2::drivers::fat32::Fat32FileSystem>()
                        .ok_or("bad fs type")?;
                    fat32_fs
                        .read_page_content(start_cluster, page_index, paddr)
                        .map_err(|_| "io error")
                })
                .map_err(|_| StreamError::IoError)?;

            // SAFETY: the chunk is clamped to the page remainder, the file
            // remainder, and the buffer remainder, so both the source page
            // and the destination buffer accesses stay in bounds.
            unsafe {
                let src = (pinned.paddr() as *const u8).add(offset_in_page);
                let remaining_in_page = PAGE_SIZE - offset_in_page;
                let remaining_file = file_size - pos;
                let remaining_buf = buffer.len() - total_read;
                let chunk = core::cmp::min(
                    remaining_in_page,
                    core::cmp::min(remaining_file, remaining_buf),
                );
                core::ptr::copy_nonoverlapping(src, buffer.as_mut_ptr().add(total_read), chunk);
                total_read += chunk;
                pos += chunk;
            }
        }

        // Commit the advanced position only after the whole read succeeded.
        {
            let mut position = self.position.write();
            *position = pos;
        }
        Ok(total_read)
    }

    /// Write at the current position into the page cache (read-modify-write
    /// per page), advancing the position, growing the recorded size if the
    /// write extends the file, and marking the handle dirty. Data reaches the
    /// disk later via `sync_to_disk` / page-cache writeback.
    fn write(&self, buffer: &[u8]) -> Result<usize, StreamError> {
        let cache_id = self.cache_id();
        let mut written = 0usize;
        let mut pos = *self.position.read();
        let start_cluster = self.node.cluster();

        while written < buffer.len() {
            let page_index = (pos / PAGE_SIZE) as u64;
            let page_off = pos % PAGE_SIZE;
            let remain_in_page = PAGE_SIZE - page_off;
            let chunk = core::cmp::min(buffer.len() - written, remain_in_page);

            // Load the existing page first so a partial-page write preserves
            // the surrounding bytes.
            let pinned = PageCacheManager::global()
                .pin_or_load(cache_id, page_index, |paddr| {
                    let fs = self
                        .node
                        .filesystem
                        .read()
                        .as_ref()
                        .and_then(|w| w.upgrade())
                        .ok_or("filesystem gone")?;
                    let fat32_fs = fs
                        .as_any()
                        .downcast_ref::<crate::fs::vfs_v2::drivers::fat32::Fat32FileSystem>()
                        .ok_or("bad fs type")?;
                    fat32_fs
                        .read_page_content(start_cluster, page_index, paddr)
                        .map_err(|_| "io error")
                })
                .map_err(|_| StreamError::IoError)?;

            // SAFETY: `chunk <= PAGE_SIZE - page_off`, so the destination
            // stays within the pinned page; the source span is within buffer.
            unsafe {
                let dst = (pinned.paddr() as *mut u8).add(page_off);
                let src = buffer.as_ptr().add(written);
                core::ptr::copy_nonoverlapping(src, dst, chunk);
            }
            pinned.mark_dirty();

            written += chunk;
            pos += chunk;
        }

        {
            let mut position = self.position.write();
            *position += written;
        }

        // Extend the recorded file size if the write went past old EOF.
        {
            let mut meta = self.node.metadata.write();
            let new_end = (*self.position.read()) as usize;
            if new_end > meta.size {
                meta.size = new_end;
            }
        }

        *self.dirty.lock() = true;

        Ok(written)
    }
}
573
impl ControlOps for Fat32FileObject {
    /// FAT32 regular files expose no ioctl-style control commands; every
    /// request is rejected.
    fn control(&self, _command: u32, _arg: usize) -> Result<i32, &'static str> {
        Err("Control operations not supported for FAT32 files")
    }
}
579
impl MemoryMappingOps for Fat32FileObject {
    /// Mapping info for a private mapping: materializes the file into the
    /// page-aligned `mmap_backing` buffer and returns its address plus
    /// permission bits 0x3 (presumably read|write — confirm against the VM
    /// layer's flag encoding) and `true` for the final flag.
    fn get_mapping_info(
        &self,
        offset: usize,
        length: usize,
    ) -> Result<(usize, usize, bool), &'static str> {
        if offset % PAGE_SIZE != 0 {
            return Err("Offset not page aligned");
        }

        let file_size = self.node.metadata.read().size;
        if file_size == 0 || offset >= file_size {
            return Err("Offset beyond file size");
        }

        // Back at least the whole file, and more if the request reaches
        // further than the current size.
        let required_size = offset.saturating_add(length).max(file_size);
        self.ensure_mmap_backing(file_size, required_size)
            .map_err(|_| "Failed to prepare mmap backing")?;

        let backing_guard = self.mmap_backing.read();
        let backing = backing_guard.as_ref().ok_or("mmap backing missing")?;
        let base = backing.as_ptr() as usize;
        let paddr = base + offset;
        if paddr % PAGE_SIZE != 0 {
            return Err("Backing address not aligned");
        }

        Ok((paddr, 0x3, true))
    }

    /// Like `get_mapping_info`, but shared mappings return address 0 —
    /// apparently a sentinel telling the VM layer to populate pages lazily
    /// through `resolve_fault` instead of mapping a private copy.
    fn get_mapping_info_with(
        &self,
        offset: usize,
        length: usize,
        is_shared: bool,
    ) -> Result<(usize, usize, bool), &'static str> {
        if is_shared {
            if offset % PAGE_SIZE != 0 {
                return Err("Offset not page aligned");
            }

            let file_size = self.node.metadata.read().size;
            if file_size == 0 || offset >= file_size {
                return Err("Offset beyond file size");
            }

            let _ = length;
            return Ok((0, 0x3, true));
        }

        self.get_mapping_info(offset, length)
    }

    /// Record a newly established mapping so faults inside it can be
    /// resolved back to file offsets.
    fn on_mapped(&self, vaddr: usize, _paddr: usize, length: usize, offset: usize) {
        if length == 0 {
            return;
        }
        let vaddr_end = vaddr.saturating_add(length - 1);
        let range = MmapRange {
            vaddr_start: vaddr,
            vaddr_end,
            offset,
        };
        self.mmap_ranges.write().insert(vaddr, range);
    }

    /// Drop the range record and flush file content to disk.
    ///
    /// With a private backing buffer present, the entire backing (up to the
    /// recorded length) is written out regardless of which range was
    /// unmapped and regardless of the dirty flag; without one (shared,
    /// fault-driven mappings) a best-effort `sync_to_disk` runs instead.
    /// Errors on this path are deliberately ignored (unmap cannot fail).
    fn on_unmapped(&self, vaddr: usize, _length: usize) {
        self.mmap_ranges.write().remove(&vaddr);
        let backing_guard = self.mmap_backing.read();
        let backing = match backing_guard.as_ref() {
            Some(buf) => buf,
            None => {
                let _ = self.sync_to_disk();
                return;
            }
        };
        let backing_len = *self.mmap_backing_len.lock();
        if backing_len == 0 {
            return;
        }

        let fs = match self
            .node
            .filesystem
            .read()
            .as_ref()
            .and_then(|weak| weak.upgrade())
        {
            Some(fs) => fs,
            None => return,
        };
        let fat32_fs = match fs
            .as_any()
            .downcast_ref::<crate::fs::vfs_v2::drivers::fat32::Fat32FileSystem>()
        {
            Some(fs) => fs,
            None => return,
        };

        // SAFETY: the backing buffer is alive for the scope of
        // `backing_guard` and `backing_len` never exceeds the size it was
        // recorded against in `ensure_mmap_backing`.
        let backing_ptr = backing.as_ptr() as *const u8;
        let data = unsafe { core::slice::from_raw_parts(backing_ptr, backing_len) };
        let _ = fat32_fs.write_file_content(self.node.cluster(), data);
        // The on-disk content changed behind the page cache's back; drop
        // the stale cached pages.
        PageCacheManager::global().invalidate(self.cache_id());
    }

    fn supports_mmap(&self) -> bool {
        true
    }

    /// Resolve a page fault in a registered mapping by pinning (and loading,
    /// if needed) the corresponding page-cache page. Stores mark both the
    /// page and the file dirty so the data is written back later.
    fn resolve_fault(
        &self,
        access: &crate::object::capability::memory_mapping::AccessKind,
        map: &crate::vm::vmem::VirtualMemoryMap,
    ) -> core::result::Result<
        crate::object::capability::memory_mapping::ResolveFaultResult,
        crate::object::capability::memory_mapping::ResolveFaultError,
    > {
        // The faulting map must correspond to a range we recorded in
        // on_mapped, and the faulting address must lie inside it.
        let range = self
            .mmap_ranges
            .read()
            .get(&map.vmarea.start)
            .copied()
            .ok_or(crate::object::capability::memory_mapping::ResolveFaultError::Invalid)?;
        if access.vaddr < range.vaddr_start || access.vaddr > range.vaddr_end {
            return Err(crate::object::capability::memory_mapping::ResolveFaultError::Invalid);
        }

        // Translate the virtual address to a file offset and bounds-check it.
        let file_size = self.node.metadata.read().size;
        let file_offset = range
            .offset
            .saturating_add(access.vaddr.saturating_sub(range.vaddr_start));
        if file_size == 0 || file_offset >= file_size {
            return Err(crate::object::capability::memory_mapping::ResolveFaultError::Invalid);
        }

        let fs = self
            .node
            .filesystem
            .read()
            .as_ref()
            .and_then(|weak| weak.upgrade())
            .ok_or(crate::object::capability::memory_mapping::ResolveFaultError::Invalid)?;
        let fat32_fs = fs
            .as_any()
            .downcast_ref::<crate::fs::vfs_v2::drivers::fat32::Fat32FileSystem>()
            .ok_or(crate::object::capability::memory_mapping::ResolveFaultError::Invalid)?;

        let page_index = (file_offset / PAGE_SIZE) as u64;
        let pinned = PageCacheManager::global()
            .pin_or_load(self.cache_id(), page_index, |paddr| {
                fat32_fs
                    .read_page_content(self.node.cluster(), page_index, paddr)
                    .map_err(|_| "fat32: read_page_content failed")
            })
            .map_err(|_| crate::object::capability::memory_mapping::ResolveFaultError::Invalid)?;

        if matches!(
            access.op,
            crate::object::capability::memory_mapping::AccessOp::Store
        ) {
            pinned.mark_dirty();
            *self.dirty.lock() = true;
        }

        Ok(
            crate::object::capability::memory_mapping::ResolveFaultResult {
                paddr_page_base: pinned.paddr(),
                is_tail: false,
            },
        )
    }
}
752
753impl FileObject for Fat32FileObject {
    /// Positional read through the page cache: fills `buffer` starting at
    /// byte `offset` without touching the stream position. Returns the
    /// number of bytes read (0 at or past EOF).
    fn read_at(&self, offset: u64, buffer: &mut [u8]) -> Result<usize, StreamError> {
        let file_size = self.node.metadata.read().size;
        let off = usize::try_from(offset).map_err(|_| StreamError::InvalidArgument)?;
        if off >= file_size {
            return Ok(0);
        }

        let mut total_read = 0usize;
        let cache_id = self.cache_id();
        while total_read < buffer.len() && off + total_read < file_size {
            let absolute = off + total_read;
            let page_index = (absolute / PAGE_SIZE) as u64;
            let offset_in_page = absolute % PAGE_SIZE;

            // Pin the page, loading from the cluster chain on a cache miss.
            let pinned = PageCacheManager::global()
                .pin_or_load(cache_id, page_index, |paddr| {
                    let fs = self
                        .node
                        .filesystem
                        .read()
                        .as_ref()
                        .and_then(|w| w.upgrade())
                        .ok_or("filesystem gone")?;
                    let fat32_fs = fs
                        .as_any()
                        .downcast_ref::<crate::fs::vfs_v2::drivers::fat32::Fat32FileSystem>()
                        .ok_or("bad fs type")?;
                    fat32_fs
                        .read_page_content(self.node.cluster(), page_index, paddr)
                        .map_err(|_| "io error")
                })
                .map_err(|_| StreamError::IoError)?;

            // SAFETY: the chunk is clamped to the page remainder, the file
            // remainder, and the buffer remainder, so both sides of the copy
            // stay in bounds.
            unsafe {
                let src = (pinned.paddr() as *const u8).add(offset_in_page);
                let remaining_in_page = PAGE_SIZE - offset_in_page;
                let remaining_file = file_size - (off + total_read);
                let remaining_buf = buffer.len() - total_read;
                let chunk = core::cmp::min(
                    remaining_in_page,
                    core::cmp::min(remaining_file, remaining_buf),
                );
                core::ptr::copy_nonoverlapping(src, buffer.as_mut_ptr().add(total_read), chunk);
                total_read += chunk;
            }
        }

        Ok(total_read)
    }
803
    /// Positional write through the page cache (read-modify-write per page)
    /// without touching the stream position. Extends the recorded file size
    /// if the write reaches past EOF and marks the handle dirty; the data
    /// reaches disk later via `sync_to_disk` / page-cache writeback.
    fn write_at(&self, offset: u64, buffer: &[u8]) -> Result<usize, StreamError> {
        if buffer.is_empty() {
            return Ok(0);
        }
        let off = usize::try_from(offset).map_err(|_| StreamError::InvalidArgument)?;
        let mut written = 0usize;
        let cache_id = self.cache_id();

        while written < buffer.len() {
            let absolute = off + written;
            let page_index = (absolute / PAGE_SIZE) as u64;
            let page_off = absolute % PAGE_SIZE;
            let remain_in_page = PAGE_SIZE - page_off;
            let chunk = core::cmp::min(buffer.len() - written, remain_in_page);

            // Load the existing page first so a partial-page write preserves
            // the surrounding bytes.
            let pinned = PageCacheManager::global()
                .pin_or_load(cache_id, page_index, |paddr| {
                    let fs = self
                        .node
                        .filesystem
                        .read()
                        .as_ref()
                        .and_then(|w| w.upgrade())
                        .ok_or("filesystem gone")?;
                    let fat32_fs = fs
                        .as_any()
                        .downcast_ref::<crate::fs::vfs_v2::drivers::fat32::Fat32FileSystem>()
                        .ok_or("bad fs type")?;
                    fat32_fs
                        .read_page_content(self.node.cluster(), page_index, paddr)
                        .map_err(|_| "io error")
                })
                .map_err(|_| StreamError::IoError)?;

            // SAFETY: `chunk <= PAGE_SIZE - page_off`, so the destination
            // stays inside the pinned page; the source span is within buffer.
            unsafe {
                let dst = (pinned.paddr() as *mut u8).add(page_off);
                let src = buffer.as_ptr().add(written);
                core::ptr::copy_nonoverlapping(src, dst, chunk);
            }
            pinned.mark_dirty();
            written += chunk;
        }

        // Grow the recorded size if the write extended the file.
        let new_end = off + written;
        {
            let mut meta = self.node.metadata.write();
            if new_end > meta.size {
                meta.size = new_end;
            }
        }

        *self.dirty.lock() = true;

        Ok(written)
    }
859
    /// Resize the file to `size` bytes.
    ///
    /// Implemented as read-all / rewrite: the retained prefix is read
    /// through the page cache into a zero-filled buffer of the new size,
    /// then written back as a (possibly relocated) cluster chain. Growing
    /// therefore zero-fills the tail. NOTE(review): the whole file is
    /// buffered in memory, so truncating very large files is expensive.
    fn truncate(&self, size: u64) -> Result<(), StreamError> {
        // Only regular files can be truncated.
        if *self.node.file_type.read() != FileType::RegularFile {
            return Err(StreamError::from(FileSystemError::new(
                FileSystemErrorKind::IsADirectory,
                "Cannot truncate non-regular file",
            )));
        }

        let new_size = usize::try_from(size).map_err(|_| StreamError::InvalidArgument)?;
        let old_size = self.node.metadata.read().size;
        if new_size == old_size {
            return Ok(());
        }

        // Upgrade the weak filesystem reference and downcast to FAT32.
        let fs = self
            .node
            .filesystem
            .read()
            .as_ref()
            .and_then(|w| w.upgrade())
            .ok_or(StreamError::Closed)?;
        let fat32_fs = fs
            .as_any()
            .downcast_ref::<crate::fs::vfs_v2::drivers::fat32::Fat32FileSystem>()
            .ok_or(StreamError::NotSupported)?;

        // Zero-filled buffer of the target size; the kept prefix is copied
        // in below, page by page, via the page cache (so pending dirty
        // pages are honored).
        let mut buffer = Vec::with_capacity(new_size);
        buffer.resize(new_size, 0);
        let copy_len = core::cmp::min(old_size, new_size);
        if copy_len > 0 {
            let cache_id = self.cache_id();
            let page_count = (copy_len + PAGE_SIZE - 1) / PAGE_SIZE;
            for page_index in 0..(page_count as u64) {
                let start = page_index as usize * PAGE_SIZE;
                // Final page may be partial.
                let len = core::cmp::min(PAGE_SIZE, copy_len.saturating_sub(start));
                if len == 0 {
                    break;
                }
                let pinned = PageCacheManager::global()
                    .pin_or_load(cache_id, page_index, |paddr| {
                        fat32_fs
                            .read_page_content(self.node.cluster(), page_index, paddr)
                            .map_err(|_| "io error")
                    })
                    .map_err(|_| StreamError::IoError)?;
                // SAFETY: the pin keeps the page resident and `start + len`
                // is bounded by `copy_len <= buffer.len()`.
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        pinned.paddr() as *const u8,
                        buffer.as_mut_ptr().add(start),
                        len,
                    );
                }
            }
        }

        // Write the new content back; an empty file keeps no cluster chain
        // (cluster 0). write_file_content may relocate the chain.
        let current_cluster = self.node.cluster();
        let old_cache_id = self.cache_id();
        let new_cluster = if buffer.is_empty() {
            0
        } else {
            fat32_fs
                .write_file_content(current_cluster, &buffer)
                .map_err(|_| StreamError::IoError)?
        };

        if new_cluster != current_cluster {
            // Chain moved: record the new start cluster, re-key the cache id
            // (file_id feeds cache_id()), rewrite the directory entry, and
            // drop cache pages filed under the old identity.
            *self.node.cluster.write() = new_cluster;
            {
                let mut meta = self.node.metadata.write();
                if new_cluster != 0 {
                    meta.file_id = new_cluster as u64;
                }
            }
            self.update_directory_entry(fat32_fs, new_cluster, buffer.len())?;
            PageCacheManager::global().invalidate(old_cache_id);
        } else if current_cluster != 0 {
            // Chain kept in place: only re-sync file_id if it drifted.
            // NOTE(review): in this branch the on-disk directory entry's
            // size field is not rewritten here and `dirty` is cleared
            // below — confirm write_file_content (or a later sync path)
            // persists the new size. Also, cache pages past the new EOF
            // stay cached; confirm all readers clamp to metadata.size.
            let mut meta = self.node.metadata.write();
            if meta.file_id != current_cluster as u64 {
                meta.file_id = current_cluster as u64;
                PageCacheManager::global().invalidate(old_cache_id);
            }
        }

        // Publish the new size and clear the dirty flag (content is on disk).
        {
            let mut metadata = self.node.metadata.write();
            metadata.size = buffer.len();
        }
        *self.dirty.lock() = false;

        // Clamp the cursor so it never points past the new EOF.
        let mut position = self.position.write();
        if *position > size as usize {
            *position = size as usize;
        }

        Ok(())
    }
956
957    fn seek(&self, from: SeekFrom) -> Result<u64, StreamError> {
958        let metadata = self.node.metadata.read();
959        let file_size = metadata.size;
960        let mut pos = self.position.write();
961
962        let new_pos = match from {
963            SeekFrom::Start(offset) => offset as usize,
964            SeekFrom::End(offset) => {
965                if offset < 0 {
966                    file_size.saturating_sub((-offset) as usize)
967                } else {
968                    file_size + offset as usize
969                }
970            }
971            SeekFrom::Current(offset) => {
972                if offset < 0 {
973                    pos.saturating_sub((-offset) as usize)
974                } else {
975                    *pos + offset as usize
976                }
977            }
978        };
979
980        *pos = new_pos;
981        Ok(new_pos as u64)
982    }
983
984    fn metadata(&self) -> Result<crate::fs::FileMetadata, StreamError> {
985        Ok(self.node.metadata.read().clone())
986    }
987
    /// Flush this file's pending state to the underlying device.
    ///
    /// Delegates to `sync_to_disk` (defined elsewhere on this type) —
    /// presumably the path that writes back dirty pages and the directory
    /// entry; see that method for the details.
    fn sync(&self) -> Result<(), StreamError> {
        self.sync_to_disk()
    }
991
    /// Expose `self` for downcasting from the trait object back to
    /// `Fat32FileObject`.
    fn as_any(&self) -> &dyn Any {
        self
    }
995}
996
997impl PageCacheCapable for Fat32FileObject {
998    fn cache_id(&self) -> crate::fs::vfs_v2::cache::CacheId {
999        let fs = self
1000            .node
1001            .filesystem
1002            .read()
1003            .as_ref()
1004            .and_then(|w| w.upgrade())
1005            .expect("Fat32FileObject: filesystem gone");
1006        let fat32_fs = fs
1007            .as_any()
1008            .downcast_ref::<crate::fs::vfs_v2::drivers::fat32::Fat32FileSystem>()
1009            .expect("Fat32FileObject: invalid filesystem type");
1010
1011        let fs_id = fat32_fs.fs_id().get();
1012        let file_key = self.node.metadata.read().file_id;
1013        let cache_id = (fs_id << 32) | file_key;
1014        crate::fs::vfs_v2::cache::CacheId::new(cache_id)
1015    }
1016}
1017
1018impl crate::object::capability::selectable::Selectable for Fat32FileObject {
1019    fn current_ready(
1020        &self,
1021        interest: crate::object::capability::selectable::ReadyInterest,
1022    ) -> crate::object::capability::selectable::ReadySet {
1023        let mut set = crate::object::capability::selectable::ReadySet::none();
1024        if interest.read {
1025            set.read = true;
1026        }
1027        if interest.write {
1028            set.write = true;
1029        }
1030        if interest.except {
1031            set.except = false;
1032        }
1033        set
1034    }
1035
1036    fn wait_until_ready(
1037        &self,
1038        _interest: crate::object::capability::selectable::ReadyInterest,
1039        _trapframe: &mut crate::arch::Trapframe,
1040        _timeout_ticks: Option<u64>,
1041    ) -> crate::object::capability::selectable::SelectWaitOutcome {
1042        crate::object::capability::selectable::SelectWaitOutcome::Ready
1043    }
1044
1045    fn is_nonblocking(&self) -> bool {
1046        true
1047    }
1048}
1049
1050impl Drop for Fat32FileObject {
1051    fn drop(&mut self) {
1052        let _ = self.sync_to_disk();
1053    }
1054}
1055
/// FAT32 directory object
///
/// Per-open-handle state for a directory: the underlying VFS node plus a
/// cursor into its cached child list (used by `seek`). Multiple handles on
/// the same directory each carry their own cursor.
pub struct Fat32DirectoryObject {
    /// Reference to the FAT32 node backing this handle
    node: Arc<Fat32Node>,
    /// Current position in directory listing (index into cached children)
    position: RwLock<usize>,
}
1063
1064impl Fat32DirectoryObject {
1065    pub fn new(node: Arc<Fat32Node>) -> Self {
1066        Self {
1067            node,
1068            position: RwLock::new(0),
1069        }
1070    }
1071}
1072
1073impl Debug for Fat32DirectoryObject {
1074    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
1075        f.debug_struct("Fat32DirectoryObject")
1076            .field("node", &self.node.name.read())
1077            .field("position", &self.position.read())
1078            .finish()
1079    }
1080}
1081
impl StreamOps for Fat32DirectoryObject {
    /// Byte-stream reads are not meaningful for directories; callers must
    /// use the directory-entry APIs instead.
    fn read(&self, _buffer: &mut [u8]) -> Result<usize, StreamError> {
        Err(StreamError::NotSupported)
    }

    /// Directories cannot be written as byte streams.
    fn write(&self, _buffer: &[u8]) -> Result<usize, StreamError> {
        Err(StreamError::NotSupported)
    }
}
1091
impl ControlOps for Fat32DirectoryObject {
    /// No ioctl-style commands are defined for FAT32 directories; every
    /// request is rejected.
    fn control(&self, _command: u32, _arg: usize) -> Result<i32, &'static str> {
        Err("Control operations not supported for FAT32 directories")
    }
}
1097
impl MemoryMappingOps for Fat32DirectoryObject {
    /// Directories cannot be memory-mapped; always fails.
    fn get_mapping_info(
        &self,
        _offset: usize,
        _length: usize,
    ) -> Result<(usize, usize, bool), &'static str> {
        Err("Memory mapping not supported for FAT32 directories")
    }

    /// No-op: mappings are never established (see `supports_mmap`).
    fn on_mapped(&self, _vaddr: usize, _paddr: usize, _length: usize, _offset: usize) {
        // Not supported
    }

    /// No-op: mappings are never established (see `supports_mmap`).
    fn on_unmapped(&self, _vaddr: usize, _length: usize) {
        // Not supported
    }

    /// Advertise that mmap is unavailable for directory objects.
    fn supports_mmap(&self) -> bool {
        false
    }
}
1119
1120impl FileObject for Fat32DirectoryObject {
1121    fn seek(&self, from: SeekFrom) -> Result<u64, StreamError> {
1122        let children = self.node.children.read();
1123        let mut pos = self.position.write();
1124
1125        let new_pos = match from {
1126            SeekFrom::Start(offset) => offset as usize,
1127            SeekFrom::End(offset) => {
1128                if offset < 0 {
1129                    children.len().saturating_sub((-offset) as usize)
1130                } else {
1131                    children.len() + offset as usize
1132                }
1133            }
1134            SeekFrom::Current(offset) => {
1135                if offset < 0 {
1136                    pos.saturating_sub((-offset) as usize)
1137                } else {
1138                    *pos + offset as usize
1139                }
1140            }
1141        };
1142
1143        *pos = new_pos;
1144        Ok(new_pos as u64)
1145    }
1146
1147    fn metadata(&self) -> Result<crate::fs::FileMetadata, StreamError> {
1148        Ok(self.node.metadata.read().clone())
1149    }
1150
1151    fn as_any(&self) -> &dyn Any {
1152        self
1153    }
1154}
1155
1156impl crate::object::capability::selectable::Selectable for Fat32DirectoryObject {
1157    fn current_ready(
1158        &self,
1159        interest: crate::object::capability::selectable::ReadyInterest,
1160    ) -> crate::object::capability::selectable::ReadySet {
1161        let mut set = crate::object::capability::selectable::ReadySet::none();
1162        if interest.read {
1163            set.read = true;
1164        }
1165        if interest.write {
1166            set.write = true;
1167        }
1168        if interest.except {
1169            set.except = false;
1170        }
1171        set
1172    }
1173
1174    fn wait_until_ready(
1175        &self,
1176        _interest: crate::object::capability::selectable::ReadyInterest,
1177        _trapframe: &mut crate::arch::Trapframe,
1178        _timeout_ticks: Option<u64>,
1179    ) -> crate::object::capability::selectable::SelectWaitOutcome {
1180        crate::object::capability::selectable::SelectWaitOutcome::Ready
1181    }
1182
1183    fn is_nonblocking(&self) -> bool {
1184        true
1185    }
1186}