kernel/fs/vfs_v2/drivers/ext2/
mod.rs

1//! ext2 Filesystem Implementation
2//!
3//! This module implements an ext2 filesystem driver for the VFS v2 architecture.
4//! It provides support for reading and writing ext2 filesystems on block devices,
5//! particularly designed to work with virtio-blk devices.
6//!
7//! ## Features
8//!
9//! - Full ext2 filesystem support
10//! - Read and write operations
11//! - Directory navigation
12//! - File creation, deletion, and modification
13//! - Integration with VFS v2 architecture
14//! - Block device compatibility
15//!
16//! ## Architecture
17//!
18//! The ext2 implementation consists of:
19//! - `Ext2FileSystem`: Main filesystem implementation
20//! - `Ext2Node`: VFS node implementation for files and directories
21//! - `Ext2Driver`: Filesystem driver for registration
22//! - Data structures for ext2 format (superblock, inode, directory entries, etc.)
23
24use alloc::{
25    boxed::Box,
26    collections::BTreeMap,
27    format,
28    string::{String, ToString},
29    sync::Arc,
30    vec,
31    vec::Vec,
32};
33use core::{any::Any, mem};
34use hashbrown::HashMap;
35use spin::{Mutex, rwlock::RwLock};
36
37use crate::{
38    DeviceManager,
39    device::block::BlockDevice,
40    driver_initcall,
41    fs::{
42        FileObject, FileSystemError, FileSystemErrorKind, FileType, SocketFileInfo,
43        get_fs_driver_manager, params::FileSystemParams,
44    },
45    profile_scope,
46    task::mytask,
47};
48
49use super::super::{
50    core::{DirectoryEntryInternal, FileSystemId, FileSystemOperations, VfsNode},
51    manager::get_global_vfs_manager,
52};
53
54pub mod driver;
55pub mod node;
56pub mod structures;
57
58#[cfg(test)]
59pub mod tests;
60
61#[cfg(test)]
62pub mod char_device_tests;
63
64pub use driver::Ext2Driver;
65pub use node::{Ext2CharDeviceFileObject, Ext2DirectoryObject, Ext2FileObject, Ext2Node};
66pub use structures::*;
67
/// ext2 filesystem parameters for mount options
///
/// This struct holds the parameters parsed from mount option strings
/// and provides the interface for creating ext2 filesystems.
#[derive(Debug, Clone)]
pub struct Ext2Params {
    /// Device file path (e.g., "/dev/vda")
    pub device_path: Option<String>,
    /// Block device ID, filled in once `device_path` has been resolved
    /// through the VFS (see `resolve_device`)
    pub device_id: Option<usize>,
    /// Remaining mount options as key/value pairs; when parsed from an
    /// option string, bare flags such as "rw"/"ro" carry the value "true"
    pub options: BTreeMap<String, String>,
}
81
82impl Ext2Params {
83    /// Create new empty parameters
84    pub fn new() -> Self {
85        Self {
86            device_path: None,
87            device_id: None,
88            options: BTreeMap::new(),
89        }
90    }
91
92    /// Parse parameters from option string
93    ///
94    /// Expected format: "device=/dev/vda,rw,sync"
95    /// or: "device=/dev/vda,ro"
96    pub fn from_option_string(options: &str) -> Result<Self, FileSystemError> {
97        let mut params = Self::new();
98
99        for option in options.split(',') {
100            let option = option.trim();
101            if option.is_empty() {
102                continue;
103            }
104
105            if let Some((key, value)) = option.split_once('=') {
106                match key {
107                    "device" => {
108                        params.device_path = Some(value.to_string());
109                    }
110                    _ => {
111                        params.options.insert(key.to_string(), value.to_string());
112                    }
113                }
114            } else {
115                // Boolean options like "rw", "ro", "sync"
116                params
117                    .options
118                    .insert(option.to_string(), "true".to_string());
119            }
120        }
121
122        if params.device_path.is_none() {
123            return Err(FileSystemError::new(
124                FileSystemErrorKind::InvalidData,
125                "Device path is required for ext2 filesystem",
126            ));
127        }
128
129        Ok(params)
130    }
131
132    /// Resolve device path to device ID
133    pub fn resolve_device(&mut self) -> Result<(), FileSystemError> {
134        if let Some(device_path) = &self.device_path {
135            // Get VFS manager from current task or use global one
136            let vfs_manager = {
137                if let Some(task) = mytask() {
138                    task.vfs
139                        .read()
140                        .as_ref()
141                        .cloned()
142                        .unwrap_or_else(|| get_global_vfs_manager())
143                } else {
144                    get_global_vfs_manager()
145                }
146            };
147
148            // Use VFS to resolve device file path to device ID
149            let (entry, _mount_point) = vfs_manager.resolve_path(device_path).map_err(|e| {
150                FileSystemError::new(
151                    FileSystemErrorKind::DeviceError,
152                    format!("Failed to resolve device path '{}': {:?}", device_path, e),
153                )
154            })?;
155
156            // Get node from entry and then metadata
157            let node = entry.node();
158            let metadata = node.metadata().map_err(|e| {
159                FileSystemError::new(
160                    FileSystemErrorKind::DeviceError,
161                    format!("Failed to get device metadata: {:?}", e),
162                )
163            })?;
164
165            // Extract device ID from metadata
166            if let FileType::BlockDevice(device_info) = metadata.file_type {
167                self.device_id = Some(device_info.device_id);
168                Ok(())
169            } else {
170                Err(FileSystemError::new(
171                    FileSystemErrorKind::DeviceError,
172                    format!("'{}' is not a block device", device_path),
173                ))
174            }
175        } else {
176            Err(FileSystemError::new(
177                FileSystemErrorKind::InvalidData,
178                "No device path specified",
179            ))
180        }
181    }
182
183    /// Get the resolved device ID
184    pub fn get_device_id(&self) -> Option<usize> {
185        self.device_id
186    }
187
188    /// Get a specific option value
189    pub fn get_option(&self, key: &str) -> Option<&String> {
190        self.options.get(key)
191    }
192
193    /// Check if filesystem should be mounted read-only
194    pub fn is_readonly(&self) -> bool {
195        self.options.get("ro").is_some()
196            || (self.options.get("rw").is_none() && self.options.get("ro").is_none())
197    }
198
199    /// Create ext2 filesystem from these parameters
200    pub fn create_filesystem(&mut self) -> Result<Arc<Ext2FileSystem>, FileSystemError> {
201        // crate::early_println!("[EXT2] Creating filesystem from parameters");
202
203        // First resolve device path to device_id if not already resolved
204        if self.device_id.is_none() {
205            // crate::early_println!("[EXT2] Resolving device path: {:?}", self.device_path);
206            self.resolve_device()?;
207        }
208
209        // Get device_id (should be resolved by now)
210        let device_id = self.device_id.ok_or_else(|| {
211            // crate::early_println!("[EXT2] Error: Device ID not resolved");
212            FileSystemError::new(FileSystemErrorKind::DeviceError, "Device ID not resolved")
213        })?;
214
215        // crate::early_println!("[EXT2] Using device ID: {}", device_id);
216
217        // Get device from DeviceManager
218        let device = DeviceManager::get_manager()
219            .get_device(device_id)
220            .ok_or_else(|| {
221                // crate::early_println!("[EXT2] Error: Device with ID {} not found", device_id);
222                FileSystemError::new(
223                    FileSystemErrorKind::DeviceError,
224                    format!("Device with ID {} not found", device_id),
225                )
226            })?;
227
228        // crate::early_println!("[EXT2] Found device, converting to block device");
229
230        // Convert to block device using the new into_block_device() method
231        let block_device = device.into_block_device().ok_or_else(|| {
232            // crate::early_println!("[EXT2] Error: Device is not a block device");
233            FileSystemError::new(
234                FileSystemErrorKind::DeviceError,
235                "Device is not a block device",
236            )
237        })?;
238
239        // crate::early_println!("[EXT2] Successfully converted to block device, creating filesystem");
240
241        // Create ext2 filesystem using existing method
242        Ext2FileSystem::new(block_device)
243    }
244}
245impl FileSystemParams for Ext2Params {
246    fn as_any(&self) -> &dyn Any {
247        self
248    }
249
250    fn to_string_map(&self) -> BTreeMap<String, String> {
251        let mut map = self.options.clone();
252        if let Some(device_path) = &self.device_path {
253            map.insert("device".to_string(), device_path.clone());
254        }
255        map
256    }
257
258    fn from_string_map(map: &BTreeMap<String, String>) -> Result<Self, String> {
259        let mut params = Ext2Params::new();
260
261        for (key, value) in map {
262            match key.as_str() {
263                "device" => {
264                    params.device_path = Some(value.clone());
265                }
266                _ => {
267                    params.options.insert(key.clone(), value.clone());
268                }
269            }
270        }
271
272        if params.device_path.is_none() {
273            return Err("Device path is required for ext2 filesystem".to_string());
274        }
275
276        Ok(params)
277    }
278}
279
/// ext2 Filesystem implementation
///
/// This struct implements an ext2 filesystem that can be mounted on block devices.
/// It maintains the block device reference and provides filesystem operations
/// through the VFS v2 interface.
pub struct Ext2FileSystem {
    /// Unique filesystem identifier
    fs_id: FileSystemId,
    /// Reference to the underlying block device
    block_device: Arc<dyn BlockDevice>,
    /// Superblock information (heap allocated to avoid stack overflow)
    superblock: Box<Ext2Superblock>,
    /// Block size in bytes (derived from the superblock)
    block_size: u32,
    /// Root directory inode number (EXT2_ROOT_INO)
    root_inode: u32,
    /// Root directory node
    root: RwLock<Arc<Ext2Node>>,
    /// Filesystem name ("ext2")
    name: String,
    /// Next file ID generator (root uses ID 1, so this starts at 2)
    next_file_id: Mutex<u64>,
    /// LRU cache of recently read inodes, keyed by inode number
    inode_cache: Mutex<InodeLruCache>,
    /// LRU cache of recently read blocks, keyed by block number
    block_cache: Mutex<BlockLruCache>,
}
307
/// Node in doubly-linked list for O(1) LRU operations for inodes
///
/// The list is threaded through `prev`/`next` by `NodeId` indices rather
/// than raw pointers, so the cache stays safe to use behind a lock.
#[derive(Debug)]
struct InodeLruNode {
    /// Inode number this entry caches (key into the owning cache's map)
    inode_num: u32,
    /// Cached copy of the inode
    inode: Ext2Inode,
    /// Value of the cache's access counter when this entry was last inserted/updated
    access_count: u64,
    /// Previous (more recently used) node; None when this is the head
    prev: Option<NodeId>,
    /// Next (less recently used) node; None when this is the tail
    next: Option<NodeId>,
}
317
/// O(1) LRU cache implementation for inodes using HashMap + doubly-linked list
///
/// Recency order is kept in a doubly-linked list threaded by `NodeId`:
/// `head` is the most recently used entry and `tail` the least, which is
/// the one evicted when the cache is full.
struct InodeLruCache {
    /// Map from inode number to node ID
    map: HashMap<u32, NodeId>,
    /// Storage for all nodes
    nodes: HashMap<NodeId, InodeLruNode>,
    /// Head of doubly-linked list (most recently used)
    head: Option<NodeId>,
    /// Tail of doubly-linked list (least recently used)
    tail: Option<NodeId>,
    /// Next available node ID (handed out with wrapping arithmetic)
    next_id: NodeId,
    /// Maximum number of entries before the tail is evicted
    max_size: usize,
    /// Cache hit counter (statistics only)
    hits: u64,
    /// Cache miss counter (statistics only)
    misses: u64,
    /// Monotonic counter stamped onto entries on insert/update
    access_counter: u64,
}
338
impl InodeLruCache {
    /// Create an empty cache that evicts once `max_size` entries are held.
    fn new(max_size: usize) -> Self {
        Self {
            map: HashMap::new(),
            nodes: HashMap::new(),
            head: None,
            tail: None,
            next_id: 0,
            max_size,
            hits: 0,
            misses: 0,
            access_counter: 0,
        }
    }

    /// O(1) get operation with LRU update
    ///
    /// Returns a copy of the cached inode (if any) and promotes the entry
    /// to most-recently-used. Hit/miss counters are updated either way.
    fn get(&mut self, inode_num: u32) -> Option<Ext2Inode> {
        if let Some(&node_id) = self.map.get(&inode_num) {
            self.hits += 1;
            // Move to head (most recently used) - O(1)
            self.move_to_head(node_id);
            // Return copy of inode
            self.nodes.get(&node_id).map(|node| node.inode.clone())
        } else {
            self.misses += 1;
            None
        }
    }

    /// O(1) insert operation with LRU eviction
    ///
    /// Existing entries are updated in place and promoted; otherwise the
    /// least-recently-used entry is evicted when the cache is full and the
    /// new entry is linked in at the head.
    fn insert(&mut self, inode_num: u32, inode: Ext2Inode) {
        self.access_counter += 1;

        // If already exists, update and move to head - O(1)
        if let Some(&node_id) = self.map.get(&inode_num) {
            if let Some(node) = self.nodes.get_mut(&node_id) {
                node.inode = inode;
                node.access_count = self.access_counter;
            }
            self.move_to_head(node_id);
            return;
        }

        // If cache is full, remove LRU (tail) item - O(1)
        if self.nodes.len() >= self.max_size {
            self.remove_tail();
        }

        // Create new node and add to head - O(1)
        // Node IDs wrap around on overflow.
        let new_node_id = self.next_id;
        self.next_id = self.next_id.wrapping_add(1);

        let new_node = InodeLruNode {
            inode_num,
            inode,
            access_count: self.access_counter,
            prev: None,
            next: self.head,
        };

        self.nodes.insert(new_node_id, new_node);
        self.map.insert(inode_num, new_node_id);

        // Update existing head's prev pointer
        if let Some(old_head) = self.head {
            if let Some(old_head_node) = self.nodes.get_mut(&old_head) {
                old_head_node.prev = Some(new_node_id);
            }
        }

        // Update head/tail pointers
        self.head = Some(new_node_id);
        if self.tail.is_none() {
            self.tail = Some(new_node_id);
        }
    }

    /// O(1) remove operation (no-op when the inode is not cached)
    fn remove(&mut self, inode_num: u32) {
        if let Some(&node_id) = self.map.get(&inode_num) {
            self.remove_node(node_id);
            self.map.remove(&inode_num);
        }
    }

    /// O(1) move node to head of LRU list
    fn move_to_head(&mut self, node_id: NodeId) {
        // If already head, nothing to do
        if self.head == Some(node_id) {
            return;
        }

        // Remove from current position (node stays in `nodes`)
        self.remove_node_from_list(node_id);

        // Add to head: clear prev, point next at the current head
        if let Some(node) = self.nodes.get_mut(&node_id) {
            node.prev = None;
            node.next = self.head;
        }

        // Update old head's prev pointer
        if let Some(old_head) = self.head {
            if let Some(old_head_node) = self.nodes.get_mut(&old_head) {
                old_head_node.prev = Some(node_id);
            }
        }

        self.head = Some(node_id);

        // If this was the only node, it's also the tail
        if self.tail.is_none() {
            self.tail = Some(node_id);
        }
    }

    /// O(1) remove tail (LRU) node, including its `map` entry
    fn remove_tail(&mut self) {
        if let Some(tail_id) = self.tail {
            // Drop the inode-number mapping first, then unlink and free the node
            if let Some(tail_node) = self.nodes.get(&tail_id) {
                let inode_num = tail_node.inode_num;
                self.map.remove(&inode_num);
            }
            self.remove_node(tail_id);
        }
    }

    /// O(1) remove node completely (unlink from the list and drop storage;
    /// the caller is responsible for the `map` entry)
    fn remove_node(&mut self, node_id: NodeId) {
        self.remove_node_from_list(node_id);
        self.nodes.remove(&node_id);
    }

    /// O(1) remove node from doubly-linked list (but keep in nodes map)
    fn remove_node_from_list(&mut self, node_id: NodeId) {
        if let Some(node) = self.nodes.get(&node_id) {
            let prev_id = node.prev;
            let next_id = node.next;

            // Update prev node's next pointer
            if let Some(prev_id) = prev_id {
                if let Some(prev_node) = self.nodes.get_mut(&prev_id) {
                    prev_node.next = next_id;
                }
            } else {
                // This was the head
                self.head = next_id;
            }

            // Update next node's prev pointer
            if let Some(next_id) = next_id {
                if let Some(next_node) = self.nodes.get_mut(&next_id) {
                    next_node.prev = prev_id;
                }
            } else {
                // This was the tail
                self.tail = prev_id;
            }
        }
    }

    /// Number of cached inodes
    fn len(&self) -> usize {
        self.nodes.len()
    }

    /// Get cache statistics for debugging and performance analysis
    /// (hits, misses, current size)
    fn get_stats(&self) -> (u64, u64, usize) {
        (self.hits, self.misses, self.nodes.len())
    }

    /// Print cache statistics (integer-percentage hit rate)
    fn print_stats(&self, cache_name: &str) {
        let total = self.hits + self.misses;
        let hit_rate = if total > 0 {
            (self.hits * 100) / total
        } else {
            0
        };
        crate::early_println!(
            "[ext2] {} Cache Stats: hits={}, misses={}, size={}, hit_rate={}%",
            cache_name,
            self.hits,
            self.misses,
            self.nodes.len(),
            hit_rate
        );
    }
}
527
/// Node ID type for the LRU cache
///
/// Identifies entries in the caches' internal `nodes` maps; unrelated to
/// ext2 inode or block numbers.
type NodeId = u32;
530
/// Node in doubly-linked list for O(1) LRU operations
///
/// The list is threaded by `NodeId` indices instead of raw pointers so the
/// cache remains safe to use behind a lock.
#[derive(Debug)]
struct LruNode {
    /// Block number this entry caches (key into the owning cache's map)
    block_num: u64,
    /// Cached block contents
    data: Vec<u8>,
    /// Previous (more recently used) node; None when this is the head
    prev: Option<NodeId>,
    /// Next (less recently used) node; None when this is the tail
    next: Option<NodeId>,
}
539
/// O(1) LRU cache implementation using HashMap + doubly-linked list
/// This implementation uses indices instead of raw pointers to be thread-safe
///
/// `head` is the most recently used entry and `tail` the least, which is
/// the one evicted when the cache is full.
struct BlockLruCache {
    /// Map from block number to node ID
    map: HashMap<u64, NodeId>,
    /// Storage for all nodes
    nodes: HashMap<NodeId, LruNode>,
    /// Head of doubly-linked list (most recently used)
    head: Option<NodeId>,
    /// Tail of doubly-linked list (least recently used)
    tail: Option<NodeId>,
    /// Next available node ID
    next_id: NodeId,
    /// Maximum number of entries before the tail is evicted
    max_size: usize,
    /// Cache hit counter (statistics only)
    hits: u64,
    /// Cache miss counter (statistics only)
    misses: u64,
}
559
560impl BlockLruCache {
561    fn new(max_size: usize) -> Self {
562        Self {
563            map: HashMap::new(),
564            nodes: HashMap::new(),
565            head: None,
566            tail: None,
567            next_id: 0,
568            max_size,
569            hits: 0,
570            misses: 0,
571        }
572    }
573
574    fn get(&mut self, block_num: u64) -> Option<Vec<u8>> {
575        if let Some(&node_id) = self.map.get(&block_num) {
576            self.hits += 1;
577            // Move to head (most recently used)
578            self.move_to_head(node_id);
579            // Return cloned data
580            self.nodes.get(&node_id).map(|node| node.data.clone())
581        } else {
582            self.misses += 1;
583            None
584        }
585    }
586
587    /// Move node to head of LRU list (O(1))
588    fn move_to_head(&mut self, node_id: NodeId) {
589        if Some(node_id) == self.head {
590            return; // Already at head
591        }
592
593        // Remove from current position
594        self.remove_from_list(node_id);
595
596        // Add to head
597        self.add_to_head(node_id);
598    }
599
600    /// Remove node from doubly-linked list (O(1))
601    fn remove_from_list(&mut self, node_id: NodeId) {
602        if let Some(node) = self.nodes.get(&node_id) {
603            let prev = node.prev;
604            let next = node.next;
605
606            // Update prev node's next pointer
607            if let Some(prev_id) = prev {
608                if let Some(prev_node) = self.nodes.get_mut(&prev_id) {
609                    prev_node.next = next;
610                }
611            } else {
612                // This was the head
613                self.head = next;
614            }
615
616            // Update next node's prev pointer
617            if let Some(next_id) = next {
618                if let Some(next_node) = self.nodes.get_mut(&next_id) {
619                    next_node.prev = prev;
620                }
621            } else {
622                // This was the tail
623                self.tail = prev;
624            }
625        }
626    }
627
628    /// Add node to head of list (O(1))
629    fn add_to_head(&mut self, node_id: NodeId) {
630        if let Some(node) = self.nodes.get_mut(&node_id) {
631            node.prev = None;
632            node.next = self.head;
633        }
634
635        if let Some(old_head) = self.head {
636            if let Some(old_head_node) = self.nodes.get_mut(&old_head) {
637                old_head_node.prev = Some(node_id);
638            }
639        } else {
640            // List was empty
641            self.tail = Some(node_id);
642        }
643
644        self.head = Some(node_id);
645    }
646
647    fn insert(&mut self, block_num: u64, block_data: Vec<u8>) {
648        // If already exists, update and move to head
649        if let Some(&existing_id) = self.map.get(&block_num) {
650            if let Some(existing_node) = self.nodes.get_mut(&existing_id) {
651                existing_node.data = block_data;
652            }
653            self.move_to_head(existing_id);
654            return;
655        }
656
657        // If cache is full, remove LRU (tail) item
658        if self.nodes.len() >= self.max_size {
659            if let Some(tail_id) = self.tail {
660                if let Some(tail_node) = self.nodes.get(&tail_id) {
661                    let tail_block_num = tail_node.block_num;
662                    self.map.remove(&tail_block_num);
663                }
664                self.remove_from_list(tail_id);
665                self.nodes.remove(&tail_id);
666            }
667        }
668
669        // Create new node
670        let node_id = self.next_id;
671        self.next_id += 1;
672
673        let new_node = LruNode {
674            block_num,
675            data: block_data,
676            prev: None,
677            next: None,
678        };
679
680        // Insert into data structures
681        self.nodes.insert(node_id, new_node);
682        self.map.insert(block_num, node_id);
683
684        // Add to head of list
685        self.add_to_head(node_id);
686    }
687
688    fn remove(&mut self, block_num: u64) {
689        if let Some(&node_id) = self.map.get(&block_num) {
690            self.map.remove(&block_num);
691            self.remove_from_list(node_id);
692            self.nodes.remove(&node_id);
693        }
694    }
695
696    fn len(&self) -> usize {
697        self.nodes.len()
698    }
699
700    /// Get cache statistics for debugging and performance analysis
701    fn get_stats(&self) -> (u64, u64, usize) {
702        (self.hits, self.misses, self.nodes.len())
703    }
704
705    /// Print cache statistics
706    fn print_stats(&self, cache_name: &str) {
707        let total = self.hits + self.misses;
708        let hit_rate = if total > 0 {
709            (self.hits * 100) / total
710        } else {
711            0
712        };
713        crate::early_println!(
714            "[ext2] {} Cache Stats: hits={}, misses={}, size={}, hit_rate={}%",
715            cache_name,
716            self.hits,
717            self.misses,
718            self.nodes.len(),
719            hit_rate
720        );
721    }
722}
723
724impl Ext2FileSystem {
    /// Create a new ext2 filesystem from a block device
    ///
    /// Reads and parses the superblock, builds the root directory node, and
    /// wires the filesystem back-reference into it.
    ///
    /// # Errors
    ///
    /// Returns `FileSystemErrorKind::IoError` when the superblock cannot be
    /// read from the device; propagates parse errors from
    /// `Ext2Superblock::from_bytes_boxed`.
    pub fn new(block_device: Arc<dyn BlockDevice>) -> Result<Arc<Self>, FileSystemError> {
        // Read the superblock from sectors 2-3 (block 1, since each block is 1024 bytes = 2 sectors)
        // NOTE(review): assumes 512-byte sectors so that sector 2 == byte
        // offset 1024 — confirm for devices with other sector sizes.
        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: 2,       // Start at sector 2 (block 1)
            sector_count: 2, // Read 2 sectors (1024 bytes)
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; 1024],
        });

        block_device.enqueue_request(request);
        let results = block_device.process_requests();

        // Take the first completed request; its buffer holds the raw superblock
        let superblock_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read ext2 superblock",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        // Parse superblock and move to heap to avoid stack overflow
        let superblock = Ext2Superblock::from_bytes_boxed(&superblock_data)?;

        let block_size = superblock.get_block_size();
        let root_inode = EXT2_ROOT_INO;

        // Create root node
        let root = Ext2Node::new(
            root_inode,
            FileType::Directory,
            1, // Root node ID is 1
        );

        let fs = Arc::new(Self {
            fs_id: FileSystemId::new(),
            block_device,
            superblock,
            block_size,
            root_inode,
            root: RwLock::new(Arc::new(root)),
            name: "ext2".to_string(),
            next_file_id: Mutex::new(2), // Start from 2, root is 1
            inode_cache: Mutex::new(InodeLruCache::new(8192)),
            block_cache: Mutex::new(BlockLruCache::new(8192)),
        });

        // Set filesystem reference in root node
        let fs_weak = Arc::downgrade(&(fs.clone() as Arc<dyn FileSystemOperations>));
        fs.root.read().set_filesystem(fs_weak);

        Ok(fs)
    }
789
790    /// Create a new ext2 filesystem from a device ID using the new Device trait methods
791    pub fn new_from_device_id(device_id: usize) -> Result<Arc<Self>, FileSystemError> {
792        // Get device from DeviceManager
793        let device = DeviceManager::get_manager()
794            .get_device(device_id)
795            .ok_or_else(|| {
796                FileSystemError::new(
797                    FileSystemErrorKind::DeviceError,
798                    format!("Device with ID {} not found", device_id),
799                )
800            })?;
801
802        // Convert to block device using the new into_block_device method
803        let block_device = device.into_block_device().ok_or_else(|| {
804            FileSystemError::new(
805                FileSystemErrorKind::DeviceError,
806                format!("Device with ID {} is not a block device", device_id),
807            )
808        })?;
809
810        // Create ext2 filesystem with the block device
811        Self::new(block_device)
812    }
813
814    /// Create a new ext2 filesystem from parameters
815    pub fn new_from_params(params: &Ext2Params) -> Result<Arc<Self>, FileSystemError> {
816        if let Some(device_id) = params.get_device_id() {
817            Self::new_from_device_id(device_id)
818        } else {
819            Err(FileSystemError::new(
820                FileSystemErrorKind::InvalidData,
821                "Device ID not resolved in parameters",
822            ))
823        }
824    }
825
826    /// Read an inode from disk
827    pub fn read_inode(&self, inode_num: u32) -> Result<Ext2Inode, FileSystemError> {
828        profile_scope!("ext2::read_inode");
829        // Check cache first
830        {
831            let mut cache = self.inode_cache.lock();
832            if let Some(inode) = cache.get(inode_num) {
833                return Ok(inode);
834            }
835        }
836
837        // Calculate inode location
838        let group = (inode_num - 1) / self.superblock.inodes_per_group;
839        let local_inode = (inode_num - 1) % self.superblock.inodes_per_group;
840
841        // Read block group descriptor
842        // BGD table starts after the superblock
843        // For 1KB blocks: superblock is at block 1, so BGD starts at block 2
844        // For 2KB+ blocks: superblock is at block 0, so BGD starts at block 1
845        let bgd_table_start_block = if self.block_size == 1024 { 2 } else { 1 };
846        let bgd_block = bgd_table_start_block
847            + (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32) / self.block_size;
848        let bgd_block_sector = self.block_to_sector(bgd_block as u64);
849
850        let request = Box::new(crate::device::block::request::BlockIORequest {
851            request_type: crate::device::block::request::BlockIORequestType::Read,
852            sector: bgd_block_sector,
853            sector_count: (self.block_size / 512) as usize,
854            head: 0,
855            cylinder: 0,
856            buffer: vec![0u8; self.block_size as usize],
857        });
858
859        self.block_device.enqueue_request(request);
860        let results = self.block_device.process_requests();
861
862        let bgd_data = if let Some(result) = results.first() {
863            match &result.result {
864                Ok(_) => &result.request.buffer,
865                Err(_) => {
866                    return Err(FileSystemError::new(
867                        FileSystemErrorKind::IoError,
868                        "Failed to read block group descriptor",
869                    ));
870                }
871            }
872        } else {
873            return Err(FileSystemError::new(
874                FileSystemErrorKind::IoError,
875                "No result from block device read",
876            ));
877        };
878
879        let bgd_offset =
880            (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32) % self.block_size;
881        let bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data[bgd_offset as usize..])?;
882
883        // Calculate inode table location
884        let inode_size = self.superblock.get_inode_size() as u32;
885        let inode_block = bgd.inode_table + (local_inode * inode_size) / self.block_size;
886        let inode_offset = (local_inode * inode_size) % self.block_size;
887
888        #[cfg(test)]
889        crate::early_println!(
890            "[ext2] read_inode: Reading inode {} from block {}, offset {}, inode_size={}",
891            inode_num,
892            inode_block,
893            inode_offset,
894            inode_size
895        );
896
897        // Read inode
898        let inode_sector = self.block_to_sector(inode_block as u64);
899        let request = Box::new(crate::device::block::request::BlockIORequest {
900            request_type: crate::device::block::request::BlockIORequestType::Read,
901            sector: inode_sector,
902            sector_count: (self.block_size / 512) as usize,
903            head: 0,
904            cylinder: 0,
905            buffer: vec![0u8; self.block_size as usize],
906        });
907
908        self.block_device.enqueue_request(request);
909        let results = self.block_device.process_requests();
910
911        let inode_data = if let Some(result) = results.first() {
912            match &result.result {
913                Ok(_) => &result.request.buffer,
914                Err(_) => {
915                    return Err(FileSystemError::new(
916                        FileSystemErrorKind::IoError,
917                        "Failed to read inode",
918                    ));
919                }
920            }
921        } else {
922            return Err(FileSystemError::new(
923                FileSystemErrorKind::IoError,
924                "No result from block device read",
925            ));
926        };
927
928        let inode = Ext2Inode::from_bytes(&inode_data[inode_offset as usize..])?;
929
930        // Cache the inode with LRU eviction
931        {
932            let mut cache = self.inode_cache.lock();
933            cache.insert(inode_num, inode);
934        }
935
936        #[cfg(test)]
937        unsafe {
938            static mut INODE_CALL_COUNT: u64 = 0;
939            INODE_CALL_COUNT += 1;
940            // Print inode cache stats periodically (every 50th call)
941            if INODE_CALL_COUNT % 50 == 0 {
942                let cache = self.inode_cache.lock();
943                cache.print_stats("Inode");
944            }
945        }
946
947        Ok(inode)
948    }
949
950    /// Read directory entries from an inode
951    pub fn read_directory_entries(
952        &self,
953        inode: &Ext2Inode,
954    ) -> Result<Vec<Ext2DirectoryEntry>, FileSystemError> {
955        profile_scope!("ext2::read_directory_entries");
956
957        let mut entries = Vec::new();
958        let num_blocks = (inode.size as u64 + self.block_size as u64 - 1) / self.block_size as u64;
959
960        if num_blocks == 0 {
961            return Ok(entries);
962        }
963
964        // Use batched block reading for better performance
965        let block_nums = self.get_inode_blocks(inode, 0, num_blocks)?;
966
967        // Filter out zero blocks and collect valid block numbers
968        let mut valid_blocks = Vec::new();
969        for &block_num in &block_nums {
970            if block_num > 0 {
971                valid_blocks.push(block_num);
972            }
973        }
974
975        if valid_blocks.is_empty() {
976            return Ok(entries);
977        }
978
979        // Read all blocks at once using the cached method
980        let blocks_data = self.read_blocks_cached(&valid_blocks)?;
981
982        // Process each block
983        for block_data in blocks_data {
984            let mut offset = 0;
985            while offset < self.block_size as usize {
986                if offset + 8 > self.block_size as usize {
987                    break;
988                }
989
990                let entry = Ext2DirectoryEntry::from_bytes(&block_data[offset..])?;
991                if entry.entry.inode == 0 {
992                    // In ext2, an inode of 0 can mean an unused entry, but not necessarily the end.
993                    // The record length should still be valid.
994                    let rec_len = entry.entry.rec_len;
995                    if rec_len == 0 {
996                        break;
997                    }
998                    offset += rec_len as usize;
999                    continue;
1000                }
1001
1002                let rec_len = entry.entry.rec_len;
1003                entries.push(entry);
1004                offset += rec_len as usize;
1005
1006                if rec_len == 0 {
1007                    break;
1008                }
1009            }
1010        }
1011
1012        Ok(entries)
1013    }
1014
1015    /// Get the block number for a logical block within an inode
1016    fn get_inode_block(
1017        &self,
1018        inode: &Ext2Inode,
1019        logical_block: u64,
1020    ) -> Result<u64, FileSystemError> {
1021        profile_scope!("ext2::get_inode_block");
1022        let blocks_per_indirect = self.block_size / 4; // Each pointer is 4 bytes
1023
1024        if logical_block < 12 {
1025            // Direct blocks
1026            Ok(inode.block[logical_block as usize] as u64)
1027        } else if logical_block < 12 + blocks_per_indirect as u64 {
1028            // Single indirect
1029            let indirect_block = inode.block[12] as u64;
1030            if indirect_block == 0 {
1031                return Ok(0);
1032            }
1033
1034            let index = logical_block - 12;
1035            let indirect_data = self.read_block_cached(indirect_block)?;
1036
1037            let block_ptr = u32::from_le_bytes([
1038                indirect_data[index as usize * 4],
1039                indirect_data[index as usize * 4 + 1],
1040                indirect_data[index as usize * 4 + 2],
1041                indirect_data[index as usize * 4 + 3],
1042            ]);
1043
1044            Ok(block_ptr as u64)
1045        } else if logical_block
1046            < 12 + blocks_per_indirect as u64
1047                + blocks_per_indirect as u64 * blocks_per_indirect as u64
1048        {
1049            // Double indirect
1050            let double_indirect_block = inode.block[13] as u64;
1051            if double_indirect_block == 0 {
1052                return Ok(0);
1053            }
1054
1055            let offset_in_double = logical_block - 12 - blocks_per_indirect as u64;
1056            let first_indirect_index = offset_in_double / blocks_per_indirect as u64;
1057            let second_indirect_index = offset_in_double % blocks_per_indirect as u64;
1058
1059            // Read double indirect block
1060            let double_indirect_data = self.read_block_cached(double_indirect_block)?;
1061
1062            // Get the first level indirect block pointer
1063            let first_indirect_ptr = u32::from_le_bytes([
1064                double_indirect_data[first_indirect_index as usize * 4],
1065                double_indirect_data[first_indirect_index as usize * 4 + 1],
1066                double_indirect_data[first_indirect_index as usize * 4 + 2],
1067                double_indirect_data[first_indirect_index as usize * 4 + 3],
1068            ]);
1069
1070            if first_indirect_ptr == 0 {
1071                return Ok(0);
1072            }
1073
1074            // Read the first level indirect block
1075            let first_indirect_data = self.read_block_cached(first_indirect_ptr as u64)?;
1076
1077            // Get the final block pointer
1078            let block_ptr = u32::from_le_bytes([
1079                first_indirect_data[second_indirect_index as usize * 4],
1080                first_indirect_data[second_indirect_index as usize * 4 + 1],
1081                first_indirect_data[second_indirect_index as usize * 4 + 2],
1082                first_indirect_data[second_indirect_index as usize * 4 + 3],
1083            ]);
1084
1085            Ok(block_ptr as u64)
1086        } else {
1087            // Triple indirect blocks - not implemented yet
1088            Err(FileSystemError::new(
1089                FileSystemErrorKind::NotSupported,
1090                "Triple indirect blocks not yet supported",
1091            ))
1092        }
1093    }
1094
    /// Get multiple contiguous block numbers for logical blocks within an inode (batched version)
    /// This function efficiently maps multiple logical blocks to physical blocks, reducing
    /// the number of indirect block reads by batching them together.
    ///
    /// Returns one physical block number per logical block in
    /// `start_logical_block..start_logical_block + count`, in order. A value
    /// of 0 in the result marks a sparse (unallocated) block. Ranges that
    /// reach into the triple-indirect area return `NotSupported`.
    fn get_inode_blocks(
        &self,
        inode: &Ext2Inode,
        start_logical_block: u64,
        count: u64,
    ) -> Result<Vec<u64>, FileSystemError> {
        profile_scope!("ext2::get_inode_blocks");
        if count == 0 {
            return Ok(Vec::new());
        }

        let blocks_per_indirect = self.block_size / 4; // Each pointer is 4 bytes
        let mut result = Vec::with_capacity(count as usize);

        let mut current_block = start_logical_block;
        let end_block = start_logical_block + count;

        // For simplicity, we'll read blocks as needed rather than using a complex cache

        // Each loop iteration consumes one contiguous region of the request:
        // direct, single-indirect, or double-indirect.
        while current_block < end_block {
            if current_block < 12 {
                // Direct blocks - process all direct blocks in this range
                let direct_end = (end_block).min(12);
                for i in current_block..direct_end {
                    result.push(inode.block[i as usize] as u64);
                }
                current_block = direct_end;
            } else if current_block < 12 + blocks_per_indirect as u64 {
                // Single indirect blocks
                let indirect_block = inode.block[12] as u64;
                if indirect_block == 0 {
                    // All blocks in this range are zero
                    let indirect_end = (end_block).min(12 + blocks_per_indirect as u64);
                    for _ in current_block..indirect_end {
                        result.push(0);
                    }
                    current_block = indirect_end;
                } else {
                    let indirect_data = self.read_block_cached(indirect_block)?;
                    let indirect_end = (end_block).min(12 + blocks_per_indirect as u64);

                    // Decode one little-endian u32 pointer per logical block.
                    for logical_block in current_block..indirect_end {
                        let index = logical_block - 12;
                        let block_ptr = u32::from_le_bytes([
                            indirect_data[index as usize * 4],
                            indirect_data[index as usize * 4 + 1],
                            indirect_data[index as usize * 4 + 2],
                            indirect_data[index as usize * 4 + 3],
                        ]);
                        result.push(block_ptr as u64);
                    }
                    current_block = indirect_end;
                }
            } else if current_block
                < 12 + blocks_per_indirect as u64
                    + blocks_per_indirect as u64 * blocks_per_indirect as u64
            {
                // Double indirect
                let double_indirect_block = inode.block[13] as u64;
                if double_indirect_block == 0 {
                    // All blocks in this range are zero
                    let double_indirect_end = (end_block).min(
                        12 + blocks_per_indirect as u64
                            + blocks_per_indirect as u64 * blocks_per_indirect as u64,
                    );
                    for _ in current_block..double_indirect_end {
                        result.push(0);
                    }
                    current_block = double_indirect_end;
                } else {
                    // Optimized double indirect block handling
                    let double_indirect_data = self.read_block_cached(double_indirect_block)?;
                    let double_indirect_end = (end_block).min(
                        12 + blocks_per_indirect as u64
                            + blocks_per_indirect as u64 * blocks_per_indirect as u64,
                    );

                    // Compute which single-indirect blocks this request spans
                    // (first/last index) and the slot offset inside the first
                    // one; the first and last may be only partially covered.
                    let first_single_indirect_start = 12 + blocks_per_indirect as u64;
                    let first_single_indirect_index = ((current_block
                        - first_single_indirect_start)
                        / blocks_per_indirect as u64)
                        as usize;
                    let offset_in_first_single_indirect = ((current_block
                        - first_single_indirect_start)
                        % blocks_per_indirect as u64)
                        as usize;

                    let last_single_indirect_index =
                        ((double_indirect_end - 1 - first_single_indirect_start)
                            / blocks_per_indirect as u64) as usize;

                    for single_indirect_index in
                        first_single_indirect_index..=last_single_indirect_index
                    {
                        let single_indirect_ptr = u32::from_le_bytes([
                            double_indirect_data[single_indirect_index * 4],
                            double_indirect_data[single_indirect_index * 4 + 1],
                            double_indirect_data[single_indirect_index * 4 + 2],
                            double_indirect_data[single_indirect_index * 4 + 3],
                        ]) as u64;

                        if single_indirect_ptr == 0 {
                            // This entire single indirect block is sparse
                            // Count how many of its slots fall inside the
                            // requested range so we emit exactly that many
                            // zeros (first/last blocks may be partial).
                            let blocks_in_this_indirect = if single_indirect_index
                                == last_single_indirect_index
                            {
                                let offset_in_last_single_indirect =
                                    ((double_indirect_end - 1 - first_single_indirect_start)
                                        % blocks_per_indirect as u64)
                                        as usize
                                        + 1;
                                if single_indirect_index == first_single_indirect_index {
                                    offset_in_last_single_indirect - offset_in_first_single_indirect
                                } else {
                                    offset_in_last_single_indirect
                                }
                            } else if single_indirect_index == first_single_indirect_index {
                                blocks_per_indirect as usize - offset_in_first_single_indirect
                            } else {
                                blocks_per_indirect as usize
                            };

                            for _ in 0..blocks_in_this_indirect {
                                result.push(0);
                            }
                        } else {
                            // Read this single indirect block
                            let single_indirect_data =
                                self.read_block_cached(single_indirect_ptr)?;

                            // Clamp the slot range to the requested span:
                            // start from the request offset in the first
                            // block, end at the request tail in the last.
                            let start_offset =
                                if single_indirect_index == first_single_indirect_index {
                                    offset_in_first_single_indirect
                                } else {
                                    0
                                };
                            let end_offset = if single_indirect_index == last_single_indirect_index
                            {
                                ((double_indirect_end - 1 - first_single_indirect_start)
                                    % blocks_per_indirect as u64)
                                    as usize
                                    + 1
                            } else {
                                blocks_per_indirect as usize
                            };

                            for offset in start_offset..end_offset {
                                let block_ptr = u32::from_le_bytes([
                                    single_indirect_data[offset * 4],
                                    single_indirect_data[offset * 4 + 1],
                                    single_indirect_data[offset * 4 + 2],
                                    single_indirect_data[offset * 4 + 3],
                                ]);
                                result.push(block_ptr as u64);
                            }
                        }
                    }
                    current_block = double_indirect_end;
                }
            } else {
                // Triple indirect blocks - not implemented yet
                return Err(FileSystemError::new(
                    FileSystemErrorKind::NotSupported,
                    "Triple indirect blocks not yet supported",
                ));
            }
        }

        Ok(result)
    }
1268
1269    /// Read the entire content of a file given its inode number (optimized)
1270    pub fn read_file_content(
1271        &self,
1272        inode_num: u32,
1273        size: usize,
1274    ) -> Result<Vec<u8>, FileSystemError> {
1275        profile_scope!("ext2::read_file_content");
1276        let inode = self.read_inode(inode_num)?;
1277        let mut content = Vec::with_capacity(size);
1278
1279        let num_blocks = (size as u64 + self.block_size as u64 - 1) / self.block_size as u64;
1280        if num_blocks == 0 {
1281            return Ok(content);
1282        }
1283
1284        // Use batched block reading for better performance
1285        let block_nums = self.get_inode_blocks(&inode, 0, num_blocks)?;
1286
1287        let mut block_nums_to_read = Vec::new();
1288        for &block_num in block_nums.iter() {
1289            if block_num > 0 {
1290                block_nums_to_read.push(block_num);
1291            } else {
1292                // If there are pending blocks to read, read them first
1293                if !block_nums_to_read.is_empty() {
1294                    let blocks_data = self.read_blocks_cached(&block_nums_to_read)?;
1295                    for data in blocks_data {
1296                        content.extend_from_slice(&data);
1297                    }
1298                    block_nums_to_read.clear();
1299                }
1300                // Handle sparse block by adding zeros
1301                let len_to_add = core::cmp::min(self.block_size as usize, size - content.len());
1302                content.extend(core::iter::repeat(0).take(len_to_add));
1303            }
1304        }
1305
1306        if !block_nums_to_read.is_empty() {
1307            let blocks_data = self.read_blocks_cached(&block_nums_to_read)?;
1308            for data in blocks_data {
1309                content.extend_from_slice(&data);
1310            }
1311        }
1312
1313        // Truncate to the exact size
1314        content.truncate(size);
1315        Ok(content)
1316    }
1317
    /// Read a single page (4096 bytes) of file content into physical memory.
    ///
    /// This is used by the page cache manager for demand paging.
    /// The page is filled with zeros if beyond EOF.
    ///
    /// # Arguments
    /// * `inode_num` - Inode number of the file to read from
    /// * `page_index` - Zero-based page index within the file
    /// * `paddr` - Destination physical address; the caller must guarantee it
    ///   points to at least `PAGE_SIZE` writable bytes
    pub fn read_page_content(
        &self,
        inode_num: u32,
        page_index: u64,
        paddr: usize,
    ) -> Result<(), FileSystemError> {
        use crate::environment::PAGE_SIZE;

        profile_scope!("ext2::read_page_content");

        let inode = self.read_inode(inode_num)?;
        let file_size = inode.size as u64;
        let page_offset = page_index * PAGE_SIZE as u64;

        // Clear the page first
        // SAFETY: the caller guarantees `paddr` refers to at least PAGE_SIZE
        // writable bytes (see doc comment).
        unsafe {
            core::ptr::write_bytes(paddr as *mut u8, 0, PAGE_SIZE);
        }

        // If page is beyond EOF, return zeros
        if page_offset >= file_size {
            return Ok(());
        }

        // Calculate how many bytes to read from this page
        let bytes_in_page = if page_offset + PAGE_SIZE as u64 > file_size {
            (file_size - page_offset) as usize
        } else {
            PAGE_SIZE
        };

        // Calculate block range for this page
        let start_block = page_offset / self.block_size as u64;
        let end_block = (page_offset + bytes_in_page as u64 + self.block_size as u64 - 1)
            / self.block_size as u64;
        let num_blocks = end_block - start_block;

        if num_blocks == 0 {
            return Ok(());
        }

        // Get block numbers
        let block_nums = self.get_inode_blocks(&inode, start_block, num_blocks)?;

        let mut page_ptr = paddr as *mut u8;
        let mut bytes_written = 0usize;

        for (i, &block_num) in block_nums.iter().enumerate() {
            if block_num == 0 {
                // Sparse block - already zeroed
                let bytes_to_skip =
                    core::cmp::min(self.block_size as usize, bytes_in_page - bytes_written);
                // SAFETY: bytes_written + bytes_to_skip <= bytes_in_page <=
                // PAGE_SIZE, so the pointer stays inside the destination page.
                unsafe {
                    page_ptr = page_ptr.add(bytes_to_skip);
                }
                bytes_written += bytes_to_skip;
                continue;
            }

            // Read the block
            let block_data = self.read_block_cached(block_num)?;

            // Calculate offset within this block
            // NOTE(review): a non-zero offset here only occurs if PAGE_SIZE
            // is not a multiple of the block size; for the standard ext2
            // block sizes (1K/2K/4K) and a 4K page this is always 0 — confirm
            // if other geometries are ever supported.
            let block_offset = if i == 0 {
                (page_offset % self.block_size as u64) as usize
            } else {
                0
            };

            // Calculate how many bytes to copy from this block
            let bytes_to_copy = core::cmp::min(
                self.block_size as usize - block_offset,
                bytes_in_page - bytes_written,
            );

            // Copy to page
            // SAFETY: the source read stays within the block buffer because
            // block_offset + bytes_to_copy <= block_size; the destination
            // stays within the page because bytes_written + bytes_to_copy <=
            // bytes_in_page <= PAGE_SIZE.
            unsafe {
                core::ptr::copy_nonoverlapping(
                    block_data.as_ptr().add(block_offset),
                    page_ptr,
                    bytes_to_copy,
                );
                page_ptr = page_ptr.add(bytes_to_copy);
            }

            bytes_written += bytes_to_copy;

            if bytes_written >= bytes_in_page {
                break;
            }
        }

        Ok(())
    }
1416
1417    /// Write a single page (4096 bytes) of file content from physical memory.
1418    ///
1419    /// This is used by the page cache manager for writeback.
1420    pub fn write_page_content(
1421        &self,
1422        inode_num: u32,
1423        page_index: u64,
1424        paddr: usize,
1425    ) -> Result<(), FileSystemError> {
1426        profile_scope!("ext2::write_page_content");
1427
1428        // TODO: Phase 2 - Implement page writeback
1429        let _ = (inode_num, page_index, paddr);
1430        Err(FileSystemError::new(
1431            FileSystemErrorKind::NotSupported,
1432            "Page writeback not yet implemented",
1433        ))
1434    }
1435
    /// Write an inode to disk
    ///
    /// Locates the inode's slot inside its block group's inode table, then
    /// performs a read-modify-write of the block containing it so that the
    /// neighbouring inodes in the same block are preserved. On success the
    /// in-memory inode cache is refreshed with the new contents.
    ///
    /// # Arguments
    /// * `inode_number` - 1-based ext2 inode number to write
    /// * `inode` - Inode contents to serialize to disk
    ///
    /// # Errors
    /// Returns `IoError` for device failures and `InvalidData` when computed
    /// offsets fall outside the buffers read from disk.
    fn write_inode(&self, inode_number: u32, inode: &Ext2Inode) -> Result<(), FileSystemError> {
        profile_scope!("ext2::write_inode");
        // Calculate which block group contains this inode
        // (inode numbers are 1-based, hence the `- 1`)
        let inodes_per_group = self.superblock.inodes_per_group;
        let group_number = (inode_number - 1) / inodes_per_group;
        let inode_index = (inode_number - 1) % inodes_per_group;

        // Read the block group descriptor to get the inode table location
        // (the BGD table starts in block 2 for 1 KiB blocks, block 1 otherwise)
        let bgd_block = if self.block_size == 1024 { 2 } else { 1 };
        let bgd_sector = self.block_to_sector(bgd_block);
        let bgd_request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bgd_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(bgd_request);
        let bgd_results = self.block_device.process_requests();

        let bgd_data = if let Some(result) = bgd_results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read block group descriptors",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from BGD read",
            ));
        };

        // Parse the block group descriptor
        let bgd_offset = (group_number as usize) * 32; // Each BGD is 32 bytes
        if bgd_offset + 32 > bgd_data.len() {
            // NOTE(review): only one block of descriptors is read above, so
            // groups whose descriptor lies beyond that block are rejected
            // here rather than read — confirm this bound is never hit on
            // large filesystems.
            return Err(FileSystemError::new(
                FileSystemErrorKind::InvalidData,
                "Block group descriptor offset out of bounds",
            ));
        }

        // Extract inode table block from BGD
        // (bg_inode_table lives at byte offset 8 of the descriptor)
        let inode_table_block = u32::from_le_bytes([
            bgd_data[bgd_offset + 8],
            bgd_data[bgd_offset + 9],
            bgd_data[bgd_offset + 10],
            bgd_data[bgd_offset + 11],
        ]);

        // Calculate the block and offset within that block for this inode
        let inode_size = self.superblock.get_inode_size() as u32;
        let inodes_per_block = self.block_size / inode_size;
        let block_offset = inode_index / inodes_per_block;
        let inode_offset_in_block = (inode_index % inodes_per_block) * inode_size;

        let target_block = inode_table_block + block_offset;
        let target_sector = self.block_to_sector(target_block as u64);

        // Read the current block containing the inode
        // (read-modify-write: the other inodes in this block must survive)
        let read_request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: target_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(read_request);
        let read_results = self.block_device.process_requests();

        let mut block_data = if let Some(result) = read_results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read inode table block",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from inode table block read",
            ));
        };

        // Write the inode data into the block
        // SAFETY: `inode` is a valid reference, so reading size_of::<Ext2Inode>()
        // bytes from its address is in bounds; the slice lives only for this
        // statement, within the lifetime of the borrow.
        let inode_bytes = unsafe {
            core::slice::from_raw_parts(
                inode as *const Ext2Inode as *const u8,
                core::mem::size_of::<Ext2Inode>(),
            )
        };

        let start_offset = inode_offset_in_block as usize;
        let end_offset = start_offset + inode_bytes.len();

        if end_offset > block_data.len() {
            return Err(FileSystemError::new(
                FileSystemErrorKind::InvalidData,
                "Inode data would exceed block boundary",
            ));
        }

        block_data[start_offset..end_offset].copy_from_slice(inode_bytes);

        // Write the modified block back to disk
        let write_request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Write,
            sector: target_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: block_data,
        });

        self.block_device.enqueue_request(write_request);
        let write_results = self.block_device.process_requests();

        if let Some(result) = write_results.first() {
            match &result.result {
                Ok(_) => {
                    // Also update the cache
                    // (keeps subsequent read_inode calls coherent with disk)
                    let mut cache = self.inode_cache.lock();
                    cache.insert(inode_number, inode.clone());
                    Ok(())
                }
                Err(_) => Err(FileSystemError::new(
                    FileSystemErrorKind::IoError,
                    "Failed to write inode to disk",
                )),
            }
        } else {
            Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from inode write",
            ))
        }
    }
1585
1586    /// Initialize a new directory with . and .. entries
1587    fn initialize_directory(
1588        &self,
1589        dir_inode_number: u32,
1590        parent_inode_number: u32,
1591    ) -> Result<(), FileSystemError> {
1592        profile_scope!("ext2::initialize_directory");
1593
1594        // Allocate a block for the directory
1595        let block_number = self.allocate_block()?;
1596
1597        // Create directory entries for . and ..
1598        let block_size = self.block_size as usize;
1599        let mut block_data = vec![0u8; block_size];
1600
1601        // Create "." entry
1602        let dot_entry_size = 12; // 4 (inode) + 2 (rec_len) + 1 (name_len) + 1 (file_type) + 1 (name) + 3 (padding)
1603        let dot_inode = dir_inode_number.to_le_bytes();
1604        let dot_rec_len = dot_entry_size as u16;
1605        let dot_name_len = 1u8;
1606        let dot_file_type = 2u8; // Directory
1607
1608        block_data[0..4].copy_from_slice(&dot_inode);
1609        block_data[4..6].copy_from_slice(&dot_rec_len.to_le_bytes());
1610        block_data[6] = dot_name_len;
1611        block_data[7] = dot_file_type;
1612        block_data[8] = b'.';
1613
1614        // Create ".." entry - takes up the rest of the block
1615        let dotdot_offset = dot_entry_size;
1616        let dotdot_rec_len = (block_size - dotdot_offset) as u16;
1617        let dotdot_name_len = 2u8;
1618        let dotdot_file_type = 2u8; // Directory
1619        let dotdot_inode = parent_inode_number.to_le_bytes();
1620
1621        block_data[dotdot_offset..dotdot_offset + 4].copy_from_slice(&dotdot_inode);
1622        block_data[dotdot_offset + 4..dotdot_offset + 6]
1623            .copy_from_slice(&dotdot_rec_len.to_le_bytes());
1624        block_data[dotdot_offset + 6] = dotdot_name_len;
1625        block_data[dotdot_offset + 7] = dotdot_file_type;
1626        block_data[dotdot_offset + 8] = b'.';
1627        block_data[dotdot_offset + 9] = b'.';
1628
1629        // Write the block to disk
1630        let block_sector = self.block_to_sector(block_number as u64);
1631        let request = Box::new(crate::device::block::request::BlockIORequest {
1632            request_type: crate::device::block::request::BlockIORequestType::Write,
1633            sector: block_sector as usize,
1634            sector_count: (self.block_size / 512) as usize,
1635            head: 0,
1636            cylinder: 0,
1637            buffer: block_data,
1638        });
1639
1640        // Submit write request
1641        self.block_device.enqueue_request(request);
1642        let results = self.block_device.process_requests();
1643
1644        if results.is_empty() || results[0].result.is_err() {
1645            return Err(FileSystemError::new(
1646                FileSystemErrorKind::InvalidData,
1647                "Failed to write directory block",
1648            ));
1649        }
1650
1651        // Update the directory inode to point to this block and set size
1652        let mut dir_inode = self.read_inode(dir_inode_number)?;
1653        dir_inode.block[0] = block_number as u32;
1654        dir_inode.size = block_size as u32;
1655        dir_inode.blocks = (self.block_size / 512).to_le(); // Number of 512-byte sectors
1656
1657        self.write_inode(dir_inode_number, &dir_inode)?;
1658
1659        Ok(())
1660    }
1661
1662    /// Allocate a new data block using proper bitmap management
1663    fn allocate_block(&self) -> Result<u64, FileSystemError> {
1664        profile_scope!("ext2::allocate_block");
1665
1666        // Try to allocate from any available group
1667        let total_groups = (self.superblock.blocks_count + self.superblock.blocks_per_group - 1)
1668            / self.superblock.blocks_per_group;
1669
1670        for group in 0..total_groups {
1671            match self.allocate_block_in_group(group) {
1672                Ok(block_num) => return Ok(block_num),
1673                Err(FileSystemError {
1674                    kind: FileSystemErrorKind::NoSpace,
1675                    ..
1676                }) => {
1677                    // Try next group
1678                    continue;
1679                }
1680                Err(e) => return Err(e),
1681            }
1682        }
1683
1684        Err(FileSystemError::new(
1685            FileSystemErrorKind::NoSpace,
1686            "No free blocks available in any group",
1687        ))
1688    }
1689
1690    /// Allocate a block in a specific group - OPTIMIZED VERSION  
1691    fn allocate_block_in_group(&self, group: u32) -> Result<u64, FileSystemError> {
1692        profile_scope!("ext2::allocate_block_in_group");
1693
1694        #[cfg(test)]
1695        crate::early_println!(
1696            "[ext2] allocate_block_in_group: Starting OPTIMIZED allocation for group {}",
1697            group
1698        );
1699
1700        // Read block group descriptor
1701        let bgd_block = if self.block_size == 1024 { 2 } else { 1 };
1702        let bgd_sector = self.block_to_sector(bgd_block);
1703
1704        let request = Box::new(crate::device::block::request::BlockIORequest {
1705            request_type: crate::device::block::request::BlockIORequestType::Read,
1706            sector: bgd_sector as usize,
1707            sector_count: (self.block_size / 512) as usize,
1708            head: 0,
1709            cylinder: 0,
1710            buffer: vec![0u8; self.block_size as usize],
1711        });
1712
1713        self.block_device.enqueue_request(request);
1714        let results = self.block_device.process_requests();
1715
1716        let bgd_data = if let Some(result) = results.first() {
1717            match &result.result {
1718                Ok(_) => result.request.buffer.clone(),
1719                Err(_) => {
1720                    return Err(FileSystemError::new(
1721                        FileSystemErrorKind::IoError,
1722                        "Failed to read block group descriptor",
1723                    ));
1724                }
1725            }
1726        } else {
1727            return Err(FileSystemError::new(
1728                FileSystemErrorKind::IoError,
1729                "No result from block device read",
1730            ));
1731        };
1732
1733        let bgd_offset = (group * core::mem::size_of::<Ext2BlockGroupDescriptor>() as u32
1734            % self.block_size) as usize;
1735        let bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data[bgd_offset..])?;
1736
1737        // Check if there are free blocks
1738        if bgd.free_blocks_count == 0 {
1739            return Err(FileSystemError::new(
1740                FileSystemErrorKind::NoSpace,
1741                &format!("No free blocks in group {}", group),
1742            ));
1743        }
1744
1745        // Read block bitmap
1746        let bitmap_sector = self.block_to_sector(bgd.block_bitmap as u64);
1747        let request = Box::new(crate::device::block::request::BlockIORequest {
1748            request_type: crate::device::block::request::BlockIORequestType::Read,
1749            sector: bitmap_sector as usize,
1750            sector_count: (self.block_size / 512) as usize,
1751            head: 0,
1752            cylinder: 0,
1753            buffer: vec![0u8; self.block_size as usize],
1754        });
1755
1756        self.block_device.enqueue_request(request);
1757        let results = self.block_device.process_requests();
1758
1759        let mut bitmap_data = if let Some(result) = results.first() {
1760            match &result.result {
1761                Ok(_) => result.request.buffer.clone(),
1762                Err(_) => {
1763                    return Err(FileSystemError::new(
1764                        FileSystemErrorKind::IoError,
1765                        "Failed to read block bitmap",
1766                    ));
1767                }
1768            }
1769        } else {
1770            return Err(FileSystemError::new(
1771                FileSystemErrorKind::IoError,
1772                "No result from block device read",
1773            ));
1774        };
1775
1776        // Find first free block in bitmap
1777        let group_start_block = group * self.superblock.blocks_per_group;
1778        let data_start_block = if group == 0 {
1779            810.max(group_start_block)
1780        } else {
1781            let blocks_for_metadata = 3
1782                + (self.superblock.inodes_per_group * 128 + self.block_size - 1) / self.block_size;
1783            group_start_block + blocks_for_metadata
1784        };
1785
1786        let group_end_block = (group + 1) * self.superblock.blocks_per_group;
1787        let search_end = core::cmp::min(group_end_block, self.superblock.blocks_count as u32);
1788
1789        for block_num in data_start_block..search_end {
1790            let bit = block_num - group_start_block;
1791            let byte_index = (bit / 8) as usize;
1792            let bit_index = bit % 8;
1793
1794            if byte_index >= bitmap_data.len() {
1795                break;
1796            }
1797
1798            // Check if bit is free (0)
1799            if (bitmap_data[byte_index] & (1 << bit_index)) == 0 {
1800                // OPTIMIZATION: Batch bitmap + BGD updates
1801                bitmap_data[byte_index] |= 1 << bit_index;
1802
1803                #[cfg(test)]
1804                crate::early_println!(
1805                    "[ext2] allocate_block_in_group: Found free block {}, batching metadata updates",
1806                    block_num
1807                );
1808
1809                // Enqueue bitmap write
1810                let bitmap_write = Box::new(crate::device::block::request::BlockIORequest {
1811                    request_type: crate::device::block::request::BlockIORequestType::Write,
1812                    sector: bitmap_sector as usize,
1813                    sector_count: (self.block_size / 512) as usize,
1814                    head: 0,
1815                    cylinder: 0,
1816                    buffer: bitmap_data,
1817                });
1818                self.block_device.enqueue_request(bitmap_write);
1819
1820                // Prepare BGD update
1821                let mut updated_bgd_data = bgd_data.clone();
1822                let mut bgd_update =
1823                    Ext2BlockGroupDescriptor::from_bytes(&updated_bgd_data[bgd_offset..])?;
1824                let current_free_blocks = u16::from_le(bgd_update.free_blocks_count);
1825                bgd_update.free_blocks_count = (current_free_blocks.saturating_sub(1)).to_le();
1826                bgd_update.write_to_bytes(&mut updated_bgd_data[bgd_offset..]);
1827
1828                // Enqueue BGD write
1829                let bgd_write = Box::new(crate::device::block::request::BlockIORequest {
1830                    request_type: crate::device::block::request::BlockIORequestType::Write,
1831                    sector: bgd_sector as usize,
1832                    sector_count: (self.block_size / 512) as usize,
1833                    head: 0,
1834                    cylinder: 0,
1835                    buffer: updated_bgd_data,
1836                });
1837                self.block_device.enqueue_request(bgd_write);
1838
1839                // Process both writes in one batch
1840                #[cfg(test)]
1841                crate::early_println!(
1842                    "[ext2] allocate_block_in_group: Processing 2 writes in batch (bitmap + BGD)"
1843                );
1844                let write_results = self.block_device.process_requests();
1845
1846                if write_results.len() != 2 || write_results.iter().any(|r| r.result.is_err()) {
1847                    return Err(FileSystemError::new(
1848                        FileSystemErrorKind::IoError,
1849                        "Failed to write bitmap or BGD",
1850                    ));
1851                }
1852
1853                // Update superblock (separate for now - could be batched too)
1854                self.update_superblock_counts(-1, 0, 0)?;
1855
1856                #[cfg(test)]
1857                crate::early_println!(
1858                    "[ext2] allocate_block_in_group: Successfully allocated block {} (OPTIMIZED: reduced I/O ops)",
1859                    block_num
1860                );
1861                return Ok(block_num as u64);
1862            }
1863        }
1864
1865        Err(FileSystemError::new(
1866            FileSystemErrorKind::NoSpace,
1867            "No free blocks found",
1868        ))
1869    }
1870
1871    /// Allocate multiple contiguous blocks in a specific group - OPTIMIZED VERSION
1872    fn allocate_blocks_contiguous_in_group(
1873        &self,
1874        group: u32,
1875        count: u32,
1876    ) -> Result<Vec<u64>, FileSystemError> {
1877        profile_scope!("ext2::allocate_blocks_contiguous_in_group");
1878
1879        #[cfg(test)]
1880        crate::early_println!(
1881            "[ext2] allocate_blocks_contiguous_in_group: Starting allocation for {} blocks in group {}",
1882            count,
1883            group
1884        );
1885
1886        // Read block group descriptor
1887        let bgd_block = if self.block_size == 1024 { 2 } else { 1 };
1888        let bgd_sector = self.block_to_sector(bgd_block);
1889
1890        let request = Box::new(crate::device::block::request::BlockIORequest {
1891            request_type: crate::device::block::request::BlockIORequestType::Read,
1892            sector: bgd_sector as usize,
1893            sector_count: (self.block_size / 512) as usize,
1894            head: 0,
1895            cylinder: 0,
1896            buffer: vec![0u8; self.block_size as usize],
1897        });
1898
1899        self.block_device.enqueue_request(request);
1900        let results = self.block_device.process_requests();
1901
1902        let bgd_data = if let Some(result) = results.first() {
1903            match &result.result {
1904                Ok(_) => result.request.buffer.clone(),
1905                Err(_) => {
1906                    return Err(FileSystemError::new(
1907                        FileSystemErrorKind::IoError,
1908                        "Failed to read block group descriptor",
1909                    ));
1910                }
1911            }
1912        } else {
1913            return Err(FileSystemError::new(
1914                FileSystemErrorKind::IoError,
1915                "No result from block device read",
1916            ));
1917        };
1918
1919        let bgd_offset = (group * core::mem::size_of::<Ext2BlockGroupDescriptor>() as u32
1920            % self.block_size) as usize;
1921        let bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data[bgd_offset..])?;
1922
1923        // Check if there are enough free blocks
1924        let free_blocks_count = u16::from_le(bgd.free_blocks_count);
1925        if free_blocks_count < count as u16 {
1926            return Err(FileSystemError::new(
1927                FileSystemErrorKind::NoSpace,
1928                &format!(
1929                    "Insufficient free blocks in group {} (need {}, have {})",
1930                    group, count, free_blocks_count
1931                ),
1932            ));
1933        }
1934
1935        // Read block bitmap
1936        let bitmap_sector = self.block_to_sector(bgd.block_bitmap as u64);
1937        let request = Box::new(crate::device::block::request::BlockIORequest {
1938            request_type: crate::device::block::request::BlockIORequestType::Read,
1939            sector: bitmap_sector as usize,
1940            sector_count: (self.block_size / 512) as usize,
1941            head: 0,
1942            cylinder: 0,
1943            buffer: vec![0u8; self.block_size as usize],
1944        });
1945
1946        self.block_device.enqueue_request(request);
1947        let results = self.block_device.process_requests();
1948
1949        let mut bitmap_data = if let Some(result) = results.first() {
1950            match &result.result {
1951                Ok(_) => result.request.buffer.clone(),
1952                Err(_) => {
1953                    return Err(FileSystemError::new(
1954                        FileSystemErrorKind::IoError,
1955                        "Failed to read block bitmap",
1956                    ));
1957                }
1958            }
1959        } else {
1960            return Err(FileSystemError::new(
1961                FileSystemErrorKind::IoError,
1962                "No result from block device read",
1963            ));
1964        };
1965
1966        // Find contiguous free blocks in bitmap
1967        let group_start_block = group * self.superblock.blocks_per_group;
1968        let data_start_block = if group == 0 {
1969            810.max(group_start_block)
1970        } else {
1971            let blocks_for_metadata = 3
1972                + (self.superblock.inodes_per_group * 128 + self.block_size - 1) / self.block_size;
1973            group_start_block + blocks_for_metadata
1974        };
1975
1976        let group_end_block = (group + 1) * self.superblock.blocks_per_group;
1977        let search_end = core::cmp::min(group_end_block, self.superblock.blocks_count as u32);
1978
1979        // Search for contiguous free blocks
1980        for start_block in data_start_block..(search_end.saturating_sub(count - 1)) {
1981            let mut all_free = true;
1982
1983            // Check if the next 'count' blocks are all free
1984            for offset in 0..count {
1985                let block_num = start_block + offset;
1986                let bit = block_num - group_start_block;
1987                let byte_index = (bit / 8) as usize;
1988                let bit_index = bit % 8;
1989
1990                if byte_index >= bitmap_data.len() {
1991                    all_free = false;
1992                    break;
1993                }
1994
1995                // Check if bit is used (1)
1996                if (bitmap_data[byte_index] & (1 << bit_index)) != 0 {
1997                    all_free = false;
1998                    break;
1999                }
2000            }
2001
2002            if all_free {
2003                // Mark all blocks as used and collect them
2004                let mut allocated_blocks = Vec::new();
2005                for offset in 0..count {
2006                    let block_num = start_block + offset;
2007                    let bit = block_num - group_start_block;
2008                    let byte_index = (bit / 8) as usize;
2009                    let bit_index = bit % 8;
2010
2011                    bitmap_data[byte_index] |= 1 << bit_index;
2012                    allocated_blocks.push(block_num as u64);
2013                }
2014
2015                #[cfg(test)]
2016                crate::early_println!(
2017                    "[ext2] allocate_blocks_contiguous_in_group: Found {} contiguous blocks starting at {}, batching updates",
2018                    count,
2019                    start_block
2020                );
2021
2022                // OPTIMIZATION: Batch bitmap + BGD updates
2023
2024                // Enqueue bitmap write
2025                let bitmap_write = Box::new(crate::device::block::request::BlockIORequest {
2026                    request_type: crate::device::block::request::BlockIORequestType::Write,
2027                    sector: bitmap_sector as usize,
2028                    sector_count: (self.block_size / 512) as usize,
2029                    head: 0,
2030                    cylinder: 0,
2031                    buffer: bitmap_data,
2032                });
2033                self.block_device.enqueue_request(bitmap_write);
2034
2035                // Prepare BGD update (reduce free_blocks_count by count)
2036                let mut updated_bgd_data = bgd_data.clone();
2037                let mut bgd_update =
2038                    Ext2BlockGroupDescriptor::from_bytes(&updated_bgd_data[bgd_offset..])?;
2039                let current_free_blocks = u16::from_le(bgd_update.free_blocks_count);
2040                bgd_update.free_blocks_count =
2041                    (current_free_blocks.saturating_sub(count as u16)).to_le();
2042                bgd_update.write_to_bytes(&mut updated_bgd_data[bgd_offset..]);
2043
2044                // Enqueue BGD write
2045                let bgd_write = Box::new(crate::device::block::request::BlockIORequest {
2046                    request_type: crate::device::block::request::BlockIORequestType::Write,
2047                    sector: bgd_sector as usize,
2048                    sector_count: (self.block_size / 512) as usize,
2049                    head: 0,
2050                    cylinder: 0,
2051                    buffer: updated_bgd_data,
2052                });
2053                self.block_device.enqueue_request(bgd_write);
2054
2055                // Process both writes in one batch
2056                #[cfg(test)]
2057                crate::early_println!(
2058                    "[ext2] allocate_blocks_contiguous_in_group: Processing 2 writes in batch for {} blocks",
2059                    count
2060                );
2061                let write_results = self.block_device.process_requests();
2062
2063                if write_results.len() != 2 || write_results.iter().any(|r| r.result.is_err()) {
2064                    return Err(FileSystemError::new(
2065                        FileSystemErrorKind::IoError,
2066                        "Failed to write bitmap or BGD",
2067                    ));
2068                }
2069
2070                // Update superblock (batch this in the future)
2071                self.update_superblock_counts(-(count as i32), 0, 0)?;
2072
2073                #[cfg(test)]
2074                crate::early_println!(
2075                    "[ext2] allocate_blocks_contiguous_in_group: Successfully allocated {} blocks starting at {} (MAJOR OPTIMIZATION: reduced from {} to ~3 I/O ops)",
2076                    count,
2077                    start_block,
2078                    count * 5
2079                );
2080                return Ok(allocated_blocks);
2081            }
2082        }
2083
2084        Err(FileSystemError::new(
2085            FileSystemErrorKind::NoSpace,
2086            &format!(
2087                "No {} contiguous free blocks found in group {}",
2088                count, group
2089            ),
2090        ))
2091    }
2092
    /// Allocate `count` data blocks, preferring contiguous runs.
    ///
    /// Three strategies are tried in order:
    /// 1. one contiguous run of `count` blocks in any single group;
    /// 2. (only for `count >= 6`) several smaller contiguous chunks, with any
    ///    shortfall filled by individual allocations;
    /// 3. `count` fully individual allocations as a last resort.
    ///
    /// On a hard (non-`NoSpace`) error, blocks already claimed by the failing
    /// strategy are freed again before the error is returned. The returned
    /// block numbers are absolute and not necessarily contiguous or sorted.
    fn allocate_blocks_contiguous(&self, count: u32) -> Result<Vec<u64>, FileSystemError> {
        profile_scope!("ext2::allocate_blocks_contiguous");

        if count == 0 {
            return Ok(Vec::new());
        }

        // If only one block is needed, use regular allocation
        if count == 1 {
            let block = self.allocate_block()?;
            return Ok(vec![block]);
        }

        // Calculate number of groups (rounded up to cover a partial last group)
        let group_count = (self.superblock.blocks_count + self.superblock.blocks_per_group - 1)
            / self.superblock.blocks_per_group;

        // Strategy 1: Try to allocate full contiguous blocks in each group
        for group in 0..group_count {
            match self.allocate_blocks_contiguous_in_group(group, count) {
                Ok(blocks) => {
                    #[cfg(test)]
                    crate::early_println!(
                        "ext2: Allocated {} contiguous blocks starting at {} in group {}",
                        count,
                        blocks[0],
                        group
                    );
                    return Ok(blocks);
                }
                Err(FileSystemError {
                    kind: crate::fs::FileSystemErrorKind::NoSpace,
                    ..
                }) => {
                    // Continue to next group
                    continue;
                }
                Err(e) => {
                    // Other errors should be propagated
                    return Err(e);
                }
            }
        }

        // Strategy 2: Try partial contiguous allocation (split into chunks)
        if count >= 6 {
            // Restored to original threshold for stability
            crate::early_println!(
                "ext2: Full contiguous allocation failed, trying partial contiguous allocation"
            );
            let mut allocated_blocks = Vec::new();
            let mut remaining = count;

            // Try to allocate in decreasing chunk sizes
            let chunk_sizes = [count / 2, count / 3, count / 4, 8, 4]; // Reasonable chunk sizes

            for &chunk_size in &chunk_sizes {
                // Skip degenerate sizes and chunks no smaller than what is
                // left — a contiguous attempt at the full remaining size was
                // effectively covered by strategy 1 / earlier chunk sizes.
                if chunk_size == 0 || chunk_size >= remaining {
                    continue;
                }

                // Keep carving off chunks of this size while any group can
                // still supply one.
                while remaining >= chunk_size {
                    let mut allocated_chunk = false;

                    // Try each group for this chunk size
                    for group in 0..group_count {
                        match self.allocate_blocks_contiguous_in_group(group, chunk_size) {
                            Ok(mut chunk_blocks) => {
                                #[cfg(test)]
                                crate::early_println!(
                                    "ext2: Allocated {} contiguous blocks (chunk) starting at {} in group {}",
                                    chunk_size,
                                    chunk_blocks[0],
                                    group
                                );
                                allocated_blocks.append(&mut chunk_blocks);
                                remaining -= chunk_size;
                                allocated_chunk = true;
                                break;
                            }
                            Err(FileSystemError {
                                kind: crate::fs::FileSystemErrorKind::NoSpace,
                                ..
                            }) => {
                                continue; // Try next group
                            }
                            Err(e) => {
                                // Cleanup and return error
                                for &block in &allocated_blocks {
                                    if let Err(free_err) = self.free_block(block as u32) {
                                        // Best-effort rollback: log and keep freeing
                                        crate::early_println!(
                                            "ext2: Failed to free block {} during cleanup: {:?}",
                                            block,
                                            free_err
                                        );
                                    }
                                }
                                return Err(e);
                            }
                        }
                    }

                    if !allocated_chunk {
                        break; // No group can satisfy this chunk size, try smaller
                    }
                }

                if remaining == 0 {
                    crate::early_println!(
                        "ext2: Successfully allocated {} blocks using partial contiguous strategy",
                        count
                    );
                    return Ok(allocated_blocks);
                }
            }

            // If we have some blocks allocated but not all, continue with individual allocation for remainder
            if !allocated_blocks.is_empty() && remaining > 0 {
                crate::early_println!(
                    "ext2: Partial contiguous allocation successful ({} blocks), using individual allocation for remaining {} blocks",
                    allocated_blocks.len(),
                    remaining
                );

                for _ in 0..remaining {
                    match self.allocate_block() {
                        Ok(block) => allocated_blocks.push(block),
                        Err(e) => {
                            // Cleanup all allocated blocks
                            for &allocated_block in &allocated_blocks {
                                if let Err(free_err) = self.free_block(allocated_block as u32) {
                                    crate::early_println!(
                                        "ext2: Failed to free block {} during cleanup: {:?}",
                                        allocated_block,
                                        free_err
                                    );
                                }
                            }
                            return Err(e);
                        }
                    }
                }

                crate::early_println!(
                    "ext2: Hybrid allocation completed: {} blocks total",
                    allocated_blocks.len()
                );
                return Ok(allocated_blocks);
            }

            // Cleanup partial allocations if we couldn't complete
            // (reached only when allocated_blocks is empty or remaining is 0,
            // so in practice this loop usually has nothing to free)
            for &block in &allocated_blocks {
                if let Err(free_err) = self.free_block(block as u32) {
                    crate::early_println!(
                        "ext2: Failed to free block {} during cleanup: {:?}",
                        block,
                        free_err
                    );
                }
            }
        }

        // Strategy 3: Fall back to individual block allocation as last resort
        crate::early_println!(
            "ext2: All contiguous strategies failed for {} blocks, falling back to individual allocation",
            count
        );
        let mut blocks = Vec::new();
        for _ in 0..count {
            match self.allocate_block() {
                Ok(block) => blocks.push(block),
                Err(e) => {
                    // If individual allocation fails, we need to free the blocks we already allocated
                    for &allocated_block in &blocks {
                        if let Err(free_err) = self.free_block(allocated_block as u32) {
                            crate::early_println!(
                                "ext2: Failed to free block {} during cleanup: {:?}",
                                allocated_block,
                                free_err
                            );
                        }
                    }
                    return Err(e);
                }
            }
        }

        #[cfg(test)]
        crate::early_println!("ext2: Allocated {} blocks individually as fallback", count);
        Ok(blocks)
    }
2284
2285    /// Allocate a new inode using proper bitmap management
2286    fn allocate_inode(&self) -> Result<u32, FileSystemError> {
2287        profile_scope!("ext2::allocate_inode");
2288        // For now, allocate from Group 0
2289        // Based on dumpe2fs: Group 0 free inodes: 30-2048
2290        let group = 0;
2291
2292        // Read block group descriptor for group 0
2293        let bgd_block = if self.block_size == 1024 { 2 } else { 1 }; // BGD in block 1 or 2
2294        let bgd_block_sector = self.block_to_sector(bgd_block);
2295
2296        let request = Box::new(crate::device::block::request::BlockIORequest {
2297            request_type: crate::device::block::request::BlockIORequestType::Read,
2298            sector: bgd_block_sector,
2299            sector_count: (self.block_size / 512) as usize,
2300            head: 0,
2301            cylinder: 0,
2302            buffer: vec![0u8; self.block_size as usize],
2303        });
2304
2305        self.block_device.enqueue_request(request);
2306        let results = self.block_device.process_requests();
2307
2308        let bgd_data = if let Some(result) = results.first() {
2309            match &result.result {
2310                Ok(_) => result.request.buffer.clone(),
2311                Err(_) => {
2312                    return Err(FileSystemError::new(
2313                        FileSystemErrorKind::IoError,
2314                        "Failed to read block group descriptor",
2315                    ));
2316                }
2317            }
2318        } else {
2319            return Err(FileSystemError::new(
2320                FileSystemErrorKind::IoError,
2321                "No result from block device read",
2322            ));
2323        };
2324
2325        let bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data)?;
2326
2327        // Check if there are free inodes
2328        if bgd.free_inodes_count == 0 {
2329            return Err(FileSystemError::new(
2330                FileSystemErrorKind::NoSpace,
2331                "No free inodes in group 0",
2332            ));
2333        }
2334
2335        // Read inode bitmap
2336        let bitmap_sector = self.block_to_sector(bgd.inode_bitmap as u64);
2337        let request = Box::new(crate::device::block::request::BlockIORequest {
2338            request_type: crate::device::block::request::BlockIORequestType::Read,
2339            sector: bitmap_sector as usize,
2340            sector_count: (self.block_size / 512) as usize,
2341            head: 0,
2342            cylinder: 0,
2343            buffer: vec![0u8; self.block_size as usize],
2344        });
2345
2346        self.block_device.enqueue_request(request);
2347        let results = self.block_device.process_requests();
2348
2349        let mut bitmap_data = if let Some(result) = results.first() {
2350            match &result.result {
2351                Ok(_) => result.request.buffer.clone(),
2352                Err(_) => {
2353                    return Err(FileSystemError::new(
2354                        FileSystemErrorKind::IoError,
2355                        "Failed to read inode bitmap",
2356                    ));
2357                }
2358            }
2359        } else {
2360            return Err(FileSystemError::new(
2361                FileSystemErrorKind::IoError,
2362                "No result from block device read",
2363            ));
2364        };
2365
2366        // Find first free inode in bitmap
2367        // Start from inode 30 (which corresponds to bit 29 since inodes are 1-based but bitmap is 0-based)
2368        let start_inode = 30;
2369        let start_bit = start_inode - 1; // Convert to 0-based bit index
2370
2371        for bit in start_bit..self.superblock.inodes_per_group {
2372            let byte_index = (bit / 8) as usize;
2373            let bit_index = bit % 8;
2374
2375            if byte_index >= bitmap_data.len() {
2376                break;
2377            }
2378
2379            // Check if bit is free (0)
2380            if (bitmap_data[byte_index] & (1 << bit_index)) == 0 {
2381                // Mark inode as used (set bit to 1)
2382                bitmap_data[byte_index] |= 1 << bit_index;
2383
2384                // Write back bitmap
2385                let request = Box::new(crate::device::block::request::BlockIORequest {
2386                    request_type: crate::device::block::request::BlockIORequestType::Write,
2387                    sector: bitmap_sector as usize,
2388                    sector_count: (self.block_size / 512) as usize,
2389                    head: 0,
2390                    cylinder: 0,
2391                    buffer: bitmap_data,
2392                });
2393
2394                self.block_device.enqueue_request(request);
2395                let results = self.block_device.process_requests();
2396
2397                if let Some(result) = results.first() {
2398                    match &result.result {
2399                        Ok(_) => {
2400                            // Update group descriptor to reflect one less free inode
2401                            let mut bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data)?;
2402                            let current_free_inodes = u16::from_le(bgd.free_inodes_count);
2403                            bgd.free_inodes_count = (current_free_inodes.saturating_sub(1)).to_le();
2404                            self.update_group_descriptor(group, &bgd)?;
2405
2406                            // Update superblock free inodes count
2407                            self.update_superblock_counts(0, -1, 0)?;
2408                        }
2409                        Err(_) => {
2410                            return Err(FileSystemError::new(
2411                                FileSystemErrorKind::IoError,
2412                                "Failed to write inode bitmap",
2413                            ));
2414                        }
2415                    }
2416                }
2417
2418                let allocated_inode = bit + 1; // Convert back to 1-based inode number
2419
2420                // Debug: Allocated inode (disabled to reduce log noise)
2421                // crate::early_println!("EXT2: Allocated inode {} (bit {})", allocated_inode, bit);
2422
2423                return Ok(allocated_inode);
2424            }
2425        }
2426
2427        Err(FileSystemError::new(
2428            FileSystemErrorKind::NoSpace,
2429            "No free inodes found",
2430        ))
2431    }
2432
2433    /// Check if a file/directory already exists in the parent directory
2434    fn check_entry_exists(
2435        &self,
2436        parent_inode: u32,
2437        name: &String,
2438    ) -> Result<bool, FileSystemError> {
2439        // Read the parent directory inode
2440        let parent_dir_inode = self.read_inode(parent_inode)?;
2441
2442        if !parent_dir_inode.is_dir() {
2443            return Err(FileSystemError::new(
2444                FileSystemErrorKind::InvalidData,
2445                "Parent is not a directory",
2446            ));
2447        }
2448
2449        // Use the existing read_directory_entries method for robust parsing
2450        let entries = self.read_directory_entries(&parent_dir_inode)?;
2451
2452        // Check each entry for a name match
2453        for entry in entries {
2454            let entry_name = &entry.name;
2455
2456            if entry_name == name {
2457                return Ok(true); // Entry already exists
2458            }
2459        }
2460
2461        Ok(false) // Entry does not exist
2462    }
2463
2464    /// Add a directory entry to a parent directory
2465    fn add_directory_entry(
2466        &self,
2467        parent_inode: u32,
2468        name: &String,
2469        child_inode: u32,
2470        file_type: FileType,
2471    ) -> Result<(), FileSystemError> {
2472        profile_scope!("ext2::add_directory_entry");
2473
2474        // Read the parent directory inode
2475        let parent_dir_inode = self.read_inode(parent_inode)?;
2476
2477        if !parent_dir_inode.is_dir() {
2478            return Err(FileSystemError::new(
2479                FileSystemErrorKind::InvalidData,
2480                "Parent is not a directory",
2481            ));
2482        }
2483
2484        // Calculate the length of the new directory entry
2485        // Directory entry format: inode(4) + rec_len(2) + name_len(1) + file_type(1) + name + padding to 4-byte boundary
2486        let entry_name_len = name.len() as u8;
2487        let entry_total_len = ((8 + entry_name_len as usize + 3) / 4) * 4; // Round up to 4-byte boundary
2488
2489        // Convert FileType to ext2 file type
2490        let ext2_file_type = match file_type {
2491            FileType::RegularFile => 1,
2492            FileType::Directory => 2,
2493            FileType::CharDevice(_) => 3,
2494            FileType::BlockDevice(_) => 4,
2495            FileType::Pipe => 5,
2496            FileType::Socket(_) => 6,
2497            FileType::SymbolicLink(_) => 7,
2498            FileType::Unknown => 0,
2499        };
2500
2501        // Find a suitable block in the directory with enough space
2502        let blocks_in_dir = (parent_dir_inode.get_size() as u64 + self.block_size as u64 - 1)
2503            / self.block_size as u64;
2504
2505        for block_idx in 0..blocks_in_dir.max(1) {
2506            let block_num = self.get_inode_block(&parent_dir_inode, block_idx)?;
2507            if block_num == 0 {
2508                continue; // Sparse block
2509            }
2510
2511            // Read the directory block using cached method
2512            let mut block_data = self.read_block_cached(block_num)?;
2513
2514            // Parse directory entries to find available space
2515            let mut offset = 0;
2516            let mut last_entry_offset = 0;
2517            let mut last_entry_rec_len = 0;
2518
2519            while offset < self.block_size as usize {
2520                if offset + 8 > block_data.len() {
2521                    break;
2522                }
2523
2524                let entry = Ext2DirectoryEntryRaw::from_bytes(&block_data[offset..])?;
2525                let rec_len = entry.get_rec_len();
2526
2527                if rec_len == 0 {
2528                    break; // Invalid entry
2529                }
2530
2531                last_entry_offset = offset;
2532                last_entry_rec_len = rec_len as usize;
2533
2534                offset += rec_len as usize;
2535            }
2536
2537            // Calculate actual space used by the last entry
2538            if last_entry_offset > 0 {
2539                let last_entry =
2540                    Ext2DirectoryEntryRaw::from_bytes(&block_data[last_entry_offset..])?;
2541                let actual_last_entry_len = ((8 + last_entry.get_name_len() as usize + 3) / 4) * 4;
2542                let available_space = last_entry_rec_len - actual_last_entry_len;
2543
2544                if available_space >= entry_total_len {
2545                    // We have space! Adjust the last entry's rec_len and add our entry
2546
2547                    // Update last entry's rec_len to its actual size
2548                    let actual_rec_len_bytes = (actual_last_entry_len as u16).to_le_bytes();
2549                    block_data[last_entry_offset + 4] = actual_rec_len_bytes[0];
2550                    block_data[last_entry_offset + 5] = actual_rec_len_bytes[1];
2551
2552                    // Add our new entry
2553                    let new_entry_offset = last_entry_offset + actual_last_entry_len;
2554                    let remaining_space = last_entry_rec_len - actual_last_entry_len;
2555
2556                    // Write new entry header
2557                    let child_inode_bytes = child_inode.to_le_bytes();
2558                    let rec_len_bytes = (remaining_space as u16).to_le_bytes();
2559
2560                    block_data[new_entry_offset..new_entry_offset + 4]
2561                        .copy_from_slice(&child_inode_bytes);
2562                    block_data[new_entry_offset + 4..new_entry_offset + 6]
2563                        .copy_from_slice(&rec_len_bytes);
2564                    block_data[new_entry_offset + 6] = entry_name_len;
2565                    block_data[new_entry_offset + 7] = ext2_file_type;
2566
2567                    // Write name
2568                    block_data
2569                        [new_entry_offset + 8..new_entry_offset + 8 + entry_name_len as usize]
2570                        .copy_from_slice(name.as_bytes());
2571
2572                    // Write the updated block back to disk using cached method
2573                    self.write_block_cached(block_num, &block_data)?;
2574                    return Ok(());
2575                }
2576            }
2577        }
2578
2579        // If we get here, we couldn't find space in existing blocks
2580        // In a full implementation, we would allocate a new block for the directory
2581        Err(FileSystemError::new(
2582            FileSystemErrorKind::NoSpace,
2583            "No space available in directory for new entry",
2584        ))
2585    }
2586
2587    /// Remove a directory entry from a parent directory
2588    fn remove_directory_entry(
2589        &self,
2590        parent_inode: u32,
2591        name: &String,
2592    ) -> Result<(), FileSystemError> {
2593        // Read the parent directory inode
2594        let parent_dir_inode = self.read_inode(parent_inode)?;
2595
2596        if !parent_dir_inode.is_dir() {
2597            return Err(FileSystemError::new(
2598                FileSystemErrorKind::InvalidData,
2599                "Parent is not a directory",
2600            ));
2601        }
2602
2603        // Search through all directory blocks to find the entry to remove
2604        let blocks_in_dir = (parent_dir_inode.get_size() as u64 + self.block_size as u64 - 1)
2605            / self.block_size as u64;
2606
2607        for block_idx in 0..blocks_in_dir {
2608            let block_num = self.get_inode_block(&parent_dir_inode, block_idx)?;
2609            if block_num == 0 {
2610                continue; // Sparse block
2611            }
2612
2613            // Read the directory block using cached method
2614            let mut block_data = self.read_block_cached(block_num)?;
2615
2616            // Parse directory entries to find the one to remove
2617            let mut offset = 0;
2618            let mut prev_entry_offset = None;
2619
2620            while offset < self.block_size as usize {
2621                if offset + 8 > block_data.len() {
2622                    break;
2623                }
2624
2625                let entry = match Ext2DirectoryEntryRaw::from_bytes(&block_data[offset..]) {
2626                    Ok(entry) => entry,
2627                    Err(_) => break,
2628                };
2629
2630                let rec_len = entry.get_rec_len();
2631                if rec_len == 0 {
2632                    break; // Invalid entry
2633                }
2634
2635                let name_len = entry.get_name_len() as usize;
2636                if offset + 8 + name_len <= block_data.len() {
2637                    let entry_name_bytes = &block_data[offset + 8..offset + 8 + name_len];
2638                    if let Ok(entry_name) = core::str::from_utf8(entry_name_bytes) {
2639                        if entry_name == *name {
2640                            // Found the entry to remove!
2641                            if let Some(prev_offset) = prev_entry_offset {
2642                                // Extend the previous entry's rec_len to cover this entry
2643                                let prev_entry =
2644                                    Ext2DirectoryEntryRaw::from_bytes(&block_data[prev_offset..])?;
2645                                let new_rec_len = prev_entry.get_rec_len() + rec_len;
2646                                let new_rec_len_bytes = new_rec_len.to_le_bytes();
2647
2648                                block_data[prev_offset + 4] = new_rec_len_bytes[0];
2649                                block_data[prev_offset + 5] = new_rec_len_bytes[1];
2650                            } else {
2651                                // This is the first entry in the block, mark it as free by setting inode to 0
2652                                block_data[offset..offset + 4].fill(0);
2653                            }
2654
2655                            // Write the updated block back to disk using cached method
2656                            self.write_block_cached(block_num, &block_data)?;
2657                            return Ok(());
2658                        }
2659                    }
2660                }
2661
2662                prev_entry_offset = Some(offset);
2663                offset += rec_len as usize;
2664            }
2665        }
2666
2667        // Entry not found
2668        Err(FileSystemError::new(
2669            FileSystemErrorKind::NotFound,
2670            "Directory entry not found",
2671        ))
2672    }
2673
2674    /// Free an inode and update bitmaps and metadata
2675    fn free_inode(&self, inode_number: u32) -> Result<(), FileSystemError> {
2676        // Read the inode first to get its data blocks and determine if it's a directory
2677        let inode = self.read_inode(inode_number)?;
2678        let is_directory = inode.is_dir();
2679        let blocks_to_free = self.get_inode_data_blocks(&inode)?;
2680
2681        // Free all data blocks used by this inode
2682        for block_num in blocks_to_free {
2683            // Debug: Freeing data block (disabled to reduce log noise)
2684            // crate::early_println!("EXT2: Freeing data block {}", block_num);
2685            self.free_block(block_num)?;
2686        }
2687
2688        // Calculate which block group contains this inode
2689        let group = (inode_number - 1) / self.superblock.get_inodes_per_group();
2690        let local_inode = (inode_number - 1) % self.superblock.get_inodes_per_group();
2691
2692        // Read block group descriptor to find inode bitmap location
2693        let bgd_block = (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32)
2694            / self.block_size
2695            + if self.block_size == 1024 { 2 } else { 1 };
2696        let bgd_block_sector = self.block_to_sector(bgd_block as u64);
2697
2698        let request = Box::new(crate::device::block::request::BlockIORequest {
2699            request_type: crate::device::block::request::BlockIORequestType::Read,
2700            sector: bgd_block_sector as usize,
2701            sector_count: (self.block_size / 512) as usize,
2702            head: 0,
2703            cylinder: 0,
2704            buffer: vec![0u8; self.block_size as usize],
2705        });
2706
2707        self.block_device.enqueue_request(request);
2708        let results = self.block_device.process_requests();
2709
2710        let mut bgd_data = if let Some(result) = results.first() {
2711            match &result.result {
2712                Ok(_) => result.request.buffer.clone(),
2713                Err(_) => {
2714                    return Err(FileSystemError::new(
2715                        FileSystemErrorKind::IoError,
2716                        "Failed to read block group descriptor",
2717                    ));
2718                }
2719            }
2720        } else {
2721            return Err(FileSystemError::new(
2722                FileSystemErrorKind::IoError,
2723                "No result from block device read",
2724            ));
2725        };
2726
2727        let bgd_offset =
2728            (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32) % self.block_size;
2729        let mut bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data[bgd_offset as usize..])?;
2730
2731        // Read the inode bitmap
2732        let inode_bitmap_block = bgd.get_inode_bitmap();
2733        let bitmap_sector = self.block_to_sector(inode_bitmap_block as u64);
2734
2735        let request = Box::new(crate::device::block::request::BlockIORequest {
2736            request_type: crate::device::block::request::BlockIORequestType::Read,
2737            sector: bitmap_sector as usize,
2738            sector_count: (self.block_size / 512) as usize,
2739            head: 0,
2740            cylinder: 0,
2741            buffer: vec![0u8; self.block_size as usize],
2742        });
2743
2744        self.block_device.enqueue_request(request);
2745        let results = self.block_device.process_requests();
2746
2747        let mut bitmap_data = if let Some(result) = results.first() {
2748            match &result.result {
2749                Ok(_) => result.request.buffer.clone(),
2750                Err(_) => {
2751                    return Err(FileSystemError::new(
2752                        FileSystemErrorKind::IoError,
2753                        "Failed to read inode bitmap",
2754                    ));
2755                }
2756            }
2757        } else {
2758            return Err(FileSystemError::new(
2759                FileSystemErrorKind::IoError,
2760                "No result from block device read",
2761            ));
2762        };
2763
2764        // Clear the bit for this inode (mark as free)
2765        let byte_index = (local_inode / 8) as usize;
2766        let bit_index = (local_inode % 8) as u8;
2767
2768        if byte_index >= bitmap_data.len() {
2769            return Err(FileSystemError::new(
2770                FileSystemErrorKind::InvalidData,
2771                "Inode bitmap index out of bounds",
2772            ));
2773        }
2774
2775        // Clear the bit (0 = free, 1 = used in ext2)
2776        bitmap_data[byte_index] &= !(1 << bit_index);
2777
2778        // Write the updated bitmap back to disk
2779        let write_request = Box::new(crate::device::block::request::BlockIORequest {
2780            request_type: crate::device::block::request::BlockIORequestType::Write,
2781            sector: bitmap_sector as usize,
2782            sector_count: (self.block_size / 512) as usize,
2783            head: 0,
2784            cylinder: 0,
2785            buffer: bitmap_data,
2786        });
2787
2788        self.block_device.enqueue_request(write_request);
2789        let write_results = self.block_device.process_requests();
2790
2791        if let Some(write_result) = write_results.first() {
2792            match &write_result.result {
2793                Ok(_) => {}
2794                Err(_) => {
2795                    return Err(FileSystemError::new(
2796                        FileSystemErrorKind::IoError,
2797                        "Failed to write inode to disk",
2798                    ));
2799                }
2800            }
2801        } else {
2802            return Err(FileSystemError::new(
2803                FileSystemErrorKind::IoError,
2804                "No result from inode write",
2805            ));
2806        }
2807
2808        // Update block group descriptor statistics
2809        bgd.set_free_inodes_count(bgd.get_free_inodes_count() + 1);
2810        if is_directory {
2811            bgd.set_used_dirs_count(bgd.get_used_dirs_count().saturating_sub(1));
2812        }
2813
2814        // Write updated block group descriptor
2815        bgd.write_to_bytes(&mut bgd_data[bgd_offset as usize..]);
2816        let write_bgd_request = Box::new(crate::device::block::request::BlockIORequest {
2817            request_type: crate::device::block::request::BlockIORequestType::Write,
2818            sector: bgd_block_sector as usize,
2819            sector_count: (self.block_size / 512) as usize,
2820            head: 0,
2821            cylinder: 0,
2822            buffer: bgd_data,
2823        });
2824
2825        self.block_device.enqueue_request(write_bgd_request);
2826        let bgd_write_results = self.block_device.process_requests();
2827
2828        if let Some(bgd_write_result) = bgd_write_results.first() {
2829            match &bgd_write_result.result {
2830                Ok(_) => {}
2831                Err(_) => {
2832                    return Err(FileSystemError::new(
2833                        FileSystemErrorKind::IoError,
2834                        "Failed to write updated block group descriptor",
2835                    ));
2836                }
2837            }
2838        } else {
2839            return Err(FileSystemError::new(
2840                FileSystemErrorKind::IoError,
2841                "No response from BGD write",
2842            ));
2843        }
2844
2845        self.clear_inode_on_disk(inode_number)?;
2846
2847        // Update superblock statistics
2848        self.update_superblock_free_counts(0, 1)?;
2849
2850        // Remove from inode cache if present
2851        {
2852            let mut cache = self.inode_cache.lock();
2853            cache.remove(inode_number);
2854        }
2855
2856        Ok(())
2857    }
2858
2859    fn clear_inode_on_disk(&self, inode_number: u32) -> Result<(), FileSystemError> {
2860        let inode = Ext2Inode::empty();
2861        self.write_inode(inode_number, &inode)?;
2862
2863        Ok(())
2864    }
2865
2866    /// Write the entire content of a file given its inode number
2867    pub fn write_file_content(
2868        &self,
2869        inode_num: u32,
2870        content: &[u8],
2871    ) -> Result<(), FileSystemError> {
2872        profile_scope!("ext2::write_file_content");
2873
2874        #[cfg(test)]
2875        crate::early_println!(
2876            "[ext2] write_file_content: inode={}, content_len={}",
2877            inode_num,
2878            content.len()
2879        );
2880
2881        // Read the current inode
2882        let mut inode = self.read_inode(inode_num)?;
2883
2884        // Calculate the number of blocks needed
2885        let blocks_needed = if content.is_empty() {
2886            0
2887        } else {
2888            ((content.len() as u64 + self.block_size as u64 - 1) / self.block_size as u64) as u32
2889        };
2890
2891        #[cfg(test)]
2892        crate::early_println!("[ext2] write_file_content: blocks_needed={}", blocks_needed);
2893
2894        // Allocate blocks as needed
2895        let mut block_list = Vec::new();
2896        let mut new_block_assignments = Vec::new(); // (logical_block_index, block_number)
2897        if blocks_needed > 0 {
2898            // Use batched block reading to get existing blocks
2899            let existing_blocks = self.get_inode_blocks(&inode, 0, blocks_needed as u64)?;
2900
2901            // Find contiguous ranges of blocks that need allocation
2902            let mut allocation_ranges = Vec::new(); // (start_idx, count)
2903            let mut current_start = None;
2904            let mut current_count = 0;
2905
2906            for (block_idx, &existing_block) in existing_blocks.iter().enumerate() {
2907                if existing_block == 0 {
2908                    // Need to allocate a new block
2909                    if current_start.is_none() {
2910                        current_start = Some(block_idx);
2911                        current_count = 1;
2912                    } else {
2913                        current_count += 1;
2914                    }
2915                } else {
2916                    // Existing block, finalize any current allocation range
2917                    if let Some(start) = current_start {
2918                        allocation_ranges.push((start, current_count));
2919                        current_start = None;
2920                        current_count = 0;
2921                    }
2922                    #[cfg(test)]
2923                    crate::early_println!(
2924                        "[ext2] write_file_content: reusing existing block {} for logical block {}",
2925                        existing_block,
2926                        block_idx
2927                    );
2928                    block_list.push(existing_block);
2929                }
2930            }
2931
2932            // Finalize any remaining allocation range
2933            if let Some(start) = current_start {
2934                allocation_ranges.push((start, current_count));
2935            }
2936
2937            // Perform allocations using multi-block allocation where beneficial
2938            for (start_idx, count) in allocation_ranges {
2939                if count >= 3 {
2940                    // Use multi-block allocation for 3+ blocks for better efficiency
2941                    #[cfg(test)]
2942                    crate::early_println!(
2943                        "[ext2] write_file_content: using multi-block allocation for {} blocks starting at logical block {}",
2944                        count,
2945                        start_idx
2946                    );
2947
2948                    let allocated_blocks = self.allocate_blocks_contiguous(count as u32)?;
2949
2950                    for (i, &block_num) in allocated_blocks.iter().enumerate() {
2951                        let logical_idx = start_idx + i;
2952                        new_block_assignments.push((logical_idx as u64, block_num as u32));
2953
2954                        // Insert at the correct position in block_list
2955                        while block_list.len() <= logical_idx {
2956                            block_list.push(0);
2957                        }
2958                        block_list[logical_idx] = block_num;
2959
2960                        #[cfg(test)]
2961                        crate::early_println!(
2962                            "[ext2] write_file_content: multi-allocated block {} for logical block {}",
2963                            block_num,
2964                            logical_idx
2965                        );
2966                    }
2967                } else {
2968                    // Use individual allocation for small ranges
2969                    for i in 0..count {
2970                        let logical_idx = start_idx + i;
2971                        let new_block = self.allocate_block()?;
2972
2973                        #[cfg(test)]
2974                        crate::early_println!(
2975                            "[ext2] write_file_content: individually allocated block {} for logical block {}",
2976                            new_block,
2977                            logical_idx
2978                        );
2979
2980                        new_block_assignments.push((logical_idx as u64, new_block as u32));
2981
2982                        // Insert at the correct position in block_list
2983                        while block_list.len() <= logical_idx {
2984                            block_list.push(0);
2985                        }
2986                        block_list[logical_idx] = new_block;
2987                    }
2988                }
2989            }
2990        }
2991
2992        // Apply all new block assignments at once using simple batch function
2993        if !new_block_assignments.is_empty() {
2994            self.set_inode_blocks_simple_batch(&mut inode, &new_block_assignments)?;
2995        }
2996
2997        // Write content to blocks using batching
2998        let mut remaining = content.len();
2999        let mut content_offset = 0;
3000        let mut write_blocks = BTreeMap::new();
3001
3002        for &block_num in block_list.iter() {
3003            if remaining == 0 {
3004                break;
3005            }
3006
3007            let bytes_to_write = core::cmp::min(remaining, self.block_size as usize);
3008            let mut block_data = vec![0u8; self.block_size as usize];
3009
3010            // Copy content to block buffer
3011            block_data[..bytes_to_write]
3012                .copy_from_slice(&content[content_offset..content_offset + bytes_to_write]);
3013
3014            #[cfg(test)]
3015            crate::early_println!(
3016                "[ext2] write_file_content: preparing block {} ({} bytes) for batch write",
3017                block_num,
3018                bytes_to_write
3019            );
3020
3021            // Add to batch write map instead of writing immediately
3022            write_blocks.insert(block_num, block_data);
3023
3024            remaining -= bytes_to_write;
3025            content_offset += bytes_to_write;
3026        }
3027
3028        // Write all content blocks in one batch
3029        if !write_blocks.is_empty() {
3030            #[cfg(test)]
3031            crate::early_println!(
3032                "[ext2] write_file_content: batch writing {} content blocks",
3033                write_blocks.len()
3034            );
3035            self.write_blocks_cached(&write_blocks)?;
3036        }
3037
3038        // Update inode size, block count, and modification time
3039        inode.size = content.len() as u32;
3040        inode.mtime = 0; // TODO: Use proper timestamp when available
3041
3042        // Update i_blocks field (count in 512-byte sectors)
3043        inode.blocks = blocks_needed * (self.block_size / 512);
3044
3045        // Write updated inode to disk
3046        self.write_inode(inode_num, &inode)?;
3047
3048        // Update inode cache with LRU eviction
3049        {
3050            let mut cache = self.inode_cache.lock();
3051            cache.insert(inode_num, inode);
3052        }
3053
3054        Ok(())
3055    }
3056
    /// Convert ext2 inode mode to FileType
    ///
    /// Maps the file-type bits of the inode's mode field (`EXT2_S_IFMT`)
    /// to the VFS `FileType`. For symlinks the target path is included:
    /// "fast" symlinks (target <= 60 bytes) store the target inline in the
    /// inode's block-pointer area; longer ("slow") symlinks store it in data
    /// blocks and are returned here with an empty placeholder target.
    ///
    /// # Errors
    /// `InvalidData` if a fast symlink target is not valid UTF-8 or a
    /// char/block device inode carries no device information.
    pub fn file_type_from_inode(
        &self,
        inode: &Ext2Inode,
        _inode_number: u32,
    ) -> Result<FileType, FileSystemError> {
        let mode = inode.get_mode();
        let file_type_bits = mode & EXT2_S_IFMT;

        match file_type_bits {
            EXT2_S_IFREG => Ok(FileType::RegularFile),
            EXT2_S_IFDIR => Ok(FileType::Directory),
            EXT2_S_IFLNK => {
                // For symlinks, we need to read the target path
                let size = inode.get_size() as usize;

                if size <= 60 {
                    // Fast symlink: target stored in inode.block array
                    // SAFETY: `inode` is a valid, aligned reference, so
                    // viewing it as `size_of::<Ext2Inode>()` bytes starting
                    // at its address is in bounds; the slice's lifetime is
                    // confined to this scope, within the borrow of `inode`.
                    let inode_bytes = unsafe {
                        core::slice::from_raw_parts(
                            inode as *const Ext2Inode as *const u8,
                            core::mem::size_of::<Ext2Inode>(),
                        )
                    };
                    // Block array starts at offset 40 in the inode structure
                    // (15 x 4-byte pointers = 60 bytes).
                    // NOTE(review): this hard-coded offset assumes Ext2Inode
                    // mirrors the on-disk layout exactly (repr(C), no extra
                    // fields before the block array) — confirm in structures.rs.
                    let block_start_offset = 40;
                    let block_bytes = &inode_bytes[block_start_offset..block_start_offset + 60];

                    let target_bytes = &block_bytes[..size];
                    let target = String::from_utf8(target_bytes.to_vec()).map_err(|_| {
                        FileSystemError::new(
                            FileSystemErrorKind::InvalidData,
                            "Invalid UTF-8 in fast symlink target",
                        )
                    })?;
                    Ok(FileType::SymbolicLink(target))
                } else {
                    // Slow symlink: target stored in data blocks
                    // For now, return a placeholder - this will be resolved when read_link is called
                    Ok(FileType::SymbolicLink("".to_string()))
                }
            }
            EXT2_S_IFCHR => {
                // Character device: pack (major, minor) into the device_id
                // using the old-style dev_t encoding (major << 8 | minor).
                if let Some((major, minor)) = inode.get_device_info() {
                    let device_info = crate::fs::DeviceFileInfo {
                        device_id: ((major << 8) | minor) as usize,
                        device_type: crate::device::DeviceType::Char,
                    };
                    Ok(FileType::CharDevice(device_info))
                } else {
                    Err(FileSystemError::new(
                        FileSystemErrorKind::InvalidData,
                        "Invalid character device information",
                    ))
                }
            }
            EXT2_S_IFBLK => {
                // Block device: same device_id encoding as the char case.
                if let Some((major, minor)) = inode.get_device_info() {
                    let device_info = crate::fs::DeviceFileInfo {
                        device_id: ((major << 8) | minor) as usize,
                        device_type: crate::device::DeviceType::Block,
                    };
                    Ok(FileType::BlockDevice(device_info))
                } else {
                    Err(FileSystemError::new(
                        FileSystemErrorKind::InvalidData,
                        "Invalid block device information",
                    ))
                }
            }
            EXT2_S_IFIFO => Ok(FileType::Pipe),
            EXT2_S_IFSOCK => Ok(FileType::Socket(SocketFileInfo {
                socket_id: crate::fs::UNBOUND_SOCKET_ID,
            })), // Socket ID will be bound at runtime
            _ => Ok(FileType::Unknown),
        }
    }
3137
3138    /// Get all data blocks used by an inode
3139    fn get_inode_data_blocks(&self, inode: &Ext2Inode) -> Result<Vec<u32>, FileSystemError> {
3140        let mut blocks = Vec::new();
3141
3142        // Check if this is a symbolic link
3143        let mode = inode.get_mode();
3144        let is_symlink = (mode & EXT2_S_IFMT) == EXT2_S_IFLNK;
3145
3146        if is_symlink && inode.get_size() <= 60 {
3147            // Fast symlink: target is stored in inode.block array, no data blocks used
3148            return Ok(blocks);
3149        }
3150
3151        let blocks_in_file =
3152            (inode.get_size() as u64 + self.block_size as u64 - 1) / self.block_size as u64;
3153
3154        if blocks_in_file == 0 {
3155            return Ok(blocks);
3156        }
3157
3158        // Use batched block reading for better performance
3159        let block_nums = self.get_inode_blocks(inode, 0, blocks_in_file)?;
3160
3161        for &block_num in &block_nums {
3162            if block_num != 0 {
3163                blocks.push(block_num as u32);
3164            }
3165        }
3166
3167        Ok(blocks)
3168    }
3169
3170    /// Free a block and update bitmaps
3171    fn free_block(&self, block_number: u32) -> Result<(), FileSystemError> {
3172        if block_number == 0 {
3173            return Ok(()); // Block 0 is not a valid block
3174        }
3175
3176        // Calculate which block group contains this block
3177        let group = (block_number - 1) / self.superblock.get_blocks_per_group();
3178        let local_block = (block_number - 1) % self.superblock.get_blocks_per_group();
3179
3180        // Read block group descriptor
3181        let bgd_block = (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32)
3182            / self.block_size
3183            + if self.block_size == 1024 { 2 } else { 1 };
3184        let bgd_block_sector = self.block_to_sector(bgd_block as u64);
3185
3186        let request = Box::new(crate::device::block::request::BlockIORequest {
3187            request_type: crate::device::block::request::BlockIORequestType::Read,
3188            sector: bgd_block_sector as usize,
3189            sector_count: (self.block_size / 512) as usize,
3190            head: 0,
3191            cylinder: 0,
3192            buffer: vec![0u8; self.block_size as usize],
3193        });
3194
3195        self.block_device.enqueue_request(request);
3196        let results = self.block_device.process_requests();
3197
3198        let mut bgd_data = if let Some(result) = results.first() {
3199            match &result.result {
3200                Ok(_) => result.request.buffer.clone(),
3201                Err(_) => {
3202                    return Err(FileSystemError::new(
3203                        FileSystemErrorKind::IoError,
3204                        "Failed to read block group descriptor",
3205                    ));
3206                }
3207            }
3208        } else {
3209            return Err(FileSystemError::new(
3210                FileSystemErrorKind::IoError,
3211                "No result from block device read",
3212            ));
3213        };
3214
3215        let bgd_offset =
3216            (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32) % self.block_size;
3217        let mut bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data[bgd_offset as usize..])?;
3218
3219        // Read the block bitmap
3220        let block_bitmap_block = bgd.get_block_bitmap();
3221        let bitmap_sector = self.block_to_sector(block_bitmap_block as u64);
3222
3223        let request = Box::new(crate::device::block::request::BlockIORequest {
3224            request_type: crate::device::block::request::BlockIORequestType::Read,
3225            sector: bitmap_sector as usize,
3226            sector_count: (self.block_size / 512) as usize,
3227            head: 0,
3228            cylinder: 0,
3229            buffer: vec![0u8; self.block_size as usize],
3230        });
3231
3232        self.block_device.enqueue_request(request);
3233        let results = self.block_device.process_requests();
3234
3235        let mut bitmap_data = if let Some(result) = results.first() {
3236            match &result.result {
3237                Ok(_) => result.request.buffer.clone(),
3238                Err(_) => {
3239                    return Err(FileSystemError::new(
3240                        FileSystemErrorKind::IoError,
3241                        "Failed to read block bitmap",
3242                    ));
3243                }
3244            }
3245        } else {
3246            return Err(FileSystemError::new(
3247                FileSystemErrorKind::IoError,
3248                "No result from block device read",
3249            ));
3250        };
3251
3252        // Clear the bit for this block (mark as free)
3253        let byte_index = (local_block / 8) as usize;
3254        let bit_index = (local_block % 8) as u8;
3255
3256        if byte_index >= bitmap_data.len() {
3257            return Err(FileSystemError::new(
3258                FileSystemErrorKind::InvalidData,
3259                "Block bitmap index out of bounds",
3260            ));
3261        }
3262
3263        // Clear the bit (0 = free, 1 = used in ext2)
3264        bitmap_data[byte_index] &= !(1 << bit_index);
3265
3266        // Write the updated bitmap back to disk
3267        let write_request = Box::new(crate::device::block::request::BlockIORequest {
3268            request_type: crate::device::block::request::BlockIORequestType::Write,
3269            sector: bitmap_sector as usize,
3270            sector_count: (self.block_size / 512) as usize,
3271            head: 0,
3272            cylinder: 0,
3273            buffer: bitmap_data,
3274        });
3275
3276        self.block_device.enqueue_request(write_request);
3277        let write_results = self.block_device.process_requests();
3278
3279        if let Some(write_result) = write_results.first() {
3280            match &write_result.result {
3281                Ok(_) => {}
3282                Err(_) => {
3283                    return Err(FileSystemError::new(
3284                        FileSystemErrorKind::IoError,
3285                        "Failed to write updated block bitmap",
3286                    ));
3287                }
3288            }
3289        } else {
3290            return Err(FileSystemError::new(
3291                FileSystemErrorKind::IoError,
3292                "No response from block bitmap write",
3293            ));
3294        }
3295
3296        // Update block group descriptor
3297        bgd.set_free_blocks_count(bgd.get_free_blocks_count() + 1);
3298
3299        // Write updated block group descriptor
3300        bgd.write_to_bytes(&mut bgd_data[bgd_offset as usize..]);
3301        let write_bgd_request = Box::new(crate::device::block::request::BlockIORequest {
3302            request_type: crate::device::block::request::BlockIORequestType::Write,
3303            sector: bgd_block_sector as usize,
3304            sector_count: (self.block_size / 512) as usize,
3305            head: 0,
3306            cylinder: 0,
3307            buffer: bgd_data,
3308        });
3309
3310        self.block_device.enqueue_request(write_bgd_request);
3311        let bgd_write_results = self.block_device.process_requests();
3312
3313        if let Some(bgd_write_result) = bgd_write_results.first() {
3314            match &bgd_write_result.result {
3315                Ok(_) => {
3316                    // Update superblock free blocks count
3317                    self.update_superblock_counts(1, 0, 0)?;
3318                }
3319                Err(_) => {
3320                    return Err(FileSystemError::new(
3321                        FileSystemErrorKind::IoError,
3322                        "Failed to write updated block group descriptor",
3323                    ));
3324                }
3325            }
3326        } else {
3327            return Err(FileSystemError::new(
3328                FileSystemErrorKind::IoError,
3329                "No response from BGD write",
3330            ));
3331        }
3332
3333        Ok(())
3334    }
3335
    /// Set the block number for a logical block within an inode.
    ///
    /// Maps `logical_block` (a file-relative block index) to the physical
    /// `block_number`, updating the inode's direct pointer array or the
    /// single/double indirect blocks on disk as required. Missing indirect
    /// blocks are allocated and zero-filled on demand. With N = pointers
    /// per block (`block_size / 4`), the layout handled is:
    /// - 0..12: direct pointers in `inode.block[0..12]`
    /// - 12..12+N: single indirect via `inode.block[12]`
    /// - 12+N..12+N+N*N: double indirect via `inode.block[13]`
    /// - beyond that (triple indirect): returns `NotSupported`
    ///
    /// The caller is responsible for writing the modified inode back to
    /// disk; only the indirect blocks are persisted here.
    ///
    /// NOTE(review): the single-indirect path uses
    /// `read_block_cached`/`write_block_cached` while the double-indirect
    /// path issues raw device requests; if a block cache exists these two
    /// paths may disagree on freshness — verify cache coherence.
    fn set_inode_block(
        &self,
        inode: &mut Ext2Inode,
        logical_block: u64,
        block_number: u32,
    ) -> Result<(), FileSystemError> {
        profile_scope!("ext2::set_inode_block");
        let blocks_per_indirect = self.block_size / 4; // Each pointer is 4 bytes

        if logical_block < 12 {
            // Direct blocks: just update the in-memory inode pointer.
            inode.block[logical_block as usize] = block_number;
            Ok(())
        } else if logical_block < 12 + blocks_per_indirect as u64 {
            // Single indirect: pointer lives inside the block referenced
            // by inode.block[12].
            let index = logical_block - 12;

            // If no indirect block exists yet, allocate and zero-fill one.
            if inode.block[12] == 0 {
                let indirect_block = self.allocate_block()? as u32;
                inode.block[12] = indirect_block;

                // Clear the indirect block so stale data is never read as
                // block pointers.
                let clear_data = vec![0u8; self.block_size as usize];
                self.write_block_cached(indirect_block as u64, &clear_data)?;
            }

            let indirect_block = inode.block[12];

            // Read-modify-write the indirect block through the cache.
            let mut indirect_data = self.read_block_cached(indirect_block as u64)?;

            // Update the 4-byte little-endian pointer at its slot.
            let offset = index as usize * 4;
            let block_bytes = block_number.to_le_bytes();
            indirect_data[offset..offset + 4].copy_from_slice(&block_bytes);

            // Write back the indirect block
            self.write_block_cached(indirect_block as u64, &indirect_data)?;
            Ok(())
        } else if logical_block
            < 12 + blocks_per_indirect as u64
                + blocks_per_indirect as u64 * blocks_per_indirect as u64
        {
            // Double indirect: inode.block[13] -> first-level indirect
            // block -> data block pointer.
            let offset_in_double = logical_block - 12 - blocks_per_indirect as u64;
            let first_indirect_index = offset_in_double / blocks_per_indirect as u64;
            let second_indirect_index = offset_in_double % blocks_per_indirect as u64;

            // If no double indirect block exists, allocate one
            if inode.block[13] == 0 {
                let double_indirect_block = self.allocate_block()? as u32;
                inode.block[13] = double_indirect_block;

                // Zero-fill the new double indirect block on disk.
                // NOTE(review): this write's result is discarded
                // (`_results` below) — a failed clear would go unnoticed.
                let clear_data = vec![0u8; self.block_size as usize];
                let clear_request = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Write,
                    sector: self.block_to_sector(double_indirect_block as u64),
                    sector_count: (self.block_size / 512) as usize,
                    head: 0,
                    cylinder: 0,
                    buffer: clear_data,
                });

                self.block_device.enqueue_request(clear_request);
                let _results = self.block_device.process_requests();
            }

            let double_indirect_block = inode.block[13];
            let double_indirect_sector = self.block_to_sector(double_indirect_block as u64);

            // Read the double indirect block (raw request, bypasses cache).
            let request = Box::new(crate::device::block::request::BlockIORequest {
                request_type: crate::device::block::request::BlockIORequestType::Read,
                sector: double_indirect_sector as usize,
                sector_count: (self.block_size / 512) as usize,
                head: 0,
                cylinder: 0,
                buffer: vec![0u8; self.block_size as usize],
            });

            self.block_device.enqueue_request(request);
            let results = self.block_device.process_requests();

            let mut double_indirect_data = if let Some(result) = results.first() {
                match &result.result {
                    Ok(_) => result.request.buffer.clone(),
                    Err(_) => {
                        return Err(FileSystemError::new(
                            FileSystemErrorKind::IoError,
                            "Failed to read double indirect block",
                        ));
                    }
                }
            } else {
                return Err(FileSystemError::new(
                    FileSystemErrorKind::IoError,
                    "No result from double indirect block read",
                ));
            };

            // Decode the little-endian pointer to the first-level indirect
            // block for this slot (0 means "not yet allocated").
            let mut first_indirect_ptr = u32::from_le_bytes([
                double_indirect_data[first_indirect_index as usize * 4],
                double_indirect_data[first_indirect_index as usize * 4 + 1],
                double_indirect_data[first_indirect_index as usize * 4 + 2],
                double_indirect_data[first_indirect_index as usize * 4 + 3],
            ]);

            if first_indirect_ptr == 0 {
                // Allocate a new first level indirect block
                first_indirect_ptr = self.allocate_block()? as u32;

                // Point the double indirect slot at the new block.
                let first_indirect_bytes = first_indirect_ptr.to_le_bytes();
                let offset = first_indirect_index as usize * 4;
                double_indirect_data[offset..offset + 4].copy_from_slice(&first_indirect_bytes);

                // Persist the updated double indirect block before touching
                // the new first-level block.
                let write_request = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Write,
                    sector: double_indirect_sector as usize,
                    sector_count: (self.block_size / 512) as usize,
                    head: 0,
                    cylinder: 0,
                    buffer: double_indirect_data.clone(),
                });

                self.block_device.enqueue_request(write_request);
                let write_results = self.block_device.process_requests();

                if let Some(write_result) = write_results.first() {
                    match &write_result.result {
                        Ok(_) => {}
                        Err(_) => {
                            return Err(FileSystemError::new(
                                FileSystemErrorKind::IoError,
                                "Failed to write double indirect block",
                            ));
                        }
                    }
                } else {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "No response from double indirect block write",
                    ));
                }

                // Zero-fill the new first-level indirect block.
                // NOTE(review): like the earlier clear, the write result is
                // discarded.
                let clear_data = vec![0u8; self.block_size as usize];
                let clear_request = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Write,
                    sector: self.block_to_sector(first_indirect_ptr as u64),
                    sector_count: (self.block_size / 512) as usize,
                    head: 0,
                    cylinder: 0,
                    buffer: clear_data,
                });

                self.block_device.enqueue_request(clear_request);
                let _results = self.block_device.process_requests();
            }

            // Read the first-level indirect block for a read-modify-write.
            let first_indirect_sector = self.block_to_sector(first_indirect_ptr as u64);
            let request = Box::new(crate::device::block::request::BlockIORequest {
                request_type: crate::device::block::request::BlockIORequestType::Read,
                sector: first_indirect_sector as usize,
                sector_count: (self.block_size / 512) as usize,
                head: 0,
                cylinder: 0,
                buffer: vec![0u8; self.block_size as usize],
            });

            self.block_device.enqueue_request(request);
            let results = self.block_device.process_requests();

            let mut first_indirect_data = if let Some(result) = results.first() {
                match &result.result {
                    Ok(_) => result.request.buffer.clone(),
                    Err(_) => {
                        return Err(FileSystemError::new(
                            FileSystemErrorKind::IoError,
                            "Failed to read first level indirect block",
                        ));
                    }
                }
            } else {
                return Err(FileSystemError::new(
                    FileSystemErrorKind::IoError,
                    "No result from first level indirect block read",
                ));
            };

            // Store the data block pointer at its 4-byte slot.
            let offset = second_indirect_index as usize * 4;
            let block_bytes = block_number.to_le_bytes();
            first_indirect_data[offset..offset + 4].copy_from_slice(&block_bytes);

            // Write back the first level indirect block
            let write_request = Box::new(crate::device::block::request::BlockIORequest {
                request_type: crate::device::block::request::BlockIORequestType::Write,
                sector: first_indirect_sector as usize,
                sector_count: (self.block_size / 512) as usize,
                head: 0,
                cylinder: 0,
                buffer: first_indirect_data,
            });

            self.block_device.enqueue_request(write_request);
            let write_results = self.block_device.process_requests();

            if let Some(write_result) = write_results.first() {
                match &write_result.result {
                    Ok(_) => Ok(()),
                    Err(_) => Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to write first level indirect block",
                    )),
                }
            } else {
                Err(FileSystemError::new(
                    FileSystemErrorKind::IoError,
                    "No response from first level indirect block write",
                ))
            }
        } else {
            // Triple indirect blocks - not implemented yet
            Err(FileSystemError::new(
                FileSystemErrorKind::NotSupported,
                "Triple indirect blocks not yet supported",
            ))
        }
    }
3572
3573    /// Set multiple inode blocks efficiently by batching operations for Single Indirect blocks
3574    fn set_inode_blocks_simple_batch(
3575        &self,
3576        inode: &mut Ext2Inode,
3577        assignments: &[(u64, u32)],
3578    ) -> Result<(), FileSystemError> {
3579        profile_scope!("ext2::set_inode_blocks_simple_batch");
3580
3581        let blocks_per_indirect = self.block_size / 4;
3582        let mut indirect_blocks_cache = alloc::collections::BTreeMap::new();
3583        let mut double_indirect_cache = alloc::collections::BTreeMap::new();
3584        let mut _batched_writes = 0;
3585        let mut new_indirect_blocks = Vec::new();
3586
3587        // PRE-ALLOCATE: Calculate needed indirect blocks and allocate in batch
3588        let mut needed_indirect_blocks = 0u32;
3589        let mut needed_first_level_indirects = alloc::collections::BTreeSet::new();
3590        let mut need_single_indirect = false;
3591        let mut need_double_indirect = false;
3592
3593        for &(logical_block, _) in assignments {
3594            if logical_block >= 12 && logical_block < 12 + blocks_per_indirect as u64 {
3595                if inode.block[12] == 0 {
3596                    need_single_indirect = true;
3597                }
3598            } else if logical_block >= 12 + blocks_per_indirect as u64 {
3599                if inode.block[13] == 0 {
3600                    need_double_indirect = true;
3601                }
3602
3603                // Calculate which first-level indirect blocks we need
3604                let double_base = 12 + blocks_per_indirect as u64;
3605                let double_offset = logical_block - double_base;
3606                let first_indirect_index = double_offset / blocks_per_indirect as u64;
3607                needed_first_level_indirects.insert(first_indirect_index);
3608            }
3609        }
3610
3611        // Count total needed indirect blocks
3612        if need_single_indirect {
3613            needed_indirect_blocks += 1;
3614        }
3615        if need_double_indirect {
3616            needed_indirect_blocks += 1;
3617        }
3618        needed_indirect_blocks += needed_first_level_indirects.len() as u32;
3619
3620        // Batch allocate all needed indirect blocks
3621        let allocated_indirect_blocks = if needed_indirect_blocks > 0 {
3622            #[cfg(test)]
3623            crate::early_println!(
3624                "[ext2] set_inode_blocks_simple_batch: Pre-allocating {} indirect blocks",
3625                needed_indirect_blocks
3626            );
3627            self.allocate_blocks_contiguous(needed_indirect_blocks)?
3628        } else {
3629            Vec::new()
3630        };
3631
3632        let mut indirect_block_index = 0;
3633
3634        for &(logical_block, block_number) in assignments {
3635            // crate::early_println!("[ext2] DEBUG: Processing assignment: logical_block={}, block_number={}", logical_block, block_number);
3636
3637            if logical_block < 12 {
3638                // Direct blocks - immediate update
3639                inode.block[logical_block as usize] = block_number;
3640            } else if logical_block < 12 + blocks_per_indirect as u64 {
3641                // Single indirect blocks - batch these
3642                let index = logical_block - 12;
3643
3644                // Ensure indirect block exists
3645                if inode.block[12] == 0 {
3646                    let indirect_block = if indirect_block_index < allocated_indirect_blocks.len() {
3647                        let block = allocated_indirect_blocks[indirect_block_index] as u32;
3648                        indirect_block_index += 1;
3649                        block
3650                    } else {
3651                        return Err(FileSystemError::new(
3652                            FileSystemErrorKind::NoSpace,
3653                            "Not enough pre-allocated indirect blocks",
3654                        ));
3655                    };
3656
3657                    inode.block[12] = indirect_block;
3658                    new_indirect_blocks.push(indirect_block);
3659                    indirect_blocks_cache
3660                        .insert(indirect_block, vec![0u8; self.block_size as usize]);
3661                }
3662
3663                let indirect_block = inode.block[12];
3664
3665                // Cache the indirect block data
3666                if !indirect_blocks_cache.contains_key(&indirect_block) {
3667                    if new_indirect_blocks.contains(&indirect_block) {
3668                        // New block, start with zeros
3669                        indirect_blocks_cache
3670                            .insert(indirect_block, vec![0u8; self.block_size as usize]);
3671                    } else {
3672                        // Existing block, read from disk
3673                        // crate::early_println!("[ext2] DEBUG: Reading indirect block {} for caching", indirect_block);
3674                        let data = self.read_block_cached(indirect_block as u64)?;
3675                        indirect_blocks_cache.insert(indirect_block, data);
3676                    }
3677                }
3678
3679                // Update in cache
3680                if let Some(indirect_data) = indirect_blocks_cache.get_mut(&indirect_block) {
3681                    let offset = index as usize * 4;
3682                    let block_bytes = block_number.to_le_bytes();
3683                    indirect_data[offset..offset + 4].copy_from_slice(&block_bytes);
3684                    _batched_writes += 1;
3685                }
3686            } else if logical_block
3687                < 12 + blocks_per_indirect as u64
3688                    + blocks_per_indirect as u64 * blocks_per_indirect as u64
3689            {
3690                // Double indirect blocks - OPTIMIZE THIS TOO!
3691                let double_base = 12 + blocks_per_indirect as u64;
3692
3693                // Check for underflow before calculation
3694                if logical_block < double_base {
3695                    crate::early_println!(
3696                        "[ext2] ERROR: Double indirect block calculation would underflow: logical_block={}, double_base={}",
3697                        logical_block,
3698                        double_base
3699                    );
3700                    return Err(FileSystemError::new(
3701                        FileSystemErrorKind::InvalidData,
3702                        "Double indirect block calculation underflow",
3703                    ));
3704                }
3705
3706                let double_offset = logical_block - double_base;
3707                let first_indirect_index = double_offset / blocks_per_indirect as u64;
3708                let second_indirect_index = double_offset % blocks_per_indirect as u64;
3709
3710                // crate::early_println!("[ext2] DEBUG: Double indirect calculation: logical_block={}, double_offset={}, first_idx={}, second_idx={}",
3711                //                       logical_block, double_offset, first_indirect_index, second_indirect_index);
3712
3713                // Ensure double indirect block exists
3714                if inode.block[13] == 0 {
3715                    let double_indirect_block =
3716                        if indirect_block_index < allocated_indirect_blocks.len() {
3717                            let block = allocated_indirect_blocks[indirect_block_index] as u32;
3718                            indirect_block_index += 1;
3719                            block
3720                        } else {
3721                            return Err(FileSystemError::new(
3722                                FileSystemErrorKind::NoSpace,
3723                                "Not enough pre-allocated double indirect blocks",
3724                            ));
3725                        };
3726
3727                    inode.block[13] = double_indirect_block;
3728                    new_indirect_blocks.push(double_indirect_block);
3729                    double_indirect_cache
3730                        .insert(double_indirect_block, vec![0u8; self.block_size as usize]);
3731                }
3732
3733                let double_indirect_block = inode.block[13];
3734
3735                // Cache double indirect block
3736                if !double_indirect_cache.contains_key(&double_indirect_block) {
3737                    if new_indirect_blocks.contains(&double_indirect_block) {
3738                        double_indirect_cache
3739                            .insert(double_indirect_block, vec![0u8; self.block_size as usize]);
3740                    } else {
3741                        // crate::early_println!("[ext2] DEBUG: Reading double indirect block {} for caching", double_indirect_block);
3742                        let data = self.read_block_cached(double_indirect_block as u64)?;
3743                        double_indirect_cache.insert(double_indirect_block, data);
3744                    }
3745                }
3746
3747                // Get first level indirect block pointer
3748                let first_indirect_ptr =
3749                    if let Some(double_data) = double_indirect_cache.get(&double_indirect_block) {
3750                        let ptr_offset = first_indirect_index as usize * 4;
3751                        u32::from_le_bytes([
3752                            double_data[ptr_offset],
3753                            double_data[ptr_offset + 1],
3754                            double_data[ptr_offset + 2],
3755                            double_data[ptr_offset + 3],
3756                        ])
3757                    } else {
3758                        0
3759                    };
3760
3761                // Allocate first level indirect block if needed
3762                let first_indirect_block = if first_indirect_ptr == 0 {
3763                    let new_block = if indirect_block_index < allocated_indirect_blocks.len() {
3764                        let block = allocated_indirect_blocks[indirect_block_index] as u32;
3765                        indirect_block_index += 1;
3766                        block
3767                    } else {
3768                        return Err(FileSystemError::new(
3769                            FileSystemErrorKind::NoSpace,
3770                            "Not enough pre-allocated first-level indirect blocks",
3771                        ));
3772                    };
3773
3774                    new_indirect_blocks.push(new_block);
3775
3776                    // Update double indirect block in cache
3777                    if let Some(double_data) = double_indirect_cache.get_mut(&double_indirect_block)
3778                    {
3779                        let ptr_offset = first_indirect_index as usize * 4;
3780                        let block_bytes = new_block.to_le_bytes();
3781                        double_data[ptr_offset..ptr_offset + 4].copy_from_slice(&block_bytes);
3782                    }
3783
3784                    // Initialize first level block cache
3785                    indirect_blocks_cache.insert(new_block, vec![0u8; self.block_size as usize]);
3786                    new_block
3787                } else {
3788                    first_indirect_ptr
3789                };
3790
3791                // Cache first level indirect block if not already cached
3792                if !indirect_blocks_cache.contains_key(&first_indirect_block) {
3793                    if new_indirect_blocks.contains(&first_indirect_block) {
3794                        indirect_blocks_cache
3795                            .insert(first_indirect_block, vec![0u8; self.block_size as usize]);
3796                    } else {
3797                        // crate::early_println!("[ext2] DEBUG: Reading first-level indirect block {} for caching", first_indirect_block);
3798                        let data = self.read_block_cached(first_indirect_block as u64)?;
3799                        indirect_blocks_cache.insert(first_indirect_block, data);
3800                    }
3801                }
3802
3803                // Update first level indirect block in cache
3804                if let Some(indirect_data) = indirect_blocks_cache.get_mut(&first_indirect_block) {
3805                    let offset = second_indirect_index as usize * 4;
3806                    let block_bytes = block_number.to_le_bytes();
3807                    indirect_data[offset..offset + 4].copy_from_slice(&block_bytes);
3808                    _batched_writes += 1;
3809                }
3810            } else {
3811                // Triple indirect and beyond - fall back to individual calls
3812                // crate::early_println!("[ext2] DEBUG: Fallback to individual set_inode_block for logical_block {} (triple indirect)", logical_block);
3813                self.set_inode_block(inode, logical_block, block_number)?;
3814            }
3815        }
3816
3817        // Write back all cached indirect blocks at once using batched write
3818        let total_indirect_blocks = indirect_blocks_cache.len() + double_indirect_cache.len();
3819        if total_indirect_blocks > 0 {
3820            let mut write_blocks = BTreeMap::new();
3821
3822            // Add single and first-level indirect blocks
3823            for (block_num, data) in indirect_blocks_cache {
3824                // crate::early_println!("[ext2] DEBUG: Adding indirect block {} to write batch", block_num);
3825                if block_num as u64 > (1u64 << 32) {
3826                    // crate::early_println!("[ext2] ERROR: Invalid indirect block number: {}", block_num);
3827                    return Err(FileSystemError::new(
3828                        FileSystemErrorKind::InvalidData,
3829                        "Invalid indirect block number",
3830                    ));
3831                }
3832                write_blocks.insert(block_num as u64, data);
3833            }
3834
3835            // Add double indirect blocks
3836            for (block_num, data) in double_indirect_cache {
3837                // crate::early_println!("[ext2] DEBUG: Adding double indirect block {} to write batch", block_num);
3838                if block_num as u64 > (1u64 << 32) {
3839                    // crate::early_println!("[ext2] ERROR: Invalid double indirect block number: {}", block_num);
3840                    return Err(FileSystemError::new(
3841                        FileSystemErrorKind::InvalidData,
3842                        "Invalid double indirect block number",
3843                    ));
3844                }
3845                write_blocks.insert(block_num as u64, data);
3846            }
3847
3848            // crate::early_println!("[ext2] DEBUG: Batch writing {} indirect blocks (single + double indirect)", write_blocks.len());
3849            self.write_blocks_cached(&write_blocks)?;
3850        }
3851
3852        // crate::early_println!("[ext2] set_inode_blocks_simple_batch: completed {} assignments, {} batched writes",
3853        //     assignments.len(), batched_writes);
3854        Ok(())
3855    }
3856
    /// Update group descriptor on disk
    ///
    /// Performs a read-modify-write of the filesystem block containing the
    /// descriptor for block group `group`: the block is read from the device,
    /// the descriptor is serialized over its slot, and the whole block is
    /// written back.
    ///
    /// # Arguments
    /// * `group` - Index of the block group whose descriptor is updated
    /// * `bgd` - New descriptor contents to persist
    ///
    /// # Errors
    /// Returns `FileSystemErrorKind::IoError` if either the read or the
    /// write request fails or produces no result.
    fn update_group_descriptor(
        &self,
        group: u32,
        bgd: &Ext2BlockGroupDescriptor,
    ) -> Result<(), FileSystemError> {
        // Locate the block holding this group's descriptor. The BGD table
        // starts at block 2 when the block size is 1024 (block 1 holds the
        // superblock), otherwise at block 1 (the superblock fits in block 0).
        let bgd_block = (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32)
            / self.block_size
            + if self.block_size == 1024 { 2 } else { 1 };
        let bgd_block_sector = self.block_to_sector(bgd_block as u64);

        // Read the full block so untouched descriptors in it are preserved.
        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bgd_block_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let mut bgd_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read block group descriptor block",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        // Byte offset of this group's descriptor within the block.
        let bgd_offset =
            (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32) % self.block_size;
        bgd.write_to_bytes(&mut bgd_data[bgd_offset as usize..]);

        // Write the modified block back to the device.
        let write_request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Write,
            sector: bgd_block_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: bgd_data,
        });

        self.block_device.enqueue_request(write_request);
        let write_results = self.block_device.process_requests();

        if let Some(write_result) = write_results.first() {
            match &write_result.result {
                Ok(_) => Ok(()),
                Err(_) => Err(FileSystemError::new(
                    FileSystemErrorKind::IoError,
                    "Failed to write updated block group descriptor",
                )),
            }
        } else {
            Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No response from BGD write",
            ))
        }
    }
3928
3929    /// Update superblock counts (blocks, inodes, directories)
3930    fn update_superblock_counts(
3931        &self,
3932        block_delta: i32,
3933        inode_delta: i32,
3934        _dir_delta: i32,
3935    ) -> Result<(), FileSystemError> {
3936        // Read superblock
3937        let request = Box::new(crate::device::block::request::BlockIORequest {
3938            request_type: crate::device::block::request::BlockIORequestType::Read,
3939            sector: 2,
3940            sector_count: 2,
3941            head: 0,
3942            cylinder: 0,
3943            buffer: vec![0u8; 1024],
3944        });
3945
3946        self.block_device.enqueue_request(request);
3947        let results = self.block_device.process_requests();
3948
3949        let mut superblock_data = if let Some(result) = results.first() {
3950            match &result.result {
3951                Ok(_) => result.request.buffer.clone(),
3952                Err(_) => {
3953                    return Err(FileSystemError::new(
3954                        FileSystemErrorKind::IoError,
3955                        "Failed to read superblock",
3956                    ));
3957                }
3958            }
3959        } else {
3960            return Err(FileSystemError::new(
3961                FileSystemErrorKind::IoError,
3962                "No result from superblock read",
3963            ));
3964        };
3965
3966        // Update counts
3967        if block_delta != 0 {
3968            let current = u32::from_le_bytes([
3969                superblock_data[12],
3970                superblock_data[13],
3971                superblock_data[14],
3972                superblock_data[15],
3973            ]);
3974            let new_count = if block_delta < 0 {
3975                current.saturating_sub((-block_delta) as u32)
3976            } else {
3977                current.saturating_add(block_delta as u32)
3978            };
3979            let bytes = new_count.to_le_bytes();
3980            superblock_data[12..16].copy_from_slice(&bytes);
3981
3982            // Debug: Updated free_blocks_count (disabled to reduce log noise)
3983            // crate::early_println!("EXT2: Updated free_blocks_count: {} -> {} (delta: {})",
3984            //                       current, new_count, block_delta);
3985        }
3986
3987        if inode_delta != 0 {
3988            let current = u32::from_le_bytes([
3989                superblock_data[16],
3990                superblock_data[17],
3991                superblock_data[18],
3992                superblock_data[19],
3993            ]);
3994            let new_count = if inode_delta < 0 {
3995                current.saturating_sub((-inode_delta) as u32)
3996            } else {
3997                current.saturating_add(inode_delta as u32)
3998            };
3999            let bytes = new_count.to_le_bytes();
4000            superblock_data[16..20].copy_from_slice(&bytes);
4001
4002            // Debug: Updated free_inodes_count (disabled to reduce log noise)
4003            // crate::early_println!("EXT2: Updated free_inodes_count: {} -> {} (delta: {})",
4004            //                       current, new_count, inode_delta);
4005        }
4006
4007        // Write back superblock
4008        let write_request = Box::new(crate::device::block::request::BlockIORequest {
4009            request_type: crate::device::block::request::BlockIORequestType::Write,
4010            sector: 2,
4011            sector_count: 2,
4012            head: 0,
4013            cylinder: 0,
4014            buffer: superblock_data,
4015        });
4016
4017        self.block_device.enqueue_request(write_request);
4018        let write_results = self.block_device.process_requests();
4019
4020        if let Some(write_result) = write_results.first() {
4021            match &write_result.result {
4022                Ok(_) => {
4023                    // Debug: Superblock successfully updated (disabled to reduce log noise)
4024                    // crate::early_println!("EXT2: Superblock successfully updated");
4025                    Ok(())
4026                }
4027                Err(_) => Err(FileSystemError::new(
4028                    FileSystemErrorKind::IoError,
4029                    "Failed to write updated superblock",
4030                )),
4031            }
4032        } else {
4033            Err(FileSystemError::new(
4034                FileSystemErrorKind::IoError,
4035                "No response from superblock write",
4036            ))
4037        }
4038    }
4039
4040    /// Update superblock free counts (blocks and inodes)
4041    fn update_superblock_free_counts(
4042        &self,
4043        block_delta: i32,
4044        inode_delta: i32,
4045    ) -> Result<(), FileSystemError> {
4046        self.update_superblock_counts(block_delta, inode_delta, 0)
4047    }
4048
4049    /// Read multiple filesystem blocks with improved LRU cache and batching
4050    /// Optimized for fast path when all blocks are cached
4051    fn read_blocks_cached(&self, block_nums: &[u64]) -> Result<Vec<Vec<u8>>, FileSystemError> {
4052        profile_scope!("ext2::read_blocks_cached");
4053
4054        // Fast path: if only one block, try cache-only first
4055        if block_nums.len() == 1 {
4056            let block_num = block_nums[0];
4057            let mut cache = self.block_cache.lock();
4058            if let Some(data) = cache.get(block_num) {
4059                return Ok(vec![data]);
4060            }
4061            drop(cache);
4062        }
4063
4064        // Slower path: multiple blocks or cache miss
4065        let mut results = Vec::with_capacity(block_nums.len());
4066        let mut missing_blocks = Vec::new();
4067        let mut cache = self.block_cache.lock();
4068
4069        // Check cache for existing blocks, maintain order
4070        for &block_num in block_nums {
4071            if let Some(data) = cache.get(block_num) {
4072                results.push((block_num, data));
4073            } else {
4074                missing_blocks.push(block_num);
4075                results.push((block_num, Vec::new())); // Placeholder
4076            }
4077        }
4078
4079        // Drop the lock before I/O
4080        drop(cache);
4081
4082        // If all blocks were cached, return immediately
4083        if missing_blocks.is_empty() {
4084            return Ok(results.into_iter().map(|(_, data)| data).collect());
4085        }
4086
4087        if !missing_blocks.is_empty() {
4088            // Sort missing blocks to find contiguous ranges
4089            missing_blocks.sort();
4090
4091            // Store information about each request range for later processing
4092            let mut request_ranges = Vec::new();
4093
4094            let mut i = 0;
4095            while i < missing_blocks.len() {
4096                let start_block = missing_blocks[i];
4097                let mut count = 1;
4098
4099                // Count consecutive blocks
4100                while i + count < missing_blocks.len()
4101                    && missing_blocks[i + count] == start_block + count as u64
4102                {
4103                    count += 1;
4104                }
4105
4106                let start_sector = self.block_to_sector(start_block);
4107                let num_sectors = count * self.sectors_per_block() as usize;
4108                let buffer_size = count * self.block_size as usize;
4109
4110                let request = Box::new(crate::device::block::request::BlockIORequest {
4111                    request_type: crate::device::block::request::BlockIORequestType::Read,
4112                    sector: start_sector,
4113                    sector_count: num_sectors,
4114                    head: 0,
4115                    cylinder: 0,
4116                    buffer: vec![0u8; buffer_size],
4117                });
4118
4119                // Store range info for later processing
4120                request_ranges.push((start_block, count));
4121
4122                // Enqueue the request but don't process yet
4123                self.block_device.enqueue_request(request);
4124                i += count; // Move to the next non-consecutive block
4125            }
4126
4127            // Process all enqueued requests in one batch
4128            let read_results = self.block_device.process_requests();
4129
4130            // Validate that we got the expected number of results
4131            if read_results.len() != request_ranges.len() {
4132                return Err(FileSystemError::new(
4133                    FileSystemErrorKind::DeviceError,
4134                    "Mismatch between requested and received block count",
4135                ));
4136            }
4137
4138            // Process results and update cache
4139            let mut cache = self.block_cache.lock();
4140            let mut missing_data = HashMap::new();
4141
4142            for (result_idx, result) in read_results.iter().enumerate() {
4143                if result.result.is_err() {
4144                    return Err(FileSystemError::new(
4145                        FileSystemErrorKind::DeviceError,
4146                        "Failed to read blocks from device",
4147                    ));
4148                }
4149
4150                let (start_block, count) = request_ranges[result_idx];
4151                let data = &result.request.buffer;
4152
4153                // Validate buffer size matches expectations
4154                let expected_size = count * self.block_size as usize;
4155                if data.len() != expected_size {
4156                    // Try to handle gracefully - truncate or pad buffer to expected size
4157                    let mut corrected_data = data.clone();
4158                    if data.len() > expected_size {
4159                        corrected_data.truncate(expected_size);
4160                    } else {
4161                        corrected_data.resize(expected_size, 0);
4162                    }
4163
4164                    // Process with corrected data
4165                    for j in 0..count {
4166                        let current_block = start_block + j as u64;
4167                        let offset = j * self.block_size as usize;
4168                        let end_offset = offset + self.block_size as usize;
4169
4170                        if end_offset <= corrected_data.len() {
4171                            let block_data = corrected_data[offset..end_offset].to_vec();
4172                            missing_data.insert(current_block, block_data.clone());
4173                            cache.insert(current_block, block_data);
4174                        } else {
4175                            return Err(FileSystemError::new(
4176                                FileSystemErrorKind::DeviceError,
4177                                "Buffer corruption detected",
4178                            ));
4179                        }
4180                    }
4181                    continue; // Skip normal processing for this result
4182                }
4183
4184                for j in 0..count {
4185                    let current_block = start_block + j as u64;
4186                    let offset = j * self.block_size as usize;
4187                    let end_offset = offset + self.block_size as usize;
4188
4189                    let block_data = data[offset..end_offset].to_vec();
4190                    missing_data.insert(current_block, block_data.clone());
4191                    cache.insert(current_block, block_data);
4192                }
4193            }
4194
4195            // Update results with missing data
4196            for i in 0..results.len() {
4197                let (block_num, ref data) = results[i];
4198                if data.is_empty() {
4199                    // This was a placeholder for missing block
4200                    if let Some(fetched_data) = missing_data.get(&block_num) {
4201                        results[i] = (block_num, fetched_data.clone());
4202                    }
4203                }
4204            }
4205        }
4206
4207        // Return results in the order of original block_nums, extracting data only
4208        let result: Vec<Vec<u8>> = results.into_iter().map(|(_, data)| data).collect();
4209
4210        #[cfg(test)]
4211        unsafe {
4212            static mut CALL_COUNT: u64 = 0;
4213            CALL_COUNT += 1;
4214            // Print cache stats periodically (every 100th call)
4215            if CALL_COUNT % 100 == 0 {
4216                let cache = self.block_cache.lock();
4217                cache.print_stats("Block");
4218            }
4219        }
4220
4221        Ok(result)
4222    }
4223
4224    /// Write multiple filesystem blocks with write-through to device and cache update
4225    fn write_blocks_cached(&self, blocks: &BTreeMap<u64, Vec<u8>>) -> Result<(), FileSystemError> {
4226        profile_scope!("ext2::write_blocks_cached");
4227
4228        if blocks.is_empty() {
4229            return Ok(());
4230        }
4231
4232        #[cfg(test)]
4233        crate::early_println!(
4234            "[ext2] write_blocks_cached: {} blocks to write",
4235            blocks.len()
4236        );
4237
4238        // Debug: Check for invalid block numbers
4239        for (block_num, _) in blocks.iter() {
4240            if *block_num > (1u64 << 32) {
4241                // Check for very large values that could be negative casts
4242                crate::early_println!(
4243                    "[ext2] ERROR: Invalid block number detected: {} (0x{:x})",
4244                    block_num,
4245                    block_num
4246                );
4247                panic!("Invalid block number: {} (0x{:x})", block_num, block_num);
4248            }
4249        }
4250
4251        let mut sorted_blocks: Vec<_> = blocks.iter().collect();
4252        sorted_blocks.sort_by_key(|(k, _)| *k);
4253
4254        // Store information about each request range for later processing
4255        let mut request_ranges = Vec::new();
4256
4257        let mut i = 0;
4258        while i < sorted_blocks.len() {
4259            let start_block = *sorted_blocks[i].0;
4260            let mut count = 1;
4261            let mut data_to_write = sorted_blocks[i].1.clone();
4262
4263            // Count consecutive blocks and combine their data
4264            while i + count < sorted_blocks.len()
4265                && *sorted_blocks[i + count].0 == start_block + count as u64
4266            {
4267                data_to_write.extend_from_slice(sorted_blocks[i + count].1);
4268                count += 1;
4269            }
4270
4271            let start_sector = self.block_to_sector(start_block);
4272            let num_sectors = count * self.sectors_per_block() as usize;
4273
4274            let request = Box::new(crate::device::block::request::BlockIORequest {
4275                request_type: crate::device::block::request::BlockIORequestType::Write,
4276                sector: start_sector,
4277                sector_count: num_sectors,
4278                head: 0,
4279                cylinder: 0,
4280                buffer: data_to_write,
4281            });
4282
4283            // Store range info for later processing
4284            request_ranges.push((start_block, count));
4285
4286            // Enqueue the request but don't process yet
4287            self.block_device.enqueue_request(request);
4288            i += count; // Move to the next non-consecutive block
4289        }
4290
4291        // Process all enqueued requests in one batch
4292        #[cfg(test)]
4293        crate::early_println!(
4294            "[ext2] write_blocks_cached: Processing {} requests in batch",
4295            request_ranges.len()
4296        );
4297        let write_results = self.block_device.process_requests();
4298
4299        // Validate that we got the expected number of results
4300        if write_results.len() != request_ranges.len() {
4301            return Err(FileSystemError::new(
4302                FileSystemErrorKind::DeviceError,
4303                "Mismatch between requested and received write count",
4304            ));
4305        }
4306
4307        // Check for any write errors and update cache
4308        let mut cache = self.block_cache.lock();
4309        for (result_idx, result) in write_results.iter().enumerate() {
4310            if result.result.is_err() {
4311                return Err(FileSystemError::new(
4312                    FileSystemErrorKind::DeviceError,
4313                    "Failed to write blocks to device",
4314                ));
4315            }
4316
4317            let (start_block, count) = request_ranges[result_idx];
4318
4319            // Invalidate cache for successfully written blocks (write-through cache)
4320            // No need to update cache with written data since it's already on disk
4321            for j in 0..count {
4322                let current_block = start_block + j as u64;
4323                cache.remove(current_block);
4324            }
4325        }
4326
4327        Ok(())
4328    }
4329
4330    /// Sectors per filesystem block
4331    fn sectors_per_block(&self) -> u64 {
4332        (self.block_size as u64) / 512
4333    }
4334
4335    /// Convert ext2 block number to starting sector index
4336    fn block_to_sector(&self, block_num: u64) -> usize {
4337        // Validate block number range
4338        if block_num > (1u64 << 32) {
4339            crate::early_println!(
4340                "[ext2] ERROR: block_to_sector called with invalid block_num: {} (0x{:x})",
4341                block_num,
4342                block_num
4343            );
4344            panic!(
4345                "block_to_sector: invalid block_num: {} (0x{:x})",
4346                block_num, block_num
4347            );
4348        }
4349
4350        // Check for reasonable upper bound (e.g., filesystem shouldn't have more than 2^30 blocks)
4351        if block_num > (1u64 << 30) {
4352            #[cfg(test)]
4353            crate::early_println!(
4354                "[ext2] WARNING: block_to_sector called with very large block_num: {}",
4355                block_num
4356            );
4357        }
4358
4359        (block_num * self.sectors_per_block()) as usize
4360    }
4361
4362    /// Read one filesystem block with LRU cache
4363    fn read_block_cached(&self, block_num: u64) -> Result<Vec<u8>, FileSystemError> {
4364        // Use the batched read_blocks_cached for efficiency
4365        let blocks = self.read_blocks_cached(&[block_num])?;
4366        if let Some(block_data) = blocks.into_iter().next() {
4367            Ok(block_data)
4368        } else {
4369            Err(FileSystemError::new(
4370                FileSystemErrorKind::DeviceError,
4371                "Failed to read single block",
4372            ))
4373        }
4374    }
4375
4376    /// Write one filesystem block with write-through to device and cache update
4377    fn write_block_cached(&self, block_num: u64, data: &[u8]) -> Result<(), FileSystemError> {
4378        // Note: This is just a wrapper around write_blocks_cached, no separate profiling
4379        // to avoid double counting in profiler
4380        self.write_blocks_cached(&BTreeMap::from([(block_num, data.to_vec())]))
4381    }
4382
4383    /// Print cache statistics for debugging
4384    pub fn print_cache_stats(&self) {
4385        let inode_cache = self.inode_cache.lock();
4386        let block_cache = self.block_cache.lock();
4387
4388        inode_cache.print_stats("Inode");
4389        block_cache.print_stats("Block");
4390    }
4391}
4392
4393impl FileSystemOperations for Ext2FileSystem {
    fn fs_id(&self) -> FileSystemId {
        // Identifier assigned to this mounted filesystem instance.
        self.fs_id
    }
4397
4398    fn lookup(
4399        &self,
4400        parent: &Arc<dyn VfsNode>,
4401        name: &String,
4402    ) -> Result<Arc<dyn VfsNode>, FileSystemError> {
4403        // Cast parent to Ext2Node
4404        let ext2_parent = parent.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
4405            FileSystemError::new(
4406                FileSystemErrorKind::InvalidOperation,
4407                "Parent node is not an Ext2Node",
4408            )
4409        })?;
4410
4411        // Read parent inode
4412        let parent_inode = self.read_inode(ext2_parent.inode_number())?;
4413
4414        // Ensure parent is a directory
4415        if parent_inode.mode & EXT2_S_IFMT != EXT2_S_IFDIR {
4416            return Err(FileSystemError::new(
4417                FileSystemErrorKind::NotADirectory,
4418                "Parent is not a directory",
4419            ));
4420        }
4421
4422        // Read directory entries
4423        let entries = self.read_directory_entries(&parent_inode)?;
4424
4425        // Find the requested entry
4426        for entry in entries {
4427            let entry_name = entry.name_str()?;
4428            if entry_name == *name {
4429                // Read the inode for this entry
4430                let child_inode = self.read_inode(entry.entry.inode)?;
4431
4432                // Use file_type_from_inode to get the correct file type including device files
4433                let file_type = self.file_type_from_inode(&child_inode, entry.entry.inode)?;
4434
4435                // Use inode number as stable file_id for page cache identity
4436                let file_id = entry.entry.inode as u64;
4437
4438                // Create new node
4439                let node = Ext2Node::new(entry.entry.inode, file_type, file_id);
4440
4441                // Set filesystem reference from parent
4442                if let Some(fs_ref) = ext2_parent.filesystem() {
4443                    node.set_filesystem(fs_ref);
4444                }
4445
4446                return Ok(Arc::new(node));
4447            }
4448        }
4449
4450        Err(FileSystemError::new(
4451            FileSystemErrorKind::NotFound,
4452            "File not found",
4453        ))
4454    }
4455
4456    fn readdir(
4457        &self,
4458        node: &Arc<dyn VfsNode>,
4459    ) -> Result<Vec<DirectoryEntryInternal>, FileSystemError> {
4460        // Cast node to Ext2Node
4461        let ext2_node = node.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
4462            FileSystemError::new(
4463                FileSystemErrorKind::InvalidOperation,
4464                "Node is not an Ext2Node",
4465            )
4466        })?;
4467
4468        // Read inode
4469        let inode = self.read_inode(ext2_node.inode_number())?;
4470
4471        // Ensure this is a directory
4472        if inode.mode & EXT2_S_IFMT != EXT2_S_IFDIR {
4473            return Err(FileSystemError::new(
4474                FileSystemErrorKind::NotADirectory,
4475                "Node is not a directory",
4476            ));
4477        }
4478
4479        // Read directory entries
4480        let entries = self.read_directory_entries(&inode)?;
4481
4482        // Convert to internal format
4483        let mut result = Vec::new();
4484        for entry in entries {
4485            let name = entry.name_str()?;
4486            let child_inode = self.read_inode(entry.entry.inode)?;
4487
4488            // Use file_type_from_inode to get the correct file type including device files
4489            let file_type = self.file_type_from_inode(&child_inode, entry.entry.inode)?;
4490
4491            result.push(DirectoryEntryInternal {
4492                name,
4493                file_type,
4494                file_id: entry.entry.inode as u64,
4495            });
4496        }
4497
4498        Ok(result)
4499    }
4500
4501    fn open(
4502        &self,
4503        node: &Arc<dyn VfsNode>,
4504        _flags: u32,
4505    ) -> Result<Arc<dyn FileObject>, FileSystemError> {
4506        #[cfg(test)]
4507        crate::early_println!("[ext2] open: Starting open operation");
4508
4509        let file_type = node.file_type()?;
4510
4511        #[cfg(test)]
4512        crate::early_println!("[ext2] open: File type = {:?}", file_type);
4513
4514        match file_type {
4515            FileType::RegularFile => {
4516                #[cfg(test)]
4517                crate::early_println!("[ext2] open: Opening regular file");
4518
4519                let ext2_node = node.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
4520                    FileSystemError::new(
4521                        FileSystemErrorKind::InvalidOperation,
4522                        "Node is not an Ext2Node",
4523                    )
4524                })?;
4525                let file_obj = Arc::new(Ext2FileObject::new(
4526                    ext2_node.inode_number(),
4527                    ext2_node.id(),
4528                ));
4529
4530                // Set filesystem reference
4531                if let Some(fs_weak) = ext2_node.filesystem() {
4532                    file_obj.set_filesystem(fs_weak);
4533                }
4534
4535                Ok(file_obj)
4536            }
4537            FileType::Directory => {
4538                #[cfg(test)]
4539                crate::early_println!("[ext2] open: Opening directory");
4540
4541                let ext2_node = node.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
4542                    FileSystemError::new(
4543                        FileSystemErrorKind::InvalidOperation,
4544                        "Node is not an Ext2Node",
4545                    )
4546                })?;
4547                let dir_obj = Arc::new(Ext2DirectoryObject::new(
4548                    ext2_node.inode_number(),
4549                    ext2_node.id(),
4550                ));
4551
4552                // Set filesystem reference
4553                if let Some(fs_weak) = ext2_node.filesystem() {
4554                    dir_obj.set_filesystem(fs_weak);
4555                }
4556
4557                Ok(dir_obj)
4558            }
4559            FileType::CharDevice(device_info) => {
4560                #[cfg(test)]
4561                crate::early_println!(
4562                    "[ext2] Opening character device file: device_id={}",
4563                    device_info.device_id
4564                );
4565
4566                let ext2_node = node.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
4567                    FileSystemError::new(
4568                        FileSystemErrorKind::InvalidOperation,
4569                        "Node is not an Ext2Node",
4570                    )
4571                })?;
4572                let char_device_obj =
4573                    Arc::new(Ext2CharDeviceFileObject::new(device_info, ext2_node.id()));
4574
4575                // Set filesystem reference
4576                if let Some(fs_weak) = ext2_node.filesystem() {
4577                    char_device_obj.set_filesystem(fs_weak);
4578                }
4579
4580                #[cfg(test)]
4581                crate::early_println!("[ext2] Character device file object created successfully");
4582
4583                Ok(char_device_obj)
4584            }
4585            _ => {
4586                #[cfg(test)]
4587                crate::early_println!("[ext2] open: Unsupported file type: {:?}", file_type);
4588
4589                Err(FileSystemError::new(
4590                    FileSystemErrorKind::NotSupported,
4591                    "Unsupported file type for open operation",
4592                ))
4593            }
4594        }
4595    }
4596
4597    fn create(
4598        &self,
4599        parent: &Arc<dyn VfsNode>,
4600        name: &String,
4601        file_type: FileType,
4602        _mode: u32,
4603    ) -> Result<Arc<dyn VfsNode>, FileSystemError> {
4604        let ext2_parent = parent.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
4605            FileSystemError::new(
4606                FileSystemErrorKind::NotSupported,
4607                "Invalid node type for ext2",
4608            )
4609        })?;
4610
4611        // Check if it's a directory
4612        match ext2_parent.file_type() {
4613            Ok(FileType::Directory) => {}
4614            Ok(_) => {
4615                return Err(FileSystemError::new(
4616                    FileSystemErrorKind::NotADirectory,
4617                    "Parent is not a directory",
4618                ));
4619            }
4620            Err(e) => return Err(e),
4621        }
4622
4623        // Check if the entry already exists
4624        if self.check_entry_exists(ext2_parent.inode_number(), name)? {
4625            return Err(FileSystemError::new(
4626                FileSystemErrorKind::AlreadyExists,
4627                "File or directory already exists",
4628            ));
4629        }
4630
4631        // Allocate an inode from the ext2 filesystem
4632        let new_inode_number = self.allocate_inode()?;
4633        let file_id = new_inode_number as u64;
4634
4635        // Create the inode structure on disk
4636        let mode = match &file_type {
4637            FileType::RegularFile => EXT2_S_IFREG | 0o644,
4638            FileType::Directory => EXT2_S_IFDIR | 0o755,
4639            FileType::SymbolicLink(_) => EXT2_S_IFLNK | 0o777,
4640            FileType::CharDevice(_) => EXT2_S_IFCHR | 0o666,
4641            FileType::BlockDevice(_) => EXT2_S_IFBLK | 0o666,
4642            FileType::Pipe => EXT2_S_IFIFO | 0o666,
4643            FileType::Socket(_) => EXT2_S_IFSOCK | 0o666,
4644            _ => {
4645                return Err(FileSystemError::new(
4646                    FileSystemErrorKind::NotSupported,
4647                    "Unsupported file type for ext2",
4648                ));
4649            }
4650        } as u16;
4651
4652        // Create new inode with proper initialization
4653        let initial_nlinks: u16 = if file_type == FileType::Directory {
4654            2
4655        } else {
4656            1
4657        }; // Directory gets "." and initial link
4658        let mut new_inode = Ext2Inode {
4659            mode: mode.to_le(),
4660            uid: 0_u16.to_le(),
4661            size: 0_u32.to_le(),
4662            atime: 0_u32.to_le(),
4663            ctime: 0_u32.to_le(),
4664            mtime: 0_u32.to_le(),
4665            dtime: 0_u32.to_le(),
4666            gid: 0_u16.to_le(),
4667            links_count: initial_nlinks.to_le(),
4668            blocks: 0_u32.to_le(),
4669            flags: 0_u32.to_le(),
4670            osd1: 0_u32.to_le(),
4671            block: [0_u32; 15],
4672            generation: 0_u32.to_le(),
4673            file_acl: 0_u32.to_le(),
4674            dir_acl: 0_u32.to_le(),
4675            faddr: 0_u32.to_le(),
4676            osd2: [0u8; 12],
4677        };
4678
4679        // Handle symbolic link target path storage
4680        if let FileType::SymbolicLink(target_path) = &file_type {
4681            let target_bytes = target_path.as_bytes();
4682            new_inode.size = (target_bytes.len() as u32).to_le();
4683
4684            if target_bytes.len() <= 60 {
4685                // Fast symlink: store target path directly in inode.block array
4686                // Clear the block array first
4687                new_inode.block = [0u32; 15];
4688                // Copy target path bytes into the block array safely
4689                // Convert to byte array representation and back to avoid alignment issues
4690                let mut block_as_bytes = [0u8; 60];
4691                block_as_bytes[..target_bytes.len()].copy_from_slice(target_bytes);
4692
4693                // Copy the bytes to the block array as u32 values
4694                for (i, chunk) in block_as_bytes.chunks(4).enumerate() {
4695                    if i >= 15 {
4696                        break;
4697                    }
4698                    let mut val = [0u8; 4];
4699                    val[..chunk.len()].copy_from_slice(chunk);
4700                    new_inode.block[i] = u32::from_le_bytes(val);
4701                }
4702            } else {
4703                // Slow symlink: allocate a block and store target path there
4704                let block_number = self.allocate_block()? as u32;
4705                new_inode.block[0] = block_number.to_le();
4706                new_inode.blocks = (self.block_size / 512).to_le(); // Update block count
4707
4708                // Write target path to the allocated block
4709                let mut block_data = vec![0u8; self.block_size as usize];
4710                block_data[..target_bytes.len()].copy_from_slice(target_bytes);
4711
4712                let write_request = Box::new(crate::device::block::request::BlockIORequest {
4713                    request_type: crate::device::block::request::BlockIORequestType::Write,
4714                    sector: self.block_to_sector(block_number as u64),
4715                    sector_count: (self.block_size / 512) as usize,
4716                    head: 0,
4717                    cylinder: 0,
4718                    buffer: block_data,
4719                });
4720
4721                self.block_device.enqueue_request(write_request);
4722                let write_results = self.block_device.process_requests();
4723
4724                if let Some(write_result) = write_results.first() {
4725                    match &write_result.result {
4726                        Ok(_) => {}
4727                        Err(_) => {
4728                            return Err(FileSystemError::new(
4729                                FileSystemErrorKind::IoError,
4730                                "Failed to write symlink target data",
4731                            ));
4732                        }
4733                    }
4734                } else {
4735                    return Err(FileSystemError::new(
4736                        FileSystemErrorKind::IoError,
4737                        "No response from symlink target write",
4738                    ));
4739                }
4740            }
4741        }
4742
4743        // Handle device file information storage
4744        if let FileType::CharDevice(device_info) | FileType::BlockDevice(device_info) = &file_type {
4745            // Store device major/minor numbers in the first direct block pointer
4746            // This follows standard ext2 practice for device files
4747            // Extract major and minor from device_id (assuming device_id is major<<8 | minor)
4748            let major = (device_info.device_id >> 8) & 0xFF;
4749            let minor = device_info.device_id & 0xFF;
4750            let device_id = (major << 8) | minor;
4751            new_inode.block[0] = (device_id as u32).to_le();
4752            new_inode.size = 0_u32.to_le(); // Device files have no size
4753        }
4754
4755        // Write the inode to disk
4756        self.write_inode(new_inode_number, &new_inode)?;
4757
4758        // Add directory entry to parent directory
4759        self.add_directory_entry(
4760            ext2_parent.inode_number(),
4761            name,
4762            new_inode_number,
4763            file_type.clone(),
4764        )?;
4765
4766        // Initialize directory contents if it's a directory
4767        if matches!(file_type, FileType::Directory) {
4768            self.initialize_directory(new_inode_number, ext2_parent.inode_number())?;
4769
4770            // Update parent directory's nlinks count (removing ".." entry)
4771            let mut parent_inode = self.read_inode(ext2_parent.inode_number())?;
4772            parent_inode.links_count = (u16::from_le(parent_inode.links_count) + 1).to_le();
4773            self.write_inode(ext2_parent.inode_number(), &parent_inode)?;
4774
4775            // Update group descriptor to reflect one more directory
4776            let group = 0; // For now, we only use group 0
4777            let bgd_block = if self.block_size == 1024 { 2 } else { 1 };
4778            let bgd_block_sector = self.block_to_sector(bgd_block);
4779
4780            let request = Box::new(crate::device::block::request::BlockIORequest {
4781                request_type: crate::device::block::request::BlockIORequestType::Read,
4782                sector: bgd_block_sector,
4783                sector_count: (self.block_size / 512) as usize,
4784                head: 0,
4785                cylinder: 0,
4786                buffer: vec![0u8; self.block_size as usize],
4787            });
4788
4789            self.block_device.enqueue_request(request);
4790            let results = self.block_device.process_requests();
4791
4792            if let Some(result) = results.first() {
4793                if let Ok(_) = &result.result {
4794                    let bgd_data = &result.request.buffer;
4795                    let mut bgd = Ext2BlockGroupDescriptor::from_bytes(bgd_data)?;
4796                    let current_dirs = u16::from_le(bgd.used_dirs_count);
4797                    bgd.used_dirs_count = (current_dirs + 1).to_le();
4798                    self.update_group_descriptor(group, &bgd)?;
4799                }
4800            }
4801        }
4802
4803        // Create new node
4804        let new_node = match &file_type {
4805            FileType::RegularFile => Arc::new(Ext2Node::new(
4806                new_inode_number,
4807                FileType::RegularFile,
4808                file_id,
4809            )),
4810            FileType::Directory => Arc::new(Ext2Node::new(
4811                new_inode_number,
4812                FileType::Directory,
4813                file_id,
4814            )),
4815            FileType::SymbolicLink(_) => {
4816                Arc::new(Ext2Node::new(new_inode_number, file_type.clone(), file_id))
4817            }
4818            FileType::CharDevice(_) => {
4819                Arc::new(Ext2Node::new(new_inode_number, file_type.clone(), file_id))
4820            }
4821            FileType::BlockDevice(_) => {
4822                Arc::new(Ext2Node::new(new_inode_number, file_type.clone(), file_id))
4823            }
4824            FileType::Pipe => Arc::new(Ext2Node::new(new_inode_number, FileType::Pipe, file_id)),
4825            FileType::Socket(_) => {
4826                Arc::new(Ext2Node::new(new_inode_number, file_type.clone(), file_id))
4827            }
4828            _ => {
4829                return Err(FileSystemError::new(
4830                    FileSystemErrorKind::NotSupported,
4831                    "Unsupported file type for ext2",
4832                ));
4833            }
4834        };
4835
4836        // Set filesystem reference
4837        if let Some(fs_ref) = ext2_parent.filesystem() {
4838            new_node.set_filesystem(fs_ref);
4839        }
4840
4841        Ok(new_node)
4842    }
4843
4844    fn remove(&self, parent: &Arc<dyn VfsNode>, name: &String) -> Result<(), FileSystemError> {
4845        // Prevent deletion of special entries
4846        if name == "." || name == ".." {
4847            return Err(FileSystemError::new(
4848                FileSystemErrorKind::InvalidOperation,
4849                "Cannot delete '.' or '..' entries",
4850            ));
4851        }
4852
4853        let ext2_parent = parent.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
4854            FileSystemError::new(
4855                FileSystemErrorKind::NotSupported,
4856                "Invalid node type for ext2",
4857            )
4858        })?;
4859
4860        // Check if it's a directory
4861        match ext2_parent.file_type() {
4862            Ok(FileType::Directory) => {}
4863            Ok(_) => {
4864                return Err(FileSystemError::new(
4865                    FileSystemErrorKind::NotADirectory,
4866                    "Parent is not a directory",
4867                ));
4868            }
4869            Err(e) => return Err(e),
4870        }
4871
4872        // Try to lookup the file to ensure it exists and get its inode number
4873        let node = self.lookup(parent, name)?;
4874        let ext2_node = node.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
4875            FileSystemError::new(
4876                FileSystemErrorKind::NotSupported,
4877                "Invalid node type for ext2",
4878            )
4879        })?;
4880
4881        let inode_number = ext2_node.inode_number();
4882
4883        // Check if the node being deleted is a directory
4884        let is_directory = match ext2_node.file_type() {
4885            Ok(FileType::Directory) => true,
4886            _ => false,
4887        };
4888
4889        // Remove the directory entry from the parent directory
4890        self.remove_directory_entry(ext2_parent.inode_number(), name)?;
4891
4892        // If deleting a directory, update parent directory's link count
4893        // (removing the ".." entry decrements parent's link count)
4894        if is_directory {
4895            let mut parent_inode = self.read_inode(ext2_parent.inode_number())?;
4896            let current_links = u16::from_le(parent_inode.links_count);
4897            if current_links > 0 {
4898                parent_inode.links_count = (current_links - 1).to_le();
4899                self.write_inode(ext2_parent.inode_number(), &parent_inode)?;
4900            }
4901        }
4902
4903        // Free the inode and its data blocks
4904        self.free_inode(inode_number)?;
4905
4906        // Invalidate page cache entries for this file to avoid stale data after delete/recreate
4907        {
4908            use crate::fs::vfs_v2::cache::CacheId;
4909            use crate::mem::page_cache::PageCacheManager;
4910            let fs_id = self.fs_id().get();
4911            let cache_id = CacheId::new((fs_id << 32) | (inode_number as u64));
4912            PageCacheManager::global().invalidate(cache_id);
4913        }
4914
4915        Ok(())
4916    }
4917
4918    fn root_node(&self) -> Arc<dyn VfsNode> {
4919        self.root.read().clone()
4920    }
4921
    /// Human-readable name of this filesystem instance (set at construction).
    fn name(&self) -> &str {
        &self.name
    }
4925
4926    fn as_any(&self) -> &dyn Any {
4927        self
4928    }
4929}
4930
4931/// Register the ext2 driver with the filesystem driver manager
4932fn register_driver() {
4933    let manager = get_fs_driver_manager();
4934    manager.register_driver(Box::new(Ext2Driver));
4935}
4936
// Run `register_driver` during kernel driver initialization so the ext2
// filesystem type is available before any mount is attempted.
driver_initcall!(register_driver);