1use alloc::{
25 boxed::Box,
26 collections::BTreeMap,
27 format,
28 string::{String, ToString},
29 sync::Arc,
30 vec,
31 vec::Vec,
32};
33use core::{any::Any, mem};
34use hashbrown::HashMap;
35use spin::{Mutex, rwlock::RwLock};
36
37use crate::{
38 DeviceManager,
39 device::block::BlockDevice,
40 driver_initcall,
41 fs::{
42 FileObject, FileSystemError, FileSystemErrorKind, FileType, SocketFileInfo,
43 get_fs_driver_manager, params::FileSystemParams,
44 },
45 profile_scope,
46 task::mytask,
47};
48
49use super::super::{
50 core::{DirectoryEntryInternal, FileSystemId, FileSystemOperations, VfsNode},
51 manager::get_global_vfs_manager,
52};
53
54pub mod driver;
55pub mod node;
56pub mod structures;
57
58#[cfg(test)]
59pub mod tests;
60
61#[cfg(test)]
62pub mod char_device_tests;
63
64pub use driver::Ext2Driver;
65pub use node::{Ext2CharDeviceFileObject, Ext2DirectoryObject, Ext2FileObject, Ext2Node};
66pub use structures::*;
67
/// Mount-time parameters for an ext2 filesystem instance.
#[derive(Debug, Clone)]
pub struct Ext2Params {
    /// Path to the backing block device node (e.g. "/dev/vda"), if given.
    pub device_path: Option<String>,
    /// Resolved device ID in the `DeviceManager`; filled in by `resolve_device`.
    pub device_id: Option<usize>,
    /// Remaining key/value mount options (bare flags are stored as "true").
    pub options: BTreeMap<String, String>,
}
81
82impl Ext2Params {
83 pub fn new() -> Self {
85 Self {
86 device_path: None,
87 device_id: None,
88 options: BTreeMap::new(),
89 }
90 }
91
92 pub fn from_option_string(options: &str) -> Result<Self, FileSystemError> {
97 let mut params = Self::new();
98
99 for option in options.split(',') {
100 let option = option.trim();
101 if option.is_empty() {
102 continue;
103 }
104
105 if let Some((key, value)) = option.split_once('=') {
106 match key {
107 "device" => {
108 params.device_path = Some(value.to_string());
109 }
110 _ => {
111 params.options.insert(key.to_string(), value.to_string());
112 }
113 }
114 } else {
115 params
117 .options
118 .insert(option.to_string(), "true".to_string());
119 }
120 }
121
122 if params.device_path.is_none() {
123 return Err(FileSystemError::new(
124 FileSystemErrorKind::InvalidData,
125 "Device path is required for ext2 filesystem",
126 ));
127 }
128
129 Ok(params)
130 }
131
132 pub fn resolve_device(&mut self) -> Result<(), FileSystemError> {
134 if let Some(device_path) = &self.device_path {
135 let vfs_manager = {
137 if let Some(task) = mytask() {
138 task.vfs
139 .read()
140 .as_ref()
141 .cloned()
142 .unwrap_or_else(|| get_global_vfs_manager())
143 } else {
144 get_global_vfs_manager()
145 }
146 };
147
148 let (entry, _mount_point) = vfs_manager.resolve_path(device_path).map_err(|e| {
150 FileSystemError::new(
151 FileSystemErrorKind::DeviceError,
152 format!("Failed to resolve device path '{}': {:?}", device_path, e),
153 )
154 })?;
155
156 let node = entry.node();
158 let metadata = node.metadata().map_err(|e| {
159 FileSystemError::new(
160 FileSystemErrorKind::DeviceError,
161 format!("Failed to get device metadata: {:?}", e),
162 )
163 })?;
164
165 if let FileType::BlockDevice(device_info) = metadata.file_type {
167 self.device_id = Some(device_info.device_id);
168 Ok(())
169 } else {
170 Err(FileSystemError::new(
171 FileSystemErrorKind::DeviceError,
172 format!("'{}' is not a block device", device_path),
173 ))
174 }
175 } else {
176 Err(FileSystemError::new(
177 FileSystemErrorKind::InvalidData,
178 "No device path specified",
179 ))
180 }
181 }
182
183 pub fn get_device_id(&self) -> Option<usize> {
185 self.device_id
186 }
187
188 pub fn get_option(&self, key: &str) -> Option<&String> {
190 self.options.get(key)
191 }
192
193 pub fn is_readonly(&self) -> bool {
195 self.options.get("ro").is_some()
196 || (self.options.get("rw").is_none() && self.options.get("ro").is_none())
197 }
198
199 pub fn create_filesystem(&mut self) -> Result<Arc<Ext2FileSystem>, FileSystemError> {
201 if self.device_id.is_none() {
205 self.resolve_device()?;
207 }
208
209 let device_id = self.device_id.ok_or_else(|| {
211 FileSystemError::new(FileSystemErrorKind::DeviceError, "Device ID not resolved")
213 })?;
214
215 let device = DeviceManager::get_manager()
219 .get_device(device_id)
220 .ok_or_else(|| {
221 FileSystemError::new(
223 FileSystemErrorKind::DeviceError,
224 format!("Device with ID {} not found", device_id),
225 )
226 })?;
227
228 let block_device = device.into_block_device().ok_or_else(|| {
232 FileSystemError::new(
234 FileSystemErrorKind::DeviceError,
235 "Device is not a block device",
236 )
237 })?;
238
239 Ext2FileSystem::new(block_device)
243 }
244}
245impl FileSystemParams for Ext2Params {
246 fn as_any(&self) -> &dyn Any {
247 self
248 }
249
250 fn to_string_map(&self) -> BTreeMap<String, String> {
251 let mut map = self.options.clone();
252 if let Some(device_path) = &self.device_path {
253 map.insert("device".to_string(), device_path.clone());
254 }
255 map
256 }
257
258 fn from_string_map(map: &BTreeMap<String, String>) -> Result<Self, String> {
259 let mut params = Ext2Params::new();
260
261 for (key, value) in map {
262 match key.as_str() {
263 "device" => {
264 params.device_path = Some(value.clone());
265 }
266 _ => {
267 params.options.insert(key.clone(), value.clone());
268 }
269 }
270 }
271
272 if params.device_path.is_none() {
273 return Err("Device path is required for ext2 filesystem".to_string());
274 }
275
276 Ok(params)
277 }
278}
279
/// An ext2 filesystem instance backed by a block device.
pub struct Ext2FileSystem {
    // Unique ID assigned to this mounted filesystem instance.
    fs_id: FileSystemId,
    // Underlying block device all reads go through.
    block_device: Arc<dyn BlockDevice>,
    // On-disk superblock, parsed once at mount time (boxed — it is large).
    superblock: Box<Ext2Superblock>,
    // Filesystem block size in bytes, derived from the superblock.
    block_size: u32,
    // Inode number of the root directory (EXT2_ROOT_INO).
    root_inode: u32,
    // Root VFS node, shareable/replaceable across threads.
    root: RwLock<Arc<Ext2Node>>,
    // Human-readable filesystem name ("ext2").
    name: String,
    // Monotonic counter used when handing out file object IDs.
    next_file_id: Mutex<u64>,
    // LRU cache of decoded inodes, keyed by inode number.
    inode_cache: Mutex<InodeLruCache>,
    // LRU cache of raw block contents, keyed by block number.
    block_cache: Mutex<BlockLruCache>,
}
307
/// One entry in the inode LRU cache's doubly-linked recency list.
#[derive(Debug)]
struct InodeLruNode {
    // On-disk inode number this entry caches.
    inode_num: u32,
    // Decoded inode contents.
    inode: Ext2Inode,
    // Value of the cache's access counter when this entry was last written.
    access_count: u64,
    // Neighbors in the LRU list (None at the ends).
    prev: Option<NodeId>,
    next: Option<NodeId>,
}
317
/// LRU cache of decoded inodes: a hash map keyed by inode number plus a
/// doubly-linked recency list threaded through `nodes` via `NodeId`s.
struct InodeLruCache {
    // inode number -> list node id.
    map: HashMap<u32, NodeId>,
    // list node id -> node (inode payload + prev/next links).
    nodes: HashMap<NodeId, InodeLruNode>,
    // Most-recently-used end of the list.
    head: Option<NodeId>,
    // Least-recently-used end of the list (eviction candidate).
    tail: Option<NodeId>,
    // Next list node id to hand out (wraps on overflow).
    next_id: NodeId,
    // Maximum number of cached inodes before eviction kicks in.
    max_size: usize,
    // Statistics: cache hits and misses.
    hits: u64,
    misses: u64,
    // Monotonic counter stamped onto entries on insert/update.
    access_counter: u64,
}
338
impl InodeLruCache {
    /// Creates an empty cache holding at most `max_size` inodes.
    fn new(max_size: usize) -> Self {
        Self {
            map: HashMap::new(),
            nodes: HashMap::new(),
            head: None,
            tail: None,
            next_id: 0,
            max_size,
            hits: 0,
            misses: 0,
            access_counter: 0,
        }
    }

    /// Looks up an inode, promoting it to most-recently-used on a hit.
    /// Returns a clone of the cached inode, or None on a miss.
    fn get(&mut self, inode_num: u32) -> Option<Ext2Inode> {
        if let Some(&node_id) = self.map.get(&inode_num) {
            self.hits += 1;
            self.move_to_head(node_id);
            self.nodes.get(&node_id).map(|node| node.inode.clone())
        } else {
            self.misses += 1;
            None
        }
    }

    /// Inserts or refreshes an inode, evicting the least-recently-used
    /// entry when the cache is full.
    fn insert(&mut self, inode_num: u32, inode: Ext2Inode) {
        self.access_counter += 1;

        // Already cached: overwrite in place and promote to the head.
        if let Some(&node_id) = self.map.get(&inode_num) {
            if let Some(node) = self.nodes.get_mut(&node_id) {
                node.inode = inode;
                node.access_count = self.access_counter;
            }
            self.move_to_head(node_id);
            return;
        }

        // Full: evict the tail (least recently used) first.
        if self.nodes.len() >= self.max_size {
            self.remove_tail();
        }

        // Allocate a fresh list node id (wrapping add — ids only collide
        // after 2^32 insertions).
        let new_node_id = self.next_id;
        self.next_id = self.next_id.wrapping_add(1);

        let new_node = InodeLruNode {
            inode_num,
            inode,
            access_count: self.access_counter,
            prev: None,
            next: self.head,
        };

        self.nodes.insert(new_node_id, new_node);
        self.map.insert(inode_num, new_node_id);

        // Link the new node in at the head of the LRU list.
        if let Some(old_head) = self.head {
            if let Some(old_head_node) = self.nodes.get_mut(&old_head) {
                old_head_node.prev = Some(new_node_id);
            }
        }

        self.head = Some(new_node_id);
        if self.tail.is_none() {
            self.tail = Some(new_node_id);
        }
    }

    /// Drops the entry for `inode_num`, if present.
    fn remove(&mut self, inode_num: u32) {
        if let Some(&node_id) = self.map.get(&inode_num) {
            self.remove_node(node_id);
            self.map.remove(&inode_num);
        }
    }

    /// Moves an existing node to the head (most-recently-used) position.
    fn move_to_head(&mut self, node_id: NodeId) {
        // Already at the head: nothing to do.
        if self.head == Some(node_id) {
            return;
        }

        self.remove_node_from_list(node_id);

        // Re-link the node in front of the current head.
        if let Some(node) = self.nodes.get_mut(&node_id) {
            node.prev = None;
            node.next = self.head;
        }

        if let Some(old_head) = self.head {
            if let Some(old_head_node) = self.nodes.get_mut(&old_head) {
                old_head_node.prev = Some(node_id);
            }
        }

        self.head = Some(node_id);

        // Single-element case: after unlinking, the list was empty, so the
        // node is also the new tail.
        if self.tail.is_none() {
            self.tail = Some(node_id);
        }
    }

    /// Evicts the tail (least-recently-used) entry, removing its map entry
    /// as well.
    fn remove_tail(&mut self) {
        if let Some(tail_id) = self.tail {
            if let Some(tail_node) = self.nodes.get(&tail_id) {
                let inode_num = tail_node.inode_num;
                self.map.remove(&inode_num);
            }
            self.remove_node(tail_id);
        }
    }

    /// Unlinks a node from the list and frees its storage.
    fn remove_node(&mut self, node_id: NodeId) {
        self.remove_node_from_list(node_id);
        self.nodes.remove(&node_id);
    }

    /// Unlinks a node from the doubly-linked list, fixing up neighbor links
    /// and the head/tail pointers; the node itself stays in `nodes`.
    fn remove_node_from_list(&mut self, node_id: NodeId) {
        if let Some(node) = self.nodes.get(&node_id) {
            let prev_id = node.prev;
            let next_id = node.next;

            if let Some(prev_id) = prev_id {
                if let Some(prev_node) = self.nodes.get_mut(&prev_id) {
                    prev_node.next = next_id;
                }
            } else {
                // Node was the head.
                self.head = next_id;
            }

            if let Some(next_id) = next_id {
                if let Some(next_node) = self.nodes.get_mut(&next_id) {
                    next_node.prev = prev_id;
                }
            } else {
                // Node was the tail.
                self.tail = prev_id;
            }
        }
    }

    /// Number of cached inodes.
    fn len(&self) -> usize {
        self.nodes.len()
    }

    /// Returns (hits, misses, current size).
    fn get_stats(&self) -> (u64, u64, usize) {
        (self.hits, self.misses, self.nodes.len())
    }

    /// Prints hit/miss statistics on the early console.
    fn print_stats(&self, cache_name: &str) {
        let total = self.hits + self.misses;
        let hit_rate = if total > 0 {
            (self.hits * 100) / total
        } else {
            0
        };
        crate::early_println!(
            "[ext2] {} Cache Stats: hits={}, misses={}, size={}, hit_rate={}%",
            cache_name,
            self.hits,
            self.misses,
            self.nodes.len(),
            hit_rate
        );
    }
}
527
/// Identifier for nodes in the LRU linked lists (both caches).
type NodeId = u32;
530
/// One entry in the block LRU cache's doubly-linked recency list.
#[derive(Debug)]
struct LruNode {
    // Absolute block number this entry caches.
    block_num: u64,
    // Raw block contents.
    data: Vec<u8>,
    // Neighbors in the LRU list (None at the ends).
    prev: Option<NodeId>,
    next: Option<NodeId>,
}
539
/// LRU cache of raw disk blocks: a hash map keyed by block number plus a
/// doubly-linked recency list threaded through `nodes` via `NodeId`s.
struct BlockLruCache {
    // block number -> list node id.
    map: HashMap<u64, NodeId>,
    // list node id -> node (block data + prev/next links).
    nodes: HashMap<NodeId, LruNode>,
    // Most-recently-used end of the list.
    head: Option<NodeId>,
    // Least-recently-used end of the list (eviction candidate).
    tail: Option<NodeId>,
    // Next list node id to hand out.
    next_id: NodeId,
    // Maximum number of cached blocks before eviction kicks in.
    max_size: usize,
    // Statistics: cache hits and misses.
    hits: u64,
    misses: u64,
}
559
560impl BlockLruCache {
561 fn new(max_size: usize) -> Self {
562 Self {
563 map: HashMap::new(),
564 nodes: HashMap::new(),
565 head: None,
566 tail: None,
567 next_id: 0,
568 max_size,
569 hits: 0,
570 misses: 0,
571 }
572 }
573
574 fn get(&mut self, block_num: u64) -> Option<Vec<u8>> {
575 if let Some(&node_id) = self.map.get(&block_num) {
576 self.hits += 1;
577 self.move_to_head(node_id);
579 self.nodes.get(&node_id).map(|node| node.data.clone())
581 } else {
582 self.misses += 1;
583 None
584 }
585 }
586
587 fn move_to_head(&mut self, node_id: NodeId) {
589 if Some(node_id) == self.head {
590 return; }
592
593 self.remove_from_list(node_id);
595
596 self.add_to_head(node_id);
598 }
599
600 fn remove_from_list(&mut self, node_id: NodeId) {
602 if let Some(node) = self.nodes.get(&node_id) {
603 let prev = node.prev;
604 let next = node.next;
605
606 if let Some(prev_id) = prev {
608 if let Some(prev_node) = self.nodes.get_mut(&prev_id) {
609 prev_node.next = next;
610 }
611 } else {
612 self.head = next;
614 }
615
616 if let Some(next_id) = next {
618 if let Some(next_node) = self.nodes.get_mut(&next_id) {
619 next_node.prev = prev;
620 }
621 } else {
622 self.tail = prev;
624 }
625 }
626 }
627
628 fn add_to_head(&mut self, node_id: NodeId) {
630 if let Some(node) = self.nodes.get_mut(&node_id) {
631 node.prev = None;
632 node.next = self.head;
633 }
634
635 if let Some(old_head) = self.head {
636 if let Some(old_head_node) = self.nodes.get_mut(&old_head) {
637 old_head_node.prev = Some(node_id);
638 }
639 } else {
640 self.tail = Some(node_id);
642 }
643
644 self.head = Some(node_id);
645 }
646
647 fn insert(&mut self, block_num: u64, block_data: Vec<u8>) {
648 if let Some(&existing_id) = self.map.get(&block_num) {
650 if let Some(existing_node) = self.nodes.get_mut(&existing_id) {
651 existing_node.data = block_data;
652 }
653 self.move_to_head(existing_id);
654 return;
655 }
656
657 if self.nodes.len() >= self.max_size {
659 if let Some(tail_id) = self.tail {
660 if let Some(tail_node) = self.nodes.get(&tail_id) {
661 let tail_block_num = tail_node.block_num;
662 self.map.remove(&tail_block_num);
663 }
664 self.remove_from_list(tail_id);
665 self.nodes.remove(&tail_id);
666 }
667 }
668
669 let node_id = self.next_id;
671 self.next_id += 1;
672
673 let new_node = LruNode {
674 block_num,
675 data: block_data,
676 prev: None,
677 next: None,
678 };
679
680 self.nodes.insert(node_id, new_node);
682 self.map.insert(block_num, node_id);
683
684 self.add_to_head(node_id);
686 }
687
688 fn remove(&mut self, block_num: u64) {
689 if let Some(&node_id) = self.map.get(&block_num) {
690 self.map.remove(&block_num);
691 self.remove_from_list(node_id);
692 self.nodes.remove(&node_id);
693 }
694 }
695
696 fn len(&self) -> usize {
697 self.nodes.len()
698 }
699
700 fn get_stats(&self) -> (u64, u64, usize) {
702 (self.hits, self.misses, self.nodes.len())
703 }
704
705 fn print_stats(&self, cache_name: &str) {
707 let total = self.hits + self.misses;
708 let hit_rate = if total > 0 {
709 (self.hits * 100) / total
710 } else {
711 0
712 };
713 crate::early_println!(
714 "[ext2] {} Cache Stats: hits={}, misses={}, size={}, hit_rate={}%",
715 cache_name,
716 self.hits,
717 self.misses,
718 self.nodes.len(),
719 hit_rate
720 );
721 }
722}
723
724impl Ext2FileSystem {
    /// Mounts an ext2 filesystem from `block_device`.
    ///
    /// Reads the superblock (1024 bytes starting at byte offset 1024, i.e.
    /// 512-byte sectors 2..4), parses it, and builds the root node plus the
    /// inode/block LRU caches.
    ///
    /// # Errors
    /// `IoError` if the superblock cannot be read; parse errors propagate
    /// from `Ext2Superblock::from_bytes_boxed`.
    pub fn new(block_device: Arc<dyn BlockDevice>) -> Result<Arc<Self>, FileSystemError> {
        // The ext2 superblock always lives at byte offset 1024 and is 1024
        // bytes long: two 512-byte sectors starting at sector 2.
        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: 2,
            sector_count: 2,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; 1024],
        });

        block_device.enqueue_request(request);
        let results = block_device.process_requests();

        let superblock_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read ext2 superblock",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        let superblock = Ext2Superblock::from_bytes_boxed(&superblock_data)?;

        let block_size = superblock.get_block_size();
        let root_inode = EXT2_ROOT_INO;

        // Root directory node. The third argument (1) presumably seeds a
        // link/file-id count — TODO confirm against Ext2Node::new.
        let root = Ext2Node::new(
            root_inode,
            FileType::Directory,
            1, );

        let fs = Arc::new(Self {
            fs_id: FileSystemId::new(),
            block_device,
            superblock,
            block_size,
            root_inode,
            root: RwLock::new(Arc::new(root)),
            name: "ext2".to_string(),
            // File IDs start at 2; both LRU caches hold up to 8192 entries.
            next_file_id: Mutex::new(2), inode_cache: Mutex::new(InodeLruCache::new(8192)),
            block_cache: Mutex::new(BlockLruCache::new(8192)),
        });

        // Hand the root node a weak back-reference to this filesystem so it
        // can reach it without creating an Arc reference cycle.
        let fs_weak = Arc::downgrade(&(fs.clone() as Arc<dyn FileSystemOperations>));
        fs.root.read().set_filesystem(fs_weak);

        Ok(fs)
    }
789
790 pub fn new_from_device_id(device_id: usize) -> Result<Arc<Self>, FileSystemError> {
792 let device = DeviceManager::get_manager()
794 .get_device(device_id)
795 .ok_or_else(|| {
796 FileSystemError::new(
797 FileSystemErrorKind::DeviceError,
798 format!("Device with ID {} not found", device_id),
799 )
800 })?;
801
802 let block_device = device.into_block_device().ok_or_else(|| {
804 FileSystemError::new(
805 FileSystemErrorKind::DeviceError,
806 format!("Device with ID {} is not a block device", device_id),
807 )
808 })?;
809
810 Self::new(block_device)
812 }
813
814 pub fn new_from_params(params: &Ext2Params) -> Result<Arc<Self>, FileSystemError> {
816 if let Some(device_id) = params.get_device_id() {
817 Self::new_from_device_id(device_id)
818 } else {
819 Err(FileSystemError::new(
820 FileSystemErrorKind::InvalidData,
821 "Device ID not resolved in parameters",
822 ))
823 }
824 }
825
    /// Reads and decodes the on-disk inode `inode_num`, via the inode cache.
    ///
    /// On a cache miss this reads the inode's block group descriptor to
    /// locate the group's inode table, then reads the block containing the
    /// inode itself.
    ///
    /// # Errors
    /// `IoError` on device failures; parse errors propagate from the
    /// structure decoders.
    pub fn read_inode(&self, inode_num: u32) -> Result<Ext2Inode, FileSystemError> {
        profile_scope!("ext2::read_inode");
        // Fast path: serve from the LRU cache (lock scope kept minimal).
        {
            let mut cache = self.inode_cache.lock();
            if let Some(inode) = cache.get(inode_num) {
                return Ok(inode);
            }
        }

        // Inode numbers are 1-based: find the block group and the index of
        // the inode inside that group.
        let group = (inode_num - 1) / self.superblock.inodes_per_group;
        let local_inode = (inode_num - 1) % self.superblock.inodes_per_group;

        // The block group descriptor table starts in the block right after
        // the superblock: block 2 for 1 KiB blocks, block 1 otherwise.
        let bgd_table_start_block = if self.block_size == 1024 { 2 } else { 1 };
        let bgd_block = bgd_table_start_block
            + (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32) / self.block_size;
        let bgd_block_sector = self.block_to_sector(bgd_block as u64);

        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bgd_block_sector,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let bgd_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => &result.request.buffer,
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read block group descriptor",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        // Offset of this group's descriptor within the block just read.
        let bgd_offset =
            (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32) % self.block_size;
        let bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data[bgd_offset as usize..])?;

        // Locate the inode inside the group's inode table (inode size comes
        // from the superblock — it may exceed 128 bytes on rev 1).
        let inode_size = self.superblock.get_inode_size() as u32;
        let inode_block = bgd.inode_table + (local_inode * inode_size) / self.block_size;
        let inode_offset = (local_inode * inode_size) % self.block_size;

        #[cfg(test)]
        crate::early_println!(
            "[ext2] read_inode: Reading inode {} from block {}, offset {}, inode_size={}",
            inode_num,
            inode_block,
            inode_offset,
            inode_size
        );

        let inode_sector = self.block_to_sector(inode_block as u64);
        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: inode_sector,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let inode_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => &result.request.buffer,
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read inode",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        let inode = Ext2Inode::from_bytes(&inode_data[inode_offset as usize..])?;

        // Populate the cache for subsequent lookups.
        {
            let mut cache = self.inode_cache.lock();
            cache.insert(inode_num, inode);
        }

        // Test-only diagnostics: print cache stats every 50 calls.
        // NOTE(review): the `static mut` counter is not thread-safe; it is
        // compiled only into test builds.
        #[cfg(test)]
        unsafe {
            static mut INODE_CALL_COUNT: u64 = 0;
            INODE_CALL_COUNT += 1;
            if INODE_CALL_COUNT % 50 == 0 {
                let cache = self.inode_cache.lock();
                cache.print_stats("Inode");
            }
        }

        Ok(inode)
    }
949
950 pub fn read_directory_entries(
952 &self,
953 inode: &Ext2Inode,
954 ) -> Result<Vec<Ext2DirectoryEntry>, FileSystemError> {
955 profile_scope!("ext2::read_directory_entries");
956
957 let mut entries = Vec::new();
958 let num_blocks = (inode.size as u64 + self.block_size as u64 - 1) / self.block_size as u64;
959
960 if num_blocks == 0 {
961 return Ok(entries);
962 }
963
964 let block_nums = self.get_inode_blocks(inode, 0, num_blocks)?;
966
967 let mut valid_blocks = Vec::new();
969 for &block_num in &block_nums {
970 if block_num > 0 {
971 valid_blocks.push(block_num);
972 }
973 }
974
975 if valid_blocks.is_empty() {
976 return Ok(entries);
977 }
978
979 let blocks_data = self.read_blocks_cached(&valid_blocks)?;
981
982 for block_data in blocks_data {
984 let mut offset = 0;
985 while offset < self.block_size as usize {
986 if offset + 8 > self.block_size as usize {
987 break;
988 }
989
990 let entry = Ext2DirectoryEntry::from_bytes(&block_data[offset..])?;
991 if entry.entry.inode == 0 {
992 let rec_len = entry.entry.rec_len;
995 if rec_len == 0 {
996 break;
997 }
998 offset += rec_len as usize;
999 continue;
1000 }
1001
1002 let rec_len = entry.entry.rec_len;
1003 entries.push(entry);
1004 offset += rec_len as usize;
1005
1006 if rec_len == 0 {
1007 break;
1008 }
1009 }
1010 }
1011
1012 Ok(entries)
1013 }
1014
1015 fn get_inode_block(
1017 &self,
1018 inode: &Ext2Inode,
1019 logical_block: u64,
1020 ) -> Result<u64, FileSystemError> {
1021 profile_scope!("ext2::get_inode_block");
1022 let blocks_per_indirect = self.block_size / 4; if logical_block < 12 {
1025 Ok(inode.block[logical_block as usize] as u64)
1027 } else if logical_block < 12 + blocks_per_indirect as u64 {
1028 let indirect_block = inode.block[12] as u64;
1030 if indirect_block == 0 {
1031 return Ok(0);
1032 }
1033
1034 let index = logical_block - 12;
1035 let indirect_data = self.read_block_cached(indirect_block)?;
1036
1037 let block_ptr = u32::from_le_bytes([
1038 indirect_data[index as usize * 4],
1039 indirect_data[index as usize * 4 + 1],
1040 indirect_data[index as usize * 4 + 2],
1041 indirect_data[index as usize * 4 + 3],
1042 ]);
1043
1044 Ok(block_ptr as u64)
1045 } else if logical_block
1046 < 12 + blocks_per_indirect as u64
1047 + blocks_per_indirect as u64 * blocks_per_indirect as u64
1048 {
1049 let double_indirect_block = inode.block[13] as u64;
1051 if double_indirect_block == 0 {
1052 return Ok(0);
1053 }
1054
1055 let offset_in_double = logical_block - 12 - blocks_per_indirect as u64;
1056 let first_indirect_index = offset_in_double / blocks_per_indirect as u64;
1057 let second_indirect_index = offset_in_double % blocks_per_indirect as u64;
1058
1059 let double_indirect_data = self.read_block_cached(double_indirect_block)?;
1061
1062 let first_indirect_ptr = u32::from_le_bytes([
1064 double_indirect_data[first_indirect_index as usize * 4],
1065 double_indirect_data[first_indirect_index as usize * 4 + 1],
1066 double_indirect_data[first_indirect_index as usize * 4 + 2],
1067 double_indirect_data[first_indirect_index as usize * 4 + 3],
1068 ]);
1069
1070 if first_indirect_ptr == 0 {
1071 return Ok(0);
1072 }
1073
1074 let first_indirect_data = self.read_block_cached(first_indirect_ptr as u64)?;
1076
1077 let block_ptr = u32::from_le_bytes([
1079 first_indirect_data[second_indirect_index as usize * 4],
1080 first_indirect_data[second_indirect_index as usize * 4 + 1],
1081 first_indirect_data[second_indirect_index as usize * 4 + 2],
1082 first_indirect_data[second_indirect_index as usize * 4 + 3],
1083 ]);
1084
1085 Ok(block_ptr as u64)
1086 } else {
1087 Err(FileSystemError::new(
1089 FileSystemErrorKind::NotSupported,
1090 "Triple indirect blocks not yet supported",
1091 ))
1092 }
1093 }
1094
    /// Resolves `count` consecutive logical blocks of `inode`, starting at
    /// `start_logical_block`, into absolute block numbers in one pass.
    ///
    /// Batch variant of `get_inode_block`: each indirect block touched by
    /// the range is read once instead of once per block. Holes (unallocated
    /// blocks) are reported as 0 in the result.
    ///
    /// # Errors
    /// `NotSupported` when the range reaches the triple-indirect area.
    fn get_inode_blocks(
        &self,
        inode: &Ext2Inode,
        start_logical_block: u64,
        count: u64,
    ) -> Result<Vec<u64>, FileSystemError> {
        profile_scope!("ext2::get_inode_blocks");
        if count == 0 {
            return Ok(Vec::new());
        }

        // Number of 32-bit block pointers held by one indirect block.
        let blocks_per_indirect = self.block_size / 4;
        let mut result = Vec::with_capacity(count as usize);

        let mut current_block = start_logical_block;
        let end_block = start_logical_block + count;

        // Walk the range, handling each addressing tier in bulk.
        while current_block < end_block {
            if current_block < 12 {
                // Direct blocks (logical 0..12) come straight from the inode.
                let direct_end = (end_block).min(12);
                for i in current_block..direct_end {
                    result.push(inode.block[i as usize] as u64);
                }
                current_block = direct_end;
            } else if current_block < 12 + blocks_per_indirect as u64 {
                // Single-indirect tier.
                let indirect_block = inode.block[12] as u64;
                if indirect_block == 0 {
                    // The whole indirect block is a hole.
                    let indirect_end = (end_block).min(12 + blocks_per_indirect as u64);
                    for _ in current_block..indirect_end {
                        result.push(0);
                    }
                    current_block = indirect_end;
                } else {
                    let indirect_data = self.read_block_cached(indirect_block)?;
                    let indirect_end = (end_block).min(12 + blocks_per_indirect as u64);

                    for logical_block in current_block..indirect_end {
                        let index = logical_block - 12;
                        let block_ptr = u32::from_le_bytes([
                            indirect_data[index as usize * 4],
                            indirect_data[index as usize * 4 + 1],
                            indirect_data[index as usize * 4 + 2],
                            indirect_data[index as usize * 4 + 3],
                        ]);
                        result.push(block_ptr as u64);
                    }
                    current_block = indirect_end;
                }
            } else if current_block
                < 12 + blocks_per_indirect as u64
                    + blocks_per_indirect as u64 * blocks_per_indirect as u64
            {
                // Double-indirect tier.
                let double_indirect_block = inode.block[13] as u64;
                if double_indirect_block == 0 {
                    // The whole double-indirect area is a hole.
                    let double_indirect_end = (end_block).min(
                        12 + blocks_per_indirect as u64
                            + blocks_per_indirect as u64 * blocks_per_indirect as u64,
                    );
                    for _ in current_block..double_indirect_end {
                        result.push(0);
                    }
                    current_block = double_indirect_end;
                } else {
                    let double_indirect_data = self.read_block_cached(double_indirect_block)?;
                    let double_indirect_end = (end_block).min(
                        12 + blocks_per_indirect as u64
                            + blocks_per_indirect as u64 * blocks_per_indirect as u64,
                    );

                    // Index of the first/last single-indirect block the
                    // range touches, and the starting offset in the first.
                    let first_single_indirect_start = 12 + blocks_per_indirect as u64;
                    let first_single_indirect_index = ((current_block
                        - first_single_indirect_start)
                        / blocks_per_indirect as u64)
                        as usize;
                    let offset_in_first_single_indirect = ((current_block
                        - first_single_indirect_start)
                        % blocks_per_indirect as u64)
                        as usize;

                    let last_single_indirect_index =
                        ((double_indirect_end - 1 - first_single_indirect_start)
                            / blocks_per_indirect as u64) as usize;

                    for single_indirect_index in
                        first_single_indirect_index..=last_single_indirect_index
                    {
                        let single_indirect_ptr = u32::from_le_bytes([
                            double_indirect_data[single_indirect_index * 4],
                            double_indirect_data[single_indirect_index * 4 + 1],
                            double_indirect_data[single_indirect_index * 4 + 2],
                            double_indirect_data[single_indirect_index * 4 + 3],
                        ]) as u64;

                        if single_indirect_ptr == 0 {
                            // Missing single-indirect block: every block it
                            // would cover (clamped to the requested range)
                            // is a hole.
                            let blocks_in_this_indirect = if single_indirect_index
                                == last_single_indirect_index
                            {
                                let offset_in_last_single_indirect =
                                    ((double_indirect_end - 1 - first_single_indirect_start)
                                        % blocks_per_indirect as u64)
                                        as usize
                                        + 1;
                                if single_indirect_index == first_single_indirect_index {
                                    offset_in_last_single_indirect - offset_in_first_single_indirect
                                } else {
                                    offset_in_last_single_indirect
                                }
                            } else if single_indirect_index == first_single_indirect_index {
                                blocks_per_indirect as usize - offset_in_first_single_indirect
                            } else {
                                blocks_per_indirect as usize
                            };

                            for _ in 0..blocks_in_this_indirect {
                                result.push(0);
                            }
                        } else {
                            let single_indirect_data =
                                self.read_block_cached(single_indirect_ptr)?;

                            // Clamp to the requested range within this
                            // single-indirect block: partial at both ends.
                            let start_offset =
                                if single_indirect_index == first_single_indirect_index {
                                    offset_in_first_single_indirect
                                } else {
                                    0
                                };
                            let end_offset = if single_indirect_index == last_single_indirect_index
                            {
                                ((double_indirect_end - 1 - first_single_indirect_start)
                                    % blocks_per_indirect as u64)
                                    as usize
                                    + 1
                            } else {
                                blocks_per_indirect as usize
                            };

                            for offset in start_offset..end_offset {
                                let block_ptr = u32::from_le_bytes([
                                    single_indirect_data[offset * 4],
                                    single_indirect_data[offset * 4 + 1],
                                    single_indirect_data[offset * 4 + 2],
                                    single_indirect_data[offset * 4 + 3],
                                ]);
                                result.push(block_ptr as u64);
                            }
                        }
                    }
                    current_block = double_indirect_end;
                }
            } else {
                // Triple-indirect tier: not implemented.
                return Err(FileSystemError::new(
                    FileSystemErrorKind::NotSupported,
                    "Triple indirect blocks not yet supported",
                ));
            }
        }

        Ok(result)
    }
1268
1269 pub fn read_file_content(
1271 &self,
1272 inode_num: u32,
1273 size: usize,
1274 ) -> Result<Vec<u8>, FileSystemError> {
1275 profile_scope!("ext2::read_file_content");
1276 let inode = self.read_inode(inode_num)?;
1277 let mut content = Vec::with_capacity(size);
1278
1279 let num_blocks = (size as u64 + self.block_size as u64 - 1) / self.block_size as u64;
1280 if num_blocks == 0 {
1281 return Ok(content);
1282 }
1283
1284 let block_nums = self.get_inode_blocks(&inode, 0, num_blocks)?;
1286
1287 let mut block_nums_to_read = Vec::new();
1288 for &block_num in block_nums.iter() {
1289 if block_num > 0 {
1290 block_nums_to_read.push(block_num);
1291 } else {
1292 if !block_nums_to_read.is_empty() {
1294 let blocks_data = self.read_blocks_cached(&block_nums_to_read)?;
1295 for data in blocks_data {
1296 content.extend_from_slice(&data);
1297 }
1298 block_nums_to_read.clear();
1299 }
1300 let len_to_add = core::cmp::min(self.block_size as usize, size - content.len());
1302 content.extend(core::iter::repeat(0).take(len_to_add));
1303 }
1304 }
1305
1306 if !block_nums_to_read.is_empty() {
1307 let blocks_data = self.read_blocks_cached(&block_nums_to_read)?;
1308 for data in blocks_data {
1309 content.extend_from_slice(&data);
1310 }
1311 }
1312
1313 content.truncate(size);
1315 Ok(content)
1316 }
1317
    /// Reads one page of file content into the physical page at `paddr`.
    ///
    /// The destination page is zero-filled first, so holes and the region
    /// past EOF read as zeros. `page_index` is the page number within the
    /// file (byte offset = page_index * PAGE_SIZE).
    ///
    /// NOTE(review): `paddr` must reference a writable, PAGE_SIZE-byte
    /// region; this is a caller contract and is not checked here.
    pub fn read_page_content(
        &self,
        inode_num: u32,
        page_index: u64,
        paddr: usize,
    ) -> Result<(), FileSystemError> {
        use crate::environment::PAGE_SIZE;

        profile_scope!("ext2::read_page_content");

        let inode = self.read_inode(inode_num)?;
        let file_size = inode.size as u64;
        let page_offset = page_index * PAGE_SIZE as u64;

        // SAFETY: caller guarantees `paddr` addresses a writable PAGE_SIZE
        // region (see doc comment). Zero-fill so unwritten ranges are zeros.
        unsafe {
            core::ptr::write_bytes(paddr as *mut u8, 0, PAGE_SIZE);
        }

        // Page entirely past EOF: the zeroed page is the correct content.
        if page_offset >= file_size {
            return Ok(());
        }

        // Bytes of real file data in this page (partial for the last page).
        let bytes_in_page = if page_offset + PAGE_SIZE as u64 > file_size {
            (file_size - page_offset) as usize
        } else {
            PAGE_SIZE
        };

        // Range of filesystem blocks overlapping this page.
        let start_block = page_offset / self.block_size as u64;
        let end_block = (page_offset + bytes_in_page as u64 + self.block_size as u64 - 1)
            / self.block_size as u64;
        let num_blocks = end_block - start_block;

        if num_blocks == 0 {
            return Ok(());
        }

        let block_nums = self.get_inode_blocks(&inode, start_block, num_blocks)?;

        let mut page_ptr = paddr as *mut u8;
        let mut bytes_written = 0usize;

        for (i, &block_num) in block_nums.iter().enumerate() {
            // Hole: leave the pre-zeroed bytes in place and just advance.
            if block_num == 0 {
                let bytes_to_skip =
                    core::cmp::min(self.block_size as usize, bytes_in_page - bytes_written);
                // SAFETY: bytes_written + bytes_to_skip <= bytes_in_page <=
                // PAGE_SIZE, so the pointer stays within the page.
                unsafe {
                    page_ptr = page_ptr.add(bytes_to_skip);
                }
                bytes_written += bytes_to_skip;
                continue;
            }

            let block_data = self.read_block_cached(block_num)?;

            // The first block may start mid-block when the page is not
            // block-aligned; later blocks always start at offset 0.
            let block_offset = if i == 0 {
                (page_offset % self.block_size as u64) as usize
            } else {
                0
            };

            let bytes_to_copy = core::cmp::min(
                self.block_size as usize - block_offset,
                bytes_in_page - bytes_written,
            );

            // SAFETY: the source range (block_offset .. block_offset +
            // bytes_to_copy) lies within the block buffer, the destination
            // stays inside the page (bounded by bytes_in_page as above),
            // and the two buffers cannot overlap.
            unsafe {
                core::ptr::copy_nonoverlapping(
                    block_data.as_ptr().add(block_offset),
                    page_ptr,
                    bytes_to_copy,
                );
                page_ptr = page_ptr.add(bytes_to_copy);
            }

            bytes_written += bytes_to_copy;

            if bytes_written >= bytes_in_page {
                break;
            }
        }

        Ok(())
    }
1416
1417 pub fn write_page_content(
1421 &self,
1422 inode_num: u32,
1423 page_index: u64,
1424 paddr: usize,
1425 ) -> Result<(), FileSystemError> {
1426 profile_scope!("ext2::write_page_content");
1427
1428 let _ = (inode_num, page_index, paddr);
1430 Err(FileSystemError::new(
1431 FileSystemErrorKind::NotSupported,
1432 "Page writeback not yet implemented",
1433 ))
1434 }
1435
    /// Write an inode structure back to its slot in the on-disk inode table.
    ///
    /// Locates the slot for `inode_number` (inode numbers are 1-based) via
    /// the block group descriptor table, performs a read-modify-write of the
    /// inode-table block containing it, and on success refreshes the
    /// in-memory inode cache entry.
    ///
    /// # Errors
    /// - `IoError` when any block-device request fails or returns no result.
    /// - `InvalidData` when a computed offset falls outside the block.
    fn write_inode(&self, inode_number: u32, inode: &Ext2Inode) -> Result<(), FileSystemError> {
        profile_scope!("ext2::write_inode");
        // Map the 1-based inode number to (group, index-within-group).
        let inodes_per_group = self.superblock.inodes_per_group;
        let group_number = (inode_number - 1) / inodes_per_group;
        let inode_index = (inode_number - 1) % inodes_per_group;

        // The BGD table starts at block 2 for 1 KiB blocks, block 1 otherwise
        // (the superblock occupies the first 1024 bytes either way).
        let bgd_block = if self.block_size == 1024 { 2 } else { 1 };
        let bgd_sector = self.block_to_sector(bgd_block);
        let bgd_request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bgd_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(bgd_request);
        let bgd_results = self.block_device.process_requests();

        let bgd_data = if let Some(result) = bgd_results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read block group descriptors",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from BGD read",
            ));
        };

        // 32 = on-disk size of an ext2 block group descriptor.
        // NOTE(review): only the first BGD block is read above, so groups
        // whose descriptor lies beyond one block would be misaddressed —
        // TODO confirm the group count always fits a single block here.
        let bgd_offset = (group_number as usize) * 32;
        if bgd_offset + 32 > bgd_data.len() {
            return Err(FileSystemError::new(
                FileSystemErrorKind::InvalidData,
                "Block group descriptor offset out of bounds",
            ));
        }

        // Bytes 8..12 of the descriptor hold bg_inode_table (little-endian).
        let inode_table_block = u32::from_le_bytes([
            bgd_data[bgd_offset + 8],
            bgd_data[bgd_offset + 9],
            bgd_data[bgd_offset + 10],
            bgd_data[bgd_offset + 11],
        ]);

        // Translate the in-group index into (block within table, byte offset
        // within that block) using the superblock's inode record size.
        let inode_size = self.superblock.get_inode_size() as u32;
        let inodes_per_block = self.block_size / inode_size;
        let block_offset = inode_index / inodes_per_block;
        let inode_offset_in_block = (inode_index % inodes_per_block) * inode_size;

        let target_block = inode_table_block + block_offset;
        let target_sector = self.block_to_sector(target_block as u64);

        // Read the whole inode-table block so we can patch just one inode.
        let read_request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: target_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(read_request);
        let read_results = self.block_device.process_requests();

        let mut block_data = if let Some(result) = read_results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read inode table block",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from inode table block read",
            ));
        };

        // SAFETY: `inode` is a valid, live reference, and we view exactly
        // `size_of::<Ext2Inode>()` bytes of it as a byte slice for the
        // duration of this expression only. Assumes Ext2Inode is repr(C)
        // with the on-disk layout — TODO confirm in structures.rs.
        let inode_bytes = unsafe {
            core::slice::from_raw_parts(
                inode as *const Ext2Inode as *const u8,
                core::mem::size_of::<Ext2Inode>(),
            )
        };

        let start_offset = inode_offset_in_block as usize;
        let end_offset = start_offset + inode_bytes.len();

        if end_offset > block_data.len() {
            return Err(FileSystemError::new(
                FileSystemErrorKind::InvalidData,
                "Inode data would exceed block boundary",
            ));
        }

        // Patch the inode record in place, then write the block back.
        block_data[start_offset..end_offset].copy_from_slice(inode_bytes);

        let write_request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Write,
            sector: target_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: block_data,
        });

        self.block_device.enqueue_request(write_request);
        let write_results = self.block_device.process_requests();

        if let Some(result) = write_results.first() {
            match &result.result {
                Ok(_) => {
                    // Keep the cache coherent with what is now on disk.
                    let mut cache = self.inode_cache.lock();
                    cache.insert(inode_number, inode.clone());
                    Ok(())
                }
                Err(_) => Err(FileSystemError::new(
                    FileSystemErrorKind::IoError,
                    "Failed to write inode to disk",
                )),
            }
        } else {
            Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from inode write",
            ))
        }
    }
1585
1586 fn initialize_directory(
1588 &self,
1589 dir_inode_number: u32,
1590 parent_inode_number: u32,
1591 ) -> Result<(), FileSystemError> {
1592 profile_scope!("ext2::initialize_directory");
1593
1594 let block_number = self.allocate_block()?;
1596
1597 let block_size = self.block_size as usize;
1599 let mut block_data = vec![0u8; block_size];
1600
1601 let dot_entry_size = 12; let dot_inode = dir_inode_number.to_le_bytes();
1604 let dot_rec_len = dot_entry_size as u16;
1605 let dot_name_len = 1u8;
1606 let dot_file_type = 2u8; block_data[0..4].copy_from_slice(&dot_inode);
1609 block_data[4..6].copy_from_slice(&dot_rec_len.to_le_bytes());
1610 block_data[6] = dot_name_len;
1611 block_data[7] = dot_file_type;
1612 block_data[8] = b'.';
1613
1614 let dotdot_offset = dot_entry_size;
1616 let dotdot_rec_len = (block_size - dotdot_offset) as u16;
1617 let dotdot_name_len = 2u8;
1618 let dotdot_file_type = 2u8; let dotdot_inode = parent_inode_number.to_le_bytes();
1620
1621 block_data[dotdot_offset..dotdot_offset + 4].copy_from_slice(&dotdot_inode);
1622 block_data[dotdot_offset + 4..dotdot_offset + 6]
1623 .copy_from_slice(&dotdot_rec_len.to_le_bytes());
1624 block_data[dotdot_offset + 6] = dotdot_name_len;
1625 block_data[dotdot_offset + 7] = dotdot_file_type;
1626 block_data[dotdot_offset + 8] = b'.';
1627 block_data[dotdot_offset + 9] = b'.';
1628
1629 let block_sector = self.block_to_sector(block_number as u64);
1631 let request = Box::new(crate::device::block::request::BlockIORequest {
1632 request_type: crate::device::block::request::BlockIORequestType::Write,
1633 sector: block_sector as usize,
1634 sector_count: (self.block_size / 512) as usize,
1635 head: 0,
1636 cylinder: 0,
1637 buffer: block_data,
1638 });
1639
1640 self.block_device.enqueue_request(request);
1642 let results = self.block_device.process_requests();
1643
1644 if results.is_empty() || results[0].result.is_err() {
1645 return Err(FileSystemError::new(
1646 FileSystemErrorKind::InvalidData,
1647 "Failed to write directory block",
1648 ));
1649 }
1650
1651 let mut dir_inode = self.read_inode(dir_inode_number)?;
1653 dir_inode.block[0] = block_number as u32;
1654 dir_inode.size = block_size as u32;
1655 dir_inode.blocks = (self.block_size / 512).to_le(); self.write_inode(dir_inode_number, &dir_inode)?;
1658
1659 Ok(())
1660 }
1661
1662 fn allocate_block(&self) -> Result<u64, FileSystemError> {
1664 profile_scope!("ext2::allocate_block");
1665
1666 let total_groups = (self.superblock.blocks_count + self.superblock.blocks_per_group - 1)
1668 / self.superblock.blocks_per_group;
1669
1670 for group in 0..total_groups {
1671 match self.allocate_block_in_group(group) {
1672 Ok(block_num) => return Ok(block_num),
1673 Err(FileSystemError {
1674 kind: FileSystemErrorKind::NoSpace,
1675 ..
1676 }) => {
1677 continue;
1679 }
1680 Err(e) => return Err(e),
1681 }
1682 }
1683
1684 Err(FileSystemError::new(
1685 FileSystemErrorKind::NoSpace,
1686 "No free blocks available in any group",
1687 ))
1688 }
1689
    /// Allocate one free block from a specific block group.
    ///
    /// Reads the group descriptor and block bitmap, finds the first clear
    /// bit in the data area, then batches two writes (updated bitmap +
    /// updated descriptor) into a single device submission before adjusting
    /// the superblock free-block count.
    ///
    /// # Errors
    /// - `NoSpace` when the group has no free blocks (caller treats this as
    ///   "try the next group").
    /// - `IoError` for any failed device request.
    fn allocate_block_in_group(&self, group: u32) -> Result<u64, FileSystemError> {
        profile_scope!("ext2::allocate_block_in_group");

        #[cfg(test)]
        crate::early_println!(
            "[ext2] allocate_block_in_group: Starting OPTIMIZED allocation for group {}",
            group
        );

        // BGD table lives in block 2 for 1 KiB blocks, block 1 otherwise.
        let bgd_block = if self.block_size == 1024 { 2 } else { 1 };
        let bgd_sector = self.block_to_sector(bgd_block);

        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bgd_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let bgd_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read block group descriptor",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        // Offset of this group's descriptor within the (single) BGD block.
        // NOTE(review): `group * size % block_size` wraps around, so this is
        // only correct while all descriptors fit in one block — TODO confirm.
        let bgd_offset = (group * core::mem::size_of::<Ext2BlockGroupDescriptor>() as u32
            % self.block_size) as usize;
        let bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data[bgd_offset..])?;

        if bgd.free_blocks_count == 0 {
            // Signal the caller to fall through to the next group.
            return Err(FileSystemError::new(
                FileSystemErrorKind::NoSpace,
                &format!("No free blocks in group {}", group),
            ));
        }

        // Fetch this group's block bitmap (one bit per block in the group).
        let bitmap_sector = self.block_to_sector(bgd.block_bitmap as u64);
        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bitmap_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let mut bitmap_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read block bitmap",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        // Skip the group's metadata region so we only consider data blocks.
        // NOTE(review): 810 is a magic first-data-block for group 0,
        // presumably derived from this filesystem image's layout — TODO
        // confirm. The non-zero-group formula assumes a 128-byte inode size.
        let group_start_block = group * self.superblock.blocks_per_group;
        let data_start_block = if group == 0 {
            810.max(group_start_block)
        } else {
            let blocks_for_metadata = 3
                + (self.superblock.inodes_per_group * 128 + self.block_size - 1) / self.block_size;
            group_start_block + blocks_for_metadata
        };

        let group_end_block = (group + 1) * self.superblock.blocks_per_group;
        let search_end = core::cmp::min(group_end_block, self.superblock.blocks_count as u32);

        for block_num in data_start_block..search_end {
            // Bit position of this block within the group's bitmap.
            let bit = block_num - group_start_block;
            let byte_index = (bit / 8) as usize;
            let bit_index = bit % 8;

            if byte_index >= bitmap_data.len() {
                break;
            }

            if (bitmap_data[byte_index] & (1 << bit_index)) == 0 {
                // Claim the block in the in-memory bitmap copy.
                bitmap_data[byte_index] |= 1 << bit_index;

                #[cfg(test)]
                crate::early_println!(
                    "[ext2] allocate_block_in_group: Found free block {}, batching metadata updates",
                    block_num
                );

                // Queue the bitmap write (processed together with the BGD
                // write below so both hit the device in one batch).
                let bitmap_write = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Write,
                    sector: bitmap_sector as usize,
                    sector_count: (self.block_size / 512) as usize,
                    head: 0,
                    cylinder: 0,
                    buffer: bitmap_data,
                });
                self.block_device.enqueue_request(bitmap_write);

                // Decrement the group's free-block counter in a copy of the
                // descriptor block and queue that write too.
                let mut updated_bgd_data = bgd_data.clone();
                let mut bgd_update =
                    Ext2BlockGroupDescriptor::from_bytes(&updated_bgd_data[bgd_offset..])?;
                let current_free_blocks = u16::from_le(bgd_update.free_blocks_count);
                bgd_update.free_blocks_count = (current_free_blocks.saturating_sub(1)).to_le();
                bgd_update.write_to_bytes(&mut updated_bgd_data[bgd_offset..]);

                let bgd_write = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Write,
                    sector: bgd_sector as usize,
                    sector_count: (self.block_size / 512) as usize,
                    head: 0,
                    cylinder: 0,
                    buffer: updated_bgd_data,
                });
                self.block_device.enqueue_request(bgd_write);

                #[cfg(test)]
                crate::early_println!(
                    "[ext2] allocate_block_in_group: Processing 2 writes in batch (bitmap + BGD)"
                );
                let write_results = self.block_device.process_requests();

                // Both queued writes must succeed.
                if write_results.len() != 2 || write_results.iter().any(|r| r.result.is_err()) {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to write bitmap or BGD",
                    ));
                }

                // One block consumed: update the global superblock counters.
                self.update_superblock_counts(-1, 0, 0)?;

                #[cfg(test)]
                crate::early_println!(
                    "[ext2] allocate_block_in_group: Successfully allocated block {} (OPTIMIZED: reduced I/O ops)",
                    block_num
                );
                return Ok(block_num as u64);
            }
        }

        Err(FileSystemError::new(
            FileSystemErrorKind::NoSpace,
            "No free blocks found",
        ))
    }
1870
    /// Allocate `count` physically contiguous blocks from one block group.
    ///
    /// Mirrors `allocate_block_in_group`, but scans the bitmap for a run of
    /// `count` consecutive clear bits, claims them all at once, and batches
    /// the bitmap + descriptor writes into a single device submission.
    ///
    /// # Errors
    /// - `NoSpace` when the group lacks `count` free blocks, or no
    ///   contiguous run of that length exists (caller falls back).
    /// - `IoError` for any failed device request.
    fn allocate_blocks_contiguous_in_group(
        &self,
        group: u32,
        count: u32,
    ) -> Result<Vec<u64>, FileSystemError> {
        profile_scope!("ext2::allocate_blocks_contiguous_in_group");

        #[cfg(test)]
        crate::early_println!(
            "[ext2] allocate_blocks_contiguous_in_group: Starting allocation for {} blocks in group {}",
            count,
            group
        );

        // BGD table lives in block 2 for 1 KiB blocks, block 1 otherwise.
        let bgd_block = if self.block_size == 1024 { 2 } else { 1 };
        let bgd_sector = self.block_to_sector(bgd_block);

        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bgd_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let bgd_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read block group descriptor",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        // Offset of this group's descriptor within the (single) BGD block.
        // NOTE(review): same single-BGD-block wrap-around assumption as in
        // allocate_block_in_group — TODO confirm.
        let bgd_offset = (group * core::mem::size_of::<Ext2BlockGroupDescriptor>() as u32
            % self.block_size) as usize;
        let bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data[bgd_offset..])?;

        // Cheap early-out before touching the bitmap at all.
        let free_blocks_count = u16::from_le(bgd.free_blocks_count);
        if free_blocks_count < count as u16 {
            return Err(FileSystemError::new(
                FileSystemErrorKind::NoSpace,
                &format!(
                    "Insufficient free blocks in group {} (need {}, have {})",
                    group, count, free_blocks_count
                ),
            ));
        }

        // Fetch the group's block bitmap.
        let bitmap_sector = self.block_to_sector(bgd.block_bitmap as u64);
        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bitmap_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let mut bitmap_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read block bitmap",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        // Skip the group's metadata region (same heuristics and magic 810
        // as allocate_block_in_group — see the review notes there).
        let group_start_block = group * self.superblock.blocks_per_group;
        let data_start_block = if group == 0 {
            810.max(group_start_block)
        } else {
            let blocks_for_metadata = 3
                + (self.superblock.inodes_per_group * 128 + self.block_size - 1) / self.block_size;
            group_start_block + blocks_for_metadata
        };

        let group_end_block = (group + 1) * self.superblock.blocks_per_group;
        let search_end = core::cmp::min(group_end_block, self.superblock.blocks_count as u32);

        // Slide a window of `count` blocks across the data area; saturating
        // subtraction keeps the range empty when the group is too small.
        for start_block in data_start_block..(search_end.saturating_sub(count - 1)) {
            let mut all_free = true;

            // Check that every block in the candidate run is free.
            for offset in 0..count {
                let block_num = start_block + offset;
                let bit = block_num - group_start_block;
                let byte_index = (bit / 8) as usize;
                let bit_index = bit % 8;

                if byte_index >= bitmap_data.len() {
                    all_free = false;
                    break;
                }

                if (bitmap_data[byte_index] & (1 << bit_index)) != 0 {
                    all_free = false;
                    break;
                }
            }

            if all_free {
                // Claim the whole run in the in-memory bitmap copy.
                let mut allocated_blocks = Vec::new();
                for offset in 0..count {
                    let block_num = start_block + offset;
                    let bit = block_num - group_start_block;
                    let byte_index = (bit / 8) as usize;
                    let bit_index = bit % 8;

                    bitmap_data[byte_index] |= 1 << bit_index;
                    allocated_blocks.push(block_num as u64);
                }

                #[cfg(test)]
                crate::early_println!(
                    "[ext2] allocate_blocks_contiguous_in_group: Found {} contiguous blocks starting at {}, batching updates",
                    count,
                    start_block
                );

                // Queue the bitmap write; processed together with the BGD
                // write below as one batch.
                let bitmap_write = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Write,
                    sector: bitmap_sector as usize,
                    sector_count: (self.block_size / 512) as usize,
                    head: 0,
                    cylinder: 0,
                    buffer: bitmap_data,
                });
                self.block_device.enqueue_request(bitmap_write);

                // Decrement the group's free-block counter by `count`.
                let mut updated_bgd_data = bgd_data.clone();
                let mut bgd_update =
                    Ext2BlockGroupDescriptor::from_bytes(&updated_bgd_data[bgd_offset..])?;
                let current_free_blocks = u16::from_le(bgd_update.free_blocks_count);
                bgd_update.free_blocks_count =
                    (current_free_blocks.saturating_sub(count as u16)).to_le();
                bgd_update.write_to_bytes(&mut updated_bgd_data[bgd_offset..]);

                let bgd_write = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Write,
                    sector: bgd_sector as usize,
                    sector_count: (self.block_size / 512) as usize,
                    head: 0,
                    cylinder: 0,
                    buffer: updated_bgd_data,
                });
                self.block_device.enqueue_request(bgd_write);

                #[cfg(test)]
                crate::early_println!(
                    "[ext2] allocate_blocks_contiguous_in_group: Processing 2 writes in batch for {} blocks",
                    count
                );
                let write_results = self.block_device.process_requests();

                // Both queued writes must succeed.
                if write_results.len() != 2 || write_results.iter().any(|r| r.result.is_err()) {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to write bitmap or BGD",
                    ));
                }

                // `count` blocks consumed: update superblock counters.
                self.update_superblock_counts(-(count as i32), 0, 0)?;

                #[cfg(test)]
                crate::early_println!(
                    "[ext2] allocate_blocks_contiguous_in_group: Successfully allocated {} blocks starting at {} (MAJOR OPTIMIZATION: reduced from {} to ~3 I/O ops)",
                    count,
                    start_block,
                    count * 5
                );
                return Ok(allocated_blocks);
            }
        }

        Err(FileSystemError::new(
            FileSystemErrorKind::NoSpace,
            &format!(
                "No {} contiguous free blocks found in group {}",
                count, group
            ),
        ))
    }
2092
    /// Allocate `count` blocks, preferring physical contiguity.
    ///
    /// Strategy cascade:
    /// 1. A single contiguous run of `count` blocks in any group.
    /// 2. For `count >= 6`: progressively smaller contiguous chunks
    ///    (count/2, count/3, count/4, 8, 4), topping up the remainder with
    ///    single-block allocations.
    /// 3. Fully individual single-block allocations.
    ///
    /// On any hard (non-`NoSpace`) error, blocks allocated so far are freed
    /// before the error is propagated.
    fn allocate_blocks_contiguous(&self, count: u32) -> Result<Vec<u64>, FileSystemError> {
        profile_scope!("ext2::allocate_blocks_contiguous");

        if count == 0 {
            return Ok(Vec::new());
        }

        // Single block: no contiguity concerns, use the simple allocator.
        if count == 1 {
            let block = self.allocate_block()?;
            return Ok(vec![block]);
        }

        // Ceiling division so a partial trailing group is still scanned.
        let group_count = (self.superblock.blocks_count + self.superblock.blocks_per_group - 1)
            / self.superblock.blocks_per_group;

        // Strategy 1: one fully contiguous run in some group.
        for group in 0..group_count {
            match self.allocate_blocks_contiguous_in_group(group, count) {
                Ok(blocks) => {
                    #[cfg(test)]
                    crate::early_println!(
                        "ext2: Allocated {} contiguous blocks starting at {} in group {}",
                        count,
                        blocks[0],
                        group
                    );
                    return Ok(blocks);
                }
                Err(FileSystemError {
                    kind: crate::fs::FileSystemErrorKind::NoSpace,
                    ..
                }) => {
                    // This group can't satisfy the run; try the next one.
                    continue;
                }
                Err(e) => {
                    return Err(e);
                }
            }
        }

        // Strategy 2: for larger requests, assemble the total from smaller
        // contiguous chunks.
        if count >= 6 {
            crate::early_println!(
                "ext2: Full contiguous allocation failed, trying partial contiguous allocation"
            );
            let mut allocated_blocks = Vec::new();
            let mut remaining = count;

            // Chunk sizes tried largest-first. Sizes >= remaining are
            // skipped (the full-size attempt already failed above).
            let chunk_sizes = [count / 2, count / 3, count / 4, 8, 4];
            for &chunk_size in &chunk_sizes {
                if chunk_size == 0 || chunk_size >= remaining {
                    continue;
                }

                // Keep carving chunks of this size until it no longer fits
                // or no group can provide one.
                while remaining >= chunk_size {
                    let mut allocated_chunk = false;

                    for group in 0..group_count {
                        match self.allocate_blocks_contiguous_in_group(group, chunk_size) {
                            Ok(mut chunk_blocks) => {
                                #[cfg(test)]
                                crate::early_println!(
                                    "ext2: Allocated {} contiguous blocks (chunk) starting at {} in group {}",
                                    chunk_size,
                                    chunk_blocks[0],
                                    group
                                );
                                allocated_blocks.append(&mut chunk_blocks);
                                remaining -= chunk_size;
                                allocated_chunk = true;
                                break;
                            }
                            Err(FileSystemError {
                                kind: crate::fs::FileSystemErrorKind::NoSpace,
                                ..
                            }) => {
                                continue;
                            }
                            Err(e) => {
                                // Hard error: roll back everything allocated
                                // so far, then propagate.
                                for &block in &allocated_blocks {
                                    if let Err(free_err) = self.free_block(block as u32) {
                                        crate::early_println!(
                                            "ext2: Failed to free block {} during cleanup: {:?}",
                                            block,
                                            free_err
                                        );
                                    }
                                }
                                return Err(e);
                            }
                        }
                    }

                    if !allocated_chunk {
                        // No group could provide this chunk size; fall
                        // through to the next (smaller) size.
                        break;
                    }
                }

                if remaining == 0 {
                    crate::early_println!(
                        "ext2: Successfully allocated {} blocks using partial contiguous strategy",
                        count
                    );
                    return Ok(allocated_blocks);
                }
            }

            // Hybrid path: keep the contiguous chunks we got and fill the
            // shortfall with single-block allocations.
            if !allocated_blocks.is_empty() && remaining > 0 {
                crate::early_println!(
                    "ext2: Partial contiguous allocation successful ({} blocks), using individual allocation for remaining {} blocks",
                    allocated_blocks.len(),
                    remaining
                );

                for _ in 0..remaining {
                    match self.allocate_block() {
                        Ok(block) => allocated_blocks.push(block),
                        Err(e) => {
                            // Roll back everything on failure.
                            for &allocated_block in &allocated_blocks {
                                if let Err(free_err) = self.free_block(allocated_block as u32) {
                                    crate::early_println!(
                                        "ext2: Failed to free block {} during cleanup: {:?}",
                                        allocated_block,
                                        free_err
                                    );
                                }
                            }
                            return Err(e);
                        }
                    }
                }

                crate::early_println!(
                    "ext2: Hybrid allocation completed: {} blocks total",
                    allocated_blocks.len()
                );
                return Ok(allocated_blocks);
            }

            // NOTE(review): by this point either allocated_blocks is empty
            // or one of the returns above fired, so this cleanup loop looks
            // unreachable with a non-empty list — kept for safety.
            for &block in &allocated_blocks {
                if let Err(free_err) = self.free_block(block as u32) {
                    crate::early_println!(
                        "ext2: Failed to free block {} during cleanup: {:?}",
                        block,
                        free_err
                    );
                }
            }
        }

        // Strategy 3: give up on contiguity entirely.
        crate::early_println!(
            "ext2: All contiguous strategies failed for {} blocks, falling back to individual allocation",
            count
        );
        let mut blocks = Vec::new();
        for _ in 0..count {
            match self.allocate_block() {
                Ok(block) => blocks.push(block),
                Err(e) => {
                    // Roll back the partial set before propagating.
                    for &allocated_block in &blocks {
                        if let Err(free_err) = self.free_block(allocated_block as u32) {
                            crate::early_println!(
                                "ext2: Failed to free block {} during cleanup: {:?}",
                                allocated_block,
                                free_err
                            );
                        }
                    }
                    return Err(e);
                }
            }
        }

        #[cfg(test)]
        crate::early_println!("ext2: Allocated {} blocks individually as fallback", count);
        Ok(blocks)
    }
2284
    /// Allocate a free inode and return its 1-based inode number.
    ///
    /// Reads group 0's descriptor and inode bitmap, claims the first clear
    /// bit at or after a fixed starting inode, writes the bitmap back, and
    /// updates the descriptor and superblock free-inode counters.
    ///
    /// NOTE(review): only block group 0 is ever searched (`group` is
    /// hardcoded); filesystems whose group 0 inode table is full cannot
    /// allocate even if other groups have space — TODO confirm intended.
    ///
    /// # Errors
    /// - `NoSpace` when group 0 reports no free inodes or the scan finds none.
    /// - `IoError` for any failed device request.
    fn allocate_inode(&self) -> Result<u32, FileSystemError> {
        profile_scope!("ext2::allocate_inode");
        // Allocation is restricted to the first block group.
        let group = 0;

        // BGD table lives in block 2 for 1 KiB blocks, block 1 otherwise.
        let bgd_block = if self.block_size == 1024 { 2 } else { 1 };
        let bgd_block_sector = self.block_to_sector(bgd_block);

        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bgd_block_sector,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let bgd_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read block group descriptor",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        // Parsing at offset 0 is correct only because group == 0.
        let bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data)?;

        if bgd.free_inodes_count == 0 {
            return Err(FileSystemError::new(
                FileSystemErrorKind::NoSpace,
                "No free inodes in group 0",
            ));
        }

        // Fetch the group's inode bitmap (one bit per inode in the group).
        let bitmap_sector = self.block_to_sector(bgd.inode_bitmap as u64);
        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bitmap_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let mut bitmap_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read inode bitmap",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        // NOTE(review): 30 is a magic floor — ext2 reserves low inode
        // numbers (the first 10 are standard), so this presumably also
        // skips inodes pre-created by the image builder — TODO confirm.
        let start_inode = 30;
        // Inode N is tracked by bitmap bit N-1.
        let start_bit = start_inode - 1;
        for bit in start_bit..self.superblock.inodes_per_group {
            let byte_index = (bit / 8) as usize;
            let bit_index = bit % 8;

            if byte_index >= bitmap_data.len() {
                break;
            }

            if (bitmap_data[byte_index] & (1 << bit_index)) == 0 {
                // Claim the inode and persist the updated bitmap.
                bitmap_data[byte_index] |= 1 << bit_index;

                let request = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Write,
                    sector: bitmap_sector as usize,
                    sector_count: (self.block_size / 512) as usize,
                    head: 0,
                    cylinder: 0,
                    buffer: bitmap_data,
                });

                self.block_device.enqueue_request(request);
                let results = self.block_device.process_requests();

                if let Some(result) = results.first() {
                    match &result.result {
                        Ok(_) => {
                            // Bitmap write succeeded: decrement the group's
                            // and the superblock's free-inode counters.
                            let mut bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data)?;
                            let current_free_inodes = u16::from_le(bgd.free_inodes_count);
                            bgd.free_inodes_count = (current_free_inodes.saturating_sub(1)).to_le();
                            self.update_group_descriptor(group, &bgd)?;

                            self.update_superblock_counts(0, -1, 0)?;
                        }
                        Err(_) => {
                            return Err(FileSystemError::new(
                                FileSystemErrorKind::IoError,
                                "Failed to write inode bitmap",
                            ));
                        }
                    }
                }

                // Convert the bitmap bit back to a 1-based inode number.
                let allocated_inode = bit + 1;
                return Ok(allocated_inode);
            }
        }

        Err(FileSystemError::new(
            FileSystemErrorKind::NoSpace,
            "No free inodes found",
        ))
    }
2432
2433 fn check_entry_exists(
2435 &self,
2436 parent_inode: u32,
2437 name: &String,
2438 ) -> Result<bool, FileSystemError> {
2439 let parent_dir_inode = self.read_inode(parent_inode)?;
2441
2442 if !parent_dir_inode.is_dir() {
2443 return Err(FileSystemError::new(
2444 FileSystemErrorKind::InvalidData,
2445 "Parent is not a directory",
2446 ));
2447 }
2448
2449 let entries = self.read_directory_entries(&parent_dir_inode)?;
2451
2452 for entry in entries {
2454 let entry_name = &entry.name;
2455
2456 if entry_name == name {
2457 return Ok(true); }
2459 }
2460
2461 Ok(false) }
2463
2464 fn add_directory_entry(
2466 &self,
2467 parent_inode: u32,
2468 name: &String,
2469 child_inode: u32,
2470 file_type: FileType,
2471 ) -> Result<(), FileSystemError> {
2472 profile_scope!("ext2::add_directory_entry");
2473
2474 let parent_dir_inode = self.read_inode(parent_inode)?;
2476
2477 if !parent_dir_inode.is_dir() {
2478 return Err(FileSystemError::new(
2479 FileSystemErrorKind::InvalidData,
2480 "Parent is not a directory",
2481 ));
2482 }
2483
2484 let entry_name_len = name.len() as u8;
2487 let entry_total_len = ((8 + entry_name_len as usize + 3) / 4) * 4; let ext2_file_type = match file_type {
2491 FileType::RegularFile => 1,
2492 FileType::Directory => 2,
2493 FileType::CharDevice(_) => 3,
2494 FileType::BlockDevice(_) => 4,
2495 FileType::Pipe => 5,
2496 FileType::Socket(_) => 6,
2497 FileType::SymbolicLink(_) => 7,
2498 FileType::Unknown => 0,
2499 };
2500
2501 let blocks_in_dir = (parent_dir_inode.get_size() as u64 + self.block_size as u64 - 1)
2503 / self.block_size as u64;
2504
2505 for block_idx in 0..blocks_in_dir.max(1) {
2506 let block_num = self.get_inode_block(&parent_dir_inode, block_idx)?;
2507 if block_num == 0 {
2508 continue; }
2510
2511 let mut block_data = self.read_block_cached(block_num)?;
2513
2514 let mut offset = 0;
2516 let mut last_entry_offset = 0;
2517 let mut last_entry_rec_len = 0;
2518
2519 while offset < self.block_size as usize {
2520 if offset + 8 > block_data.len() {
2521 break;
2522 }
2523
2524 let entry = Ext2DirectoryEntryRaw::from_bytes(&block_data[offset..])?;
2525 let rec_len = entry.get_rec_len();
2526
2527 if rec_len == 0 {
2528 break; }
2530
2531 last_entry_offset = offset;
2532 last_entry_rec_len = rec_len as usize;
2533
2534 offset += rec_len as usize;
2535 }
2536
2537 if last_entry_offset > 0 {
2539 let last_entry =
2540 Ext2DirectoryEntryRaw::from_bytes(&block_data[last_entry_offset..])?;
2541 let actual_last_entry_len = ((8 + last_entry.get_name_len() as usize + 3) / 4) * 4;
2542 let available_space = last_entry_rec_len - actual_last_entry_len;
2543
2544 if available_space >= entry_total_len {
2545 let actual_rec_len_bytes = (actual_last_entry_len as u16).to_le_bytes();
2549 block_data[last_entry_offset + 4] = actual_rec_len_bytes[0];
2550 block_data[last_entry_offset + 5] = actual_rec_len_bytes[1];
2551
2552 let new_entry_offset = last_entry_offset + actual_last_entry_len;
2554 let remaining_space = last_entry_rec_len - actual_last_entry_len;
2555
2556 let child_inode_bytes = child_inode.to_le_bytes();
2558 let rec_len_bytes = (remaining_space as u16).to_le_bytes();
2559
2560 block_data[new_entry_offset..new_entry_offset + 4]
2561 .copy_from_slice(&child_inode_bytes);
2562 block_data[new_entry_offset + 4..new_entry_offset + 6]
2563 .copy_from_slice(&rec_len_bytes);
2564 block_data[new_entry_offset + 6] = entry_name_len;
2565 block_data[new_entry_offset + 7] = ext2_file_type;
2566
2567 block_data
2569 [new_entry_offset + 8..new_entry_offset + 8 + entry_name_len as usize]
2570 .copy_from_slice(name.as_bytes());
2571
2572 self.write_block_cached(block_num, &block_data)?;
2574 return Ok(());
2575 }
2576 }
2577 }
2578
2579 Err(FileSystemError::new(
2582 FileSystemErrorKind::NoSpace,
2583 "No space available in directory for new entry",
2584 ))
2585 }
2586
2587 fn remove_directory_entry(
2589 &self,
2590 parent_inode: u32,
2591 name: &String,
2592 ) -> Result<(), FileSystemError> {
2593 let parent_dir_inode = self.read_inode(parent_inode)?;
2595
2596 if !parent_dir_inode.is_dir() {
2597 return Err(FileSystemError::new(
2598 FileSystemErrorKind::InvalidData,
2599 "Parent is not a directory",
2600 ));
2601 }
2602
2603 let blocks_in_dir = (parent_dir_inode.get_size() as u64 + self.block_size as u64 - 1)
2605 / self.block_size as u64;
2606
2607 for block_idx in 0..blocks_in_dir {
2608 let block_num = self.get_inode_block(&parent_dir_inode, block_idx)?;
2609 if block_num == 0 {
2610 continue; }
2612
2613 let mut block_data = self.read_block_cached(block_num)?;
2615
2616 let mut offset = 0;
2618 let mut prev_entry_offset = None;
2619
2620 while offset < self.block_size as usize {
2621 if offset + 8 > block_data.len() {
2622 break;
2623 }
2624
2625 let entry = match Ext2DirectoryEntryRaw::from_bytes(&block_data[offset..]) {
2626 Ok(entry) => entry,
2627 Err(_) => break,
2628 };
2629
2630 let rec_len = entry.get_rec_len();
2631 if rec_len == 0 {
2632 break; }
2634
2635 let name_len = entry.get_name_len() as usize;
2636 if offset + 8 + name_len <= block_data.len() {
2637 let entry_name_bytes = &block_data[offset + 8..offset + 8 + name_len];
2638 if let Ok(entry_name) = core::str::from_utf8(entry_name_bytes) {
2639 if entry_name == *name {
2640 if let Some(prev_offset) = prev_entry_offset {
2642 let prev_entry =
2644 Ext2DirectoryEntryRaw::from_bytes(&block_data[prev_offset..])?;
2645 let new_rec_len = prev_entry.get_rec_len() + rec_len;
2646 let new_rec_len_bytes = new_rec_len.to_le_bytes();
2647
2648 block_data[prev_offset + 4] = new_rec_len_bytes[0];
2649 block_data[prev_offset + 5] = new_rec_len_bytes[1];
2650 } else {
2651 block_data[offset..offset + 4].fill(0);
2653 }
2654
2655 self.write_block_cached(block_num, &block_data)?;
2657 return Ok(());
2658 }
2659 }
2660 }
2661
2662 prev_entry_offset = Some(offset);
2663 offset += rec_len as usize;
2664 }
2665 }
2666
2667 Err(FileSystemError::new(
2669 FileSystemErrorKind::NotFound,
2670 "Directory entry not found",
2671 ))
2672 }
2673
2674 fn free_inode(&self, inode_number: u32) -> Result<(), FileSystemError> {
2676 let inode = self.read_inode(inode_number)?;
2678 let is_directory = inode.is_dir();
2679 let blocks_to_free = self.get_inode_data_blocks(&inode)?;
2680
2681 for block_num in blocks_to_free {
2683 self.free_block(block_num)?;
2686 }
2687
2688 let group = (inode_number - 1) / self.superblock.get_inodes_per_group();
2690 let local_inode = (inode_number - 1) % self.superblock.get_inodes_per_group();
2691
2692 let bgd_block = (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32)
2694 / self.block_size
2695 + if self.block_size == 1024 { 2 } else { 1 };
2696 let bgd_block_sector = self.block_to_sector(bgd_block as u64);
2697
2698 let request = Box::new(crate::device::block::request::BlockIORequest {
2699 request_type: crate::device::block::request::BlockIORequestType::Read,
2700 sector: bgd_block_sector as usize,
2701 sector_count: (self.block_size / 512) as usize,
2702 head: 0,
2703 cylinder: 0,
2704 buffer: vec![0u8; self.block_size as usize],
2705 });
2706
2707 self.block_device.enqueue_request(request);
2708 let results = self.block_device.process_requests();
2709
2710 let mut bgd_data = if let Some(result) = results.first() {
2711 match &result.result {
2712 Ok(_) => result.request.buffer.clone(),
2713 Err(_) => {
2714 return Err(FileSystemError::new(
2715 FileSystemErrorKind::IoError,
2716 "Failed to read block group descriptor",
2717 ));
2718 }
2719 }
2720 } else {
2721 return Err(FileSystemError::new(
2722 FileSystemErrorKind::IoError,
2723 "No result from block device read",
2724 ));
2725 };
2726
2727 let bgd_offset =
2728 (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32) % self.block_size;
2729 let mut bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data[bgd_offset as usize..])?;
2730
2731 let inode_bitmap_block = bgd.get_inode_bitmap();
2733 let bitmap_sector = self.block_to_sector(inode_bitmap_block as u64);
2734
2735 let request = Box::new(crate::device::block::request::BlockIORequest {
2736 request_type: crate::device::block::request::BlockIORequestType::Read,
2737 sector: bitmap_sector as usize,
2738 sector_count: (self.block_size / 512) as usize,
2739 head: 0,
2740 cylinder: 0,
2741 buffer: vec![0u8; self.block_size as usize],
2742 });
2743
2744 self.block_device.enqueue_request(request);
2745 let results = self.block_device.process_requests();
2746
2747 let mut bitmap_data = if let Some(result) = results.first() {
2748 match &result.result {
2749 Ok(_) => result.request.buffer.clone(),
2750 Err(_) => {
2751 return Err(FileSystemError::new(
2752 FileSystemErrorKind::IoError,
2753 "Failed to read inode bitmap",
2754 ));
2755 }
2756 }
2757 } else {
2758 return Err(FileSystemError::new(
2759 FileSystemErrorKind::IoError,
2760 "No result from block device read",
2761 ));
2762 };
2763
2764 let byte_index = (local_inode / 8) as usize;
2766 let bit_index = (local_inode % 8) as u8;
2767
2768 if byte_index >= bitmap_data.len() {
2769 return Err(FileSystemError::new(
2770 FileSystemErrorKind::InvalidData,
2771 "Inode bitmap index out of bounds",
2772 ));
2773 }
2774
2775 bitmap_data[byte_index] &= !(1 << bit_index);
2777
2778 let write_request = Box::new(crate::device::block::request::BlockIORequest {
2780 request_type: crate::device::block::request::BlockIORequestType::Write,
2781 sector: bitmap_sector as usize,
2782 sector_count: (self.block_size / 512) as usize,
2783 head: 0,
2784 cylinder: 0,
2785 buffer: bitmap_data,
2786 });
2787
2788 self.block_device.enqueue_request(write_request);
2789 let write_results = self.block_device.process_requests();
2790
2791 if let Some(write_result) = write_results.first() {
2792 match &write_result.result {
2793 Ok(_) => {}
2794 Err(_) => {
2795 return Err(FileSystemError::new(
2796 FileSystemErrorKind::IoError,
2797 "Failed to write inode to disk",
2798 ));
2799 }
2800 }
2801 } else {
2802 return Err(FileSystemError::new(
2803 FileSystemErrorKind::IoError,
2804 "No result from inode write",
2805 ));
2806 }
2807
2808 bgd.set_free_inodes_count(bgd.get_free_inodes_count() + 1);
2810 if is_directory {
2811 bgd.set_used_dirs_count(bgd.get_used_dirs_count().saturating_sub(1));
2812 }
2813
2814 bgd.write_to_bytes(&mut bgd_data[bgd_offset as usize..]);
2816 let write_bgd_request = Box::new(crate::device::block::request::BlockIORequest {
2817 request_type: crate::device::block::request::BlockIORequestType::Write,
2818 sector: bgd_block_sector as usize,
2819 sector_count: (self.block_size / 512) as usize,
2820 head: 0,
2821 cylinder: 0,
2822 buffer: bgd_data,
2823 });
2824
2825 self.block_device.enqueue_request(write_bgd_request);
2826 let bgd_write_results = self.block_device.process_requests();
2827
2828 if let Some(bgd_write_result) = bgd_write_results.first() {
2829 match &bgd_write_result.result {
2830 Ok(_) => {}
2831 Err(_) => {
2832 return Err(FileSystemError::new(
2833 FileSystemErrorKind::IoError,
2834 "Failed to write updated block group descriptor",
2835 ));
2836 }
2837 }
2838 } else {
2839 return Err(FileSystemError::new(
2840 FileSystemErrorKind::IoError,
2841 "No response from BGD write",
2842 ));
2843 }
2844
2845 self.clear_inode_on_disk(inode_number)?;
2846
2847 self.update_superblock_free_counts(0, 1)?;
2849
2850 {
2852 let mut cache = self.inode_cache.lock();
2853 cache.remove(inode_number);
2854 }
2855
2856 Ok(())
2857 }
2858
2859 fn clear_inode_on_disk(&self, inode_number: u32) -> Result<(), FileSystemError> {
2860 let inode = Ext2Inode::empty();
2861 self.write_inode(inode_number, &inode)?;
2862
2863 Ok(())
2864 }
2865
    /// Write `content` as the complete data of inode `inode_num`, replacing
    /// the file's previous contents.
    ///
    /// Existing data blocks are reused in place. Missing blocks are found by
    /// scanning for runs of holes (block number 0): runs of 3 or more are
    /// allocated contiguously in one batch, shorter runs one block at a time.
    /// All content blocks are then written through the block cache in a
    /// single batch, and the inode's size/blocks fields are updated.
    ///
    /// NOTE(review): when the file shrinks, blocks beyond the new size are
    /// not freed here, and `mtime` is reset to 0 (presumably no clock source
    /// is available) — confirm both are intentional.
    pub fn write_file_content(
        &self,
        inode_num: u32,
        content: &[u8],
    ) -> Result<(), FileSystemError> {
        profile_scope!("ext2::write_file_content");

        #[cfg(test)]
        crate::early_println!(
            "[ext2] write_file_content: inode={}, content_len={}",
            inode_num,
            content.len()
        );

        let mut inode = self.read_inode(inode_num)?;

        // Number of data blocks required for `content` (ceiling division).
        let blocks_needed = if content.is_empty() {
            0
        } else {
            ((content.len() as u64 + self.block_size as u64 - 1) / self.block_size as u64) as u32
        };

        #[cfg(test)]
        crate::early_println!("[ext2] write_file_content: blocks_needed={}", blocks_needed);

        // Physical block number for each logical block of the file.
        let mut block_list = Vec::new();
        // (logical index, new physical block) pairs to record in the inode.
        let mut new_block_assignments = Vec::new();
        if blocks_needed > 0 {
            let existing_blocks = self.get_inode_blocks(&inode, 0, blocks_needed as u64)?;

            // Coalesce consecutive holes into (start, count) allocation ranges.
            let mut allocation_ranges = Vec::new();
            let mut current_start = None;
            let mut current_count = 0;

            for (block_idx, &existing_block) in existing_blocks.iter().enumerate() {
                if existing_block == 0 {
                    // Hole: extend (or begin) the current run of missing blocks.
                    if current_start.is_none() {
                        current_start = Some(block_idx);
                        current_count = 1;
                    } else {
                        current_count += 1;
                    }
                } else {
                    // Allocated block: close any open hole-run, then reuse it.
                    if let Some(start) = current_start {
                        allocation_ranges.push((start, current_count));
                        current_start = None;
                        current_count = 0;
                    }
                    #[cfg(test)]
                    crate::early_println!(
                        "[ext2] write_file_content: reusing existing block {} for logical block {}",
                        existing_block,
                        block_idx
                    );
                    block_list.push(existing_block);
                }
            }

            // Close a hole-run that extends to the end of the file.
            if let Some(start) = current_start {
                allocation_ranges.push((start, current_count));
            }

            for (start_idx, count) in allocation_ranges {
                if count >= 3 {
                    // Large run: allocate the whole range contiguously at once.
                    #[cfg(test)]
                    crate::early_println!(
                        "[ext2] write_file_content: using multi-block allocation for {} blocks starting at logical block {}",
                        count,
                        start_idx
                    );

                    let allocated_blocks = self.allocate_blocks_contiguous(count as u32)?;

                    for (i, &block_num) in allocated_blocks.iter().enumerate() {
                        let logical_idx = start_idx + i;
                        new_block_assignments.push((logical_idx as u64, block_num as u32));

                        // Pad block_list with placeholders so indexed assignment
                        // below cannot go out of bounds.
                        while block_list.len() <= logical_idx {
                            block_list.push(0);
                        }
                        block_list[logical_idx] = block_num;

                        #[cfg(test)]
                        crate::early_println!(
                            "[ext2] write_file_content: multi-allocated block {} for logical block {}",
                            block_num,
                            logical_idx
                        );
                    }
                } else {
                    // Short run: allocate blocks one at a time.
                    for i in 0..count {
                        let logical_idx = start_idx + i;
                        let new_block = self.allocate_block()?;

                        #[cfg(test)]
                        crate::early_println!(
                            "[ext2] write_file_content: individually allocated block {} for logical block {}",
                            new_block,
                            logical_idx
                        );

                        new_block_assignments.push((logical_idx as u64, new_block as u32));

                        // Pad as above before indexed assignment.
                        while block_list.len() <= logical_idx {
                            block_list.push(0);
                        }
                        block_list[logical_idx] = new_block;
                    }
                }
            }
        }

        // Record all new logical→physical mappings in the inode in one batch.
        if !new_block_assignments.is_empty() {
            self.set_inode_blocks_simple_batch(&mut inode, &new_block_assignments)?;
        }

        // Slice `content` into block-sized chunks, zero-padding the last one.
        let mut remaining = content.len();
        let mut content_offset = 0;
        let mut write_blocks = BTreeMap::new();

        for &block_num in block_list.iter() {
            if remaining == 0 {
                break;
            }

            let bytes_to_write = core::cmp::min(remaining, self.block_size as usize);
            let mut block_data = vec![0u8; self.block_size as usize];

            block_data[..bytes_to_write]
                .copy_from_slice(&content[content_offset..content_offset + bytes_to_write]);

            #[cfg(test)]
            crate::early_println!(
                "[ext2] write_file_content: preparing block {} ({} bytes) for batch write",
                block_num,
                bytes_to_write
            );

            write_blocks.insert(block_num, block_data);

            remaining -= bytes_to_write;
            content_offset += bytes_to_write;
        }

        // Flush all content blocks through the block cache in one call.
        if !write_blocks.is_empty() {
            #[cfg(test)]
            crate::early_println!(
                "[ext2] write_file_content: batch writing {} content blocks",
                write_blocks.len()
            );
            self.write_blocks_cached(&write_blocks)?;
        }

        // Update inode metadata: byte size, timestamp, and the 512-byte
        // sector count ext2 stores in i_blocks.
        inode.size = content.len() as u32;
        inode.mtime = 0;
        inode.blocks = blocks_needed * (self.block_size / 512);

        self.write_inode(inode_num, &inode)?;

        // Refresh the cached copy so subsequent reads see the new metadata.
        {
            let mut cache = self.inode_cache.lock();
            cache.insert(inode_num, inode);
        }

        Ok(())
    }
3056
    /// Map an inode's mode bits to the VFS `FileType`.
    ///
    /// Symlinks whose target is at most 60 bytes are ext2 "fast symlinks":
    /// the target string is stored inline in the inode's block-pointer area
    /// rather than in a data block, and is decoded here. Device nodes decode
    /// their major/minor numbers from the inode.
    pub fn file_type_from_inode(
        &self,
        inode: &Ext2Inode,
        _inode_number: u32,
    ) -> Result<FileType, FileSystemError> {
        let mode = inode.get_mode();
        // Isolate the file-type bits of the mode field.
        let file_type_bits = mode & EXT2_S_IFMT;

        match file_type_bits {
            EXT2_S_IFREG => Ok(FileType::RegularFile),
            EXT2_S_IFDIR => Ok(FileType::Directory),
            EXT2_S_IFLNK => {
                let size = inode.get_size() as usize;

                if size <= 60 {
                    // Fast symlink: the target bytes live inside the inode.
                    // SAFETY: `inode` is a valid, live reference for the
                    // duration of this borrow; the slice covers exactly the
                    // inode's own bytes, so the raw view is in-bounds, and
                    // any byte pattern is valid for `u8`.
                    let inode_bytes = unsafe {
                        core::slice::from_raw_parts(
                            inode as *const Ext2Inode as *const u8,
                            core::mem::size_of::<Ext2Inode>(),
                        )
                    };
                    // Byte offset of the 60-byte i_block area within the inode.
                    // NOTE(review): assumes the in-memory Ext2Inode layout
                    // matches the on-disk ext2 format (i_block at offset 40)
                    // — confirm the struct is repr(C)/packed accordingly.
                    let block_start_offset = 40;
                    let block_bytes = &inode_bytes[block_start_offset..block_start_offset + 60];

                    let target_bytes = &block_bytes[..size];
                    let target = String::from_utf8(target_bytes.to_vec()).map_err(|_| {
                        FileSystemError::new(
                            FileSystemErrorKind::InvalidData,
                            "Invalid UTF-8 in fast symlink target",
                        )
                    })?;
                    Ok(FileType::SymbolicLink(target))
                } else {
                    // Slow symlink: the target is stored in data blocks and is
                    // not read here; return an empty target string.
                    Ok(FileType::SymbolicLink("".to_string()))
                }
            }
            EXT2_S_IFCHR => {
                // Character device: encode major/minor into a single device id.
                if let Some((major, minor)) = inode.get_device_info() {
                    let device_info = crate::fs::DeviceFileInfo {
                        device_id: ((major << 8) | minor) as usize,
                        device_type: crate::device::DeviceType::Char,
                    };
                    Ok(FileType::CharDevice(device_info))
                } else {
                    Err(FileSystemError::new(
                        FileSystemErrorKind::InvalidData,
                        "Invalid character device information",
                    ))
                }
            }
            EXT2_S_IFBLK => {
                // Block device: same (major << 8) | minor encoding as above.
                if let Some((major, minor)) = inode.get_device_info() {
                    let device_info = crate::fs::DeviceFileInfo {
                        device_id: ((major << 8) | minor) as usize,
                        device_type: crate::device::DeviceType::Block,
                    };
                    Ok(FileType::BlockDevice(device_info))
                } else {
                    Err(FileSystemError::new(
                        FileSystemErrorKind::InvalidData,
                        "Invalid block device information",
                    ))
                }
            }
            EXT2_S_IFIFO => Ok(FileType::Pipe),
            // Sockets found on disk are not bound to a live socket yet.
            EXT2_S_IFSOCK => Ok(FileType::Socket(SocketFileInfo {
                socket_id: crate::fs::UNBOUND_SOCKET_ID,
            })),
            _ => Ok(FileType::Unknown),
        }
    }
3137
3138 fn get_inode_data_blocks(&self, inode: &Ext2Inode) -> Result<Vec<u32>, FileSystemError> {
3140 let mut blocks = Vec::new();
3141
3142 let mode = inode.get_mode();
3144 let is_symlink = (mode & EXT2_S_IFMT) == EXT2_S_IFLNK;
3145
3146 if is_symlink && inode.get_size() <= 60 {
3147 return Ok(blocks);
3149 }
3150
3151 let blocks_in_file =
3152 (inode.get_size() as u64 + self.block_size as u64 - 1) / self.block_size as u64;
3153
3154 if blocks_in_file == 0 {
3155 return Ok(blocks);
3156 }
3157
3158 let block_nums = self.get_inode_blocks(inode, 0, blocks_in_file)?;
3160
3161 for &block_num in &block_nums {
3162 if block_num != 0 {
3163 blocks.push(block_num as u32);
3164 }
3165 }
3166
3167 Ok(blocks)
3168 }
3169
    /// Mark data block `block_number` as free in its block group's bitmap and
    /// update the group descriptor and superblock free-block counts.
    ///
    /// Freeing block 0 is a no-op (0 means "no block" throughout this code).
    ///
    /// NOTE(review): the `(block_number - 1)` group math assumes the
    /// filesystem's first data block is 1 (true for 1 KiB blocks; ext2 uses
    /// first_data_block == 0 for larger block sizes) — confirm against the
    /// superblock. Also, like `free_inode`, these writes are not atomic.
    fn free_block(&self, block_number: u32) -> Result<(), FileSystemError> {
        if block_number == 0 {
            // Sentinel for "no block"; nothing to free.
            return Ok(());
        }

        // Locate the block group and the block's index within that group.
        let group = (block_number - 1) / self.superblock.get_blocks_per_group();
        let local_block = (block_number - 1) % self.superblock.get_blocks_per_group();

        // The block group descriptor table begins right after the superblock:
        // block 2 for 1 KiB blocks, block 1 for larger block sizes.
        let bgd_block = (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32)
            / self.block_size
            + if self.block_size == 1024 { 2 } else { 1 };
        let bgd_block_sector = self.block_to_sector(bgd_block as u64);

        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bgd_block_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let mut bgd_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read block group descriptor",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        // Offset of this group's descriptor within the descriptor-table block.
        let bgd_offset =
            (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32) % self.block_size;
        let mut bgd = Ext2BlockGroupDescriptor::from_bytes(&bgd_data[bgd_offset as usize..])?;

        let block_bitmap_block = bgd.get_block_bitmap();
        let bitmap_sector = self.block_to_sector(block_bitmap_block as u64);

        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bitmap_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let mut bitmap_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read block bitmap",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        // Locate and clear this block's bit within the bitmap.
        let byte_index = (local_block / 8) as usize;
        let bit_index = (local_block % 8) as u8;

        if byte_index >= bitmap_data.len() {
            return Err(FileSystemError::new(
                FileSystemErrorKind::InvalidData,
                "Block bitmap index out of bounds",
            ));
        }

        bitmap_data[byte_index] &= !(1 << bit_index);

        let write_request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Write,
            sector: bitmap_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: bitmap_data,
        });

        self.block_device.enqueue_request(write_request);
        let write_results = self.block_device.process_requests();

        if let Some(write_result) = write_results.first() {
            match &write_result.result {
                Ok(_) => {}
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to write updated block bitmap",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No response from block bitmap write",
            ));
        }

        // Account the freed block in the group descriptor and write it back.
        bgd.set_free_blocks_count(bgd.get_free_blocks_count() + 1);

        bgd.write_to_bytes(&mut bgd_data[bgd_offset as usize..]);
        let write_bgd_request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Write,
            sector: bgd_block_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: bgd_data,
        });

        self.block_device.enqueue_request(write_bgd_request);
        let bgd_write_results = self.block_device.process_requests();

        if let Some(bgd_write_result) = bgd_write_results.first() {
            match &bgd_write_result.result {
                Ok(_) => {
                    // 1 block freed, 0 inodes, 0 directories.
                    self.update_superblock_counts(1, 0, 0)?;
                }
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to write updated block group descriptor",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No response from BGD write",
            ));
        }

        Ok(())
    }
3335
    /// Record the mapping `logical_block` → `block_number` in `inode`,
    /// allocating (and zeroing) indirect blocks on demand.
    ///
    /// Layout handled:
    /// * logical blocks 0..12 — direct pointers in `inode.block[0..12]`
    /// * next `block_size/4` blocks — single indirect via `inode.block[12]`
    /// * next `(block_size/4)^2` blocks — double indirect via `inode.block[13]`
    /// * triple indirect — not supported (returns `NotSupported`)
    ///
    /// The caller is responsible for writing the modified inode back to disk.
    ///
    /// NOTE(review): the single-indirect path goes through the block cache
    /// (`read_block_cached`/`write_block_cached`) while the double-indirect
    /// path issues raw device requests — confirm the cache stays coherent
    /// with those raw writes.
    fn set_inode_block(
        &self,
        inode: &mut Ext2Inode,
        logical_block: u64,
        block_number: u32,
    ) -> Result<(), FileSystemError> {
        profile_scope!("ext2::set_inode_block");
        // Each indirect block holds block_size/4 little-endian u32 pointers.
        let blocks_per_indirect = self.block_size / 4;
        if logical_block < 12 {
            // Direct pointer: store straight into the inode.
            inode.block[logical_block as usize] = block_number;
            Ok(())
        } else if logical_block < 12 + blocks_per_indirect as u64 {
            // Single indirect range.
            let index = logical_block - 12;

            if inode.block[12] == 0 {
                // No indirect block yet: allocate one and zero it so stale
                // data is never interpreted as block pointers.
                let indirect_block = self.allocate_block()? as u32;
                inode.block[12] = indirect_block;

                let clear_data = vec![0u8; self.block_size as usize];
                self.write_block_cached(indirect_block as u64, &clear_data)?;
            }

            let indirect_block = inode.block[12];

            let mut indirect_data = self.read_block_cached(indirect_block as u64)?;

            // Patch the 4-byte pointer slot for this logical block.
            let offset = index as usize * 4;
            let block_bytes = block_number.to_le_bytes();
            indirect_data[offset..offset + 4].copy_from_slice(&block_bytes);

            self.write_block_cached(indirect_block as u64, &indirect_data)?;
            Ok(())
        } else if logical_block
            < 12 + blocks_per_indirect as u64
                + blocks_per_indirect as u64 * blocks_per_indirect as u64
        {
            // Double indirect range: two levels of pointer lookup.
            let offset_in_double = logical_block - 12 - blocks_per_indirect as u64;
            let first_indirect_index = offset_in_double / blocks_per_indirect as u64;
            let second_indirect_index = offset_in_double % blocks_per_indirect as u64;

            if inode.block[13] == 0 {
                // Allocate and zero the top-level double-indirect block.
                let double_indirect_block = self.allocate_block()? as u32;
                inode.block[13] = double_indirect_block;

                let clear_data = vec![0u8; self.block_size as usize];
                let clear_request = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Write,
                    sector: self.block_to_sector(double_indirect_block as u64),
                    sector_count: (self.block_size / 512) as usize,
                    head: 0,
                    cylinder: 0,
                    buffer: clear_data,
                });

                self.block_device.enqueue_request(clear_request);
                let _results = self.block_device.process_requests();
            }

            let double_indirect_block = inode.block[13];
            let double_indirect_sector = self.block_to_sector(double_indirect_block as u64);

            // Read the double-indirect block to find the first-level pointer.
            let request = Box::new(crate::device::block::request::BlockIORequest {
                request_type: crate::device::block::request::BlockIORequestType::Read,
                sector: double_indirect_sector as usize,
                sector_count: (self.block_size / 512) as usize,
                head: 0,
                cylinder: 0,
                buffer: vec![0u8; self.block_size as usize],
            });

            self.block_device.enqueue_request(request);
            let results = self.block_device.process_requests();

            let mut double_indirect_data = if let Some(result) = results.first() {
                match &result.result {
                    Ok(_) => result.request.buffer.clone(),
                    Err(_) => {
                        return Err(FileSystemError::new(
                            FileSystemErrorKind::IoError,
                            "Failed to read double indirect block",
                        ));
                    }
                }
            } else {
                return Err(FileSystemError::new(
                    FileSystemErrorKind::IoError,
                    "No result from double indirect block read",
                ));
            };

            // Decode the little-endian pointer to the first-level indirect block.
            let mut first_indirect_ptr = u32::from_le_bytes([
                double_indirect_data[first_indirect_index as usize * 4],
                double_indirect_data[first_indirect_index as usize * 4 + 1],
                double_indirect_data[first_indirect_index as usize * 4 + 2],
                double_indirect_data[first_indirect_index as usize * 4 + 3],
            ]);

            if first_indirect_ptr == 0 {
                // First-level indirect block missing: allocate it, link it
                // into the double-indirect block, and persist that update.
                first_indirect_ptr = self.allocate_block()? as u32;

                let first_indirect_bytes = first_indirect_ptr.to_le_bytes();
                let offset = first_indirect_index as usize * 4;
                double_indirect_data[offset..offset + 4].copy_from_slice(&first_indirect_bytes);

                let write_request = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Write,
                    sector: double_indirect_sector as usize,
                    sector_count: (self.block_size / 512) as usize,
                    head: 0,
                    cylinder: 0,
                    buffer: double_indirect_data.clone(),
                });

                self.block_device.enqueue_request(write_request);
                let write_results = self.block_device.process_requests();

                if let Some(write_result) = write_results.first() {
                    match &write_result.result {
                        Ok(_) => {}
                        Err(_) => {
                            return Err(FileSystemError::new(
                                FileSystemErrorKind::IoError,
                                "Failed to write double indirect block",
                            ));
                        }
                    }
                } else {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "No response from double indirect block write",
                    ));
                }

                // Zero the freshly allocated first-level indirect block.
                let clear_data = vec![0u8; self.block_size as usize];
                let clear_request = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Write,
                    sector: self.block_to_sector(first_indirect_ptr as u64),
                    sector_count: (self.block_size / 512) as usize,
                    head: 0,
                    cylinder: 0,
                    buffer: clear_data,
                });

                self.block_device.enqueue_request(clear_request);
                let _results = self.block_device.process_requests();
            }

            // Read the first-level indirect block, patch the target slot,
            // and write it back.
            let first_indirect_sector = self.block_to_sector(first_indirect_ptr as u64);
            let request = Box::new(crate::device::block::request::BlockIORequest {
                request_type: crate::device::block::request::BlockIORequestType::Read,
                sector: first_indirect_sector as usize,
                sector_count: (self.block_size / 512) as usize,
                head: 0,
                cylinder: 0,
                buffer: vec![0u8; self.block_size as usize],
            });

            self.block_device.enqueue_request(request);
            let results = self.block_device.process_requests();

            let mut first_indirect_data = if let Some(result) = results.first() {
                match &result.result {
                    Ok(_) => result.request.buffer.clone(),
                    Err(_) => {
                        return Err(FileSystemError::new(
                            FileSystemErrorKind::IoError,
                            "Failed to read first level indirect block",
                        ));
                    }
                }
            } else {
                return Err(FileSystemError::new(
                    FileSystemErrorKind::IoError,
                    "No result from first level indirect block read",
                ));
            };

            let offset = second_indirect_index as usize * 4;
            let block_bytes = block_number.to_le_bytes();
            first_indirect_data[offset..offset + 4].copy_from_slice(&block_bytes);

            let write_request = Box::new(crate::device::block::request::BlockIORequest {
                request_type: crate::device::block::request::BlockIORequestType::Write,
                sector: first_indirect_sector as usize,
                sector_count: (self.block_size / 512) as usize,
                head: 0,
                cylinder: 0,
                buffer: first_indirect_data,
            });

            self.block_device.enqueue_request(write_request);
            let write_results = self.block_device.process_requests();

            if let Some(write_result) = write_results.first() {
                match &write_result.result {
                    Ok(_) => Ok(()),
                    Err(_) => Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to write first level indirect block",
                    )),
                }
            } else {
                Err(FileSystemError::new(
                    FileSystemErrorKind::IoError,
                    "No response from first level indirect block write",
                ))
            }
        } else {
            // Triple indirect range: not implemented.
            Err(FileSystemError::new(
                FileSystemErrorKind::NotSupported,
                "Triple indirect blocks not yet supported",
            ))
        }
    }
3572
3573 fn set_inode_blocks_simple_batch(
3575 &self,
3576 inode: &mut Ext2Inode,
3577 assignments: &[(u64, u32)],
3578 ) -> Result<(), FileSystemError> {
3579 profile_scope!("ext2::set_inode_blocks_simple_batch");
3580
3581 let blocks_per_indirect = self.block_size / 4;
3582 let mut indirect_blocks_cache = alloc::collections::BTreeMap::new();
3583 let mut double_indirect_cache = alloc::collections::BTreeMap::new();
3584 let mut _batched_writes = 0;
3585 let mut new_indirect_blocks = Vec::new();
3586
3587 let mut needed_indirect_blocks = 0u32;
3589 let mut needed_first_level_indirects = alloc::collections::BTreeSet::new();
3590 let mut need_single_indirect = false;
3591 let mut need_double_indirect = false;
3592
3593 for &(logical_block, _) in assignments {
3594 if logical_block >= 12 && logical_block < 12 + blocks_per_indirect as u64 {
3595 if inode.block[12] == 0 {
3596 need_single_indirect = true;
3597 }
3598 } else if logical_block >= 12 + blocks_per_indirect as u64 {
3599 if inode.block[13] == 0 {
3600 need_double_indirect = true;
3601 }
3602
3603 let double_base = 12 + blocks_per_indirect as u64;
3605 let double_offset = logical_block - double_base;
3606 let first_indirect_index = double_offset / blocks_per_indirect as u64;
3607 needed_first_level_indirects.insert(first_indirect_index);
3608 }
3609 }
3610
3611 if need_single_indirect {
3613 needed_indirect_blocks += 1;
3614 }
3615 if need_double_indirect {
3616 needed_indirect_blocks += 1;
3617 }
3618 needed_indirect_blocks += needed_first_level_indirects.len() as u32;
3619
3620 let allocated_indirect_blocks = if needed_indirect_blocks > 0 {
3622 #[cfg(test)]
3623 crate::early_println!(
3624 "[ext2] set_inode_blocks_simple_batch: Pre-allocating {} indirect blocks",
3625 needed_indirect_blocks
3626 );
3627 self.allocate_blocks_contiguous(needed_indirect_blocks)?
3628 } else {
3629 Vec::new()
3630 };
3631
3632 let mut indirect_block_index = 0;
3633
3634 for &(logical_block, block_number) in assignments {
3635 if logical_block < 12 {
3638 inode.block[logical_block as usize] = block_number;
3640 } else if logical_block < 12 + blocks_per_indirect as u64 {
3641 let index = logical_block - 12;
3643
3644 if inode.block[12] == 0 {
3646 let indirect_block = if indirect_block_index < allocated_indirect_blocks.len() {
3647 let block = allocated_indirect_blocks[indirect_block_index] as u32;
3648 indirect_block_index += 1;
3649 block
3650 } else {
3651 return Err(FileSystemError::new(
3652 FileSystemErrorKind::NoSpace,
3653 "Not enough pre-allocated indirect blocks",
3654 ));
3655 };
3656
3657 inode.block[12] = indirect_block;
3658 new_indirect_blocks.push(indirect_block);
3659 indirect_blocks_cache
3660 .insert(indirect_block, vec![0u8; self.block_size as usize]);
3661 }
3662
3663 let indirect_block = inode.block[12];
3664
3665 if !indirect_blocks_cache.contains_key(&indirect_block) {
3667 if new_indirect_blocks.contains(&indirect_block) {
3668 indirect_blocks_cache
3670 .insert(indirect_block, vec![0u8; self.block_size as usize]);
3671 } else {
3672 let data = self.read_block_cached(indirect_block as u64)?;
3675 indirect_blocks_cache.insert(indirect_block, data);
3676 }
3677 }
3678
3679 if let Some(indirect_data) = indirect_blocks_cache.get_mut(&indirect_block) {
3681 let offset = index as usize * 4;
3682 let block_bytes = block_number.to_le_bytes();
3683 indirect_data[offset..offset + 4].copy_from_slice(&block_bytes);
3684 _batched_writes += 1;
3685 }
3686 } else if logical_block
3687 < 12 + blocks_per_indirect as u64
3688 + blocks_per_indirect as u64 * blocks_per_indirect as u64
3689 {
3690 let double_base = 12 + blocks_per_indirect as u64;
3692
3693 if logical_block < double_base {
3695 crate::early_println!(
3696 "[ext2] ERROR: Double indirect block calculation would underflow: logical_block={}, double_base={}",
3697 logical_block,
3698 double_base
3699 );
3700 return Err(FileSystemError::new(
3701 FileSystemErrorKind::InvalidData,
3702 "Double indirect block calculation underflow",
3703 ));
3704 }
3705
3706 let double_offset = logical_block - double_base;
3707 let first_indirect_index = double_offset / blocks_per_indirect as u64;
3708 let second_indirect_index = double_offset % blocks_per_indirect as u64;
3709
3710 if inode.block[13] == 0 {
3715 let double_indirect_block =
3716 if indirect_block_index < allocated_indirect_blocks.len() {
3717 let block = allocated_indirect_blocks[indirect_block_index] as u32;
3718 indirect_block_index += 1;
3719 block
3720 } else {
3721 return Err(FileSystemError::new(
3722 FileSystemErrorKind::NoSpace,
3723 "Not enough pre-allocated double indirect blocks",
3724 ));
3725 };
3726
3727 inode.block[13] = double_indirect_block;
3728 new_indirect_blocks.push(double_indirect_block);
3729 double_indirect_cache
3730 .insert(double_indirect_block, vec![0u8; self.block_size as usize]);
3731 }
3732
3733 let double_indirect_block = inode.block[13];
3734
3735 if !double_indirect_cache.contains_key(&double_indirect_block) {
3737 if new_indirect_blocks.contains(&double_indirect_block) {
3738 double_indirect_cache
3739 .insert(double_indirect_block, vec![0u8; self.block_size as usize]);
3740 } else {
3741 let data = self.read_block_cached(double_indirect_block as u64)?;
3743 double_indirect_cache.insert(double_indirect_block, data);
3744 }
3745 }
3746
3747 let first_indirect_ptr =
3749 if let Some(double_data) = double_indirect_cache.get(&double_indirect_block) {
3750 let ptr_offset = first_indirect_index as usize * 4;
3751 u32::from_le_bytes([
3752 double_data[ptr_offset],
3753 double_data[ptr_offset + 1],
3754 double_data[ptr_offset + 2],
3755 double_data[ptr_offset + 3],
3756 ])
3757 } else {
3758 0
3759 };
3760
3761 let first_indirect_block = if first_indirect_ptr == 0 {
3763 let new_block = if indirect_block_index < allocated_indirect_blocks.len() {
3764 let block = allocated_indirect_blocks[indirect_block_index] as u32;
3765 indirect_block_index += 1;
3766 block
3767 } else {
3768 return Err(FileSystemError::new(
3769 FileSystemErrorKind::NoSpace,
3770 "Not enough pre-allocated first-level indirect blocks",
3771 ));
3772 };
3773
3774 new_indirect_blocks.push(new_block);
3775
3776 if let Some(double_data) = double_indirect_cache.get_mut(&double_indirect_block)
3778 {
3779 let ptr_offset = first_indirect_index as usize * 4;
3780 let block_bytes = new_block.to_le_bytes();
3781 double_data[ptr_offset..ptr_offset + 4].copy_from_slice(&block_bytes);
3782 }
3783
3784 indirect_blocks_cache.insert(new_block, vec![0u8; self.block_size as usize]);
3786 new_block
3787 } else {
3788 first_indirect_ptr
3789 };
3790
3791 if !indirect_blocks_cache.contains_key(&first_indirect_block) {
3793 if new_indirect_blocks.contains(&first_indirect_block) {
3794 indirect_blocks_cache
3795 .insert(first_indirect_block, vec![0u8; self.block_size as usize]);
3796 } else {
3797 let data = self.read_block_cached(first_indirect_block as u64)?;
3799 indirect_blocks_cache.insert(first_indirect_block, data);
3800 }
3801 }
3802
3803 if let Some(indirect_data) = indirect_blocks_cache.get_mut(&first_indirect_block) {
3805 let offset = second_indirect_index as usize * 4;
3806 let block_bytes = block_number.to_le_bytes();
3807 indirect_data[offset..offset + 4].copy_from_slice(&block_bytes);
3808 _batched_writes += 1;
3809 }
3810 } else {
3811 self.set_inode_block(inode, logical_block, block_number)?;
3814 }
3815 }
3816
3817 let total_indirect_blocks = indirect_blocks_cache.len() + double_indirect_cache.len();
3819 if total_indirect_blocks > 0 {
3820 let mut write_blocks = BTreeMap::new();
3821
3822 for (block_num, data) in indirect_blocks_cache {
3824 if block_num as u64 > (1u64 << 32) {
3826 return Err(FileSystemError::new(
3828 FileSystemErrorKind::InvalidData,
3829 "Invalid indirect block number",
3830 ));
3831 }
3832 write_blocks.insert(block_num as u64, data);
3833 }
3834
3835 for (block_num, data) in double_indirect_cache {
3837 if block_num as u64 > (1u64 << 32) {
3839 return Err(FileSystemError::new(
3841 FileSystemErrorKind::InvalidData,
3842 "Invalid double indirect block number",
3843 ));
3844 }
3845 write_blocks.insert(block_num as u64, data);
3846 }
3847
3848 self.write_blocks_cached(&write_blocks)?;
3850 }
3851
3852 Ok(())
3855 }
3856
    /// Persist an updated block group descriptor back to disk.
    ///
    /// Performs a read-modify-write of the filesystem block that contains the
    /// descriptor for `group`: the whole block is read from the device, this
    /// group's descriptor bytes are patched in place, and the block is written
    /// back.
    ///
    /// # Arguments
    /// * `group` - Zero-based block group index.
    /// * `bgd` - Descriptor contents to serialize into the table.
    ///
    /// # Errors
    /// Returns `IoError` if the device read or write fails, or if the device
    /// produces no result for a request.
    fn update_group_descriptor(
        &self,
        group: u32,
        bgd: &Ext2BlockGroupDescriptor,
    ) -> Result<(), FileSystemError> {
        // The BGD table starts right after the superblock: block 2 when the
        // block size is 1024 (superblock occupies block 1), block 1 otherwise
        // (superblock lives inside block 0).
        let bgd_block = (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32)
            / self.block_size
            + if self.block_size == 1024 { 2 } else { 1 };
        let bgd_block_sector = self.block_to_sector(bgd_block as u64);

        // Read the full block that holds this group's descriptor.
        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: bgd_block_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; self.block_size as usize],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let mut bgd_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read block group descriptor block",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from block device read",
            ));
        };

        // Patch only this descriptor's bytes within the block; neighbouring
        // descriptors in the same block are preserved from the read above.
        let bgd_offset =
            (group * mem::size_of::<Ext2BlockGroupDescriptor>() as u32) % self.block_size;
        bgd.write_to_bytes(&mut bgd_data[bgd_offset as usize..]);

        // Write the modified block back to the same sectors.
        let write_request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Write,
            sector: bgd_block_sector as usize,
            sector_count: (self.block_size / 512) as usize,
            head: 0,
            cylinder: 0,
            buffer: bgd_data,
        });

        self.block_device.enqueue_request(write_request);
        let write_results = self.block_device.process_requests();

        if let Some(write_result) = write_results.first() {
            match &write_result.result {
                Ok(_) => Ok(()),
                Err(_) => Err(FileSystemError::new(
                    FileSystemErrorKind::IoError,
                    "Failed to write updated block group descriptor",
                )),
            }
        } else {
            Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No response from BGD write",
            ))
        }
    }
3928
    /// Apply signed deltas to the superblock's free-block and free-inode
    /// counters via a raw read-modify-write of the on-disk superblock.
    ///
    /// # Arguments
    /// * `block_delta` - Signed change applied to `s_free_blocks_count`.
    /// * `inode_delta` - Signed change applied to `s_free_inodes_count`.
    /// * `_dir_delta` - Currently ignored by this implementation; directory
    ///   counts are maintained in the block group descriptors instead.
    ///
    /// # Errors
    /// Returns `IoError` if the superblock read or write fails.
    fn update_superblock_counts(
        &self,
        block_delta: i32,
        inode_delta: i32,
        _dir_delta: i32,
    ) -> Result<(), FileSystemError> {
        // The ext2 superblock always lives at byte offset 1024 and is 1024
        // bytes long: sectors 2..4 on a 512-byte-sector device.
        let request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Read,
            sector: 2,
            sector_count: 2,
            head: 0,
            cylinder: 0,
            buffer: vec![0u8; 1024],
        });

        self.block_device.enqueue_request(request);
        let results = self.block_device.process_requests();

        let mut superblock_data = if let Some(result) = results.first() {
            match &result.result {
                Ok(_) => result.request.buffer.clone(),
                Err(_) => {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "Failed to read superblock",
                    ));
                }
            }
        } else {
            return Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No result from superblock read",
            ));
        };

        if block_delta != 0 {
            // Bytes 12..16 of the superblock hold s_free_blocks_count
            // (little-endian u32).
            let current = u32::from_le_bytes([
                superblock_data[12],
                superblock_data[13],
                superblock_data[14],
                superblock_data[15],
            ]);
            // Saturating arithmetic so a bad delta cannot wrap the counter.
            // NOTE(review): `-block_delta` would overflow for i32::MIN;
            // deltas are assumed small — confirm against callers.
            let new_count = if block_delta < 0 {
                current.saturating_sub((-block_delta) as u32)
            } else {
                current.saturating_add(block_delta as u32)
            };
            let bytes = new_count.to_le_bytes();
            superblock_data[12..16].copy_from_slice(&bytes);

        }

        if inode_delta != 0 {
            // Bytes 16..20 of the superblock hold s_free_inodes_count
            // (little-endian u32).
            let current = u32::from_le_bytes([
                superblock_data[16],
                superblock_data[17],
                superblock_data[18],
                superblock_data[19],
            ]);
            let new_count = if inode_delta < 0 {
                current.saturating_sub((-inode_delta) as u32)
            } else {
                current.saturating_add(inode_delta as u32)
            };
            let bytes = new_count.to_le_bytes();
            superblock_data[16..20].copy_from_slice(&bytes);

        }

        // Write the patched superblock back to the same sectors.
        let write_request = Box::new(crate::device::block::request::BlockIORequest {
            request_type: crate::device::block::request::BlockIORequestType::Write,
            sector: 2,
            sector_count: 2,
            head: 0,
            cylinder: 0,
            buffer: superblock_data,
        });

        self.block_device.enqueue_request(write_request);
        let write_results = self.block_device.process_requests();

        if let Some(write_result) = write_results.first() {
            match &write_result.result {
                Ok(_) => {
                    Ok(())
                }
                Err(_) => Err(FileSystemError::new(
                    FileSystemErrorKind::IoError,
                    "Failed to write updated superblock",
                )),
            }
        } else {
            Err(FileSystemError::new(
                FileSystemErrorKind::IoError,
                "No response from superblock write",
            ))
        }
    }
4039
    /// Convenience wrapper around `update_superblock_counts` that adjusts only
    /// the free block/inode counters (directory delta fixed at 0).
    fn update_superblock_free_counts(
        &self,
        block_delta: i32,
        inode_delta: i32,
    ) -> Result<(), FileSystemError> {
        self.update_superblock_counts(block_delta, inode_delta, 0)
    }
4048
    /// Read a set of filesystem blocks, serving hits from the block cache and
    /// batching cache misses into coalesced multi-sector device reads.
    ///
    /// # Arguments
    /// * `block_nums` - Block numbers to read; the returned `Vec` preserves
    ///   this order, one block-sized buffer per requested block.
    ///
    /// # Errors
    /// Returns `DeviceError` if a device read fails, if the device returns a
    /// different number of results than requests, or if a returned buffer is
    /// too short to cover its block range.
    fn read_blocks_cached(&self, block_nums: &[u64]) -> Result<Vec<Vec<u8>>, FileSystemError> {
        profile_scope!("ext2::read_blocks_cached");

        // Fast path: a single fully-cached block avoids building the
        // bookkeeping vectors below. `cache.get` yields an owned copy.
        if block_nums.len() == 1 {
            let block_num = block_nums[0];
            let mut cache = self.block_cache.lock();
            if let Some(data) = cache.get(block_num) {
                return Ok(vec![data]);
            }
            drop(cache);
        }

        // `results` keeps one slot per requested block, in request order.
        // Misses are parked as empty Vecs and filled in after the device read.
        let mut results = Vec::with_capacity(block_nums.len());
        let mut missing_blocks = Vec::new();
        let mut cache = self.block_cache.lock();

        for &block_num in block_nums {
            if let Some(data) = cache.get(block_num) {
                results.push((block_num, data));
            } else {
                missing_blocks.push(block_num);
                // Empty Vec marks a miss; the cache never stores empty blocks.
                results.push((block_num, Vec::new())); }
        }

        // Release the cache lock before issuing device I/O.
        drop(cache);

        if missing_blocks.is_empty() {
            return Ok(results.into_iter().map(|(_, data)| data).collect());
        }

        if !missing_blocks.is_empty() {
            // Sort so runs of consecutive block numbers can be coalesced into
            // single multi-sector read requests.
            missing_blocks.sort();

            let mut request_ranges = Vec::new();

            let mut i = 0;
            while i < missing_blocks.len() {
                let start_block = missing_blocks[i];
                let mut count = 1;

                // Grow the run while the next missing block is contiguous.
                while i + count < missing_blocks.len()
                    && missing_blocks[i + count] == start_block + count as u64
                {
                    count += 1;
                }

                let start_sector = self.block_to_sector(start_block);
                let num_sectors = count * self.sectors_per_block() as usize;
                let buffer_size = count * self.block_size as usize;

                let request = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Read,
                    sector: start_sector,
                    sector_count: num_sectors,
                    head: 0,
                    cylinder: 0,
                    buffer: vec![0u8; buffer_size],
                });

                // Remember (start, count) so results can be mapped back to
                // block numbers; device results arrive in request order.
                request_ranges.push((start_block, count));

                self.block_device.enqueue_request(request);
                i += count; }

            let read_results = self.block_device.process_requests();

            if read_results.len() != request_ranges.len() {
                return Err(FileSystemError::new(
                    FileSystemErrorKind::DeviceError,
                    "Mismatch between requested and received block count",
                ));
            }

            // Re-acquire the cache lock to insert freshly read blocks.
            let mut cache = self.block_cache.lock();
            let mut missing_data = HashMap::new();

            for (result_idx, result) in read_results.iter().enumerate() {
                if result.result.is_err() {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::DeviceError,
                        "Failed to read blocks from device",
                    ));
                }

                let (start_block, count) = request_ranges[result_idx];
                let data = &result.request.buffer;

                // Defensive path: tolerate a device that returned a buffer of
                // the wrong size by truncating or zero-padding to the expected
                // length before splitting it into per-block slices.
                let expected_size = count * self.block_size as usize;
                if data.len() != expected_size {
                    let mut corrected_data = data.clone();
                    if data.len() > expected_size {
                        corrected_data.truncate(expected_size);
                    } else {
                        corrected_data.resize(expected_size, 0);
                    }

                    for j in 0..count {
                        let current_block = start_block + j as u64;
                        let offset = j * self.block_size as usize;
                        let end_offset = offset + self.block_size as usize;

                        if end_offset <= corrected_data.len() {
                            let block_data = corrected_data[offset..end_offset].to_vec();
                            missing_data.insert(current_block, block_data.clone());
                            cache.insert(current_block, block_data);
                        } else {
                            return Err(FileSystemError::new(
                                FileSystemErrorKind::DeviceError,
                                "Buffer corruption detected",
                            ));
                        }
                    }
                    continue; }

                // Normal path: slice the coalesced buffer into per-block
                // chunks, caching each and recording it for result fill-in.
                for j in 0..count {
                    let current_block = start_block + j as u64;
                    let offset = j * self.block_size as usize;
                    let end_offset = offset + self.block_size as usize;

                    let block_data = data[offset..end_offset].to_vec();
                    missing_data.insert(current_block, block_data.clone());
                    cache.insert(current_block, block_data);
                }
            }

            // Replace the empty miss placeholders with the fetched data,
            // preserving the caller's original request order.
            for i in 0..results.len() {
                let (block_num, ref data) = results[i];
                if data.is_empty() {
                    if let Some(fetched_data) = missing_data.get(&block_num) {
                        results[i] = (block_num, fetched_data.clone());
                    }
                }
            }
        }

        let result: Vec<Vec<u8>> = results.into_iter().map(|(_, data)| data).collect();

        // Test-only instrumentation: periodically dump cache statistics.
        // SAFETY-NOTE(review): `static mut` access here is unsynchronized;
        // acceptable only under the single-threaded test harness.
        #[cfg(test)]
        unsafe {
            static mut CALL_COUNT: u64 = 0;
            CALL_COUNT += 1;
            if CALL_COUNT % 100 == 0 {
                let cache = self.block_cache.lock();
                cache.print_stats("Block");
            }
        }

        Ok(result)
    }
4223
4224 fn write_blocks_cached(&self, blocks: &BTreeMap<u64, Vec<u8>>) -> Result<(), FileSystemError> {
4226 profile_scope!("ext2::write_blocks_cached");
4227
4228 if blocks.is_empty() {
4229 return Ok(());
4230 }
4231
4232 #[cfg(test)]
4233 crate::early_println!(
4234 "[ext2] write_blocks_cached: {} blocks to write",
4235 blocks.len()
4236 );
4237
4238 for (block_num, _) in blocks.iter() {
4240 if *block_num > (1u64 << 32) {
4241 crate::early_println!(
4243 "[ext2] ERROR: Invalid block number detected: {} (0x{:x})",
4244 block_num,
4245 block_num
4246 );
4247 panic!("Invalid block number: {} (0x{:x})", block_num, block_num);
4248 }
4249 }
4250
4251 let mut sorted_blocks: Vec<_> = blocks.iter().collect();
4252 sorted_blocks.sort_by_key(|(k, _)| *k);
4253
4254 let mut request_ranges = Vec::new();
4256
4257 let mut i = 0;
4258 while i < sorted_blocks.len() {
4259 let start_block = *sorted_blocks[i].0;
4260 let mut count = 1;
4261 let mut data_to_write = sorted_blocks[i].1.clone();
4262
4263 while i + count < sorted_blocks.len()
4265 && *sorted_blocks[i + count].0 == start_block + count as u64
4266 {
4267 data_to_write.extend_from_slice(sorted_blocks[i + count].1);
4268 count += 1;
4269 }
4270
4271 let start_sector = self.block_to_sector(start_block);
4272 let num_sectors = count * self.sectors_per_block() as usize;
4273
4274 let request = Box::new(crate::device::block::request::BlockIORequest {
4275 request_type: crate::device::block::request::BlockIORequestType::Write,
4276 sector: start_sector,
4277 sector_count: num_sectors,
4278 head: 0,
4279 cylinder: 0,
4280 buffer: data_to_write,
4281 });
4282
4283 request_ranges.push((start_block, count));
4285
4286 self.block_device.enqueue_request(request);
4288 i += count; }
4290
4291 #[cfg(test)]
4293 crate::early_println!(
4294 "[ext2] write_blocks_cached: Processing {} requests in batch",
4295 request_ranges.len()
4296 );
4297 let write_results = self.block_device.process_requests();
4298
4299 if write_results.len() != request_ranges.len() {
4301 return Err(FileSystemError::new(
4302 FileSystemErrorKind::DeviceError,
4303 "Mismatch between requested and received write count",
4304 ));
4305 }
4306
4307 let mut cache = self.block_cache.lock();
4309 for (result_idx, result) in write_results.iter().enumerate() {
4310 if result.result.is_err() {
4311 return Err(FileSystemError::new(
4312 FileSystemErrorKind::DeviceError,
4313 "Failed to write blocks to device",
4314 ));
4315 }
4316
4317 let (start_block, count) = request_ranges[result_idx];
4318
4319 for j in 0..count {
4322 let current_block = start_block + j as u64;
4323 cache.remove(current_block);
4324 }
4325 }
4326
4327 Ok(())
4328 }
4329
4330 fn sectors_per_block(&self) -> u64 {
4332 (self.block_size as u64) / 512
4333 }
4334
4335 fn block_to_sector(&self, block_num: u64) -> usize {
4337 if block_num > (1u64 << 32) {
4339 crate::early_println!(
4340 "[ext2] ERROR: block_to_sector called with invalid block_num: {} (0x{:x})",
4341 block_num,
4342 block_num
4343 );
4344 panic!(
4345 "block_to_sector: invalid block_num: {} (0x{:x})",
4346 block_num, block_num
4347 );
4348 }
4349
4350 if block_num > (1u64 << 30) {
4352 #[cfg(test)]
4353 crate::early_println!(
4354 "[ext2] WARNING: block_to_sector called with very large block_num: {}",
4355 block_num
4356 );
4357 }
4358
4359 (block_num * self.sectors_per_block()) as usize
4360 }
4361
4362 fn read_block_cached(&self, block_num: u64) -> Result<Vec<u8>, FileSystemError> {
4364 let blocks = self.read_blocks_cached(&[block_num])?;
4366 if let Some(block_data) = blocks.into_iter().next() {
4367 Ok(block_data)
4368 } else {
4369 Err(FileSystemError::new(
4370 FileSystemErrorKind::DeviceError,
4371 "Failed to read single block",
4372 ))
4373 }
4374 }
4375
4376 fn write_block_cached(&self, block_num: u64, data: &[u8]) -> Result<(), FileSystemError> {
4378 self.write_blocks_cached(&BTreeMap::from([(block_num, data.to_vec())]))
4381 }
4382
4383 pub fn print_cache_stats(&self) {
4385 let inode_cache = self.inode_cache.lock();
4386 let block_cache = self.block_cache.lock();
4387
4388 inode_cache.print_stats("Inode");
4389 block_cache.print_stats("Block");
4390 }
4391}
4392
/// VFS integration for the ext2 filesystem: node lookup, directory listing,
/// open/create/remove, and the accessors required by the VFS core.
impl FileSystemOperations for Ext2FileSystem {
    /// Identifier assigned to this mounted filesystem instance.
    fn fs_id(&self) -> FileSystemId {
        self.fs_id
    }

    /// Resolve `name` within the directory `parent`.
    ///
    /// Reads the parent's on-disk directory entries and, on a match, builds a
    /// fresh `Ext2Node` wired to this filesystem instance.
    ///
    /// # Errors
    /// `InvalidOperation` if `parent` is not an `Ext2Node`, `NotADirectory`
    /// if the parent inode is not a directory, `NotFound` if no entry
    /// matches `name`.
    fn lookup(
        &self,
        parent: &Arc<dyn VfsNode>,
        name: &String,
    ) -> Result<Arc<dyn VfsNode>, FileSystemError> {
        let ext2_parent = parent.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
            FileSystemError::new(
                FileSystemErrorKind::InvalidOperation,
                "Parent node is not an Ext2Node",
            )
        })?;

        let parent_inode = self.read_inode(ext2_parent.inode_number())?;

        // Only directory inodes carry directory entries.
        if parent_inode.mode & EXT2_S_IFMT != EXT2_S_IFDIR {
            return Err(FileSystemError::new(
                FileSystemErrorKind::NotADirectory,
                "Parent is not a directory",
            ));
        }

        let entries = self.read_directory_entries(&parent_inode)?;

        // Linear scan of the directory entries for a name match.
        for entry in entries {
            let entry_name = entry.name_str()?;
            if entry_name == *name {
                let child_inode = self.read_inode(entry.entry.inode)?;

                let file_type = self.file_type_from_inode(&child_inode, entry.entry.inode)?;

                // The VFS file id is simply the inode number.
                let file_id = entry.entry.inode as u64;

                let node = Ext2Node::new(entry.entry.inode, file_type, file_id);

                // Propagate the filesystem back-reference so the child can
                // perform further operations.
                if let Some(fs_ref) = ext2_parent.filesystem() {
                    node.set_filesystem(fs_ref);
                }

                return Ok(Arc::new(node));
            }
        }

        Err(FileSystemError::new(
            FileSystemErrorKind::NotFound,
            "File not found",
        ))
    }

    /// List all entries of the directory `node`, including their resolved
    /// file types (each child's inode is read to determine it).
    ///
    /// # Errors
    /// `InvalidOperation` if `node` is not an `Ext2Node`, `NotADirectory`
    /// if the inode is not a directory; inode/entry read errors propagate.
    fn readdir(
        &self,
        node: &Arc<dyn VfsNode>,
    ) -> Result<Vec<DirectoryEntryInternal>, FileSystemError> {
        let ext2_node = node.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
            FileSystemError::new(
                FileSystemErrorKind::InvalidOperation,
                "Node is not an Ext2Node",
            )
        })?;

        let inode = self.read_inode(ext2_node.inode_number())?;

        if inode.mode & EXT2_S_IFMT != EXT2_S_IFDIR {
            return Err(FileSystemError::new(
                FileSystemErrorKind::NotADirectory,
                "Node is not a directory",
            ));
        }

        let entries = self.read_directory_entries(&inode)?;

        // Materialize VFS-level entries; the child inode is read per entry to
        // derive its file type.
        let mut result = Vec::new();
        for entry in entries {
            let name = entry.name_str()?;
            let child_inode = self.read_inode(entry.entry.inode)?;

            let file_type = self.file_type_from_inode(&child_inode, entry.entry.inode)?;

            result.push(DirectoryEntryInternal {
                name,
                file_type,
                file_id: entry.entry.inode as u64,
            });
        }

        Ok(result)
    }

    /// Open `node`, producing a type-appropriate `FileObject`.
    ///
    /// Supports regular files, directories and character devices; other file
    /// types are rejected. `_flags` is currently ignored.
    ///
    /// # Errors
    /// `InvalidOperation` if `node` is not an `Ext2Node`, `NotSupported` for
    /// unsupported file types.
    fn open(
        &self,
        node: &Arc<dyn VfsNode>,
        _flags: u32,
    ) -> Result<Arc<dyn FileObject>, FileSystemError> {
        #[cfg(test)]
        crate::early_println!("[ext2] open: Starting open operation");

        let file_type = node.file_type()?;

        #[cfg(test)]
        crate::early_println!("[ext2] open: File type = {:?}", file_type);

        match file_type {
            FileType::RegularFile => {
                #[cfg(test)]
                crate::early_println!("[ext2] open: Opening regular file");

                let ext2_node = node.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
                    FileSystemError::new(
                        FileSystemErrorKind::InvalidOperation,
                        "Node is not an Ext2Node",
                    )
                })?;
                let file_obj = Arc::new(Ext2FileObject::new(
                    ext2_node.inode_number(),
                    ext2_node.id(),
                ));

                // Hand the file object a weak filesystem reference for I/O.
                if let Some(fs_weak) = ext2_node.filesystem() {
                    file_obj.set_filesystem(fs_weak);
                }

                Ok(file_obj)
            }
            FileType::Directory => {
                #[cfg(test)]
                crate::early_println!("[ext2] open: Opening directory");

                let ext2_node = node.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
                    FileSystemError::new(
                        FileSystemErrorKind::InvalidOperation,
                        "Node is not an Ext2Node",
                    )
                })?;
                let dir_obj = Arc::new(Ext2DirectoryObject::new(
                    ext2_node.inode_number(),
                    ext2_node.id(),
                ));

                if let Some(fs_weak) = ext2_node.filesystem() {
                    dir_obj.set_filesystem(fs_weak);
                }

                Ok(dir_obj)
            }
            FileType::CharDevice(device_info) => {
                #[cfg(test)]
                crate::early_println!(
                    "[ext2] Opening character device file: device_id={}",
                    device_info.device_id
                );

                let ext2_node = node.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
                    FileSystemError::new(
                        FileSystemErrorKind::InvalidOperation,
                        "Node is not an Ext2Node",
                    )
                })?;
                let char_device_obj =
                    Arc::new(Ext2CharDeviceFileObject::new(device_info, ext2_node.id()));

                if let Some(fs_weak) = ext2_node.filesystem() {
                    char_device_obj.set_filesystem(fs_weak);
                }

                #[cfg(test)]
                crate::early_println!("[ext2] Character device file object created successfully");

                Ok(char_device_obj)
            }
            _ => {
                #[cfg(test)]
                crate::early_println!("[ext2] open: Unsupported file type: {:?}", file_type);

                Err(FileSystemError::new(
                    FileSystemErrorKind::NotSupported,
                    "Unsupported file type for open operation",
                ))
            }
        }
    }

    /// Create a new entry `name` of the given `file_type` under `parent`.
    ///
    /// Allocates and initializes an inode, links it into the parent
    /// directory, and handles the type-specific on-disk layout: inline (fast)
    /// or block-backed symlink targets, device-number encoding for device
    /// nodes, and `.`/`..` plus link-count bookkeeping for directories.
    /// `_mode` is currently ignored; permissions are fixed per file type.
    ///
    /// # Errors
    /// `NotSupported` for non-ext2 parents or unsupported file types,
    /// `NotADirectory` if `parent` is not a directory, `AlreadyExists` if
    /// `name` is taken; allocation and I/O errors propagate.
    fn create(
        &self,
        parent: &Arc<dyn VfsNode>,
        name: &String,
        file_type: FileType,
        _mode: u32,
    ) -> Result<Arc<dyn VfsNode>, FileSystemError> {
        let ext2_parent = parent.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
            FileSystemError::new(
                FileSystemErrorKind::NotSupported,
                "Invalid node type for ext2",
            )
        })?;

        match ext2_parent.file_type() {
            Ok(FileType::Directory) => {}
            Ok(_) => {
                return Err(FileSystemError::new(
                    FileSystemErrorKind::NotADirectory,
                    "Parent is not a directory",
                ));
            }
            Err(e) => return Err(e),
        }

        // Refuse to shadow an existing entry.
        if self.check_entry_exists(ext2_parent.inode_number(), name)? {
            return Err(FileSystemError::new(
                FileSystemErrorKind::AlreadyExists,
                "File or directory already exists",
            ));
        }

        let new_inode_number = self.allocate_inode()?;
        let file_id = new_inode_number as u64;

        // Type bits plus fixed default permissions (note: `_mode` is not
        // consulted here).
        let mode = match &file_type {
            FileType::RegularFile => EXT2_S_IFREG | 0o644,
            FileType::Directory => EXT2_S_IFDIR | 0o755,
            FileType::SymbolicLink(_) => EXT2_S_IFLNK | 0o777,
            FileType::CharDevice(_) => EXT2_S_IFCHR | 0o666,
            FileType::BlockDevice(_) => EXT2_S_IFBLK | 0o666,
            FileType::Pipe => EXT2_S_IFIFO | 0o666,
            FileType::Socket(_) => EXT2_S_IFSOCK | 0o666,
            _ => {
                return Err(FileSystemError::new(
                    FileSystemErrorKind::NotSupported,
                    "Unsupported file type for ext2",
                ));
            }
        } as u16;

        // Directories start with 2 links (their own entry plus "."); every
        // other type starts with 1.
        let initial_nlinks: u16 = if file_type == FileType::Directory {
            2
        } else {
            1
        }; let mut new_inode = Ext2Inode {
            mode: mode.to_le(),
            uid: 0_u16.to_le(),
            size: 0_u32.to_le(),
            atime: 0_u32.to_le(),
            ctime: 0_u32.to_le(),
            mtime: 0_u32.to_le(),
            dtime: 0_u32.to_le(),
            gid: 0_u16.to_le(),
            links_count: initial_nlinks.to_le(),
            blocks: 0_u32.to_le(),
            flags: 0_u32.to_le(),
            osd1: 0_u32.to_le(),
            block: [0_u32; 15],
            generation: 0_u32.to_le(),
            file_acl: 0_u32.to_le(),
            dir_acl: 0_u32.to_le(),
            faddr: 0_u32.to_le(),
            osd2: [0u8; 12],
        };

        if let FileType::SymbolicLink(target_path) = &file_type {
            let target_bytes = target_path.as_bytes();
            new_inode.size = (target_bytes.len() as u32).to_le();

            if target_bytes.len() <= 60 {
                // Fast symlink: the target fits in the 60-byte block-pointer
                // array and is stored inline, packed as little-endian u32s.
                new_inode.block = [0u32; 15];
                let mut block_as_bytes = [0u8; 60];
                block_as_bytes[..target_bytes.len()].copy_from_slice(target_bytes);

                for (i, chunk) in block_as_bytes.chunks(4).enumerate() {
                    if i >= 15 {
                        break;
                    }
                    let mut val = [0u8; 4];
                    val[..chunk.len()].copy_from_slice(chunk);
                    new_inode.block[i] = u32::from_le_bytes(val);
                }
            } else {
                // Slow symlink: allocate one data block and write the target
                // path into it directly (zero-padded to the block size).
                let block_number = self.allocate_block()? as u32;
                new_inode.block[0] = block_number.to_le();
                new_inode.blocks = (self.block_size / 512).to_le(); let mut block_data = vec![0u8; self.block_size as usize];
                block_data[..target_bytes.len()].copy_from_slice(target_bytes);

                let write_request = Box::new(crate::device::block::request::BlockIORequest {
                    request_type: crate::device::block::request::BlockIORequestType::Write,
                    sector: self.block_to_sector(block_number as u64),
                    sector_count: (self.block_size / 512) as usize,
                    head: 0,
                    cylinder: 0,
                    buffer: block_data,
                });

                self.block_device.enqueue_request(write_request);
                let write_results = self.block_device.process_requests();

                if let Some(write_result) = write_results.first() {
                    match &write_result.result {
                        Ok(_) => {}
                        Err(_) => {
                            return Err(FileSystemError::new(
                                FileSystemErrorKind::IoError,
                                "Failed to write symlink target data",
                            ));
                        }
                    }
                } else {
                    return Err(FileSystemError::new(
                        FileSystemErrorKind::IoError,
                        "No response from symlink target write",
                    ));
                }
            }
        }

        if let FileType::CharDevice(device_info) | FileType::BlockDevice(device_info) = &file_type {
            // Old-style ext2 device encoding: (major << 8) | minor packed
            // into block[0], with major and minor each truncated to 8 bits.
            let major = (device_info.device_id >> 8) & 0xFF;
            let minor = device_info.device_id & 0xFF;
            let device_id = (major << 8) | minor;
            new_inode.block[0] = (device_id as u32).to_le();
            new_inode.size = 0_u32.to_le(); }

        self.write_inode(new_inode_number, &new_inode)?;

        // Link the inode into the parent directory.
        self.add_directory_entry(
            ext2_parent.inode_number(),
            name,
            new_inode_number,
            file_type.clone(),
        )?;

        if matches!(file_type, FileType::Directory) {
            // Write "." and ".." into the new directory.
            self.initialize_directory(new_inode_number, ext2_parent.inode_number())?;

            // The child's ".." adds a link to the parent.
            let mut parent_inode = self.read_inode(ext2_parent.inode_number())?;
            parent_inode.links_count = (u16::from_le(parent_inode.links_count) + 1).to_le();
            self.write_inode(ext2_parent.inode_number(), &parent_inode)?;

            // Bump used_dirs_count in the block group descriptor.
            // NOTE(review): hard-coded to group 0 — verify for multi-group
            // filesystems.
            let group = 0; let bgd_block = if self.block_size == 1024 { 2 } else { 1 };
            let bgd_block_sector = self.block_to_sector(bgd_block);

            let request = Box::new(crate::device::block::request::BlockIORequest {
                request_type: crate::device::block::request::BlockIORequestType::Read,
                sector: bgd_block_sector,
                sector_count: (self.block_size / 512) as usize,
                head: 0,
                cylinder: 0,
                buffer: vec![0u8; self.block_size as usize],
            });

            self.block_device.enqueue_request(request);
            let results = self.block_device.process_requests();

            // Best-effort update: a failed BGD read silently skips the
            // directory-count bump.
            if let Some(result) = results.first() {
                if let Ok(_) = &result.result {
                    let bgd_data = &result.request.buffer;
                    let mut bgd = Ext2BlockGroupDescriptor::from_bytes(bgd_data)?;
                    let current_dirs = u16::from_le(bgd.used_dirs_count);
                    bgd.used_dirs_count = (current_dirs + 1).to_le();
                    self.update_group_descriptor(group, &bgd)?;
                }
            }
        }

        // Wrap the new inode in a VFS node of the matching type.
        let new_node = match &file_type {
            FileType::RegularFile => Arc::new(Ext2Node::new(
                new_inode_number,
                FileType::RegularFile,
                file_id,
            )),
            FileType::Directory => Arc::new(Ext2Node::new(
                new_inode_number,
                FileType::Directory,
                file_id,
            )),
            FileType::SymbolicLink(_) => {
                Arc::new(Ext2Node::new(new_inode_number, file_type.clone(), file_id))
            }
            FileType::CharDevice(_) => {
                Arc::new(Ext2Node::new(new_inode_number, file_type.clone(), file_id))
            }
            FileType::BlockDevice(_) => {
                Arc::new(Ext2Node::new(new_inode_number, file_type.clone(), file_id))
            }
            FileType::Pipe => Arc::new(Ext2Node::new(new_inode_number, FileType::Pipe, file_id)),
            FileType::Socket(_) => {
                Arc::new(Ext2Node::new(new_inode_number, file_type.clone(), file_id))
            }
            // Unreachable in practice: the mode match above already rejected
            // unsupported types.
            _ => {
                return Err(FileSystemError::new(
                    FileSystemErrorKind::NotSupported,
                    "Unsupported file type for ext2",
                ));
            }
        };

        if let Some(fs_ref) = ext2_parent.filesystem() {
            new_node.set_filesystem(fs_ref);
        }

        Ok(new_node)
    }

    /// Remove the entry `name` from the directory `parent`, freeing its
    /// inode and invalidating its page cache entries.
    ///
    /// # Errors
    /// `InvalidOperation` for "." / "..", `NotSupported` for non-ext2 nodes,
    /// `NotADirectory` if `parent` is not a directory; lookup and I/O errors
    /// propagate.
    fn remove(&self, parent: &Arc<dyn VfsNode>, name: &String) -> Result<(), FileSystemError> {
        if name == "." || name == ".." {
            return Err(FileSystemError::new(
                FileSystemErrorKind::InvalidOperation,
                "Cannot delete '.' or '..' entries",
            ));
        }

        let ext2_parent = parent.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
            FileSystemError::new(
                FileSystemErrorKind::NotSupported,
                "Invalid node type for ext2",
            )
        })?;

        match ext2_parent.file_type() {
            Ok(FileType::Directory) => {}
            Ok(_) => {
                return Err(FileSystemError::new(
                    FileSystemErrorKind::NotADirectory,
                    "Parent is not a directory",
                ));
            }
            Err(e) => return Err(e),
        }

        // Resolve the target before unlinking so we know its inode and type.
        let node = self.lookup(parent, name)?;
        let ext2_node = node.as_any().downcast_ref::<Ext2Node>().ok_or_else(|| {
            FileSystemError::new(
                FileSystemErrorKind::NotSupported,
                "Invalid node type for ext2",
            )
        })?;

        let inode_number = ext2_node.inode_number();

        let is_directory = match ext2_node.file_type() {
            Ok(FileType::Directory) => true,
            _ => false,
        };

        self.remove_directory_entry(ext2_parent.inode_number(), name)?;

        if is_directory {
            // Removing a child directory drops the link its ".." held on the
            // parent. NOTE(review): no check that the directory is empty —
            // confirm whether callers enforce that.
            let mut parent_inode = self.read_inode(ext2_parent.inode_number())?;
            let current_links = u16::from_le(parent_inode.links_count);
            if current_links > 0 {
                parent_inode.links_count = (current_links - 1).to_le();
                self.write_inode(ext2_parent.inode_number(), &parent_inode)?;
            }
        }

        // NOTE(review): frees the inode unconditionally; the target's own
        // link count is not consulted, so additional hard links (if any)
        // would dangle — verify free_inode semantics.
        self.free_inode(inode_number)?;

        // Drop any cached pages for the removed file. The cache id packs
        // (fs_id << 32) | inode_number.
        {
            use crate::fs::vfs_v2::cache::CacheId;
            use crate::mem::page_cache::PageCacheManager;
            let fs_id = self.fs_id().get();
            let cache_id = CacheId::new((fs_id << 32) | (inode_number as u64));
            PageCacheManager::global().invalidate(cache_id);
        }

        Ok(())
    }

    /// Clone of the filesystem's root node.
    fn root_node(&self) -> Arc<dyn VfsNode> {
        self.root.read().clone()
    }

    /// Human-readable filesystem name.
    fn name(&self) -> &str {
        &self.name
    }

    /// Downcasting hook for the VFS layer.
    fn as_any(&self) -> &dyn Any {
        self
    }
}
4930
4931fn register_driver() {
4933 let manager = get_fs_driver_manager();
4934 manager.register_driver(Box::new(Ext2Driver));
4935}
4936
4937driver_initcall!(register_driver);