1use crate::environment::PAGE_SIZE;
44use crate::fs::{FileObject, SeekFrom};
45use crate::mem::page::{allocate_raw_pages, free_raw_pages};
46use crate::task::{ManagedPage, Task};
47use crate::vm::vmem::{MemoryArea, VirtualMemoryMap, VirtualMemoryPermission, VirtualMemoryRegion};
48use alloc::boxed::Box;
49use alloc::string::{String, ToString};
50use alloc::{format, vec};
51use core::sync::atomic::Ordering;
52
53use super::TaskType;
54
/// ELF identification magic: 0x7F 'E' 'L' 'F'.
const ELFMAG: [u8; 4] = [0x7F, b'E', b'L', b'F'];
/// EI_CLASS value for 64-bit ELF objects.
const ELFCLASS64: u8 = 2;
/// EI_DATA value for little-endian encoding.
const ELFDATA2LSB: u8 = 1;
/// e_type: executable file with fixed load addresses.
pub const ET_EXEC: u16 = 2;
/// e_type: shared object / position-independent executable.
pub const ET_DYN: u16 = 3;
/// p_type: loadable segment.
const PT_LOAD: u32 = 1;
/// p_type: path of the program interpreter (dynamic linker).
const PT_INTERP: u32 = 3;

/// What kind of ELF object a base address is being chosen for.
#[derive(Debug, Clone, Copy)]
pub enum LoadTarget {
    /// The executable being exec'd itself.
    MainProgram,
    /// The dynamic linker requested via PT_INTERP.
    Interpreter,
    /// A shared library mapped on behalf of the program.
    SharedLib,
}
79
/// Pluggable policy hooks controlling where ELF objects are placed in memory
/// and how a PT_INTERP path is resolved to the interpreter actually opened.
pub struct LoadStrategy {
    /// Returns the base address at which to load `target`;
    /// `needs_relocation` is true for ET_DYN (position-independent) objects.
    pub choose_base_address: fn(target: LoadTarget, needs_relocation: bool) -> u64,
    /// Maps the interpreter path requested by the binary to the path that
    /// should actually be opened; returning `None` refuses dynamic linking.
    pub resolve_interpreter: fn(requested: Option<&str>) -> Option<String>,
}
88
89impl Default for LoadStrategy {
90 fn default() -> Self {
91 Self {
92 choose_base_address: |target, needs_relocation| {
93 match (target, needs_relocation) {
94 (LoadTarget::MainProgram, false) => 0, (LoadTarget::MainProgram, true) => 0x10000, (LoadTarget::Interpreter, _) => 0x40000000, (LoadTarget::SharedLib, _) => 0x50000000, }
99 },
100 resolve_interpreter: |requested| requested.map(|s| s.to_string()),
101 }
102 }
103}
104
/// How the loaded program will begin execution.
#[derive(Debug, Clone)]
pub enum ExecutionMode {
    /// Statically linked: control jumps straight to the program entry point.
    Static,
    /// Dynamically linked: control starts in the interpreter loaded from
    /// `interpreter_path`, which later transfers to the program.
    Dynamic { interpreter_path: String },
}
113
/// Everything a caller needs to finish process setup after the ELF image
/// (and, for dynamic executables, its interpreter) has been mapped.
#[derive(Debug, Clone)]
pub struct LoadElfResult {
    /// Static or dynamic execution.
    pub mode: ExecutionMode,
    /// Address execution starts at (the interpreter's entry when dynamic).
    pub entry_point: u64,
    /// The program's own entry point when `entry_point` belongs to the
    /// interpreter (used for AT_ENTRY); `None` for static executables.
    pub original_entry_point: Option<u64>,
    /// Load base of the main program when it was relocated; `None` otherwise.
    pub base_address: Option<u64>,
    /// Load base of the interpreter (AT_BASE), if one was loaded.
    pub interpreter_base: Option<u64>,
    /// Location/shape of the program header table (AT_PHDR/AT_PHENT/AT_PHNUM).
    pub program_headers: ProgramHeadersInfo,
}
130
/// Where the program header table lives in the task's address space, for
/// building the auxiliary vector (AT_PHDR / AT_PHENT / AT_PHNUM).
#[derive(Debug, Clone)]
pub struct ProgramHeadersInfo {
    /// Virtual address of the first program header entry.
    pub phdr_addr: u64,
    /// Size in bytes of one entry (e_phentsize).
    pub phdr_size: u64,
    /// Number of entries (e_phnum).
    pub phdr_count: u64,
}
138
// ELF auxiliary vector entry tags (System V ABI).
pub const AT_NULL: u64 = 0;
pub const AT_IGNORE: u64 = 1;
pub const AT_EXECFD: u64 = 2;
pub const AT_PHDR: u64 = 3;
pub const AT_PHENT: u64 = 4;
pub const AT_PHNUM: u64 = 5;
pub const AT_PAGESZ: u64 = 6;
pub const AT_BASE: u64 = 7;
pub const AT_FLAGS: u64 = 8;
pub const AT_ENTRY: u64 = 9;
pub const AT_NOTELF: u64 = 10;
pub const AT_UID: u64 = 11;
pub const AT_EUID: u64 = 12;
pub const AT_GID: u64 = 13;
pub const AT_EGID: u64 = 14;
pub const AT_PLATFORM: u64 = 15;
pub const AT_HWCAP: u64 = 16;
pub const AT_CLKTCK: u64 = 17;
pub const AT_RANDOM: u64 = 25;

/// One `(type, value)` pair of the auxiliary vector placed on the new
/// process's stack.
#[derive(Debug, Clone, Copy)]
pub struct AuxVec {
    /// Entry tag; one of the `AT_*` constants.
    pub a_type: u64,
    /// Tag-specific value.
    pub a_val: u64,
}

impl AuxVec {
    /// Builds an entry from a tag and its value.
    pub fn new(a_type: u64, a_val: u64) -> Self {
        AuxVec { a_type, a_val }
    }
}
173
// Program header permission bits (p_flags).
pub const PF_X: u32 = 1;
pub const PF_W: u32 = 2;
pub const PF_R: u32 = 4;

// Byte offsets into the e_ident[] identification array.
const EI_MAG0: usize = 0;
const EI_MAG1: usize = 1;
const EI_MAG2: usize = 2;
const EI_MAG3: usize = 3;
const EI_CLASS: usize = 4;
const EI_DATA: usize = 5;
/// Decodes a `u16` from `buffer` at `offset`, honoring the file's byte order.
///
/// Panics if fewer than `offset + 2` bytes are available; callers validate
/// buffer sizes before decoding.
fn read_u16(buffer: &[u8], offset: usize, is_little_endian: bool) -> u16 {
    let bytes: [u8; 2] = buffer[offset..offset + 2].try_into().unwrap();
    match is_little_endian {
        true => u16::from_le_bytes(bytes),
        false => u16::from_be_bytes(bytes),
    }
}
197
/// Decodes a `u32` from `buffer` at `offset`, honoring the file's byte order.
///
/// Panics if fewer than `offset + 4` bytes are available; callers validate
/// buffer sizes before decoding.
fn read_u32(buffer: &[u8], offset: usize, is_little_endian: bool) -> u32 {
    let bytes: [u8; 4] = buffer[offset..offset + 4].try_into().unwrap();
    match is_little_endian {
        true => u32::from_le_bytes(bytes),
        false => u32::from_be_bytes(bytes),
    }
}
206
/// Decodes a `u64` from `buffer` at `offset`, honoring the file's byte order.
///
/// Panics if fewer than `offset + 8` bytes are available; callers validate
/// buffer sizes before decoding.
fn read_u64(buffer: &[u8], offset: usize, is_little_endian: bool) -> u64 {
    let bytes: [u8; 8] = buffer[offset..offset + 8].try_into().unwrap();
    match is_little_endian {
        true => u64::from_le_bytes(bytes),
        false => u64::from_be_bytes(bytes),
    }
}
215
/// Decoded ELF64 file header fields (see `ElfHeader::parse`).
#[derive(Debug)]
pub struct ElfHeader {
    /// e_ident[EI_CLASS]; only ELFCLASS64 is accepted by `parse`.
    pub ei_class: u8,
    /// e_ident[EI_DATA]; selects the byte order used to decode the file.
    pub ei_data: u8,
    pub e_type: u16,
    pub e_machine: u16,
    pub e_version: u32,
    /// Entry point virtual address.
    pub e_entry: u64,
    /// File offset of the program header table.
    pub e_phoff: u64,
    /// File offset of the section header table.
    pub e_shoff: u64,
    pub e_flags: u32,
    pub e_ehsize: u16,
    /// Size in bytes of one program header entry.
    pub e_phentsize: u16,
    /// Number of program header entries.
    pub e_phnum: u16,
    pub e_shentsize: u16,
    pub e_shnum: u16,
    pub e_shstrndx: u16,
}
234
/// Decoded ELF64 program header entry (see `ProgramHeader::parse`).
#[derive(Debug)]
pub struct ProgramHeader {
    /// Segment type (PT_LOAD, PT_INTERP, ...).
    pub p_type: u32,
    /// Permission bits (PF_R / PF_W / PF_X).
    pub p_flags: u32,
    /// Offset of the segment data within the file.
    pub p_offset: u64,
    /// Virtual address the segment wants to be loaded at.
    pub p_vaddr: u64,
    pub p_paddr: u64,
    /// Number of bytes backed by file data.
    pub p_filesz: u64,
    /// Total in-memory size (>= p_filesz; the excess is zero-filled).
    pub p_memsz: u64,
    /// Requested alignment.
    pub p_align: u64,
}
246
/// Why parsing the ELF file header failed.
#[derive(Debug)]
pub enum ElfHeaderParseErrorKind {
    /// The file does not begin with `\x7FELF`.
    InvalidMagicNumber,
    /// e_ident[EI_CLASS] is not ELFCLASS64 (only 64-bit is supported).
    UnsupportedClass,
    /// Header is truncated or otherwise malformed.
    InvalidData,
    /// Anything else, described by the contained string.
    Other(String),
}

/// Error returned by `ElfHeader::parse`.
#[derive(Debug)]
pub struct ElfHeaderParseError {
    pub kind: ElfHeaderParseErrorKind,
    /// Human-readable description of the failure.
    pub message: String,
}

/// Why parsing a program header entry failed.
#[derive(Debug)]
pub enum ProgramHeaderParseErrorKind {
    /// Buffer shorter than the 56 bytes of an ELF64 program header.
    InvalidSize,
    /// Anything else, described by the contained string.
    Other(String),
}

/// Error returned by `ProgramHeader::parse`.
#[derive(Debug)]
pub struct ProgramHeaderParseError {
    pub kind: ProgramHeaderParseErrorKind,
    /// Human-readable description of the failure.
    pub message: String,
}

/// Catch-all error type for the ELF loading pipeline.
#[derive(Debug)]
pub struct ElfLoaderError {
    /// Human-readable description of the failure.
    pub message: String,
}
277
278impl ElfHeader {
279 pub fn parse(buffer: &[u8]) -> Result<Self, ElfHeaderParseError> {
280 if buffer.len() < 64 {
281 return Err(ElfHeaderParseError {
282 kind: ElfHeaderParseErrorKind::InvalidData,
283 message: "ELF header too small".to_string(),
284 });
285 }
286
287 if buffer[EI_MAG0] != ELFMAG[0]
288 || buffer[EI_MAG1] != ELFMAG[1]
289 || buffer[EI_MAG2] != ELFMAG[2]
290 || buffer[EI_MAG3] != ELFMAG[3]
291 {
292 return Err(ElfHeaderParseError {
293 kind: ElfHeaderParseErrorKind::InvalidMagicNumber,
294 message: "Invalid ELF magic number".to_string(),
295 });
296 }
297
298 let ei_class = buffer[EI_CLASS];
299 if ei_class != ELFCLASS64 {
300 return Err(ElfHeaderParseError {
301 kind: ElfHeaderParseErrorKind::UnsupportedClass,
302 message: "Only 64-bit ELF is supported".to_string(),
303 });
304 }
305
306 let ei_data = buffer[EI_DATA];
308 let is_little_endian = ei_data == ELFDATA2LSB;
309 let e_type = read_u16(buffer, 16, is_little_endian);
310 let e_machine = read_u16(buffer, 18, is_little_endian);
311 let e_version = read_u32(buffer, 20, is_little_endian);
312 let e_entry = read_u64(buffer, 24, is_little_endian);
313 let e_phoff = read_u64(buffer, 32, is_little_endian);
314 let e_shoff = read_u64(buffer, 40, is_little_endian);
315 let e_flags = read_u32(buffer, 48, is_little_endian);
316 let e_ehsize = read_u16(buffer, 52, is_little_endian);
317 let e_phentsize = read_u16(buffer, 54, is_little_endian);
318 let e_phnum = read_u16(buffer, 56, is_little_endian);
319 let e_shentsize = read_u16(buffer, 58, is_little_endian);
320 let e_shnum = read_u16(buffer, 60, is_little_endian);
321 let e_shstrndx = read_u16(buffer, 62, is_little_endian);
322
323 Ok(Self {
324 ei_class,
325 ei_data,
326 e_type,
327 e_machine,
328 e_version,
329 e_entry,
330 e_phoff,
331 e_shoff,
332 e_flags,
333 e_ehsize,
334 e_phentsize,
335 e_phnum,
336 e_shentsize,
337 e_shnum,
338 e_shstrndx,
339 })
340 }
341}
342
343impl ProgramHeader {
344 pub fn parse(buffer: &[u8], is_little_endian: bool) -> Result<Self, ProgramHeaderParseError> {
345 if buffer.len() < 56 {
346 return Err(ProgramHeaderParseError {
347 kind: ProgramHeaderParseErrorKind::InvalidSize,
348 message: "Program header too small".to_string(),
349 });
350 }
351
352 let p_type = read_u32(buffer, 0, is_little_endian);
354 let p_flags = read_u32(buffer, 4, is_little_endian);
355 let p_offset = read_u64(buffer, 8, is_little_endian);
356 let p_vaddr = read_u64(buffer, 16, is_little_endian);
357 let p_paddr = read_u64(buffer, 24, is_little_endian);
358 let p_filesz = read_u64(buffer, 32, is_little_endian);
359 let p_memsz = read_u64(buffer, 40, is_little_endian);
360 let p_align = read_u64(buffer, 48, is_little_endian);
361
362 Ok(Self {
363 p_type,
364 p_flags,
365 p_offset,
366 p_vaddr,
367 p_paddr,
368 p_filesz,
369 p_memsz,
370 p_align,
371 })
372 }
373}
374
375fn read_program_header(
377 header: &ElfHeader,
378 file_obj: &dyn FileObject,
379 index: u16,
380) -> Result<ProgramHeader, ElfLoaderError> {
381 let offset = header.e_phoff + (index as u64) * (header.e_phentsize as u64);
382 file_obj
383 .seek(SeekFrom::Start(offset))
384 .map_err(|e| ElfLoaderError {
385 message: format!("Failed to seek to program header {}: {:?}", index, e),
386 })?;
387
388 let mut ph_buffer = vec![0u8; header.e_phentsize as usize];
389 file_obj.read(&mut ph_buffer).map_err(|e| ElfLoaderError {
390 message: format!("Failed to read program header {}: {:?}", index, e),
391 })?;
392
393 ProgramHeader::parse(&ph_buffer, header.ei_data == ELFDATA2LSB).map_err(|e| ElfLoaderError {
394 message: format!("Failed to parse program header {}: {:?}", index, e),
395 })
396}
397
398fn for_each_program_header<F>(
400 header: &ElfHeader,
401 file_obj: &dyn FileObject,
402 mut callback: F,
403) -> Result<(), ElfLoaderError>
404where
405 F: FnMut(u16, &ProgramHeader) -> Result<bool, ElfLoaderError>, {
407 for i in 0..header.e_phnum {
408 let ph = read_program_header(header, file_obj, i)?;
409 let should_continue = callback(i, &ph)?;
410 if !should_continue {
411 break;
412 }
413 }
414 Ok(())
415}
416
/// Summary of one mapped PT_LOAD segment.
#[derive(Debug)]
pub struct LoadedSegment {
    /// Virtual address the segment was mapped at.
    pub vaddr: u64,
    /// Mapped size in bytes.
    pub size: u64,
    /// Original p_flags permission bits (PF_R / PF_W / PF_X).
    pub flags: u32,
}
424pub fn load_elf_into_task(file_obj: &dyn FileObject, task: &Task) -> Result<u64, ElfLoaderError> {
447 let result = analyze_and_load_elf(file_obj, task)?;
448 Ok(result.entry_point)
449}
450
451pub fn analyze_and_load_elf(
468 file_obj: &dyn FileObject,
469 task: &Task,
470) -> Result<LoadElfResult, ElfLoaderError> {
471 analyze_and_load_elf_with_strategy(file_obj, task, &LoadStrategy::default())
472}
473
/// Loads an ELF image into `task` using `strategy` for address placement and
/// interpreter resolution, returning everything needed to build the auxv and
/// start execution.
///
/// Statically linked images get control directly at their entry point; an
/// image with a PT_INTERP segment additionally gets its interpreter mapped,
/// and execution starts in the interpreter instead.
///
/// # Errors
/// Fails on I/O errors, malformed headers, unmappable segments, or when the
/// strategy refuses to resolve a requested interpreter.
pub fn analyze_and_load_elf_with_strategy(
    file_obj: &dyn FileObject,
    task: &Task,
    strategy: &LoadStrategy,
) -> Result<LoadElfResult, ElfLoaderError> {
    file_obj
        .seek(SeekFrom::Start(0))
        .map_err(|e| ElfLoaderError {
            message: format!("Failed to seek to start of file: {:?}", e),
        })?;

    // NOTE(review): unlike the interpreter path below, the byte count from
    // this read is not checked; a short read leaves trailing zeroes that
    // ElfHeader::parse will treat as file data — confirm this is acceptable.
    let mut header_buffer = vec![0u8; 64]; file_obj
        .read(&mut header_buffer)
        .map_err(|e| ElfLoaderError {
            message: format!("Failed to read ELF header: {:?}", e),
        })?;

    let header = match ElfHeader::parse(&header_buffer) {
        Ok(header) => header,
        Err(e) => {
            return Err(ElfLoaderError {
                message: format!("Failed to parse ELF header: {:?}", e),
            });
        }
    };

    // A PT_INTERP segment marks the binary as dynamically linked.
    let interpreter_path = find_interpreter_path(&header, file_obj)?;

    // ET_DYN objects are position-independent and get a non-zero base.
    let needs_relocation = header.e_type == ET_DYN;

    match interpreter_path {
        Some(interp_path) => {
            crate::println!(
                "ELF requires dynamic linking with interpreter: {}",
                interp_path
            );

            // Let the strategy translate/veto the requested interpreter.
            let actual_interpreter = (strategy.resolve_interpreter)(Some(&interp_path));

            if let Some(final_interp_path) = actual_interpreter {
                crate::println!("Using interpreter: {}", final_interp_path);
                let base_address =
                    load_elf_segments_for_interpreter(&header, file_obj, task, strategy)?;
                let (interpreter_entry, interpreter_base) =
                    load_interpreter(&final_interp_path, task, strategy)?;

                // NOTE(review): `base_address` is the address the first
                // PT_LOAD segment landed at, not necessarily the load bias;
                // adding e_phoff assumes the first segment maps the file from
                // offset 0 with p_vaddr == 0 — confirm for unusual layouts.
                let phdr_info = ProgramHeadersInfo {
                    phdr_addr: base_address + header.e_phoff,
                    phdr_size: header.e_phentsize as u64,
                    phdr_count: header.e_phnum as u64,
                };

                // The program's own entry: relocated entries are biased by
                // the base (same first-segment assumption as above).
                let original_entry = if needs_relocation {
                    base_address + header.e_entry
                } else {
                    header.e_entry
                };

                Ok(LoadElfResult {
                    mode: ExecutionMode::Dynamic {
                        interpreter_path: final_interp_path,
                    },
                    // Control starts in the interpreter, not the program.
                    entry_point: interpreter_entry,
                    original_entry_point: Some(original_entry),
                    base_address: Some(base_address),
                    interpreter_base: Some(interpreter_base),
                    program_headers: phdr_info,
                })
            } else {
                // Strategy refused to resolve an interpreter.
                return Err(ElfLoaderError {
                    message: "Dynamic linking not supported by current ABI".to_string(),
                });
            }
        }
        None => {
            // Statically linked executable.
            let base_address =
                (strategy.choose_base_address)(LoadTarget::MainProgram, needs_relocation);
            let entry_point = load_elf_into_task_static(&header, file_obj, task, strategy)?;

            let phdr_info = if needs_relocation {
                // Relocated image: the in-file phdr table was mapped along
                // with the segments at base + e_phoff.
                ProgramHeadersInfo {
                    phdr_addr: base_address + header.e_phoff,
                    phdr_size: header.e_phentsize as u64,
                    phdr_count: header.e_phnum as u64,
                }
            } else {
                // Fixed-address image: copy the table to a dedicated mapping
                // so AT_PHDR has a guaranteed-resident address.
                let phdr_mem_addr = load_program_headers_into_memory(&header, file_obj, task)?;
                ProgramHeadersInfo {
                    phdr_addr: phdr_mem_addr,
                    phdr_size: header.e_phentsize as u64,
                    phdr_count: header.e_phnum as u64,
                }
            };

            Ok(LoadElfResult {
                mode: ExecutionMode::Static,
                entry_point,
                original_entry_point: None, base_address: if needs_relocation {
                    Some(base_address)
                } else {
                    None
                },
                interpreter_base: None, program_headers: phdr_info,
            })
        }
    }
}
616
617fn find_interpreter_path(
619 header: &ElfHeader,
620 file_obj: &dyn FileObject,
621) -> Result<Option<String>, ElfLoaderError> {
622 let mut result = None;
623
624 for_each_program_header(header, file_obj, |_i, ph| {
625 if ph.p_type == PT_INTERP {
626 file_obj
628 .seek(SeekFrom::Start(ph.p_offset))
629 .map_err(|e| ElfLoaderError {
630 message: format!("Failed to seek to interpreter path: {:?}", e),
631 })?;
632
633 let mut interp_buffer = vec![0u8; ph.p_filesz as usize];
634 file_obj
635 .read(&mut interp_buffer)
636 .map_err(|e| ElfLoaderError {
637 message: format!("Failed to read interpreter path: {:?}", e),
638 })?;
639
640 if let Some(null_pos) = interp_buffer.iter().position(|&x| x == 0) {
642 interp_buffer.truncate(null_pos);
643 }
644
645 let path = core::str::from_utf8(&interp_buffer)
646 .map_err(|_| ElfLoaderError {
647 message: "Invalid UTF-8 in interpreter path".to_string(),
648 })?
649 .to_string();
650
651 result = Some(path);
652 return Ok(false); }
654 Ok(true) })?;
656
657 Ok(result)
658}
659
/// Maps every PT_LOAD segment of the main program (when an interpreter will
/// run it) and returns the address the first PT_LOAD segment landed at.
///
/// NOTE(review): the returned value is the first segment's mapped address,
/// not the load bias; callers add `e_phoff` to it, which is only correct
/// when the first segment has p_vaddr == 0 and maps the file from offset 0
/// — confirm for non-standard segment layouts.
fn load_elf_segments_for_interpreter(
    header: &ElfHeader,
    file_obj: &dyn FileObject,
    task: &Task,
    strategy: &LoadStrategy,
) -> Result<u64, ElfLoaderError> {
    // ET_DYN objects are position-independent and get a non-zero base.
    let needs_relocation = header.e_type == ET_DYN;
    let base_address = (strategy.choose_base_address)(LoadTarget::MainProgram, needs_relocation);
    let mut first_load_addr: Option<u64> = None;
    let mut _load_segment_count = 0;

    for_each_program_header(header, file_obj, |_i, ph| {
        if ph.p_type == PT_LOAD {
            let segment_addr = base_address + ph.p_vaddr;
            // Remember where the first loadable segment went (see note above).
            if first_load_addr.is_none() {
                first_load_addr = Some(segment_addr);
            }
            load_elf_segment_at_address(ph, file_obj, task, segment_addr)?;
            _load_segment_count += 1;
        }
        Ok(true) })?;

    // Fall back to the chosen base if the image had no PT_LOAD at all.
    let actual_base = first_load_addr.unwrap_or(base_address);

    Ok(actual_base)
}
709
/// Upper bound on nested PT_INTERP chains (an interpreter requesting another
/// interpreter) before loading is aborted.
const MAX_INTERPRETER_DEPTH: usize = 5;

/// Loads the dynamic linker named by PT_INTERP into `task`.
///
/// Returns `(entry_point, load_base)` of the interpreter that will actually
/// receive control (following nested PT_INTERP chains if present).
fn load_interpreter(
    interpreter_path: &str,
    task: &Task,
    strategy: &LoadStrategy,
) -> Result<(u64, u64), ElfLoaderError> {
    load_interpreter_recursive(interpreter_path, task, strategy, 0)
}
721
/// Opens, validates and maps the interpreter at `interpreter_path` into
/// `task`, following nested PT_INTERP chains up to `MAX_INTERPRETER_DEPTH`.
///
/// Returns `(entry_point, load_base)` of the innermost interpreter — the one
/// that will actually receive control.
///
/// # Errors
/// Fails when the recursion limit is hit, the VFS is unavailable, the file
/// cannot be opened/read, the header is invalid, or mapping fails.
fn load_interpreter_recursive(
    interpreter_path: &str,
    task: &Task,
    strategy: &LoadStrategy,
    depth: usize,
) -> Result<(u64, u64), ElfLoaderError> {
    // Bound the PT_INTERP chain to avoid unbounded recursion on crafted files.
    if depth >= MAX_INTERPRETER_DEPTH {
        return Err(ElfLoaderError {
            message: format!(
                "Maximum interpreter recursion depth ({}) exceeded",
                MAX_INTERPRETER_DEPTH
            ),
        });
    }

    crate::println!(
        "Loading interpreter (depth {}): {}",
        depth,
        interpreter_path
    );

    let vfs = task.get_vfs().ok_or_else(|| ElfLoaderError {
        message: "Task VFS not available for interpreter loading".to_string(),
    })?;

    let file_obj = vfs
        .open(interpreter_path, 0)
        .map_err(|fs_err| ElfLoaderError {
            message: format!(
                "Failed to open interpreter '{}': {:?}",
                interpreter_path, fs_err
            ),
        })?;

    // The VFS hands back a generic kernel object; we need a file.
    let file_arc = match file_obj {
        crate::object::KernelObject::File(file_ref) => file_ref,
        _ => {
            return Err(ElfLoaderError {
                message: "Invalid kernel object type for interpreter file".to_string(),
            });
        }
    };

    let file_object: &dyn crate::fs::FileObject = file_arc.as_ref();

    file_object
        .seek(crate::fs::SeekFrom::Start(0))
        .map_err(|e| ElfLoaderError {
            message: format!("Failed to seek to start of interpreter file: {:?}", e),
        })?;

    let mut header_buffer = vec![0u8; 64];
    let bytes_read = file_object
        .read(&mut header_buffer)
        .map_err(|e| ElfLoaderError {
            message: format!("Failed to read interpreter ELF header: {:?}", e),
        })?;

    crate::println!(
        "Read {} bytes for interpreter ELF header (expected 64)",
        bytes_read
    );

    // Unlike the main-program path, a short header read is rejected here.
    if bytes_read < 64 {
        return Err(ElfLoaderError {
            message: format!(
                "Interpreter ELF header too small: read {} bytes, expected 64",
                bytes_read
            ),
        });
    }

    let interp_header = ElfHeader::parse(&header_buffer).map_err(|e| ElfLoaderError {
        message: format!("Failed to parse interpreter ELF header: {}", e.message),
    })?;

    // If this interpreter itself declares PT_INTERP, recurse into it.
    let nested_interpreter_path = find_interpreter_path(&interp_header, file_object)?;
    let (final_entry_point, final_base) = if let Some(nested_path) = nested_interpreter_path {
        let resolved_nested_path =
            (strategy.resolve_interpreter)(Some(&nested_path)).unwrap_or(nested_path);
        crate::println!(
            "Interpreter {} requests nested interpreter: {}",
            interpreter_path,
            resolved_nested_path
        );

        load_interpreter_recursive(&resolved_nested_path, task, strategy, depth + 1)?
    } else {
        let interp_needs_relocation = interp_header.e_type == ET_DYN;

        // Measure the total virtual span covered by all PT_LOAD segments so
        // one contiguous unmapped area can host the whole interpreter image.
        let mut min_vaddr: u64 = u64::MAX;
        let mut max_end: u64 = 0;
        for_each_program_header(&interp_header, file_object, |_i, ph| {
            if ph.p_type == PT_LOAD {
                if ph.p_vaddr < min_vaddr {
                    min_vaddr = ph.p_vaddr;
                }
                let end = ph.p_vaddr.saturating_add(ph.p_memsz);
                if end > max_end {
                    max_end = end;
                }
            }
            Ok(true)
        })?;

        if min_vaddr == u64::MAX {
            return Err(ElfLoaderError {
                message: "Interpreter has no PT_LOAD segments".to_string(),
            });
        }

        let span = max_end.saturating_sub(min_vaddr) as usize;
        let align = crate::environment::PAGE_SIZE;
        let span_aligned = (span + align - 1) & !(align - 1);

        // The strategy's preferred base is computed but currently unused;
        // placement is delegated to the VM manager's free-area search.
        let _preferred =
            (strategy.choose_base_address)(LoadTarget::Interpreter, interp_needs_relocation);
        let start = task
            .vm_manager
            .find_unmapped_area(span_aligned, align)
            .ok_or_else(|| ElfLoaderError {
                message: "No unmapped area available for interpreter".to_string(),
            })? as u64;

        // Bias so that (base + min_vaddr) == start of the found area.
        let interpreter_base_add = start.saturating_sub(min_vaddr);
        crate::println!(
            "Interpreter base address: {:#x} (mapped span: {:#x} bytes)",
            interpreter_base_add,
            span_aligned
        );

        load_elf_segments_with_base(&interp_header, file_object, task, interpreter_base_add)?;

        // NOTE(review): for an ET_EXEC interpreter the segments above are
        // still mapped at base + p_vaddr, yet the entry point is left
        // unrelocated — those disagree unless the found area equals the
        // linked addresses. Confirm ET_EXEC interpreters are not expected.
        let entry = if interp_needs_relocation {
            interpreter_base_add + interp_header.e_entry as u64
        } else {
            interp_header.e_entry
        };
        (entry, interpreter_base_add)
    };

    crate::println!(
        "Interpreter entry point (depth {}): {:#x}",
        depth,
        final_entry_point
    );
    Ok((final_entry_point, final_base))
}
885
886fn load_elf_segments_with_base(
888 header: &ElfHeader,
889 file_obj: &dyn FileObject,
890 task: &Task,
891 base_address: u64,
892) -> Result<(), ElfLoaderError> {
893 for_each_program_header(header, file_obj, |_i, ph| {
895 if ph.p_type == PT_LOAD {
896 let segment_addr = base_address + ph.p_vaddr;
897 load_elf_segment_at_address(ph, file_obj, task, segment_addr)?;
898 }
899 Ok(true) })?;
901
902 Ok(())
903}
904
/// Maps all PT_LOAD segments of an ELF image (no interpreter) into `task`,
/// copies their file-backed bytes, updates the task's text/data accounting
/// and initial brk, and returns the (possibly relocated) entry point.
///
/// # Errors
/// Fails on I/O errors, mapping failures, untranslatable addresses, or a
/// PT_LOAD segment with no R/W/X permission bits.
fn load_elf_into_task_static(
    header: &ElfHeader,
    file_obj: &dyn FileObject,
    task: &Task,
    strategy: &LoadStrategy,
) -> Result<u64, ElfLoaderError> {
    // ET_DYN images are position-independent and get a non-zero base.
    let needs_relocation = header.e_type == ET_DYN;
    let base_address = (strategy.choose_base_address)(LoadTarget::MainProgram, needs_relocation);
    for_each_program_header(header, file_obj, |_i, ph| {
        if ph.p_type == PT_LOAD {
            let segment_addr = base_address + ph.p_vaddr;
            let align = ph.p_align as usize;

            // Compute a page-aligned mapping window that covers the segment.
            let (mapping_addr, aligned_size, effective_align) = if align == 0 || align == 1 {
                // No meaningful alignment requested: align down to a page.
                let page_offset = (segment_addr as usize) % PAGE_SIZE;
                let mapping_start = (segment_addr as usize) - page_offset;
                let mapping_size = (ph.p_memsz as usize) + page_offset;
                let aligned_size = (mapping_size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
                (mapping_start, aligned_size, PAGE_SIZE)
            } else {
                let effective_align = core::cmp::max(align, PAGE_SIZE);

                // ELF requires p_vaddr ≡ p_offset (mod p_align); check the
                // congruence and compensate when it does not hold.
                let vaddr_offset = (segment_addr as usize) % align;
                let file_offset = (ph.p_offset as usize) % align;

                if vaddr_offset != file_offset {
                    // Shift the mapping start so vaddr/file offsets agree
                    // modulo the alignment, then align down.
                    let adjustment = if vaddr_offset >= file_offset {
                        vaddr_offset - file_offset
                    } else {
                        align - (file_offset - vaddr_offset)
                    };
                    let adjusted_segment_addr = (segment_addr as usize) - adjustment;
                    let aligned_addr = (adjusted_segment_addr) & !(effective_align - 1);
                    let offset = (segment_addr as usize) - aligned_addr;
                    let mapping_size = (ph.p_memsz as usize) + offset;
                    let aligned_size = (mapping_size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
                    (aligned_addr, aligned_size, effective_align)
                } else {
                    // Congruent case: just align the start down.
                    let aligned_addr = (segment_addr as usize) & !(effective_align - 1);
                    let offset = (segment_addr as usize) - aligned_addr;
                    let mapping_size = (ph.p_memsz as usize) + offset;
                    let aligned_size = (mapping_size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
                    (aligned_addr, aligned_size, effective_align)
                }
            };

            map_elf_segment(
                task,
                mapping_addr,
                aligned_size,
                effective_align,
                ph.p_flags,
            )
            .map_err(|e| ElfLoaderError {
                message: format!("Failed to map ELF segment at {:#x}: {:?}", mapping_addr, e),
            })?;

            // Classify for accounting: executable -> text, R or W -> data.
            let segment_type = if ph.p_flags & PF_X != 0 {
                VirtualMemoryRegion::Text
            } else if ph.p_flags & PF_W != 0 || ph.p_flags & PF_R != 0 {
                VirtualMemoryRegion::Data
            } else {
                VirtualMemoryRegion::Unknown
            };

            match segment_type {
                VirtualMemoryRegion::Text => {
                    task.text_size
                        .fetch_add(aligned_size as usize, Ordering::SeqCst);
                }
                VirtualMemoryRegion::Data => {
                    task.data_size
                        .fetch_add(aligned_size as usize, Ordering::SeqCst);
                }
                _ => {
                    // PT_LOAD with no permission bits at all is rejected.
                    return Err(ElfLoaderError {
                        message: format!("Unknown segment type: {:#x}", ph.p_flags),
                    });
                }
            }

            // Push the initial program break past every mapped segment.
            // usize::MAX appears to be the "brk not yet set" sentinel —
            // TODO(review): confirm against Task::brk initialization.
            let segment_end = mapping_addr + aligned_size;
            let current_brk_raw = task.brk.load(core::sync::atomic::Ordering::SeqCst);
            let current_brk = if current_brk_raw == usize::MAX {
                0
            } else {
                current_brk_raw
            };
            if segment_end > current_brk {
                task.brk
                    .store(segment_end, core::sync::atomic::Ordering::SeqCst);
            }

            if ph.p_filesz > 0 {
                let mut segment_data = vec![0u8; ph.p_filesz as usize];

                file_obj
                    .seek(SeekFrom::Start(ph.p_offset))
                    .map_err(|e| ElfLoaderError {
                        message: format!("Failed to seek to segment data: {:?}", e),
                    })?;

                // NOTE(review): the byte count of this read is not checked;
                // a short read leaves trailing zeroes in the segment image.
                file_obj
                    .read(&mut segment_data)
                    .map_err(|e| ElfLoaderError {
                        message: format!("Failed to read segment data: {:?}", e),
                    })?;

                let data_offset = (segment_addr as usize) - mapping_addr;
                let target_vaddr = mapping_addr + data_offset;

                // Single translate + linear copy: relies on map_elf_segment
                // backing each mapping with one physically contiguous run of
                // pages (it allocates via a single allocate_raw_pages call).
                match task.vm_manager.translate_vaddr(target_vaddr) {
                    Some(paddr) => unsafe {
                        core::ptr::copy_nonoverlapping(
                            segment_data.as_ptr(),
                            paddr as *mut u8,
                            ph.p_filesz as usize,
                        );
                    },
                    None => {
                        return Err(ElfLoaderError {
                            message: format!(
                                "Failed to translate virtual address {:#x}",
                                target_vaddr
                            ),
                        });
                    }
                }
            }
        }
        Ok(true) })?;

    // Relocated images get their entry biased by the chosen base.
    let final_entry_point = if needs_relocation {
        base_address + header.e_entry
    } else {
        header.e_entry
    };
    Ok(final_entry_point)
}
1086
/// Copies the program header table into a dedicated read-only mapping in the
/// task so AT_PHDR can point at a guaranteed-resident address (used for
/// static, non-relocated executables whose phdr table may not be covered by
/// any PT_LOAD segment).
///
/// Returns the virtual address of the copied table.
fn load_program_headers_into_memory(
    header: &ElfHeader,
    file_obj: &dyn FileObject,
    task: &Task,
) -> Result<u64, ElfLoaderError> {
    let phdr_table_size = (header.e_phentsize as u64) * (header.e_phnum as u64);

    if phdr_table_size == 0 {
        return Err(ElfLoaderError {
            message: "No program headers to load".to_string(),
        });
    }

    // NOTE(review): fixed address — assumes nothing else maps 0x70000000 in
    // this task; map_elf_segment will error out if something already does.
    let phdr_vaddr = 0x70000000u64; let page_aligned_size = ((phdr_table_size as usize) + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);

    map_elf_segment(
        task,
        phdr_vaddr as usize,
        page_aligned_size,
        PAGE_SIZE,
        PF_R,
    )
    .map_err(|e| ElfLoaderError {
        message: format!("Failed to map memory for program headers: {}", e),
    })?;

    file_obj
        .seek(SeekFrom::Start(header.e_phoff))
        .map_err(|e| ElfLoaderError {
            message: format!("Failed to seek to program headers: {:?}", e),
        })?;

    // NOTE(review): the byte count of this read is not checked; a short
    // read leaves trailing zeroes in the copied table.
    let mut phdr_data = vec![0u8; phdr_table_size as usize];
    file_obj.read(&mut phdr_data).map_err(|e| ElfLoaderError {
        message: format!("Failed to read program headers: {:?}", e),
    })?;

    // Single translate + linear copy across the whole table: relies on the
    // mapping above being backed by one physically contiguous page run.
    match task.vm_manager.translate_vaddr(phdr_vaddr as usize) {
        Some(paddr) => unsafe {
            core::ptr::copy_nonoverlapping(
                phdr_data.as_ptr(),
                paddr as *mut u8,
                phdr_table_size as usize,
            );
        },
        None => {
            return Err(ElfLoaderError {
                message: format!(
                    "Failed to translate program headers virtual address {:#x}",
                    phdr_vaddr
                ),
            });
        }
    }
    Ok(phdr_vaddr)
}
1170
/// Allocates physically contiguous pages and maps them into `task` at
/// `vaddr` with permissions derived from the ELF `p_flags` bits, registering
/// each page with the task for lifetime management.
///
/// `vaddr` and `size` must be page-aligned; `align` must be a power of two
/// of at least PAGE_SIZE.
///
/// # Errors
/// Returns a static message on invalid arguments, an existing overlapping
/// mapping, allocation failure, or VM-manager rejection.
fn map_elf_segment(
    task: &Task,
    vaddr: usize,
    size: usize,
    align: usize,
    flags: u32,
) -> Result<(), &'static str> {
    if align == 0 {
        return Err("Alignment must be greater than zero");
    }

    if !align.is_power_of_two() || align < PAGE_SIZE {
        return Err("Invalid alignment: must be power of 2 and at least PAGE_SIZE");
    }

    if size == 0 || size % PAGE_SIZE != 0 {
        return Err("Invalid size: must be non-zero and page-aligned");
    }

    if vaddr % PAGE_SIZE != 0 {
        return Err("Address is not aligned to PAGE_SIZE");
    }

    // Translate ELF permission bits into VM permission flags; user tasks
    // additionally get the User bit.
    let mut permissions = 0;
    if flags & PF_R != 0 {
        permissions |= VirtualMemoryPermission::Read as usize;
    }
    if flags & PF_W != 0 {
        permissions |= VirtualMemoryPermission::Write as usize;
    }
    if flags & PF_X != 0 {
        permissions |= VirtualMemoryPermission::Execute as usize;
    }
    if task.task_type == TaskType::User {
        permissions |= VirtualMemoryPermission::User as usize;
    }

    // Inclusive end address.
    let vmarea = MemoryArea {
        start: vaddr,
        end: vaddr + size - 1,
    };

    // NOTE(review): this only probes the start address; a mapping that
    // begins inside (vaddr, vaddr + size) would not be detected here —
    // confirm whether add_memory_map performs the full-range check.
    if let Some(_existing) = task.vm_manager.search_memory_map(vaddr) {
        return Err("Memory area overlaps with existing mapping");
    }

    // One contiguous physical allocation backs the whole mapping (callers
    // rely on this for single-translate linear copies).
    let num_of_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
    let pages = allocate_raw_pages(num_of_pages);
    let ptr = pages as *mut u8;
    if ptr.is_null() {
        return Err("Failed to allocate memory");
    }
    let pmarea = MemoryArea {
        start: ptr as usize,
        end: (ptr as usize) + size - 1,
    };

    let map = VirtualMemoryMap {
        vmarea,
        pmarea,
        permissions,
        is_shared: false, owner: None,
    };

    // Free the pages again if the VM manager rejects the mapping.
    if let Err(e) = task.vm_manager.add_memory_map(map) {
        free_raw_pages(pages, num_of_pages);
        return Err(e);
    }

    // Hand each page to the task so it is freed when the task dies.
    for i in 0..num_of_pages {
        task.add_managed_page(ManagedPage {
            vaddr: vaddr + i * PAGE_SIZE,
            page: unsafe { Box::from_raw(pages.wrapping_add(i)) },
        });
    }

    Ok(())
}
1263
1264pub fn build_auxiliary_vector(load_result: &LoadElfResult) -> alloc::vec::Vec<AuxVec> {
1266 use crate::environment::PAGE_SIZE;
1267
1268 let mut auxv = alloc::vec::Vec::new();
1269
1270 auxv.push(AuxVec::new(AT_PHDR, load_result.program_headers.phdr_addr));
1272 auxv.push(AuxVec::new(AT_PHENT, load_result.program_headers.phdr_size));
1273 auxv.push(AuxVec::new(
1274 AT_PHNUM,
1275 load_result.program_headers.phdr_count,
1276 ));
1277
1278 auxv.push(AuxVec::new(AT_PAGESZ, PAGE_SIZE as u64));
1280
1281 match &load_result.mode {
1284 ExecutionMode::Dynamic { .. } => {
1285 if let Some(orig_entry) = load_result.original_entry_point {
1287 auxv.push(AuxVec::new(AT_ENTRY, orig_entry));
1288 }
1289 }
1290 ExecutionMode::Static => {
1291 auxv.push(AuxVec::new(AT_ENTRY, load_result.entry_point));
1293 }
1294 }
1295
1296 if let Some(interp_base) = load_result.interpreter_base {
1298 auxv.push(AuxVec::new(AT_BASE, interp_base));
1299 }
1300
1301 auxv.push(AuxVec::new(AT_UID, 0)); auxv.push(AuxVec::new(AT_EUID, 0)); auxv.push(AuxVec::new(AT_GID, 0)); auxv.push(AuxVec::new(AT_EGID, 0)); auxv.push(AuxVec::new(AT_NULL, 0));
1316
1317 auxv
1318}
1319
/// Writes `auxv` into the task's user stack region, ending at the top of the
/// stack, and returns the virtual address of the first entry.
///
/// NOTE(review): entries are written via per-entry virtual-to-physical
/// translation with a raw `ptr.write`; this assumes the stack is already
/// mapped at USER_STACK_END and that each 16-byte entry is suitably aligned
/// there — confirm USER_STACK_END's alignment.
///
/// # Errors
/// Fails when any target address cannot be translated (stack not mapped).
pub fn setup_auxiliary_vector_on_stack(
    task: &Task,
    auxv: &[AuxVec],
) -> Result<usize, ElfLoaderError> {
    let auxv_size = auxv.len() * core::mem::size_of::<AuxVec>();

    // Place the vector flush against the top of the user stack.
    let stack_top = crate::environment::USER_STACK_END;
    let auxv_start = stack_top - auxv_size;

    for (i, entry) in auxv.iter().enumerate() {
        let offset = i * core::mem::size_of::<AuxVec>();
        let vaddr = auxv_start + offset;

        // Translate each entry's address individually so page boundaries
        // within the range are handled correctly.
        match task.vm_manager.translate_vaddr(vaddr) {
            Some(paddr) => unsafe {
                let ptr = paddr as *mut AuxVec;
                ptr.write(*entry);
            },
            None => {
                return Err(ElfLoaderError {
                    message: format!("Failed to translate auxiliary vector address {:#x}", vaddr),
                });
            }
        }
    }

    crate::println!(
        "Setup auxiliary vector at {:#x} (size: {} entries)",
        auxv_start,
        auxv.len()
    );
    Ok(auxv_start)
}
1362
1363#[cfg(test)]
1364mod tests;
1365
/// Maps a single PT_LOAD segment at `segment_addr`, copies its file-backed
/// bytes in, and updates the task's text/data size accounting.
///
/// # Errors
/// Fails when mapping, seeking, reading, or address translation fails.
fn load_elf_segment_at_address(
    ph: &ProgramHeader,
    file_obj: &dyn FileObject,
    task: &Task,
    segment_addr: u64,
) -> Result<(), ElfLoaderError> {
    // Normalize the requested alignment to at least one page.
    let align = if ph.p_align == 0 || ph.p_align == 1 {
        PAGE_SIZE
    } else {
        core::cmp::max(ph.p_align as usize, PAGE_SIZE)
    };

    // Expand to a page-aligned window that covers [segment_addr, +p_memsz).
    let page_offset = (segment_addr as usize) % PAGE_SIZE;
    let mapping_start = (segment_addr as usize) - page_offset;
    let mapping_size = (ph.p_memsz as usize) + page_offset;
    let aligned_size = (mapping_size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);

    map_elf_segment(task, mapping_start, aligned_size, align, ph.p_flags).map_err(|e| {
        ElfLoaderError {
            message: format!("Failed to map ELF segment at {:#x}: {:?}", mapping_start, e),
        }
    })?;

    if ph.p_filesz > 0 {
        let mut segment_data = vec![0u8; ph.p_filesz as usize];
        file_obj
            .seek(SeekFrom::Start(ph.p_offset))
            .map_err(|e| ElfLoaderError {
                message: format!("Failed to seek to segment data: {:?}", e),
            })?;
        // NOTE(review): the byte count of this read is not checked; a short
        // read leaves trailing zeroes in the segment image.
        file_obj
            .read(&mut segment_data)
            .map_err(|e| ElfLoaderError {
                message: format!("Failed to read segment data: {:?}", e),
            })?;

        // data_offset equals page_offset; target_vaddr is segment_addr.
        let data_offset = (segment_addr as usize) - mapping_start;
        let target_vaddr = mapping_start + data_offset;

        // Single translate + linear copy: relies on map_elf_segment backing
        // the mapping with one physically contiguous run of pages.
        match task.vm_manager.translate_vaddr(target_vaddr) {
            Some(paddr) => unsafe {
                core::ptr::copy_nonoverlapping(
                    segment_data.as_ptr(),
                    paddr as *mut u8,
                    ph.p_filesz as usize,
                );
            },
            None => {
                return Err(ElfLoaderError {
                    message: format!(
                        "Failed to translate virtual address {:#x} for segment loading",
                        target_vaddr
                    ),
                });
            }
        }
    }

    // Account the mapped size as text (executable) or data (readable or
    // writable); segments with no permission bits are merely logged.
    let segment_type = if ph.p_flags & PF_X != 0 {
        task.text_size.fetch_add(aligned_size, Ordering::SeqCst);
        "text"
    } else if ph.p_flags & PF_W != 0 || ph.p_flags & PF_R != 0 {
        task.data_size.fetch_add(aligned_size, Ordering::SeqCst);
        "data"
    } else {
        "unknown"
    };

    crate::println!(
        "Loaded {} segment at {:#x} (size: {:#x})",
        segment_type,
        segment_addr,
        aligned_size
    );
    Ok(())
}