kernel/task/elf_loader/
mod.rs

1//! ELF Loading Module
2//!
3//! This module provides functionality for loading ELF (Executable and Linkable Format)
4//! executables into a task's memory space. It supports 64-bit ELF files with full dynamic
5//! linking capabilities and handles the parsing of ELF headers and program headers, as well
6//! as the mapping of loadable segments into memory.
7//!
8//! # Components
9//!
10//! - `ElfHeader`: Represents the ELF file header which contains metadata about the file
11//! - `ProgramHeader`: Represents a program header which describes a segment in the ELF file
12//! - `LoadedSegment`: Represents a segment after it has been loaded into memory
13//! - Dynamic linking support for shared libraries and position-independent executables
14//! - Error types for handling various failure scenarios during ELF parsing and loading
15//!
16//! # Main Functions
17//!
18//! - `load_elf_into_task`: Loads an ELF file from a file object into a task's memory space
19//! - `map_elf_segment`: Maps an ELF segment into a task's virtual memory
20//! - Dynamic linker integration for shared library resolution
21//!
22//! # Dynamic Linking Support
23//!
24//! The module now includes comprehensive dynamic linking capabilities:
25//! - Dynamic symbol resolution
26//! - Shared library loading and linking
27//! - Position-independent executable (PIE) support
28//! - Runtime relocation handling
29//!
30//! # Constants
31//!
32//! The module defines various constants for ELF parsing, including:
33//! - Magic numbers for identifying ELF files
34//! - ELF class identifiers (64-bit)
35//! - Data encoding formats (little/big endian)
36//! - Program header types and segment flags (Read/Write/Execute)
37//!
38//! # Endian Support
39//!
40//! The module provides endian-aware data reading functions to correctly parse ELF files
41//! regardless of the endianness used in the file.
42
43use crate::environment::PAGE_SIZE;
44use crate::fs::{FileObject, SeekFrom};
45use crate::mem::page::{allocate_raw_pages, free_raw_pages};
46use crate::task::{ManagedPage, Task};
47use crate::vm::vmem::{MemoryArea, VirtualMemoryMap, VirtualMemoryPermission, VirtualMemoryRegion};
48use alloc::boxed::Box;
49use alloc::string::{String, ToString};
50use alloc::{format, vec};
51use core::sync::atomic::Ordering;
52
53use super::TaskType;
54
// ELF Magic Number (first four bytes of e_ident: 0x7F 'E' 'L' 'F')
const ELFMAG: [u8; 4] = [0x7F, b'E', b'L', b'F'];
// ELF Class (e_ident[EI_CLASS])
// const ELFCLASS32: u8 = 1; // 32-bit
const ELFCLASS64: u8 = 2; // 64-bit
// ELF Data Endian (e_ident[EI_DATA])
const ELFDATA2LSB: u8 = 1; // Little Endian
// const ELFDATA2MSB: u8 = 2; // Big Endian

// ELF File Type (e_type)
pub const ET_EXEC: u16 = 2; // Executable file
pub const ET_DYN: u16 = 3; // Shared object file / Position Independent Executable

// Program Header Type (p_type)
const PT_LOAD: u32 = 1; // Loadable segment
const PT_INTERP: u32 = 3; // Interpreter path
/// Target type for ELF loading (determines base address strategy)
///
/// Passed to `LoadStrategy::choose_base_address` so an ABI module can pick a
/// different load base for the main image, the dynamic linker, and shared
/// objects.
#[derive(Debug, Clone, Copy)]
pub enum LoadTarget {
    MainProgram, // Main executable being loaded
    Interpreter, // Dynamic linker/interpreter
    SharedLib,   // Shared library (future use)
}
79
/// Binary loading strategy (format-agnostic)
///
/// This structure allows ABI modules to customize how binaries are loaded
/// without being tied to specific binary formats like ELF.
pub struct LoadStrategy {
    /// Picks the base address for a given load target; `needs_relocation` is
    /// true for position-independent (ET_DYN) images.
    pub choose_base_address: fn(target: LoadTarget, needs_relocation: bool) -> u64,
    /// Maps the interpreter requested by the binary (PT_INTERP) to the one
    /// actually used; returning `None` rejects dynamic linking.
    pub resolve_interpreter: fn(requested: Option<&str>) -> Option<String>,
}
88
impl Default for LoadStrategy {
    /// Default strategy: fixed, well-separated base addresses and a
    /// pass-through interpreter resolver.
    fn default() -> Self {
        Self {
            choose_base_address: |target, needs_relocation| {
                match (target, needs_relocation) {
                    (LoadTarget::MainProgram, false) => 0, // Absolute addresses
                    (LoadTarget::MainProgram, true) => 0x10000, // PIE executable
                    (LoadTarget::Interpreter, _) => 0x40000000, // Dynamic linker
                    (LoadTarget::SharedLib, _) => 0x50000000, // Shared libraries
                }
            },
            resolve_interpreter: |requested| requested.map(|s| s.to_string()),
        }
    }
}
104
/// Execution mode determined by ELF analysis
#[derive(Debug, Clone)]
pub enum ExecutionMode {
    /// Static linking - direct execution
    Static,
    /// Dynamic linking - needs interpreter; `interpreter_path` is the
    /// resolved dynamic linker that receives control first
    Dynamic { interpreter_path: String },
}
113
/// Result of ELF loading analysis
///
/// Carries everything the caller needs to finish process setup: where control
/// starts and the values that feed the auxiliary vector.
#[derive(Debug, Clone)]
pub struct LoadElfResult {
    /// Execution mode (static or dynamic)
    pub mode: ExecutionMode,
    /// Entry point (either main program or interpreter)
    pub entry_point: u64,
    /// Original program entry point (for AT_ENTRY in dynamic linking)
    pub original_entry_point: Option<u64>,
    /// Base address where main program was loaded (for auxiliary vector)
    pub base_address: Option<u64>,
    /// Base address where interpreter was loaded (for AT_BASE)
    pub interpreter_base: Option<u64>,
    /// Program headers info (for auxiliary vector)
    pub program_headers: ProgramHeadersInfo,
}
130
/// Program headers information for auxiliary vector
///
/// Supplies the values for the AT_PHDR / AT_PHENT / AT_PHNUM entries.
#[derive(Debug, Clone)]
pub struct ProgramHeadersInfo {
    pub phdr_addr: u64,  // Address of program headers in memory (AT_PHDR)
    pub phdr_size: u64,  // Size of one program header entry (AT_PHENT)
    pub phdr_count: u64, // Number of program headers (AT_PHNUM)
}
138
// Auxiliary Vector (auxv) types for dynamic linking
// (standard auxv tag numbering, passed to the dynamic linker on the stack)
/// Auxiliary Vector entry type constants
pub const AT_NULL: u64 = 0; // End of vector
pub const AT_IGNORE: u64 = 1; // Entry should be ignored
pub const AT_EXECFD: u64 = 2; // File descriptor of program
pub const AT_PHDR: u64 = 3; // Program headers for program
pub const AT_PHENT: u64 = 4; // Size of program header entry
pub const AT_PHNUM: u64 = 5; // Number of program headers
pub const AT_PAGESZ: u64 = 6; // System page size
pub const AT_BASE: u64 = 7; // Base address of interpreter
pub const AT_FLAGS: u64 = 8; // Flags
pub const AT_ENTRY: u64 = 9; // Entry point of program
pub const AT_NOTELF: u64 = 10; // Program is not ELF
pub const AT_UID: u64 = 11; // Real uid
pub const AT_EUID: u64 = 12; // Effective uid
pub const AT_GID: u64 = 13; // Real gid
pub const AT_EGID: u64 = 14; // Effective gid
pub const AT_PLATFORM: u64 = 15; // String identifying platform
pub const AT_HWCAP: u64 = 16; // Machine dependent hints about processor capabilities
pub const AT_CLKTCK: u64 = 17; // Frequency of times()
pub const AT_RANDOM: u64 = 25; // Address of 16 random bytes
160
/// Auxiliary Vector entry
///
/// One `(a_type, a_val)` pair; a list of these (terminated by an `AT_NULL`
/// entry) is handed to the program / dynamic linker.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct AuxVec {
    /// Entry tag (one of the `AT_*` constants)
    pub a_type: u64,
    /// Value for the tag (address, size, or flag depending on `a_type`)
    pub a_val: u64,
}

impl AuxVec {
    /// Create a new auxiliary vector entry.
    ///
    /// `const` so entries (e.g. the `AT_NULL` terminator) can be built in
    /// const contexts; existing callers are unaffected.
    pub const fn new(a_type: u64, a_val: u64) -> Self {
        Self { a_type, a_val }
    }
}
173
// Segment Flags (p_flags bits)
pub const PF_X: u32 = 1; // Executable
pub const PF_W: u32 = 2; // Writable
pub const PF_R: u32 = 4; // Readable

// ELF Identifier Indices (byte offsets into e_ident)
const EI_MAG0: usize = 0;
const EI_MAG1: usize = 1;
const EI_MAG2: usize = 2;
const EI_MAG3: usize = 3;
const EI_CLASS: usize = 4;
const EI_DATA: usize = 5;
// const EI_VERSION: usize = 6;
187
188// Endian-aware data reading functions
/// Decode a `u16` from `buffer` at `offset`, honoring the file's byte order.
///
/// Panics if `buffer` holds fewer than `offset + 2` bytes (callers validate
/// buffer sizes before decoding).
fn read_u16(buffer: &[u8], offset: usize, is_little_endian: bool) -> u16 {
    let raw: [u8; 2] = buffer[offset..offset + 2]
        .try_into()
        .expect("two-byte slice always converts");
    let decode: fn([u8; 2]) -> u16 = if is_little_endian {
        u16::from_le_bytes
    } else {
        u16::from_be_bytes
    };
    decode(raw)
}
197
/// Decode a `u32` from `buffer` at `offset`, honoring the file's byte order.
///
/// Panics if `buffer` holds fewer than `offset + 4` bytes (callers validate
/// buffer sizes before decoding).
fn read_u32(buffer: &[u8], offset: usize, is_little_endian: bool) -> u32 {
    let raw: [u8; 4] = buffer[offset..offset + 4]
        .try_into()
        .expect("four-byte slice always converts");
    let decode: fn([u8; 4]) -> u32 = if is_little_endian {
        u32::from_le_bytes
    } else {
        u32::from_be_bytes
    };
    decode(raw)
}
206
/// Decode a `u64` from `buffer` at `offset`, honoring the file's byte order.
///
/// Panics if `buffer` holds fewer than `offset + 8` bytes (callers validate
/// buffer sizes before decoding).
fn read_u64(buffer: &[u8], offset: usize, is_little_endian: bool) -> u64 {
    let raw: [u8; 8] = buffer[offset..offset + 8]
        .try_into()
        .expect("eight-byte slice always converts");
    let decode: fn([u8; 8]) -> u64 = if is_little_endian {
        u64::from_le_bytes
    } else {
        u64::from_be_bytes
    };
    decode(raw)
}
215
/// Parsed 64-bit ELF file header.
///
/// Field names follow the ELF specification; multi-byte fields are decoded by
/// [`ElfHeader::parse`] honoring the endianness recorded in `ei_data`.
#[derive(Debug)]
pub struct ElfHeader {
    pub ei_class: u8,     // 32-bit or 64-bit (EI_CLASS)
    pub ei_data: u8,      // Endianness (EI_DATA)
    pub e_type: u16,      // File type
    pub e_machine: u16,   // Machine type
    pub e_version: u32,   // ELF version
    pub e_entry: u64,     // Entry point address
    pub e_phoff: u64,     // Program header table file offset
    pub e_shoff: u64,     // Section header table file offset
    pub e_flags: u32,     // Processor-specific flags
    pub e_ehsize: u16,    // ELF header size
    pub e_phentsize: u16, // Program header table entry size
    pub e_phnum: u16,     // Number of program header entries
    pub e_shentsize: u16, // Section header table entry size
    pub e_shnum: u16,     // Number of section header entries
    pub e_shstrndx: u16,  // Section header string table index
}
234
/// Parsed 64-bit ELF program header entry.
#[derive(Debug)]
pub struct ProgramHeader {
    pub p_type: u32,   // Segment type
    pub p_flags: u32,  // Segment flags
    pub p_offset: u64, // Segment offset in file
    pub p_vaddr: u64,  // Segment virtual address for loading
    pub p_paddr: u64,  // Segment physical address (usually unused)
    pub p_filesz: u64, // Segment size in file
    pub p_memsz: u64,  // Segment size in memory
    pub p_align: u64,  // Segment alignment
}
246
/// Reason an ELF header failed to parse.
#[derive(Debug)]
pub enum ElfHeaderParseErrorKind {
    InvalidMagicNumber,
    UnsupportedClass,
    InvalidData,
    Other(String),
}

/// Error returned by [`ElfHeader::parse`].
#[derive(Debug)]
pub struct ElfHeaderParseError {
    pub kind: ElfHeaderParseErrorKind,
    // Human-readable description of the failure
    pub message: String,
}
260
/// Reason a program header entry failed to parse.
#[derive(Debug)]
pub enum ProgramHeaderParseErrorKind {
    InvalidSize,
    Other(String),
}

/// Error returned by [`ProgramHeader::parse`].
#[derive(Debug)]
pub struct ProgramHeaderParseError {
    pub kind: ProgramHeaderParseErrorKind,
    pub message: String,
}

/// Top-level error type for the ELF loading routines.
#[derive(Debug)]
pub struct ElfLoaderError {
    pub message: String,
}
277
278impl ElfHeader {
279    pub fn parse(buffer: &[u8]) -> Result<Self, ElfHeaderParseError> {
280        if buffer.len() < 64 {
281            return Err(ElfHeaderParseError {
282                kind: ElfHeaderParseErrorKind::InvalidData,
283                message: "ELF header too small".to_string(),
284            });
285        }
286
287        if buffer[EI_MAG0] != ELFMAG[0]
288            || buffer[EI_MAG1] != ELFMAG[1]
289            || buffer[EI_MAG2] != ELFMAG[2]
290            || buffer[EI_MAG3] != ELFMAG[3]
291        {
292            return Err(ElfHeaderParseError {
293                kind: ElfHeaderParseErrorKind::InvalidMagicNumber,
294                message: "Invalid ELF magic number".to_string(),
295            });
296        }
297
298        let ei_class = buffer[EI_CLASS];
299        if ei_class != ELFCLASS64 {
300            return Err(ElfHeaderParseError {
301                kind: ElfHeaderParseErrorKind::UnsupportedClass,
302                message: "Only 64-bit ELF is supported".to_string(),
303            });
304        }
305
306        // Read each field considering endianness
307        let ei_data = buffer[EI_DATA];
308        let is_little_endian = ei_data == ELFDATA2LSB;
309        let e_type = read_u16(buffer, 16, is_little_endian);
310        let e_machine = read_u16(buffer, 18, is_little_endian);
311        let e_version = read_u32(buffer, 20, is_little_endian);
312        let e_entry = read_u64(buffer, 24, is_little_endian);
313        let e_phoff = read_u64(buffer, 32, is_little_endian);
314        let e_shoff = read_u64(buffer, 40, is_little_endian);
315        let e_flags = read_u32(buffer, 48, is_little_endian);
316        let e_ehsize = read_u16(buffer, 52, is_little_endian);
317        let e_phentsize = read_u16(buffer, 54, is_little_endian);
318        let e_phnum = read_u16(buffer, 56, is_little_endian);
319        let e_shentsize = read_u16(buffer, 58, is_little_endian);
320        let e_shnum = read_u16(buffer, 60, is_little_endian);
321        let e_shstrndx = read_u16(buffer, 62, is_little_endian);
322
323        Ok(Self {
324            ei_class,
325            ei_data,
326            e_type,
327            e_machine,
328            e_version,
329            e_entry,
330            e_phoff,
331            e_shoff,
332            e_flags,
333            e_ehsize,
334            e_phentsize,
335            e_phnum,
336            e_shentsize,
337            e_shnum,
338            e_shstrndx,
339        })
340    }
341}
342
343impl ProgramHeader {
344    pub fn parse(buffer: &[u8], is_little_endian: bool) -> Result<Self, ProgramHeaderParseError> {
345        if buffer.len() < 56 {
346            return Err(ProgramHeaderParseError {
347                kind: ProgramHeaderParseErrorKind::InvalidSize,
348                message: "Program header too small".to_string(),
349            });
350        }
351
352        // Read each field considering endianness
353        let p_type = read_u32(buffer, 0, is_little_endian);
354        let p_flags = read_u32(buffer, 4, is_little_endian);
355        let p_offset = read_u64(buffer, 8, is_little_endian);
356        let p_vaddr = read_u64(buffer, 16, is_little_endian);
357        let p_paddr = read_u64(buffer, 24, is_little_endian);
358        let p_filesz = read_u64(buffer, 32, is_little_endian);
359        let p_memsz = read_u64(buffer, 40, is_little_endian);
360        let p_align = read_u64(buffer, 48, is_little_endian);
361
362        Ok(Self {
363            p_type,
364            p_flags,
365            p_offset,
366            p_vaddr,
367            p_paddr,
368            p_filesz,
369            p_memsz,
370            p_align,
371        })
372    }
373}
374
375/// Read and parse a program header at the specified index
376fn read_program_header(
377    header: &ElfHeader,
378    file_obj: &dyn FileObject,
379    index: u16,
380) -> Result<ProgramHeader, ElfLoaderError> {
381    let offset = header.e_phoff + (index as u64) * (header.e_phentsize as u64);
382    file_obj
383        .seek(SeekFrom::Start(offset))
384        .map_err(|e| ElfLoaderError {
385            message: format!("Failed to seek to program header {}: {:?}", index, e),
386        })?;
387
388    let mut ph_buffer = vec![0u8; header.e_phentsize as usize];
389    file_obj.read(&mut ph_buffer).map_err(|e| ElfLoaderError {
390        message: format!("Failed to read program header {}: {:?}", index, e),
391    })?;
392
393    ProgramHeader::parse(&ph_buffer, header.ei_data == ELFDATA2LSB).map_err(|e| ElfLoaderError {
394        message: format!("Failed to parse program header {}: {:?}", index, e),
395    })
396}
397
398/// Iterate through all program headers and call a closure for each one
399fn for_each_program_header<F>(
400    header: &ElfHeader,
401    file_obj: &dyn FileObject,
402    mut callback: F,
403) -> Result<(), ElfLoaderError>
404where
405    F: FnMut(u16, &ProgramHeader) -> Result<bool, ElfLoaderError>, // Return false to break early
406{
407    for i in 0..header.e_phnum {
408        let ph = read_program_header(header, file_obj, i)?;
409        let should_continue = callback(i, &ph)?;
410        if !should_continue {
411            break;
412        }
413    }
414    Ok(())
415}
416
/// A segment after it has been loaded into memory.
#[derive(Debug)]
pub struct LoadedSegment {
    pub vaddr: u64, // Virtual address
    pub size: u64,  // Size
    pub flags: u32, // Flags (R/W/X, see the PF_* bits)
}
423
424/// Load an ELF file into a task's memory space
425///
426/// # Arguments
427///
428/// * `file`: A mutable reference to a file object containing the ELF file
429/// * `task`: A mutable reference to the task into which the ELF file will be loaded
430///
431/// # Returns
432///
433/// * `Result<u64, ElfLoaderError>`: The entry point address of the loaded ELF file on success,
434///  or an `ElfLoaderError` on failure
435///
436/// # Errors
437///
438/// * `ElfLoaderError`: If any error occurs during the loading process, such as file read errors,
439///  parsing errors, or memory allocation errors
440///
441/// Load ELF file into task (backward compatibility wrapper)
442///
443/// This function provides backward compatibility with the existing API.
444/// It calls the new analyze_and_load_elf function and returns only the entry point.
445///
446pub fn load_elf_into_task(file_obj: &dyn FileObject, task: &Task) -> Result<u64, ElfLoaderError> {
447    let result = analyze_and_load_elf(file_obj, task)?;
448    Ok(result.entry_point)
449}
450
451/// Analyze ELF file and load it with dynamic linking support
452///
453/// This function determines whether the ELF file requires dynamic linking by checking
454/// for PT_INTERP segment, then loads either the interpreter (dynamic linker) or the
455/// main program directly (static linking).
456///
457/// # Arguments
458///
459/// * `file_obj`: A reference to the file object containing the ELF data
460/// * `task`: A mutable reference to the task into which the ELF file will be loaded
461///
462/// # Returns
463///
464/// * `Result<LoadElfResult, ElfLoaderError>`: Information about the loaded ELF including
465///   execution mode, entry point, and auxiliary vector data
466///
467pub fn analyze_and_load_elf(
468    file_obj: &dyn FileObject,
469    task: &Task,
470) -> Result<LoadElfResult, ElfLoaderError> {
471    analyze_and_load_elf_with_strategy(file_obj, task, &LoadStrategy::default())
472}
473
474/// Analyze ELF file and load it with custom loading strategy
475///
476/// This function determines whether the ELF file requires dynamic linking by checking
477/// for PT_INTERP segment, then loads either the interpreter (dynamic linker) or the
478/// main program directly (static linking) using the provided strategy.
479///
480/// # Arguments
481///
482/// * `file_obj`: A reference to the file object containing the ELF data
483/// * `task`: A mutable reference to the task into which the ELF file will be loaded
484/// * `strategy`: Loading strategy provided by ABI module
485///
486/// # Returns
487///
488/// * `Result<LoadElfResult, ElfLoaderError>`: Information about the loaded ELF including
489///   execution mode, entry point, and auxiliary vector data
490///
491pub fn analyze_and_load_elf_with_strategy(
492    file_obj: &dyn FileObject,
493    task: &Task,
494    strategy: &LoadStrategy,
495) -> Result<LoadElfResult, ElfLoaderError> {
496    // Move to the beginning of the file
497    file_obj
498        .seek(SeekFrom::Start(0))
499        .map_err(|e| ElfLoaderError {
500            message: format!("Failed to seek to start of file: {:?}", e),
501        })?;
502
503    // Read the ELF header
504    let mut header_buffer = vec![0u8; 64]; // 64-bit ELF header size
505    file_obj
506        .read(&mut header_buffer)
507        .map_err(|e| ElfLoaderError {
508            message: format!("Failed to read ELF header: {:?}", e),
509        })?;
510
511    let header = match ElfHeader::parse(&header_buffer) {
512        Ok(header) => header,
513        Err(e) => {
514            return Err(ElfLoaderError {
515                message: format!("Failed to parse ELF header: {:?}", e),
516            });
517        }
518    };
519
520    // Step 1: Check for PT_INTERP segment
521    let interpreter_path = find_interpreter_path(&header, file_obj)?;
522
523    // Convert ELF type to format-agnostic information
524    let needs_relocation = header.e_type == ET_DYN;
525
526    match interpreter_path {
527        Some(interp_path) => {
528            // Dynamic linking required
529            crate::println!(
530                "ELF requires dynamic linking with interpreter: {}",
531                interp_path
532            );
533
534            // Let strategy resolve the actual interpreter to use
535            let actual_interpreter = (strategy.resolve_interpreter)(Some(&interp_path));
536
537            if let Some(final_interp_path) = actual_interpreter {
538                crate::println!("Using interpreter: {}", final_interp_path);
539                let base_address =
540                    load_elf_segments_for_interpreter(&header, file_obj, task, strategy)?;
541                let (interpreter_entry, interpreter_base) =
542                    load_interpreter(&final_interp_path, task, strategy)?;
543
544                // Prepare program headers info for auxiliary vector
545                let phdr_info = ProgramHeadersInfo {
546                    phdr_addr: base_address + header.e_phoff,
547                    phdr_size: header.e_phentsize as u64,
548                    phdr_count: header.e_phnum as u64,
549                };
550
551                // Calculate original entry point correctly based on ELF type
552                // For ET_EXEC: e_entry is an absolute address
553                // For ET_DYN: e_entry is relative to base_address
554                let original_entry = if needs_relocation {
555                    base_address + header.e_entry
556                } else {
557                    header.e_entry
558                };
559
560                Ok(LoadElfResult {
561                    mode: ExecutionMode::Dynamic {
562                        interpreter_path: final_interp_path,
563                    },
564                    entry_point: interpreter_entry,
565                    original_entry_point: Some(original_entry),
566                    base_address: Some(base_address),
567                    interpreter_base: Some(interpreter_base),
568                    program_headers: phdr_info,
569                })
570            } else {
571                // Strategy rejected dynamic linking (e.g., xv6 ABI)
572                return Err(ElfLoaderError {
573                    message: "Dynamic linking not supported by current ABI".to_string(),
574                });
575            }
576        }
577        None => {
578            // Static linking - use existing implementation
579            let base_address =
580                (strategy.choose_base_address)(LoadTarget::MainProgram, needs_relocation);
581            let entry_point = load_elf_into_task_static(&header, file_obj, task, strategy)?;
582
583            // For static executables, load program headers into memory if needed
584            let phdr_info = if needs_relocation {
585                // PIE static executable - program headers are loaded with the executable
586                ProgramHeadersInfo {
587                    phdr_addr: base_address + header.e_phoff,
588                    phdr_size: header.e_phentsize as u64,
589                    phdr_count: header.e_phnum as u64,
590                }
591            } else {
592                // Traditional static executable - load program headers into memory
593                let phdr_mem_addr = load_program_headers_into_memory(&header, file_obj, task)?;
594                ProgramHeadersInfo {
595                    phdr_addr: phdr_mem_addr,
596                    phdr_size: header.e_phentsize as u64,
597                    phdr_count: header.e_phnum as u64,
598                }
599            };
600
601            Ok(LoadElfResult {
602                mode: ExecutionMode::Static,
603                entry_point,
604                original_entry_point: None, // Same as entry_point for static executables
605                base_address: if needs_relocation {
606                    Some(base_address)
607                } else {
608                    None
609                },
610                interpreter_base: None, // No interpreter for static linking
611                program_headers: phdr_info,
612            })
613        }
614    }
615}
616
617/// Find PT_INTERP segment and extract interpreter path
618fn find_interpreter_path(
619    header: &ElfHeader,
620    file_obj: &dyn FileObject,
621) -> Result<Option<String>, ElfLoaderError> {
622    let mut result = None;
623
624    for_each_program_header(header, file_obj, |_i, ph| {
625        if ph.p_type == PT_INTERP {
626            // Read interpreter path
627            file_obj
628                .seek(SeekFrom::Start(ph.p_offset))
629                .map_err(|e| ElfLoaderError {
630                    message: format!("Failed to seek to interpreter path: {:?}", e),
631                })?;
632
633            let mut interp_buffer = vec![0u8; ph.p_filesz as usize];
634            file_obj
635                .read(&mut interp_buffer)
636                .map_err(|e| ElfLoaderError {
637                    message: format!("Failed to read interpreter path: {:?}", e),
638                })?;
639
640            // Remove null terminator and convert to string
641            if let Some(null_pos) = interp_buffer.iter().position(|&x| x == 0) {
642                interp_buffer.truncate(null_pos);
643            }
644
645            let path = core::str::from_utf8(&interp_buffer)
646                .map_err(|_| ElfLoaderError {
647                    message: "Invalid UTF-8 in interpreter path".to_string(),
648                })?
649                .to_string();
650
651            result = Some(path);
652            return Ok(false); // Break early
653        }
654        Ok(true) // Continue iteration
655    })?;
656
657    Ok(result)
658}
659
660/// Load ELF segments for dynamic execution (without executing)
661fn load_elf_segments_for_interpreter(
662    header: &ElfHeader,
663    file_obj: &dyn FileObject,
664    task: &Task,
665    strategy: &LoadStrategy,
666) -> Result<u64, ElfLoaderError> {
667    // Use strategy to determine base address
668    let needs_relocation = header.e_type == ET_DYN;
669    // crate::println!("[ELF Loader] Main program: e_type={:#x}, needs_relocation={}, e_phoff={:#x}",
670    //     header.e_type, needs_relocation, header.e_phoff);
671    let base_address = (strategy.choose_base_address)(LoadTarget::MainProgram, needs_relocation);
672    // crate::println!("[ELF Loader] Chosen base_address={:#x}", base_address);
673
674    // Track the actual load address of the first LOAD segment for program headers
675    let mut first_load_addr: Option<u64> = None;
676    let mut _load_segment_count = 0;
677
678    // Load PT_LOAD segments using simplified approach
679    for_each_program_header(header, file_obj, |_i, ph| {
680        if ph.p_type == PT_LOAD {
681            let segment_addr = base_address + ph.p_vaddr;
682            // crate::println!("[ELF Loader] PT_LOAD[{}]: p_vaddr={:#x}, p_memsz={:#x}, p_filesz={:#x}, p_flags={:#x} -> load_addr={:#x}",
683            //     i, ph.p_vaddr, ph.p_memsz, ph.p_filesz, ph.p_flags, segment_addr);
684            if first_load_addr.is_none() {
685                first_load_addr = Some(segment_addr);
686                // crate::println!("[ELF Loader] First LOAD segment at {:#x}", segment_addr);
687            }
688            load_elf_segment_at_address(ph, file_obj, task, segment_addr)?;
689            _load_segment_count += 1;
690        }
691        Ok(true) // Continue iteration
692    })?;
693
694    // crate::println!("[ELF Loader] Loaded {} PT_LOAD segments, e_entry={:#x}", _load_segment_count, header.e_entry);
695
696    // Calculate phdr_addr based on actual load address
697    // Program headers are typically in the first LOAD segment
698    let actual_base = first_load_addr.unwrap_or(base_address);
699
700    // Program headers are already loaded as part of the first LOAD segment
701    // (which typically includes the ELF header and program headers)
702    // No need to create a separate mapping - just return the address
703    // crate::println!("[ELF Loader] Program headers at {:#x} (actual_base={:#x} + e_phoff={:#x})",
704    //     actual_base + header.e_phoff, actual_base, header.e_phoff);
705
706    // Return the actual base address where the first segment was loaded
707    Ok(actual_base)
708}
709
/// Maximum recursion depth for interpreter loading to prevent infinite loops
/// (an interpreter may itself carry a PT_INTERP entry; see
/// `load_interpreter_recursive`)
const MAX_INTERPRETER_DEPTH: usize = 5;
713
/// Load the interpreter (dynamic linker) into the task's memory.
///
/// Entry-point wrapper around `load_interpreter_recursive`, starting the
/// nested-interpreter recursion at depth 0.
///
/// # Returns
///
/// `(entry_point, base_address)` of the loaded interpreter on success.
fn load_interpreter(
    interpreter_path: &str,
    task: &Task,
    strategy: &LoadStrategy,
) -> Result<(u64, u64), ElfLoaderError> {
    load_interpreter_recursive(interpreter_path, task, strategy, 0)
}
721
722/// Recursive interpreter loading with depth limiting
723fn load_interpreter_recursive(
724    interpreter_path: &str,
725    task: &Task,
726    strategy: &LoadStrategy,
727    depth: usize,
728) -> Result<(u64, u64), ElfLoaderError> {
729    // Check recursion depth to prevent infinite loops
730    if depth >= MAX_INTERPRETER_DEPTH {
731        return Err(ElfLoaderError {
732            message: format!(
733                "Maximum interpreter recursion depth ({}) exceeded",
734                MAX_INTERPRETER_DEPTH
735            ),
736        });
737    }
738
739    crate::println!(
740        "Loading interpreter (depth {}): {}",
741        depth,
742        interpreter_path
743    );
744
745    // Step 1: Open interpreter file from VFS
746    let vfs = task.get_vfs().ok_or_else(|| ElfLoaderError {
747        message: "Task VFS not available for interpreter loading".to_string(),
748    })?;
749
750    let file_obj = vfs
751        .open(interpreter_path, 0)
752        .map_err(|fs_err| ElfLoaderError {
753            message: format!(
754                "Failed to open interpreter '{}': {:?}",
755                interpreter_path, fs_err
756            ),
757        })?;
758
759    // Extract FileObject from KernelObject and keep it alive
760    let file_arc = match file_obj {
761        crate::object::KernelObject::File(file_ref) => file_ref,
762        _ => {
763            return Err(ElfLoaderError {
764                message: "Invalid kernel object type for interpreter file".to_string(),
765            });
766        }
767    };
768
769    let file_object: &dyn crate::fs::FileObject = file_arc.as_ref();
770
771    // Step 2: Read ELF header data from file
772    file_object
773        .seek(crate::fs::SeekFrom::Start(0))
774        .map_err(|e| ElfLoaderError {
775            message: format!("Failed to seek to start of interpreter file: {:?}", e),
776        })?;
777
778    // ELF header is always 64 bytes for 64-bit ELF files
779    let mut header_buffer = vec![0u8; 64];
780    let bytes_read = file_object
781        .read(&mut header_buffer)
782        .map_err(|e| ElfLoaderError {
783            message: format!("Failed to read interpreter ELF header: {:?}", e),
784        })?;
785
786    crate::println!(
787        "Read {} bytes for interpreter ELF header (expected 64)",
788        bytes_read
789    );
790
791    // Check if we actually read enough bytes
792    if bytes_read < 64 {
793        return Err(ElfLoaderError {
794            message: format!(
795                "Interpreter ELF header too small: read {} bytes, expected 64",
796                bytes_read
797            ),
798        });
799    }
800
801    let interp_header = ElfHeader::parse(&header_buffer).map_err(|e| ElfLoaderError {
802        message: format!("Failed to parse interpreter ELF header: {}", e.message),
803    })?;
804
805    // Step 3: Check if this interpreter itself has an interpreter (recursive case)
806    let nested_interpreter_path = find_interpreter_path(&interp_header, file_object)?;
807    let (final_entry_point, final_base) = if let Some(nested_path) = nested_interpreter_path {
808        let resolved_nested_path =
809            (strategy.resolve_interpreter)(Some(&nested_path)).unwrap_or(nested_path);
810        crate::println!(
811            "Interpreter {} requests nested interpreter: {}",
812            interpreter_path,
813            resolved_nested_path
814        );
815
816        // Recursively load the nested interpreter first
817        load_interpreter_recursive(&resolved_nested_path, task, strategy, depth + 1)?
818    } else {
819        // No nested interpreter, load this interpreter normally
820        let interp_needs_relocation = interp_header.e_type == ET_DYN;
821
822        // Determine total span of PT_LOAD segments to avoid overlap
823        let mut min_vaddr: u64 = u64::MAX;
824        let mut max_end: u64 = 0;
825        for_each_program_header(&interp_header, file_object, |_i, ph| {
826            if ph.p_type == PT_LOAD {
827                if ph.p_vaddr < min_vaddr {
828                    min_vaddr = ph.p_vaddr;
829                }
830                let end = ph.p_vaddr.saturating_add(ph.p_memsz);
831                if end > max_end {
832                    max_end = end;
833                }
834            }
835            Ok(true)
836        })?;
837
838        if min_vaddr == u64::MAX {
839            return Err(ElfLoaderError {
840                message: "Interpreter has no PT_LOAD segments".to_string(),
841            });
842        }
843
844        let span = max_end.saturating_sub(min_vaddr) as usize;
845        let align = crate::environment::PAGE_SIZE;
846        let span_aligned = (span + align - 1) & !(align - 1);
847
848        // Prefer the strategy's hint, but pick an actually free area in the task's VM
849        let _preferred =
850            (strategy.choose_base_address)(LoadTarget::Interpreter, interp_needs_relocation);
851        let start = task
852            .vm_manager
853            .find_unmapped_area(span_aligned, align)
854            .ok_or_else(|| ElfLoaderError {
855                message: "No unmapped area available for interpreter".to_string(),
856            })? as u64;
857
858        // Compute additive base so that the lowest PT_LOAD maps to `start`
859        let interpreter_base_add = start.saturating_sub(min_vaddr);
860        crate::println!(
861            "Interpreter base address: {:#x} (mapped span: {:#x} bytes)",
862            interpreter_base_add,
863            span_aligned
864        );
865
866        // Load interpreter segments with this base
867        load_elf_segments_with_base(&interp_header, file_object, task, interpreter_base_add)?;
868
869        // Calculate actual entry point and return base used for relocations/AT_BASE
870        let entry = if interp_needs_relocation {
871            interpreter_base_add + interp_header.e_entry as u64
872        } else {
873            interp_header.e_entry
874        };
875        (entry, interpreter_base_add)
876    };
877
878    crate::println!(
879        "Interpreter entry point (depth {}): {:#x}",
880        depth,
881        final_entry_point
882    );
883    Ok((final_entry_point, final_base))
884}
885
886/// Load ELF segments for interpreter with specified base address
887fn load_elf_segments_with_base(
888    header: &ElfHeader,
889    file_obj: &dyn FileObject,
890    task: &Task,
891    base_address: u64,
892) -> Result<(), ElfLoaderError> {
893    // Load PT_LOAD segments with provided base address
894    for_each_program_header(header, file_obj, |_i, ph| {
895        if ph.p_type == PT_LOAD {
896            let segment_addr = base_address + ph.p_vaddr;
897            load_elf_segment_at_address(ph, file_obj, task, segment_addr)?;
898        }
899        Ok(true) // Continue iteration
900    })?;
901
902    Ok(())
903}
904
/// Load ELF using the static linking logic with strategy support
///
/// Maps every PT_LOAD segment of `header` into `task`'s address space at
/// `base + p_vaddr`, where `base` is supplied by
/// `strategy.choose_base_address` (meaningful for ET_DYN images, which are
/// linked at address 0 and need relocation). For each segment it computes an
/// alignment-aware mapping, copies the file-backed bytes in, bumps the
/// task's text/data size counters, and advances the task's program break
/// (`brk`) past the highest segment end.
///
/// # Arguments
///
/// * `header`: Parsed ELF header of the executable
/// * `file_obj`: File object to read program headers and segment data from
/// * `task`: Task whose address space receives the segments
/// * `strategy`: Load strategy supplying the base-address policy
///
/// # Returns
///
/// * `Result<u64, ElfLoaderError>`: Entry point, offset by the base address
///   when the executable is position-independent (ET_DYN)
fn load_elf_into_task_static(
    header: &ElfHeader,
    file_obj: &dyn FileObject,
    task: &Task,
    strategy: &LoadStrategy,
) -> Result<u64, ElfLoaderError> {
    // ET_DYN (PIE / shared object) images need a non-zero load base.
    let needs_relocation = header.e_type == ET_DYN;
    // let needs_relocation = false;

    let base_address = (strategy.choose_base_address)(LoadTarget::MainProgram, needs_relocation);
    // Read program headers and load LOAD segments (existing logic)
    for_each_program_header(header, file_obj, |_i, ph| {
        // For LOAD segments, load them into memory
        if ph.p_type == PT_LOAD {
            // Calculate proper alignment-aware mapping with base address
            let segment_addr = base_address + ph.p_vaddr;
            let align = ph.p_align as usize;

            // Calculate the final mapping parameters according to ELF specification
            let (mapping_addr, aligned_size, effective_align) = if align == 0 || align == 1 {
                // No specific alignment requirement, use page alignment:
                // round the start down to a page boundary and grow the size
                // so the whole [segment_addr, segment_addr + memsz) is covered.
                let page_offset = (segment_addr as usize) % PAGE_SIZE;
                let mapping_start = (segment_addr as usize) - page_offset;
                let mapping_size = (ph.p_memsz as usize) + page_offset;
                let aligned_size = (mapping_size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
                (mapping_start, aligned_size, PAGE_SIZE)
            } else {
                // Use p_align, but ensure it's at least PAGE_SIZE for memory mapping
                let effective_align = core::cmp::max(align, PAGE_SIZE);

                // Calculate aligned base address following ELF specification:
                // The aligned base should satisfy: (vaddr % p_align) == (offset % p_align)
                let vaddr_offset = (segment_addr as usize) % align;
                let file_offset = (ph.p_offset as usize) % align;

                // Ensure the alignment relationship is preserved
                if vaddr_offset != file_offset {
                    // Adjust the base address to maintain the required relationship:
                    // shift the start down by the congruence difference (mod align).
                    let adjustment = if vaddr_offset >= file_offset {
                        vaddr_offset - file_offset
                    } else {
                        align - (file_offset - vaddr_offset)
                    };
                    let adjusted_segment_addr = (segment_addr as usize) - adjustment;
                    let aligned_addr = (adjusted_segment_addr) & !(effective_align - 1);
                    let offset = (segment_addr as usize) - aligned_addr;
                    let mapping_size = (ph.p_memsz as usize) + offset;
                    let aligned_size = (mapping_size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
                    (aligned_addr, aligned_size, effective_align)
                } else {
                    // Normal case: alignment relationship is already correct
                    let aligned_addr = (segment_addr as usize) & !(effective_align - 1);
                    let offset = (segment_addr as usize) - aligned_addr;
                    let mapping_size = (ph.p_memsz as usize) + offset;
                    let aligned_size = (mapping_size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
                    (aligned_addr, aligned_size, effective_align)
                }
            };

            // Map the segment with calculated parameters
            map_elf_segment(
                task,
                mapping_addr,
                aligned_size,
                effective_align,
                ph.p_flags,
            )
            .map_err(|e| ElfLoaderError {
                message: format!("Failed to map ELF segment at {:#x}: {:?}", mapping_addr, e),
            })?;

            // Infer the region kind from the segment flags (X => text, R/W => data)
            let segment_type = if ph.p_flags & PF_X != 0 {
                VirtualMemoryRegion::Text
            } else if ph.p_flags & PF_W != 0 || ph.p_flags & PF_R != 0 {
                VirtualMemoryRegion::Data
            } else {
                VirtualMemoryRegion::Unknown
            };

            // Account the mapped size; a segment with none of R/W/X set is
            // rejected as malformed.
            match segment_type {
                VirtualMemoryRegion::Text => {
                    task.text_size
                        .fetch_add(aligned_size as usize, Ordering::SeqCst);
                }
                VirtualMemoryRegion::Data => {
                    task.data_size
                        .fetch_add(aligned_size as usize, Ordering::SeqCst);
                }
                _ => {
                    return Err(ElfLoaderError {
                        message: format!("Unknown segment type: {:#x}", ph.p_flags),
                    });
                }
            }

            // Update brk to track the end of the loaded program
            // brk should be set to the maximum end address of all loaded segments
            let segment_end = mapping_addr + aligned_size;
            let current_brk_raw = task.brk.load(core::sync::atomic::Ordering::SeqCst);
            // usize::MAX appears to be the "brk not yet initialized" sentinel
            // — TODO confirm against Task's initialization code.
            let current_brk = if current_brk_raw == usize::MAX {
                0
            } else {
                current_brk_raw
            };
            if segment_end > current_brk {
                task.brk
                    .store(segment_end, core::sync::atomic::Ordering::SeqCst);
                // crate::println!("[ELF loader] Updated brk to {:#x} (segment end at {:#x})", segment_end, mapping_addr);
            }

            // Load segment data using common function (if there's file data)
            if ph.p_filesz > 0 {
                let mut segment_data = vec![0u8; ph.p_filesz as usize];

                // Seek to segment data position
                file_obj
                    .seek(SeekFrom::Start(ph.p_offset))
                    .map_err(|e| ElfLoaderError {
                        message: format!("Failed to seek to segment data: {:?}", e),
                    })?;

                // Read segment data
                // NOTE(review): the byte count returned by read() is ignored
                // here; a short read would leave a zero-filled tail that gets
                // loaded as if it were program data. Consider validating it
                // the way the ELF-header read does.
                file_obj
                    .read(&mut segment_data)
                    .map_err(|e| ElfLoaderError {
                        message: format!("Failed to read segment data: {:?}", e),
                    })?;

                // Copy data to task's memory space at the correct virtual address
                // Calculate the offset from the mapped region to the actual segment address
                let data_offset = (segment_addr as usize) - mapping_addr;
                let target_vaddr = mapping_addr + data_offset;

                match task.vm_manager.translate_vaddr(target_vaddr) {
                    Some(paddr) => unsafe {
                        // SAFETY: the mapping created above spans at least
                        // p_memsz bytes from segment_addr and is backed by
                        // physically contiguous pages (see map_elf_segment),
                        // so writing p_filesz bytes at paddr stays inside the
                        // allocation — assuming p_filesz <= p_memsz, an ELF
                        // invariant that is not validated here (TODO confirm).
                        core::ptr::copy_nonoverlapping(
                            segment_data.as_ptr(),
                            paddr as *mut u8,
                            ph.p_filesz as usize,
                        );
                    },
                    None => {
                        return Err(ElfLoaderError {
                            message: format!(
                                "Failed to translate virtual address {:#x}",
                                target_vaddr
                            ),
                        });
                    }
                }
            }
        }
        Ok(true) // Continue iteration
    })?;

    // Return entry point adjusted for base address
    let final_entry_point = if needs_relocation {
        base_address + header.e_entry
    } else {
        header.e_entry
    };
    Ok(final_entry_point)
}
1086
1087/// Load program headers into task memory for static executables
1088///
1089/// This function allocates memory space for program headers and copies them
1090/// from the ELF file, returning the virtual address where they are loaded.
1091/// This is needed for static executables where program headers are not
1092/// automatically loaded as part of any segment.
1093///
1094/// # Arguments
1095///
1096/// * `header`: The parsed ELF header containing program header information
1097/// * `file_obj`: The file object to read program header data from
1098/// * `task`: The task to load program headers into
1099///
1100/// # Returns
1101///
1102/// * `Result<u64, ElfLoaderError>`: Virtual address where program headers are loaded
1103///
1104fn load_program_headers_into_memory(
1105    header: &ElfHeader,
1106    file_obj: &dyn FileObject,
1107    task: &Task,
1108) -> Result<u64, ElfLoaderError> {
1109    // Calculate total size of program headers
1110    let phdr_table_size = (header.e_phentsize as u64) * (header.e_phnum as u64);
1111
1112    if phdr_table_size == 0 {
1113        return Err(ElfLoaderError {
1114            message: "No program headers to load".to_string(),
1115        });
1116    }
1117
1118    // Find a suitable virtual address for program headers
1119    // Place them after the highest loaded segment to avoid conflicts
1120    // For simplicity, use a fixed address in the upper memory region
1121    let phdr_vaddr = 0x70000000u64; // 1.75GB - safe region for program headers
1122
1123    // Calculate page-aligned size
1124    let page_aligned_size = ((phdr_table_size as usize) + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
1125
1126    // Map memory for program headers (read-only for security)
1127    map_elf_segment(
1128        task,
1129        phdr_vaddr as usize,
1130        page_aligned_size,
1131        PAGE_SIZE,
1132        PF_R,
1133    )
1134    .map_err(|e| ElfLoaderError {
1135        message: format!("Failed to map memory for program headers: {}", e),
1136    })?;
1137
1138    // Read program headers from file
1139    file_obj
1140        .seek(SeekFrom::Start(header.e_phoff))
1141        .map_err(|e| ElfLoaderError {
1142            message: format!("Failed to seek to program headers: {:?}", e),
1143        })?;
1144
1145    let mut phdr_data = vec![0u8; phdr_table_size as usize];
1146    file_obj.read(&mut phdr_data).map_err(|e| ElfLoaderError {
1147        message: format!("Failed to read program headers: {:?}", e),
1148    })?;
1149
1150    // Copy program headers to task memory
1151    match task.vm_manager.translate_vaddr(phdr_vaddr as usize) {
1152        Some(paddr) => unsafe {
1153            core::ptr::copy_nonoverlapping(
1154                phdr_data.as_ptr(),
1155                paddr as *mut u8,
1156                phdr_table_size as usize,
1157            );
1158        },
1159        None => {
1160            return Err(ElfLoaderError {
1161                message: format!(
1162                    "Failed to translate program headers virtual address {:#x}",
1163                    phdr_vaddr
1164                ),
1165            });
1166        }
1167    }
1168    Ok(phdr_vaddr)
1169}
1170
/// Map an anonymous, physically-backed memory region into `task` for one
/// ELF segment.
///
/// Validates the requested alignment/size, translates the ELF `p_flags`
/// bits (PF_R/PF_W/PF_X) into VM permissions (plus `User` for user tasks),
/// allocates contiguous physical pages, registers the virtual-to-physical
/// mapping with the task's VM manager, and transfers page ownership to the
/// task so the memory is released together with it.
///
/// # Arguments
///
/// * `task`: Task whose address space receives the mapping
/// * `vaddr`: Page-aligned virtual start address
/// * `size`: Mapping size in bytes (non-zero and page-aligned)
/// * `align`: Requested alignment (power of two, at least PAGE_SIZE)
/// * `flags`: ELF segment flags (PF_R | PF_W | PF_X)
///
/// # Errors
///
/// Returns a static message on invalid parameters, overlap with an existing
/// mapping, or physical-page allocation failure.
fn map_elf_segment(
    task: &Task,
    vaddr: usize,
    size: usize,
    align: usize,
    flags: u32,
) -> Result<(), &'static str> {
    // Ensure alignment is greater than zero
    // (subsumed by the power-of-two check below, but kept for the clearer
    // error message on align == 0)
    if align == 0 {
        return Err("Alignment must be greater than zero");
    }

    // Ensure alignment is a power of 2 and at least PAGE_SIZE
    if !align.is_power_of_two() || align < PAGE_SIZE {
        return Err("Invalid alignment: must be power of 2 and at least PAGE_SIZE");
    }

    // Check if the size is valid (must be page-aligned for memory mapping)
    if size == 0 || size % PAGE_SIZE != 0 {
        return Err("Invalid size: must be non-zero and page-aligned");
    }

    // Check if the address is page-aligned (required for memory mapping)
    if vaddr % PAGE_SIZE != 0 {
        return Err("Address is not aligned to PAGE_SIZE");
    }

    // Convert ELF segment flags to VirtualMemoryPermission bits
    let mut permissions = 0;
    if flags & PF_R != 0 {
        permissions |= VirtualMemoryPermission::Read as usize;
    }
    if flags & PF_W != 0 {
        permissions |= VirtualMemoryPermission::Write as usize;
    }
    if flags & PF_X != 0 {
        permissions |= VirtualMemoryPermission::Execute as usize;
    }
    // User-mode tasks additionally need the User bit to access the pages.
    if task.task_type == TaskType::User {
        permissions |= VirtualMemoryPermission::User as usize;
    }

    // Create memory area (end is inclusive)
    let vmarea = MemoryArea {
        start: vaddr,
        end: vaddr + size - 1,
    };

    // Check if the area is overlapping with existing mappings
    // NOTE(review): this only probes the start address `vaddr`; an existing
    // mapping that begins strictly inside (vaddr, vaddr + size) would not be
    // detected here — verify whether search_memory_map is range-aware or
    // whether add_memory_map below rejects partial overlaps.
    if let Some(_existing) = task.vm_manager.search_memory_map(vaddr) {
        // crate::println!("[ELF Loader] ERROR: Memory area {:#x}-{:#x} overlaps with existing mapping {:#x}-{:#x}",
        //     vaddr, vaddr + size - 1, existing.vmarea.start, existing.vmarea.end);
        return Err("Memory area overlaps with existing mapping");
    }

    // Allocate physical memory. The single contiguous pmarea below relies on
    // allocate_raw_pages returning physically contiguous pages.
    let num_of_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
    let pages = allocate_raw_pages(num_of_pages);
    let ptr = pages as *mut u8;
    if ptr.is_null() {
        return Err("Failed to allocate memory");
    }
    let pmarea = MemoryArea {
        start: ptr as usize,
        end: (ptr as usize) + size - 1,
    };

    // Create memory mapping
    let map = VirtualMemoryMap {
        vmarea,
        pmarea,
        permissions,
        is_shared: false, // User program memory should not be shared
        owner: None,
    };

    // Add to VM manager; on failure, return the pages to the allocator so
    // they are not leaked.
    if let Err(e) = task.vm_manager.add_memory_map(map) {
        free_raw_pages(pages, num_of_pages);
        return Err(e);
    }

    // Hand each page to the task so its lifetime is tied to the task.
    for i in 0..num_of_pages {
        task.add_managed_page(ManagedPage {
            vaddr: vaddr + i * PAGE_SIZE,
            // SAFETY: `pages` points at num_of_pages pages freshly returned
            // by allocate_raw_pages, and ownership of each page is moved into
            // a Box exactly once per index. Assumes the allocator's layout is
            // Box-compatible — confirm against the page allocator contract.
            page: unsafe { Box::from_raw(pages.wrapping_add(i)) },
        });
    }

    Ok(())
}
1263
1264/// Build auxiliary vector for dynamic linking
1265pub fn build_auxiliary_vector(load_result: &LoadElfResult) -> alloc::vec::Vec<AuxVec> {
1266    use crate::environment::PAGE_SIZE;
1267
1268    let mut auxv = alloc::vec::Vec::new();
1269
1270    // Program headers information
1271    auxv.push(AuxVec::new(AT_PHDR, load_result.program_headers.phdr_addr));
1272    auxv.push(AuxVec::new(AT_PHENT, load_result.program_headers.phdr_size));
1273    auxv.push(AuxVec::new(
1274        AT_PHNUM,
1275        load_result.program_headers.phdr_count,
1276    ));
1277
1278    // System information
1279    auxv.push(AuxVec::new(AT_PAGESZ, PAGE_SIZE as u64));
1280
1281    // Entry point of main program (not the interpreter)
1282    // For dynamic executables, AT_ENTRY should be the original program's entry point
1283    match &load_result.mode {
1284        ExecutionMode::Dynamic { .. } => {
1285            // For dynamic executables, use the original program's entry point
1286            if let Some(orig_entry) = load_result.original_entry_point {
1287                auxv.push(AuxVec::new(AT_ENTRY, orig_entry));
1288            }
1289        }
1290        ExecutionMode::Static => {
1291            // For static executables, entry point is load_result.entry_point
1292            auxv.push(AuxVec::new(AT_ENTRY, load_result.entry_point));
1293        }
1294    }
1295
1296    // Base address of interpreter (if dynamically linked)
1297    if let Some(interp_base) = load_result.interpreter_base {
1298        auxv.push(AuxVec::new(AT_BASE, interp_base));
1299    }
1300
1301    // Add UID/GID entries to prevent musl secure mode
1302    // Set all IDs to 0 (root) to make real and effective IDs equal
1303    // This prevents libc.secure from being set to true
1304    auxv.push(AuxVec::new(AT_UID, 0)); // Real user ID
1305    auxv.push(AuxVec::new(AT_EUID, 0)); // Effective user ID
1306    auxv.push(AuxVec::new(AT_GID, 0)); // Real group ID
1307    auxv.push(AuxVec::new(AT_EGID, 0)); // Effective group ID
1308
1309    // TODO: Add more auxiliary vector entries as needed:
1310    // - AT_RANDOM: Random bytes for stack canaries
1311    // - AT_PLATFORM: Platform string
1312    // - AT_HWCAP: Hardware capabilities
1313
1314    // Terminate auxiliary vector
1315    auxv.push(AuxVec::new(AT_NULL, 0));
1316
1317    auxv
1318}
1319
1320/// Setup auxiliary vector on the task's stack
1321///
1322/// This function places the auxiliary vector at the top of the stack,
1323/// which is expected by the dynamic linker and C runtime.
1324pub fn setup_auxiliary_vector_on_stack(
1325    task: &Task,
1326    auxv: &[AuxVec],
1327) -> Result<usize, ElfLoaderError> {
1328    // Calculate size needed for auxiliary vector
1329    // Each AuxVec entry is 16 bytes (two u64 values)
1330    let auxv_size = auxv.len() * core::mem::size_of::<AuxVec>();
1331
1332    // Find the top of the stack
1333    let stack_top = crate::environment::USER_STACK_END;
1334    let auxv_start = stack_top - auxv_size;
1335
1336    // Write auxiliary vector to stack
1337    for (i, entry) in auxv.iter().enumerate() {
1338        let offset = i * core::mem::size_of::<AuxVec>();
1339        let vaddr = auxv_start + offset;
1340
1341        // Translate to physical address and write
1342        match task.vm_manager.translate_vaddr(vaddr) {
1343            Some(paddr) => unsafe {
1344                let ptr = paddr as *mut AuxVec;
1345                ptr.write(*entry);
1346            },
1347            None => {
1348                return Err(ElfLoaderError {
1349                    message: format!("Failed to translate auxiliary vector address {:#x}", vaddr),
1350                });
1351            }
1352        }
1353    }
1354
1355    crate::println!(
1356        "Setup auxiliary vector at {:#x} (size: {} entries)",
1357        auxv_start,
1358        auxv.len()
1359    );
1360    Ok(auxv_start)
1361}
1362
1363#[cfg(test)]
1364mod tests;
1365
1366/// Load a single ELF segment into task memory at the specified address
1367fn load_elf_segment_at_address(
1368    ph: &ProgramHeader,
1369    file_obj: &dyn FileObject,
1370    task: &Task,
1371    segment_addr: u64,
1372) -> Result<(), ElfLoaderError> {
1373    let align = if ph.p_align == 0 || ph.p_align == 1 {
1374        PAGE_SIZE
1375    } else {
1376        core::cmp::max(ph.p_align as usize, PAGE_SIZE)
1377    };
1378
1379    // Calculate page-aligned mapping parameters
1380    let page_offset = (segment_addr as usize) % PAGE_SIZE;
1381    let mapping_start = (segment_addr as usize) - page_offset;
1382    let mapping_size = (ph.p_memsz as usize) + page_offset;
1383    let aligned_size = (mapping_size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
1384
1385    // crate::println!("[ELF Loader] Loading segment: vaddr={:#x}, memsz={:#x}, filesz={:#x}, flags={:#x}",
1386    //     segment_addr, ph.p_memsz, ph.p_filesz, ph.p_flags);
1387    // crate::println!("[ELF Loader]   mapping: start={:#x}, size={:#x}, aligned_size={:#x}",
1388    //     mapping_start, mapping_size, aligned_size);
1389
1390    // Map segment with proper page alignment
1391    map_elf_segment(task, mapping_start, aligned_size, align, ph.p_flags).map_err(|e| {
1392        ElfLoaderError {
1393            message: format!("Failed to map ELF segment at {:#x}: {:?}", mapping_start, e),
1394        }
1395    })?;
1396
1397    // Copy file data to memory if there's any
1398    if ph.p_filesz > 0 {
1399        let mut segment_data = vec![0u8; ph.p_filesz as usize];
1400        file_obj
1401            .seek(SeekFrom::Start(ph.p_offset))
1402            .map_err(|e| ElfLoaderError {
1403                message: format!("Failed to seek to segment data: {:?}", e),
1404            })?;
1405        file_obj
1406            .read(&mut segment_data)
1407            .map_err(|e| ElfLoaderError {
1408                message: format!("Failed to read segment data: {:?}", e),
1409            })?;
1410
1411        // Write data to task memory at the correct offset within the mapped region
1412        let data_offset = (segment_addr as usize) - mapping_start;
1413        let target_vaddr = mapping_start + data_offset;
1414
1415        match task.vm_manager.translate_vaddr(target_vaddr) {
1416            Some(paddr) => unsafe {
1417                core::ptr::copy_nonoverlapping(
1418                    segment_data.as_ptr(),
1419                    paddr as *mut u8,
1420                    ph.p_filesz as usize,
1421                );
1422            },
1423            None => {
1424                return Err(ElfLoaderError {
1425                    message: format!(
1426                        "Failed to translate virtual address {:#x} for segment loading",
1427                        target_vaddr
1428                    ),
1429                });
1430            }
1431        }
1432    }
1433
1434    // Update task size information for proper memory management
1435    let segment_type = if ph.p_flags & PF_X != 0 {
1436        task.text_size.fetch_add(aligned_size, Ordering::SeqCst);
1437        "text"
1438    } else if ph.p_flags & PF_W != 0 || ph.p_flags & PF_R != 0 {
1439        task.data_size.fetch_add(aligned_size, Ordering::SeqCst);
1440        "data"
1441    } else {
1442        "unknown"
1443    };
1444
1445    crate::println!(
1446        "Loaded {} segment at {:#x} (size: {:#x})",
1447        segment_type,
1448        segment_addr,
1449        aligned_size
1450    );
1451    Ok(())
1452}