kernel/task/
mod.rs

1//! Task module.
2//!
3//! The task module defines the structure and behavior of tasks in the system.
4
5pub mod elf_loader;
6pub mod namespace;
7pub mod syscall;
8
9extern crate alloc;
10
11use alloc::{
12    boxed::Box,
13    string::{String, ToString},
14    sync::Arc,
15    vec::Vec,
16};
17use core::{cell::UnsafeCell, sync::atomic};
18use spin::{Mutex, RwLock};
19
20use crate::abi::{AbiModule, scarlet::ScarletAbi};
21use crate::sync::waker::Waker;
22use crate::{
23    arch::{
24        KernelContext, Trapframe, get_cpu, trap::user::arch_switch_to_user_space, vcpu::Vcpu,
25        vm::alloc_virtual_address_space,
26    },
27    environment::{
28        DEAFAULT_MAX_TASK_DATA_SIZE, DEAFAULT_MAX_TASK_STACK_SIZE, DEAFAULT_MAX_TASK_TEXT_SIZE,
29        KERNEL_VM_STACK_END, PAGE_SIZE, USER_STACK_END,
30    },
31    fs::VfsManager,
32    ipc::{EventContent, event::ProcessControlType},
33    mem::page::{Page, allocate_raw_pages, free_boxed_page},
34    object::handle::HandleTable,
35    sched::scheduler::{Scheduler, get_scheduler},
36    timer::{TimerHandler, add_timer, get_tick},
37    vm::{
38        manager::VirtualMemoryManager,
39        user_kernel_vm_init, user_vm_init,
40        vmem::{MemoryArea, VirtualMemoryMap, VirtualMemoryRegion},
41    },
42};
43use alloc::collections::BTreeMap;
44use core::ops::Range;
45use core::sync::atomic::{AtomicI32, AtomicU8, AtomicU32, AtomicUsize, Ordering};
46use spin::Once;
47
/// Global registry of task-specific wakers for waitpid.
/// Keyed by global task ID; entries are created lazily by `get_waitpid_waker`
/// and removed by `cleanup_task_waker` when the task is torn down.
static WAITPID_WAKERS: Once<Mutex<BTreeMap<usize, Waker>>> = Once::new();
50
// Note: TASK_ID has been moved to TaskPool::next_id for better ID management,
// including recycling of freed task IDs. Use TaskPool::allocate_id() instead.
/// Global registry of parent task wakers for waitpid(-1) operations.
/// Each parent task has a waker that gets triggered when any of its children
/// exit; entries are removed by `cleanup_parent_waker`.
static PARENT_WAITPID_WAKERS: Once<Mutex<BTreeMap<usize, Waker>>> = Once::new();
57
58/// Initialize the waitpid wakers registry
59fn init_waitpid_wakers() -> Mutex<BTreeMap<usize, Waker>> {
60    Mutex::new(BTreeMap::new())
61}
62
63/// Initialize the parent waitpid waker registry
64fn init_parent_waitpid_wakers() -> Mutex<BTreeMap<usize, Waker>> {
65    Mutex::new(BTreeMap::new())
66}
67
68/// Get or create a waker for waitpid/wait operations for a specific task
69///
70/// This function returns a reference to the waker associated with the given task ID,
71/// used exclusively for waitpid/wait (child termination wait) synchronization.
72/// If no waker exists for the task, a new one is created.
73///
74/// # Arguments
75///
76/// * `task_id` - The ID of the task to get a waitpid/wait waker for
77///
78/// # Returns
79///
80/// A reference to the waker for the specified task
81pub fn get_waitpid_waker(task_id: usize) -> &'static Waker {
82    let wakers_mutex = WAITPID_WAKERS.call_once(init_waitpid_wakers);
83    let mut wakers = wakers_mutex.lock();
84    if !wakers.contains_key(&task_id) {
85        let waker_name = alloc::format!("task_{}", task_id);
86        // We need to create a static string for the waker name
87        let static_name = Box::leak(waker_name.into_boxed_str());
88        wakers.insert(task_id, Waker::new_interruptible(static_name));
89    }
90    // This is safe because we know the waker exists and won't be removed
91    // until the task is cleaned up
92    unsafe {
93        let waker_ptr = wakers.get(&task_id).unwrap() as *const Waker;
94        &*waker_ptr
95    }
96}
97
98// pub fn get_select_waker(...) was removed; use object-level Selectable::wait_until_ready
99
100/// Get or create a parent waker for waitpid(-1) operations
101///
102/// This waker is used when a parent process calls waitpid(-1) to wait for any child to exit.
103/// It is separate from the task-specific waitpid wakers to avoid conflicts, and is used
104/// exclusively for waitpid(-1) (any child termination wait) synchronization.
105///
106/// # Arguments
107///
108/// * `parent_id` - The ID of the parent task
109///
110/// # Returns
111///
112/// A reference to the parent waker
113pub fn get_parent_waitpid_waker(parent_id: usize) -> &'static Waker {
114    let wakers_mutex = PARENT_WAITPID_WAKERS.call_once(init_parent_waitpid_wakers);
115    let mut wakers = wakers_mutex.lock();
116
117    // Create a new waker if it doesn't exist
118    if !wakers.contains_key(&parent_id) {
119        let waker_name = alloc::format!("parent_waker_{}", parent_id);
120        // We need to leak the string to make it 'static
121        let static_name = alloc::boxed::Box::leak(waker_name.into_boxed_str());
122        wakers.insert(parent_id, Waker::new_interruptible(static_name));
123    }
124
125    // Return a reference to the waker
126    // This is safe because the BTreeMap is never dropped and the Waker is never moved
127    unsafe {
128        let waker_ptr = wakers.get(&parent_id).unwrap() as *const Waker;
129        &*waker_ptr
130    }
131}
132
133/// Wake up any processes waiting for a specific task
134///
135/// This function should be called when a task exits to wake up
136/// any parent processes that are waiting for this specific task.
137///
138/// # Arguments
139///
140/// * `task_id` - The ID of the task that has exited
141pub fn wake_task_waiters(task_id: usize) {
142    let wakers_mutex = WAITPID_WAKERS.call_once(init_waitpid_wakers);
143    let wakers = wakers_mutex.lock();
144    if let Some(waker) = wakers.get(&task_id) {
145        waker.wake_all();
146    }
147}
148
149/// Wake up a parent process waiting for any child (waitpid(-1))
150///
151/// This function should be called when any child of a parent exits.
152///
153/// # Arguments
154///
155/// * `parent_id` - The ID of the parent task
156pub fn wake_parent_waiters(parent_id: usize) {
157    let wakers_mutex = PARENT_WAITPID_WAKERS.call_once(init_parent_waitpid_wakers);
158    let wakers = wakers_mutex.lock();
159    if let Some(waker) = wakers.get(&parent_id) {
160        waker.wake_all();
161    }
162}
163
164/// Clean up the waker for a specific task
165///
166/// This function should be called when a task is completely cleaned up
167/// to remove its waker from the global registry.
168///
169/// # Arguments
170///
171/// * `task_id` - The ID of the task to clean up
172pub fn cleanup_task_waker(task_id: usize) {
173    let wakers_mutex = WAITPID_WAKERS.call_once(init_waitpid_wakers);
174    let mut wakers = wakers_mutex.lock();
175    wakers.remove(&task_id);
176}
177
178/// Clean up the parent waker for a specific task
179///
180/// This function should be called when a parent task is completely cleaned up.
181///
182/// # Arguments
183///
184/// * `parent_id` - The ID of the parent task to clean up
185pub fn cleanup_parent_waker(parent_id: usize) {
186    let wakers_mutex = PARENT_WAITPID_WAKERS.call_once(init_parent_waitpid_wakers);
187    let mut wakers = wakers_mutex.lock();
188    wakers.remove(&parent_id);
189}
190
191// pub fn cleanup_select_waker(...) was removed along with task-level select waker
192
/// Types of blocked states for tasks
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum BlockedType {
    /// Interruptible blocking - can be interrupted by signals
    Interruptible,
    /// Uninterruptible blocking - cannot be interrupted, must wait for completion
    Uninterruptible,
}

/// Lifecycle states of a task.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum TaskState {
    NotInitialized,
    Ready,
    Running,
    Blocked(BlockedType),
    Zombie,
    Terminated,
}

impl TaskState {
    /// Encode this state as a `u8` (the representation used for atomic storage).
    pub const fn to_u8(self) -> u8 {
        match self {
            TaskState::NotInitialized => 0,
            TaskState::Ready => 1,
            TaskState::Running => 2,
            TaskState::Blocked(BlockedType::Interruptible) => 3,
            TaskState::Blocked(BlockedType::Uninterruptible) => 4,
            TaskState::Zombie => 5,
            TaskState::Terminated => 6,
        }
    }

    /// Decode a `u8` produced by [`TaskState::to_u8`]; returns `None` for
    /// values outside the valid encoding range.
    pub const fn from_u8(val: u8) -> Option<Self> {
        Some(match val {
            0 => TaskState::NotInitialized,
            1 => TaskState::Ready,
            2 => TaskState::Running,
            3 => TaskState::Blocked(BlockedType::Interruptible),
            4 => TaskState::Blocked(BlockedType::Uninterruptible),
            5 => TaskState::Zombie,
            6 => TaskState::Terminated,
            _ => return None,
        })
    }
}
242
243/// Atomic task state for thread-safe state management
244pub struct AtomicTaskState {
245    inner: AtomicU8,
246}
247
248impl AtomicTaskState {
249    pub const fn new(state: TaskState) -> Self {
250        Self {
251            inner: AtomicU8::new(state.to_u8()),
252        }
253    }
254
255    pub fn load(&self, ordering: Ordering) -> TaskState {
256        TaskState::from_u8(self.inner.load(ordering)).unwrap_or(TaskState::NotInitialized)
257    }
258
259    pub fn store(&self, state: TaskState, ordering: Ordering) {
260        self.inner.store(state.to_u8(), ordering);
261    }
262
263    pub fn compare_exchange(
264        &self,
265        current: TaskState,
266        new: TaskState,
267        success: Ordering,
268        failure: Ordering,
269    ) -> Result<TaskState, TaskState> {
270        match self
271            .inner
272            .compare_exchange(current.to_u8(), new.to_u8(), success, failure)
273        {
274            Ok(_) => Ok(new),
275            Err(actual) => Err(TaskState::from_u8(actual).unwrap_or(TaskState::NotInitialized)),
276        }
277    }
278}
279
/// Privilege class of a task, fixed at creation.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum TaskType {
    /// Runs with the kernel VM layout and a kernel-mode vCPU
    Kernel,
    /// Runs with a user address space and a user-mode vCPU
    User,
}
285
/// ABI Zone structure holding a memory range with an owned ABI module.
pub struct AbiZone {
    // Address range this zone covers (presumably virtual addresses within
    // the task's address space — confirm against abi_zones usage)
    pub range: Range<usize>,
    // ABI implementation owned by this zone
    pub abi: Box<dyn AbiModule + Send + Sync>,
}
291
/// A cell type for task-local data that is only accessed by the hart currently
/// executing the task.
///
/// # Safety
///
/// This type uses `UnsafeCell` internally and is `Sync` so it can live inside
/// `Task` (which is `Send + Sync`).  The safety invariant is:
///
/// * **Only the hart that is currently running this task may access the
///   contents.**  Because a task is scheduled on exactly one hart at a time,
///   there is no concurrent access and no lock is needed.
/// * During `clone_task`, the **parent** accesses its own `TaskLocal` fields
///   (safe – it is the running hart) and writes to the **child's** `TaskLocal`
///   fields (safe – the child has not been added to the scheduler yet, so no
///   other hart can touch it).
pub struct TaskLocal<T> {
    // Interior mutability without locking; soundness rests on the
    // single-hart-per-task invariant documented above.
    inner: UnsafeCell<T>,
}

// SAFETY: Access is restricted to the hart executing the owning task.
// See the doc comment on `TaskLocal` for the full safety argument.
unsafe impl<T> Sync for TaskLocal<T> {}
314
315impl<T> TaskLocal<T> {
316    /// Create a new `TaskLocal` with the given value.
317    pub fn new(value: T) -> Self {
318        Self {
319            inner: UnsafeCell::new(value),
320        }
321    }
322
323    /// Get an immutable reference to the contained value.
324    ///
325    /// # Safety
326    ///
327    /// The caller must be the hart currently executing the owning task,
328    /// or the task must not yet be visible to the scheduler.
329    #[inline]
330    pub unsafe fn get(&self) -> &T {
331        // SAFETY: Upheld by caller (single-hart-per-task invariant).
332        &*self.inner.get()
333    }
334
335    /// Get a mutable reference to the contained value.
336    ///
337    /// # Safety
338    ///
339    /// The caller must be the hart currently executing the owning task,
340    /// or the task must not yet be visible to the scheduler.
341    #[inline]
342    #[allow(clippy::mut_from_ref)]
343    pub unsafe fn get_mut(&self) -> &mut T {
344        // SAFETY: Upheld by caller (single-hart-per-task invariant).
345        &mut *self.inner.get()
346    }
347}
348
/// A schedulable unit of execution (kernel thread or user process).
///
/// Fields are grouped by their synchronization strategy; see the section
/// comments below.
pub struct Task {
    // === Read-only fields (set at creation) ===
    /// Global task ID (0 until assigned by the TaskPool via `set_id`)
    id: usize,
    /// Task ID within the task's namespace (may differ from global ID)
    namespace_id: atomic::AtomicUsize,
    /// Task namespace for ID management
    namespace: RwLock<Arc<namespace::TaskNamespace>>,
    /// Kernel or user task (see `TaskType`)
    pub task_type: TaskType,
    /// Entry point address (used as the initial pc for kernel tasks;
    /// user tasks get their pc from the ELF loader)
    pub entry: usize,
    /// Global ID of the parent task, if any
    parent_id: Option<usize>,
    /// Thread Group ID (TGID)
    tgid: usize,
    /// Upper bound for the stack segment, in bytes (DEAFAULT_MAX_TASK_STACK_SIZE)
    pub max_stack_size: usize,
    /// Upper bound for the data segment, in bytes (DEAFAULT_MAX_TASK_DATA_SIZE)
    pub max_data_size: usize,
    /// Upper bound for the text segment, in bytes (DEAFAULT_MAX_TASK_TEXT_SIZE)
    pub max_text_size: usize,

    // === Atomic fields (lock-free) ===
    /// Task state with atomic transitions
    pub state: AtomicTaskState,
    /// Task priority
    pub priority: AtomicU32,
    /// Time slice for scheduling
    pub time_slice: AtomicU32,
    /// Stack size in bytes
    pub stack_size: AtomicUsize,
    /// Data segment size in bytes
    pub data_size: AtomicUsize,
    /// Text segment size in bytes
    pub text_size: AtomicUsize,
    /// Exit status (i32::MIN represents None)
    pub exit_status: AtomicI32,
    /// Program break (already thread-safe; usize::MAX means "not yet set",
    /// see `get_brk`)
    pub brk: Arc<AtomicUsize>,

    // === RwLock fields (frequent reads) ===
    /// Task name
    pub name: RwLock<String>,
    /// List of child task IDs
    pub children: RwLock<Vec<usize>>,
    /// Managed pages (auto-freed on termination)
    pub managed_pages: RwLock<Vec<ManagedPage>>,
    /// Virtual File System Manager
    ///
    /// # Usage Patterns
    ///
    /// - `None`: Task uses global filesystem namespace (traditional Unix-like behavior)
    /// - `Some(Arc<VfsManager>)`: Task has isolated filesystem namespace (container-like behavior)
    ///
    /// # Thread Safety
    ///
    /// VfsManager is thread-safe and can be shared between tasks using Arc.
    /// All internal operations use RwLock for concurrent access protection.
    pub vfs: RwLock<Option<Arc<VfsManager>>>,
    /// Software timer handlers
    pub software_timers_handlers: RwLock<Vec<Arc<dyn TimerHandler>>>,

    // === Mutex / internally-synchronized / task-local fields ===
    // (not all of these are Mutex-wrapped; some types synchronize internally
    // and TaskLocal fields rely on the single-hart-per-task invariant)
    /// VCPU state for context switching
    pub vcpu: Mutex<Vcpu>,
    /// Kernel context for context switching
    pub kernel_context: Mutex<KernelContext>,
    /// Virtual memory manager (already thread-safe internally)
    pub vm_manager: VirtualMemoryManager,
    /// Default ABI module (task-local: only accessed by the executing hart)
    pub default_abi: TaskLocal<Option<Box<dyn AbiModule + Send + Sync>>>,
    /// ABI zones map (task-local: only accessed by the executing hart)
    pub abi_zones: TaskLocal<BTreeMap<usize, AbiZone>>,
    /// Handle table for kernel objects (already thread-safe internally)
    pub handle_table: HandleTable,
    /// Waker for sleep operations (already thread-safe internally)
    pub sleep_waker: Waker,
    /// Kernel stack window base (slot_index, base_vaddr)
    pub kernel_stack_window_base: Mutex<Option<(usize, usize)>>,

    // === Already protected fields ===
    /// Task-local event queue with priority ordering
    pub event_queue: Mutex<crate::ipc::event::TaskEventQueue>,
    /// Event processing enabled flag
    pub events_enabled: Mutex<bool>,
}
429
/// A physical page owned by a task, recorded so it can be freed automatically
/// when the task terminates (see `Task::managed_pages`).
#[derive(Debug, Clone)]
pub struct ManagedPage {
    // Virtual address this page is mapped at in the task's address space
    pub vaddr: usize,
    // Owned page; ownership was taken from the raw allocation in allocate_pages
    pub page: Box<Page>,
}
435
/// Individual clone flag bits (see [`CloneFlags`]).
pub enum CloneFlagsDef {
    Vm = 0b00000001,      // Clone the VM
    Fs = 0b00000010,      // Clone the filesystem
    Files = 0b00000100,   // Clone the file descriptors
    Thread = 0b00001000,  // Join thread group (share TGID) - Linux CLONE_THREAD semantics
    SetTls = 0b00010000,  // Set TLS pointer for cloned task
}

/// A bit set of [`CloneFlagsDef`] flags controlling clone behavior.
#[derive(Debug, Clone, Copy)]
pub struct CloneFlags {
    raw: u64,
}

impl CloneFlags {
    /// Empty flag set (no flags raised).
    pub fn new() -> Self {
        CloneFlags { raw: 0 }
    }

    /// Build a flag set from a raw bit pattern (e.g. a syscall argument).
    pub fn from_raw(raw: u64) -> Self {
        CloneFlags { raw }
    }

    /// Raise `flag`.
    pub fn set(&mut self, flag: CloneFlagsDef) {
        self.raw |= flag as u64;
    }

    /// Lower `flag`.
    pub fn clear(&mut self, flag: CloneFlagsDef) {
        self.raw &= !(flag as u64);
    }

    /// Whether `flag` is raised.
    pub fn is_set(&self, flag: CloneFlagsDef) -> bool {
        (self.raw & (flag as u64)) != 0
    }

    /// The underlying raw bit pattern.
    pub fn get_raw(&self) -> u64 {
        self.raw
    }
}

impl Default for CloneFlags {
    /// Default clone behavior: share the filesystem and file descriptors.
    fn default() -> Self {
        let raw = CloneFlagsDef::Fs as u64 | CloneFlagsDef::Files as u64;
        CloneFlags { raw }
    }
}
481
482impl Task {
483    /// Create a new task with the root namespace.
484    ///
485    /// # Arguments
486    /// * `name` - Task name
487    /// * `priority` - Task priority
488    /// * `task_type` - Task type (Kernel or User)
489    ///
490    /// # Returns
491    /// A new task in the root namespace
492    pub fn new(name: String, priority: u32, task_type: TaskType) -> Self {
493        Self::new_with_namespace(
494            name,
495            priority,
496            task_type,
497            namespace::get_root_namespace().clone(),
498        )
499    }
500
501    /// Create a new task with a specific namespace.
502    ///
503    /// # Arguments
504    /// * `name` - Task name
505    /// * `priority` - Task priority
506    /// * `task_type` - Task type (Kernel or User)
507    /// * `ns` - Task namespace
508    ///
509    /// # Returns
510    /// A new task in the specified namespace
511    pub fn new_with_namespace(
512        name: String,
513        priority: u32,
514        task_type: TaskType,
515        ns: Arc<namespace::TaskNamespace>,
516    ) -> Self {
517        Task {
518            // Read-only fields
519            id: 0,
520            namespace_id: AtomicUsize::new(0),
521            namespace: RwLock::new(ns),
522            task_type,
523            entry: 0,
524            parent_id: None,
525            tgid: 0,
526            max_stack_size: DEAFAULT_MAX_TASK_STACK_SIZE,
527            max_data_size: DEAFAULT_MAX_TASK_DATA_SIZE,
528            max_text_size: DEAFAULT_MAX_TASK_TEXT_SIZE,
529            // Atomic fields
530            state: AtomicTaskState::new(TaskState::NotInitialized),
531            priority: AtomicU32::new(priority),
532            time_slice: AtomicU32::new(10),
533            stack_size: AtomicUsize::new(0),
534            data_size: AtomicUsize::new(0),
535            text_size: AtomicUsize::new(0),
536            exit_status: AtomicI32::new(i32::MIN), // i32::MIN represents None
537            brk: Arc::new(AtomicUsize::new(usize::MAX)),
538            // RwLock fields
539            name: RwLock::new(name),
540            children: RwLock::new(Vec::new()),
541            managed_pages: RwLock::new(Vec::new()),
542            vfs: RwLock::new(None),
543            software_timers_handlers: RwLock::new(Vec::new()),
544            // Mutex fields
545            vcpu: Mutex::new(Vcpu::new(match task_type {
546                TaskType::Kernel => crate::arch::vcpu::Mode::Kernel,
547                TaskType::User => crate::arch::vcpu::Mode::User,
548            })),
549            kernel_context: Mutex::new(KernelContext::new()),
550            vm_manager: VirtualMemoryManager::new(),
551            default_abi: TaskLocal::new(Some(Box::new(ScarletAbi::default()))),
552            abi_zones: TaskLocal::new(BTreeMap::new()),
553            handle_table: HandleTable::new(),
554            sleep_waker: Waker::new_interruptible("task_sleep_waker"),
555            kernel_stack_window_base: Mutex::new(None),
556            // Already protected
557            event_queue: Mutex::new(crate::ipc::event::TaskEventQueue::new()),
558            events_enabled: Mutex::new(true),
559        }
560    }
561
562    pub fn init(&self) {
563        // Initialize kernel context with the task's entry point
564        // The kernel stack is allocated within the KernelContext
565        *self.kernel_context.lock() = KernelContext::new();
566
567        match self.task_type {
568            TaskType::Kernel => {
569                user_kernel_vm_init(self);
570                /* Set sp to the top of the kernel stack */
571                self.vcpu.lock().set_sp(KERNEL_VM_STACK_END + 1);
572                /* Set pc to the task's entry point */
573                self.vcpu.lock().set_pc(self.entry as u64);
574            }
575            TaskType::User => {
576                user_vm_init(self);
577                /* Set sp to the top of the user stack */
578                self.vcpu.lock().set_sp(USER_STACK_END);
579                /* PC will be set when loading the ELF binary */
580            }
581        }
582
583        /* Set the task state to Ready */
584        self.state.store(TaskState::Ready, Ordering::SeqCst);
585        self.time_slice.store(1, Ordering::SeqCst);
586    }
587
588    pub fn get_id(&self) -> usize {
589        assert!(
590            self.id != 0,
591            "Task ID is 0 - task may not have been added to scheduler yet"
592        );
593        self.id
594    }
595
596    /// Set the task ID (used by TaskPool during task addition)
597    pub fn set_id(&mut self, id: usize) {
598        self.id = id;
599        // For new tasks, initialize TGID to equal ID (thread group leader)
600        // This will be overridden in clone_task for CLONE_VM threads
601        if self.tgid == 0 {
602            self.tgid = id;
603        }
604    }
605
606    /// Set the namespace ID (used by TaskPool during task addition)
607    pub fn set_namespace_id(&self, namespace_id: usize) {
608        self.namespace_id
609            .store(namespace_id, atomic::Ordering::SeqCst);
610    }
611
612    /// Get the task ID within its namespace.
613    ///
614    /// This ID is local to the task's namespace and may differ from the global ID.
615    /// This is the ID that should be exposed to user space and ABI syscalls.
616    ///
617    /// # Returns
618    /// The namespace-local task ID
619    pub fn get_namespace_id(&self) -> usize {
620        let namespace_id = self.namespace_id.load(atomic::Ordering::SeqCst);
621        assert!(
622            namespace_id != 0,
623            "Task namespace_id is 0 - task may not have been added to scheduler yet"
624        );
625        namespace_id
626    }
627
628    /// Get the task's namespace.
629    ///
630    /// # Returns
631    /// Reference to the task's namespace
632    pub fn get_namespace(&self) -> Arc<namespace::TaskNamespace> {
633        self.namespace.read().clone()
634    }
635
636    /// Set the task's namespace.
637    ///
638    /// This allows changing a task's namespace, useful for ABI transitions
639    /// or when moving tasks between namespace contexts.
640    ///
641    /// **Warning**: This method allocates a new namespace-local ID each time
642    /// it's called. Changing a task's namespace multiple times may lead to
643    /// ID conflicts or unexpected behavior. This method should typically only
644    /// be called once during task initialization or ABI transition.
645    ///
646    /// # Arguments
647    /// * `ns` - New namespace for the task
648    pub fn set_namespace(&self, ns: Arc<namespace::TaskNamespace>) {
649        *self.namespace.write() = ns;
650        // Allocate a new namespace-local ID (and register translation mapping)
651        self.namespace_id.store(
652            self.namespace.write().allocate_task_id_for(self.id),
653            atomic::Ordering::SeqCst,
654        );
655    }
656
657    /// Get the Thread Group ID (TGID)
658    ///
659    /// The TGID identifies the thread group (process). For tasks created with
660    /// CLONE_VM (threads), all threads in the group share the same TGID.
661    /// For standalone tasks (no CLONE_VM), TGID equals the task ID.
662    ///
663    /// # Returns
664    /// The thread group ID
665    pub fn get_tgid(&self) -> usize {
666        self.tgid
667    }
668
669    /// Set the Thread Group ID (TGID)
670    ///
671    /// This is used internally when cloning tasks with CLONE_VM to make
672    /// the child thread share the parent's thread group.
673    ///
674    /// # Arguments
675    /// * `tgid` - New thread group ID
676    pub fn set_tgid(&mut self, tgid: usize) {
677        self.tgid = tgid;
678    }
679
680    /// Set the task state
681    ///
682    /// # Arguments
683    /// * `state` - The new task state
684    ///
685    pub fn set_state(&self, state: TaskState) {
686        self.state.store(state, Ordering::SeqCst);
687    }
688
689    /// Get the task state
690    ///
691    /// # Returns
692    /// The task state
693    ///
694    pub fn get_state(&self) -> TaskState {
695        self.state.load(Ordering::SeqCst)
696    }
697
698    /// Get the size of the task.
699    ///
700    /// # Returns
701    /// The size of the task in bytes.
702    pub fn get_size(&self) -> usize {
703        self.stack_size.load(Ordering::SeqCst)
704            + self.text_size.load(Ordering::SeqCst)
705            + self.data_size.load(Ordering::SeqCst)
706    }
707
708    /// Get the program break (NOT work in Kernel task)
709    ///
710    /// # Returns
711    /// The program break address
712    pub fn get_brk(&self) -> usize {
713        // Return brk if set (represents program end address)
714        // Otherwise fallback to legacy size-based calculation for compatibility
715        let brk = self.brk.load(Ordering::SeqCst);
716        if brk == usize::MAX {
717            self.text_size.load(Ordering::SeqCst) + self.data_size.load(Ordering::SeqCst)
718        } else {
719            brk
720        }
721    }
722
723    /// Set the program break (NOT work in Kernel task)
724    ///
725    /// # Arguments
726    /// * `brk` - The new program break address
727    ///
728    /// # Returns
729    /// If successful, returns Ok(()), otherwise returns an error.
730    pub fn set_brk(&self, brk: usize) -> Result<(), &'static str> {
731        let prev_brk = self.get_brk();
732        if brk < prev_brk {
733            /* Free pages */
734            /* Round address to the page boundary */
735            let prev_addr = (prev_brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
736            let addr = (brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
737            let num_of_pages = (prev_addr - addr) / PAGE_SIZE;
738            self.free_data_pages(addr, num_of_pages);
739        } else if brk > prev_brk {
740            /* Allocate pages */
741            /* Round address to the page boundary */
742            let prev_addr = (prev_brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
743            let addr = (brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
744            let num_of_pages = (addr - prev_addr) / PAGE_SIZE;
745
746            // crate::println!("[set_brk] Expanding: prev_brk={:#x} -> brk={:#x}", prev_brk, brk);
747            // crate::println!("[set_brk] Page allocation: prev_addr={:#x}, addr={:#x}, num_pages={}",
748            //     prev_addr, addr, num_of_pages);
749
750            if num_of_pages > 0 {
751                match self.vm_manager.search_memory_map(prev_addr) {
752                    Some(_existing_map) => {
753                        // crate::println!("[set_brk] Existing mapping found: VA {:#x}-{:#x}, skipping allocation",
754                        //     existing_map.vmarea.start, existing_map.vmarea.end);
755                    }
756                    None => {
757                        // crate::println!("[set_brk] No existing mapping, allocating {} pages at {:#x}",
758                        //     num_of_pages, prev_addr);
759                        match self.allocate_data_pages(prev_addr, num_of_pages) {
760                            Ok(_) => {
761                                // crate::println!("[set_brk] Successfully allocated {} pages", num_of_pages);
762                            }
763                            Err(_e) => {
764                                // crate::println!("[set_brk] Failed to allocate pages: {}", e);
765                                return Err("Failed to allocate pages");
766                            }
767                        }
768                    }
769                }
770            }
771        }
772        self.brk.store(brk, Ordering::SeqCst);
773        Ok(())
774    }
775
    /// Allocate pages for the task.
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address to allocate pages (NOTE: The address must be page aligned)
    /// * `num_of_pages` - The number of pages to allocate
    /// * `permissions` - The permission bits for the new mapping
    ///
    /// # Returns
    /// The memory map of the allocated pages, if successful.
    ///
    /// # Errors
    /// If the address is not page aligned, or if the pages cannot be allocated.
    ///
    /// # Note
    /// This function don't increment the size of the task.
    /// You must increment the size of the task manually.
    ///
    pub fn allocate_pages(
        &self,
        vaddr: usize,
        num_of_pages: usize,
        permissions: usize,
    ) -> Result<VirtualMemoryMap, &'static str> {
        if vaddr % PAGE_SIZE != 0 {
            return Err("Address is not page aligned");
        }

        // Contiguous physical allocation; `pages` is the base pointer.
        // NOTE(review): num_of_pages == 0 would make `paddr + size - 1`
        // underflow below — presumably callers never pass 0; confirm.
        let pages = allocate_raw_pages(num_of_pages);
        let size = num_of_pages * PAGE_SIZE;
        let paddr = pages as usize;
        // Physical and virtual ranges are inclusive (end = start + size - 1).
        let mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: paddr,
                end: paddr + size - 1,
            },
            vmarea: MemoryArea {
                start: vaddr,
                end: vaddr + size - 1,
            },
            permissions,
            is_shared: false, // Default to not shared for task-allocated pages
            owner: None,
        };
        // NOTE(review): the map_err closure panics, so the `?` is effectively
        // unreachable — a failed add_memory_map aborts rather than returning Err.
        self.vm_manager
            .add_memory_map(mmap.clone())
            .map_err(|e| panic!("Failed to add memory map: {}", e))?;

        // Transfer ownership of each raw page into the managed-page list so it
        // is freed automatically when the task terminates.
        for i in 0..num_of_pages {
            // SAFETY-relevant: Box::from_raw takes ownership of the i-th page
            // of the contiguous allocation returned by allocate_raw_pages.
            let page = unsafe { Box::from_raw(pages.wrapping_add(i)) };
            let vaddr = mmap.vmarea.start + i * PAGE_SIZE;
            self.add_managed_page(ManagedPage { vaddr, page });
        }

        Ok(mmap)
    }
831
    /// Free pages for the task.
    ///
    /// Removes the page-aligned span `[vaddr, vaddr + num_of_pages * PAGE_SIZE)` from
    /// the task's address space. When an existing memory map only partially overlaps
    /// that span, the non-freed head and/or tail portion of the map is re-added so that
    /// only the requested pages are released. Physical pages tracked as `ManagedPage`s
    /// are returned to the allocator; pages not managed by this task are only unmapped.
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address to free pages (NOTE: The address must be page aligned)
    /// * `num_of_pages` - The number of pages to free
    pub fn free_pages(&self, vaddr: usize, num_of_pages: usize) {
        // Page frame number of the first page to free.
        let page = vaddr / PAGE_SIZE;
        for p in 0..num_of_pages {
            // Shadowed: virtual address of the p-th page being freed.
            let vaddr = (page + p) * PAGE_SIZE;
            match self.vm_manager.remove_memory_map_by_addr(vaddr) {
                Some(mmap) => {
                    if p == 0 && mmap.vmarea.start < vaddr {
                        /* Re add the first part of the memory map */
                        // The removed map begins before the freed span: restore the
                        // leading [mmap.vmarea.start, vaddr) portion untouched.
                        let size = vaddr - mmap.vmarea.start;
                        let paddr = mmap.pmarea.start;
                        let mmap1 = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + size - 1,
                            },
                            vmarea: MemoryArea {
                                start: mmap.vmarea.start,
                                end: vaddr - 1,
                            },
                            permissions: mmap.permissions,
                            is_shared: mmap.is_shared,
                            owner: mmap.owner.clone(),
                        };
                        self.vm_manager
                            .add_memory_map(mmap1)
                            .map_err(|e| panic!("Failed to add memory map: {}", e))
                            .unwrap();
                        // println!("Removed map : {:#x} - {:#x}", mmap.vmarea.start, mmap.vmarea.end);
                        // println!("Re added map: {:#x} - {:#x}", mmap1.vmarea.start, mmap1.vmarea.end);
                    }
                    if p == num_of_pages - 1 && mmap.vmarea.end > vaddr + PAGE_SIZE - 1 {
                        /* Re add the second part of the memory map */
                        // The removed map extends past the freed span: restore the
                        // trailing (vaddr + PAGE_SIZE, mmap.vmarea.end] portion, with
                        // its physical base offset by the distance into the old map.
                        let size = mmap.vmarea.end - (vaddr + PAGE_SIZE) + 1;
                        let paddr = mmap.pmarea.start + (vaddr + PAGE_SIZE - mmap.vmarea.start);
                        let mmap2 = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + size - 1,
                            },
                            vmarea: MemoryArea {
                                start: vaddr + PAGE_SIZE,
                                end: mmap.vmarea.end,
                            },
                            permissions: mmap.permissions,
                            is_shared: mmap.is_shared,
                            owner: mmap.owner.clone(),
                        };
                        self.vm_manager
                            .add_memory_map(mmap2)
                            .map_err(|e| panic!("Failed to add memory map: {}", e))
                            .unwrap();
                        // println!("Removed map : {:#x} - {:#x}", mmap.vmarea.start, mmap.vmarea.end);
                        // println!("Re added map: {:#x} - {:#x}", mmap2.vmarea.start, mmap2.vmarea.end);
                    }
                    // let offset = vaddr - mmap.vmarea.start;
                    // free_raw_pages((mmap.pmarea.start + offset) as *mut Page, 1);

                    // Only free physical memory this task actually owns; shared or
                    // borrowed pages have no ManagedPage entry and are left alone.
                    if let Some(free_page) = self.remove_managed_page(vaddr) {
                        free_boxed_page(free_page.page);
                    }

                    // println!("Freed pages : {:#x} - {:#x}", vaddr, vaddr + PAGE_SIZE - 1);
                }
                None => {}
            }
        }
        /* Unmap pages */
        // Drop the page-table entries last, after the map bookkeeping above is done.
        let asid = self.vm_manager.get_asid();
        let root_pagetable = self.vm_manager.get_root_page_table().unwrap();
        for p in 0..num_of_pages {
            let vaddr = (page + p) * PAGE_SIZE;
            root_pagetable.unmap(asid, vaddr);
        }
    }
911
912    /// Allocate text pages for the task. And increment the size of the task.
913    ///
914    /// # Arguments
915    /// * `vaddr` - The virtual address to allocate pages (NOTE: The address must be page aligned)
916    /// * `num_of_pages` - The number of pages to allocate
917    ///
918    /// # Returns
919    /// The memory map of the allocated pages, if successful.
920    ///
921    /// # Errors
922    /// If the address is not page aligned, or if the pages cannot be allocated.
923    ///
924    pub fn allocate_text_pages(
925        &self,
926        vaddr: usize,
927        num_of_pages: usize,
928    ) -> Result<VirtualMemoryMap, &'static str> {
929        let permissions = VirtualMemoryRegion::Text.default_permissions();
930        let res = self.allocate_pages(vaddr, num_of_pages, permissions);
931        if res.is_ok() {
932            self.text_size
933                .fetch_add(num_of_pages * PAGE_SIZE, Ordering::SeqCst);
934        }
935        res
936    }
937
938    /// Free text pages for the task. And decrement the size of the task.
939    ///
940    /// # Arguments
941    /// * `vaddr` - The virtual address to free pages (NOTE: The address must be page aligned)
942    /// * `num_of_pages` - The number of pages to free
943    ///
944    pub fn free_text_pages(&self, vaddr: usize, num_of_pages: usize) {
945        self.free_pages(vaddr, num_of_pages);
946        self.text_size
947            .fetch_sub(num_of_pages * PAGE_SIZE, Ordering::SeqCst);
948    }
949
950    /// Allocate stack pages for the task. And increment the size of the task.
951    ///
952    /// # Arguments
953    /// * `vaddr` - The virtual address to allocate pages (NOTE: The address must be page aligned)
954    /// * `num_of_pages` - The number of pages to allocate
955    ///
956    /// # Returns
957    /// The memory map of the allocated pages, if successful.
958    ///
959    /// # Errors
960    /// If the address is not page aligned, or if the pages cannot be allocated.
961    ///
962    pub fn allocate_stack_pages(
963        &self,
964        vaddr: usize,
965        num_of_pages: usize,
966    ) -> Result<VirtualMemoryMap, &'static str> {
967        let permissions = VirtualMemoryRegion::Stack.default_permissions();
968        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
969        self.stack_size
970            .fetch_add(num_of_pages * PAGE_SIZE, Ordering::SeqCst);
971        Ok(res)
972    }
973
974    /// Free stack pages for the task. And decrement the size of the task.
975    ///
976    /// # Arguments
977    /// * `vaddr` - The virtual address to free pages (NOTE: The address must be page aligned)
978    /// * `num_of_pages` - The number of pages to free
979    ///
980    pub fn free_stack_pages(&self, vaddr: usize, num_of_pages: usize) {
981        self.free_pages(vaddr, num_of_pages);
982        self.stack_size
983            .fetch_sub(num_of_pages * PAGE_SIZE, Ordering::SeqCst);
984    }
985
986    /// Allocate data pages for the task. And increment the size of the task.
987    ///
988    /// # Arguments
989    /// * `vaddr` - The virtual address to allocate pages (NOTE: The address must be page aligned)
990    /// * `num_of_pages` - The number of pages to allocate
991    ///
992    /// # Returns
993    /// The memory map of the allocated pages, if successful.
994    ///
995    /// # Errors
996    /// If the address is not page aligned, or if the pages cannot be allocated.
997    ///
998    pub fn allocate_data_pages(
999        &self,
1000        vaddr: usize,
1001        num_of_pages: usize,
1002    ) -> Result<VirtualMemoryMap, &'static str> {
1003        let permissions = VirtualMemoryRegion::Data.default_permissions();
1004        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
1005        self.data_size
1006            .fetch_add(num_of_pages * PAGE_SIZE, Ordering::SeqCst);
1007        Ok(res)
1008    }
1009
1010    /// Free data pages for the task. And decrement the size of the task.
1011    ///
1012    /// # Arguments
1013    /// * `vaddr` - The virtual address to free pages (NOTE: The address must be page aligned)
1014    /// * `num_of_pages` - The number of pages to free
1015    ///
1016    pub fn free_data_pages(&self, vaddr: usize, num_of_pages: usize) {
1017        self.free_pages(vaddr, num_of_pages);
1018        self.data_size
1019            .fetch_sub(num_of_pages * PAGE_SIZE, Ordering::SeqCst);
1020    }
1021
1022    /// Allocate guard pages for the task.
1023    ///
1024    /// # Arguments
1025    /// * `vaddr` - The virtual address to allocate pages (NOTE: The address must be page aligned)
1026    /// * `num_of_pages` - The number of pages to allocate
1027    ///
1028    /// # Returns
1029    /// The memory map of the allocated pages, if successful.
1030    ///
1031    /// # Errors
1032    /// If the address is not page aligned, or if the pages cannot be allocated.
1033    ///
1034    /// # Note
1035    /// Gurad pages are not allocated in the physical memory space.
1036    /// This function only maps the pages to the virtual memory space.
1037    ///
1038    pub fn allocate_guard_pages(
1039        &self,
1040        vaddr: usize,
1041        num_of_pages: usize,
1042    ) -> Result<VirtualMemoryMap, &'static str> {
1043        let permissions = VirtualMemoryRegion::Guard.default_permissions();
1044        let mmap = VirtualMemoryMap {
1045            pmarea: MemoryArea { start: 0, end: 0 },
1046            vmarea: MemoryArea {
1047                start: vaddr,
1048                end: vaddr + num_of_pages * PAGE_SIZE - 1,
1049            },
1050            permissions,
1051            is_shared: VirtualMemoryRegion::Guard.is_shareable(), // Guard pages can be shared
1052            owner: None,
1053        };
1054        Ok(mmap)
1055    }
1056
1057    /// Add pages to the task
1058    ///
1059    /// # Arguments
1060    /// * `pages` - The managed page to add
1061    ///
1062    /// # Note
1063    /// Pages added as ManagedPage of the Task will be automatically freed when the Task is terminated.
1064    /// So, you must not free them by calling free_raw_pages/free_boxed_pages manually.
1065    ///
1066    pub fn add_managed_page(&self, pages: ManagedPage) {
1067        self.managed_pages.write().push(pages);
1068    }
1069
1070    /// Get managed page
1071    ///
1072    /// # Arguments
1073    /// * `vaddr` - The virtual address of the page
1074    ///
1075    /// # Returns
1076    /// The managed page if found, otherwise None
1077    ///
1078    fn get_managed_page(&self, vaddr: usize) -> Option<ManagedPage> {
1079        let pages = self.managed_pages.read();
1080        for page in pages.iter() {
1081            if page.vaddr == vaddr {
1082                return Some(ManagedPage {
1083                    vaddr: page.vaddr,
1084                    page: page.page.clone(),
1085                });
1086            }
1087        }
1088        None
1089    }
1090
1091    /// Remove managed page
1092    ///
1093    /// # Arguments
1094    /// * `vaddr` - The virtual address of the page
1095    ///
1096    /// # Returns
1097    /// The removed managed page if found, otherwise None
1098    ///
1099    pub fn remove_managed_page(&self, vaddr: usize) -> Option<crate::task::ManagedPage> {
1100        let mut pages = self.managed_pages.write();
1101        for i in 0..pages.len() {
1102            if pages[i].vaddr == vaddr {
1103                let page = pages.remove(i);
1104                return Some(page);
1105            }
1106        }
1107        None
1108    }
1109
1110    // Set the entry point
1111    pub fn set_entry_point(&self, entry: usize) {
1112        self.vcpu.lock().set_pc(entry as u64);
1113    }
1114
1115    /// Get the parent ID
1116    ///
1117    /// # Returns
1118    /// The parent task ID, or None if there is no parent
1119    pub fn get_parent_id(&self) -> Option<usize> {
1120        self.parent_id
1121    }
1122
1123    /// Set the parent task
1124    ///
1125    /// # Arguments
1126    /// * `parent_id` - The ID of the parent task
1127    pub fn set_parent_id(&mut self, parent_id: usize) {
1128        self.parent_id = Some(parent_id);
1129    }
1130
1131    /// Add a child task
1132    ///
1133    /// # Arguments
1134    /// * `child_id` - The ID of the child task
1135    pub fn add_child(&self, child_id: usize) {
1136        let mut children = self.children.write();
1137        if !children.contains(&child_id) {
1138            children.push(child_id);
1139        }
1140    }
1141
1142    /// Remove a child task
1143    ///
1144    /// # Arguments
1145    /// * `child_id` - The ID of the child task to remove
1146    ///
1147    /// # Returns
1148    /// true if the removal was successful, false if the child task was not found
1149    pub fn remove_child(&self, child_id: usize) -> bool {
1150        let mut children = self.children.write();
1151        if let Some(pos) = children.iter().position(|&id| id == child_id) {
1152            children.remove(pos);
1153            true
1154        } else {
1155            false
1156        }
1157    }
1158
1159    /// Get the list of child tasks
1160    ///
1161    /// # Returns
1162    /// A vector of child task IDs
1163    pub fn get_children(&self) -> Vec<usize> {
1164        self.children.read().clone()
1165    }
1166
1167    /// Set the exit status
1168    ///
1169    /// # Arguments
1170    /// * `status` - The exit status
1171    pub fn set_exit_status(&self, status: i32) {
1172        self.exit_status.store(status, Ordering::SeqCst);
1173    }
1174
1175    /// Get the exit status
1176    ///
1177    /// # Returns
1178    /// The exit status, or None if not set
1179    pub fn get_exit_status(&self) -> Option<i32> {
1180        let status = self.exit_status.load(Ordering::SeqCst);
1181        if status == i32::MIN {
1182            None
1183        } else {
1184            Some(status)
1185        }
1186    }
1187
    /// Resolve the ABI to use for the given address
    ///
    /// This method calls a closure with the ABI module that should be used
    /// for a system call issued from the given address. It searches the ABI zones map
    /// and returns the appropriate ABI, falling back to the default ABI if no zone matches.
    ///
    /// # Arguments
    /// * `addr` - The program counter address where the system call was issued
    /// * `f` - Closure to call with the ABI module
    ///
    /// # Returns
    /// The result of the closure
    pub fn with_resolve_abi_mut<R, F>(&self, addr: usize, f: F) -> R
    where
        F: FnOnce(&mut (dyn AbiModule + Send + Sync)) -> R,
    {
        // Search for the zone containing addr using efficient BTreeMap range query:
        // `range_mut(..=addr).next_back()` yields the zone with the greatest start
        // key <= addr, i.e. the only candidate that could contain addr.
        // SAFETY: This is the currently executing task on this hart
        let abi_zones = unsafe { self.abi_zones.get_mut() };
        if let Some((_start, zone)) = abi_zones.range_mut(..=addr).next_back() {
            // The candidate's start is <= addr, but its end may still be below
            // addr, so confirm containment before dispatching to it.
            if zone.range.contains(&addr) {
                return f(zone.abi.as_mut());
            }
        }
        // No zone found, use default ABI
        // SAFETY: This is the currently executing task on this hart
        let abi = unsafe { self.default_abi.get_mut() };
        f(abi.as_deref_mut().expect("default_abi not set"))
    }
1217
1218    /// Execute a closure with the default ABI
1219    ///
1220    /// # Arguments
1221    /// * `f` - Closure to call with the default ABI
1222    ///
1223    /// # Returns
1224    /// The result of the closure
1225    pub fn with_default_abi<F, R>(&self, f: F) -> R
1226    where
1227        F: FnOnce(&(dyn AbiModule + Send + Sync)) -> R,
1228    {
1229        // SAFETY: This is the currently executing task on this hart
1230        let abi = unsafe { self.default_abi.get() };
1231        f(abi.as_deref().expect("default_abi not set"))
1232    }
1233
1234    /// Run a closure with mutable access to the default ABI and a reference to the task
1235    ///
1236    /// Since `default_abi` is task-local (no lock), we can safely provide both
1237    /// `&mut AbiModule` and `&Task` without any take/restore dance.
1238    pub fn with_default_abi_mut<R, F>(&self, f: F) -> R
1239    where
1240        F: FnOnce(&mut (dyn AbiModule + Send + Sync), &Task) -> R,
1241    {
1242        // SAFETY: This is the currently executing task on this hart
1243        let abi = unsafe { self.default_abi.get_mut() };
1244        let abi_ref = abi.as_deref_mut().expect("default_abi not set");
1245        f(abi_ref, self)
1246    }
1247
    /// Clone this task, creating a near-identical copy
    ///
    /// Duplicates this task's address space (copied page-by-page, or shared when
    /// `CloneFlagsDef::Vm` is set), register state, ABI state, size accounting,
    /// handle table and (depending on flags) filesystem state into a new `Task`.
    ///
    /// # Arguments
    /// * `flags` - Clone behavior flags (`Vm`, `Files`, `Fs`, `Thread`, ...)
    ///
    /// # Returns
    /// The cloned task
    ///
    /// # Errors
    /// If the task cannot be cloned, an error is returned.
    ///
    /// # Note
    /// The parent/child relationship is NOT established here; the caller must
    /// link the tasks after the child is registered with the scheduler (see the
    /// comment near the end of this function).
    ///
    pub fn clone_task(&self, flags: CloneFlags) -> Result<Task, &'static str> {
        // Create a new task in the same namespace as the parent
        let mut child = Task::new_with_namespace(
            self.name.read().clone(),
            self.priority.load(Ordering::SeqCst),
            self.task_type,
            self.namespace.read().clone(),
        );

        // First, set up the virtual memory manager with the same ASID allocation
        match self.task_type {
            TaskType::Kernel => {
                // For kernel tasks, we need to call init to set up the kernel VM
                child.init();
            }
            TaskType::User => {
                if !flags.is_set(CloneFlagsDef::Vm) {
                    // For user tasks, manually set up VM without calling init()
                    // to avoid creating new stack that would overwrite parent's stack content
                    let asid = alloc_virtual_address_space();
                    child.vm_manager.set_asid(asid);
                } else {
                    // CLONE_VM: share the same address space via Arc<VirtualMemoryManager>
                    child.vm_manager = self.vm_manager.clone();
                }
            }
        }

        if !flags.is_set(CloneFlagsDef::Vm) {
            // Copy or share memory maps from parent to child without cloning lists
            self.vm_manager.memmaps_iter_with(|iter| {
                for mmap in iter {
                    let num_pages =
                        (mmap.vmarea.end - mmap.vmarea.start + 1 + PAGE_SIZE - 1) / PAGE_SIZE;
                    if num_pages == 0 {
                        continue;
                    }

                    let vaddr = mmap.vmarea.start;
                    if mmap.is_shared {
                        // Shared memory regions: just reference the same physical pages
                        let shared_mmap = VirtualMemoryMap {
                            pmarea: mmap.pmarea,
                            vmarea: mmap.vmarea,
                            permissions: mmap.permissions,
                            is_shared: true,
                            owner: mmap.owner.clone(),
                        };
                        child
                            .vm_manager
                            .add_memory_map(shared_mmap.clone())
                            .map_err(|_| "Failed to add shared memory map to child task")?;

                        // Pre-map trampoline page if applicable
                        // (the highest page of the address space is the trampoline)
                        if mmap.vmarea.start == 0xffff_ffff_ffff_f000 {
                            if let Some(root_pagetable) = child.vm_manager.get_root_page_table() {
                                root_pagetable
                                    .map_memory_area(
                                        child.vm_manager.get_asid(),
                                        shared_mmap,
                                        true,
                                        true,
                                    )
                                    .map_err(|_| "Failed to map trampoline page")?;
                            }
                        }
                    } else {
                        // Private memory regions: allocate new pages and copy contents
                        let permissions = mmap.permissions;
                        let pages = allocate_raw_pages(num_pages);
                        let size = num_pages * PAGE_SIZE;
                        let paddr = pages as usize;
                        let new_mmap = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + (size - 1),
                            },
                            vmarea: MemoryArea {
                                start: vaddr,
                                end: vaddr + (size - 1),
                            },
                            permissions,
                            is_shared: false,
                            owner: mmap.owner.clone(),
                        };

                        // Copy original contents page-by-page
                        for i in 0..num_pages {
                            let src_page_addr = mmap.pmarea.start + i * PAGE_SIZE;
                            let dst_page_addr = new_mmap.pmarea.start + i * PAGE_SIZE;
                            unsafe {
                                core::ptr::copy_nonoverlapping(
                                    src_page_addr as *const u8,
                                    dst_page_addr as *mut u8,
                                    PAGE_SIZE,
                                );
                            }
                            // Register each copied page so the child frees it on exit.
                            child.add_managed_page(ManagedPage {
                                vaddr: new_mmap.vmarea.start + i * PAGE_SIZE,
                                page: unsafe { Box::from_raw(pages.wrapping_add(i)) },
                            });
                        }

                        child
                            .vm_manager
                            .add_memory_map(new_mmap)
                            .map_err(|_| "Failed to add memory map to child task")?;
                    }
                }
                Ok::<(), &'static str>(())
            })?;
        }

        // Copy register states (architecture-specific VCPU state)
        self.vcpu.lock().clone_to(&mut child.vcpu.lock());

        // Clone the default ABI and ABI zones
        // SAFETY: Child task is not yet visible to scheduler, parent is currently executing
        unsafe {
            *child.default_abi.get_mut() = Some(
                self.default_abi
                    .get()
                    .as_ref()
                    .expect("default_abi not set")
                    .clone_boxed(),
            );
            // Clone ABI zones (each zone contains a boxed ABI that needs to be cloned)
            for (start, zone) in self.abi_zones.get().iter() {
                let new_zone = AbiZone {
                    range: zone.range.clone(),
                    abi: zone.abi.clone_boxed(),
                };
                child.abi_zones.get_mut().insert(*start, new_zone);
            }
            // Notify child's default ABI instance that cloning has completed
            // Child is not yet in the scheduler, so direct access is safe.
            if let Some(abi_boxed) = child.default_abi.get_mut().as_mut() {
                let _ = abi_boxed.on_task_cloned(self, &child, flags);
            }
        }

        // Copy state such as data size
        child
            .stack_size
            .store(self.stack_size.load(Ordering::SeqCst), Ordering::SeqCst);
        child
            .data_size
            .store(self.data_size.load(Ordering::SeqCst), Ordering::SeqCst);
        child
            .text_size
            .store(self.text_size.load(Ordering::SeqCst), Ordering::SeqCst);
        child.max_stack_size = self.max_stack_size;
        child.max_data_size = self.max_data_size;
        child.max_text_size = self.max_text_size;
        // Program break must be shared when CLONE_VM is set, because the heap lives in the shared
        // address space. If not shared, the child gets an independent copy of the current brk.
        if flags.is_set(CloneFlagsDef::Vm) {
            child.brk = self.brk.clone();
        } else {
            let parent_brk = self.brk.load(Ordering::SeqCst);
            child.brk = Arc::new(AtomicUsize::new(parent_brk));
        }

        // Copy scheduling and event handling state
        child
            .time_slice
            .store(self.time_slice.load(Ordering::SeqCst), Ordering::SeqCst);
        // Note: software_timers_handlers, sleep_waker, event_queue are NOT copied
        // as they are task-specific runtime state that should start fresh

        // Set the same entry point
        child.entry = self.entry;

        if flags.is_set(CloneFlagsDef::Files) {
            // Share the handle table (CLONE_FILES behavior)
            // clone() creates a shallow copy that shares the same underlying data
            child.handle_table = self.handle_table.clone();
        } else {
            // Create an independent copy of the handle table (fork-like behavior)
            child.handle_table = self.handle_table.deep_clone();
        }

        if flags.is_set(CloneFlagsDef::Fs) {
            // Clone the filesystem manager
            if let Some(vfs) = self.vfs.read().clone() {
                *child.vfs.write() = Some(vfs.clone());
                // Current working directory is managed within VfsManager
            } else {
                *child.vfs.write() = None;
            }
        }

        // Ensure the cloned task has its own high-VA kernel stack window.
        // Task::new() already allocates a per-task kernel stack (KernelContext), but clone paths
        // intentionally avoid Task::init() (especially for user tasks). That means the trampoline-
        // managed kstack window mapping must be set up here.
        if child.get_kernel_stack_window_base().is_none() {
            crate::vm::setup_trampoline_for_task_kstack_window(&mut child)?;
        }
        // Copy the parent's current scheduling state into the child
        child
            .state
            .store(self.state.load(Ordering::SeqCst), Ordering::SeqCst);

        // NOTE: Parent-child relationship will be established AFTER add_task()
        // when the child has a valid ID. The caller is responsible for calling:
        //   child.set_parent_id(self.id);
        //   self.add_child(child.get_id());
        // after adding the child to the scheduler.

        // Set TGID: if CLONE_THREAD, share parent's TGID (join thread group)
        // Otherwise, child becomes a new thread group leader (TGID will be set to its own ID)
        // This matches Linux CLONE_THREAD semantics
        if flags.is_set(CloneFlagsDef::Thread) {
            // Thread: share parent's TGID (join existing thread group)
            child.tgid = self.tgid;
        } // else: new process, TGID will be set to child's ID in set_id()

        Ok(child)
    }
1483
1484    /// Exit the task
1485    ///
1486    /// # Arguments
1487    /// * `status` - The exit status
1488    ///
1489    pub fn exit(&self, status: i32) {
1490        // Close all open handles only if this task is the sole owner of the
1491        // handle table.  When CLONE_FILES is used (thread::spawn), multiple
1492        // tasks share the same Arc<HandleTableInner>.  Closing all handles
1493        // here would destroy handles that sibling/parent threads still need.
1494        if self.handle_table.is_sole_owner() {
1495            self.handle_table.close_all();
1496        }
1497        // Let current ABI perform exit-time cleanup (Linux: clear_child_tid, robust list, etc.)
1498        // Use take/restore to avoid aliasing &mut self and &mut field
1499        self.with_default_abi_mut(|abi, task| abi.on_task_exit(task));
1500
1501        match self.parent_id {
1502            Some(parent_id) => {
1503                if get_scheduler().get_task_by_id(parent_id).is_none() {
1504                    // crate::println!("Task {}: Parent {} not found, terminating", self.id, parent_id);
1505                    self.state.store(TaskState::Terminated, Ordering::SeqCst);
1506                    return;
1507                }
1508                /* Set the exit status */
1509                self.set_exit_status(status);
1510                self.state.store(TaskState::Zombie, Ordering::SeqCst);
1511
1512                // TODO: Notify parent via ABI-specific mechanism
1513                // crate::println!("Task {}: Set to Zombie state, parent {}", self.id, parent_id);
1514            }
1515            None => {
1516                /* If the task has no parent, it is terminated */
1517                // crate::println!("Task {}: No parent, terminating", self.id);
1518                self.state.store(TaskState::Terminated, Ordering::SeqCst);
1519            }
1520        }
1521
1522        // Task cleanup completed - ABI module handles event cleanup
1523
1524        if mytask().is_none() || mytask().unwrap().get_id() != self.id {
1525            // Not the current task, nothing more to do
1526            return;
1527        }
1528
1529        // The scheduler will handle saving the current task state internally
1530        if let Some(current_task) = mytask() {
1531            get_scheduler().schedule(current_task.get_trapframe());
1532        }
1533    }
1534
1535    /// Exit all tasks in the thread group
1536    ///
1537    /// This terminates all tasks with the same TGID (thread group).
1538    /// This is similar to Linux's exit_group system call.
1539    ///
1540    /// # Arguments
1541    /// * `status` - The exit status for all tasks in the group
1542    ///
1543    /// # Behavior
1544    /// - Terminates all tasks with the same TGID
1545    /// - The calling task is set to Zombie/Terminated
1546    /// - Other tasks in the group are forcefully terminated
1547    pub fn exit_group(&self, status: i32) {
1548        let tgid = self.tgid;
1549        let my_id = self.id;
1550
1551        // Get all task IDs in the system
1552        let scheduler = get_scheduler();
1553        let all_task_ids = scheduler.get_all_task_ids();
1554
1555        // Terminate all tasks with the same TGID (except self)
1556        for task_id in all_task_ids {
1557            if task_id == my_id {
1558                continue; // Skip self
1559            }
1560
1561            if let Some(task) = scheduler.get_task_by_id(task_id) {
1562                if task.get_tgid() == tgid {
1563                    // Terminate this thread group member
1564                    crate::println!(
1565                        "[exit_group] Task {} terminating sibling task {} (TGD={})",
1566                        my_id,
1567                        task_id,
1568                        tgid
1569                    );
1570                    // Set state to Terminated directly (bypass normal exit)
1571                    // Use unsafe to modify state through immutable reference
1572                    // This is safe because we're in a termination context
1573                    let task_ptr = task as *const Task as *mut Task;
1574                    unsafe {
1575                        (*task_ptr)
1576                            .state
1577                            .store(TaskState::Terminated, Ordering::SeqCst);
1578                        (*task_ptr).exit_status.store(status, Ordering::SeqCst);
1579                        // Close handles to prevent resource leaks
1580                        (*task_ptr).handle_table.close_all();
1581                    }
1582                }
1583            }
1584        }
1585
1586        // Now exit the current task normally
1587        self.exit(status);
1588    }
1589
1590    /// Wait for a child task to exit and collect its status
1591    ///
1592    /// # Arguments
1593    /// * `child_id` - The ID of the child task to wait for
1594    ///
1595    /// # Returns
1596    /// The exit status of the child task, or an error if the child is not found or not in Zombie state
1597    pub fn wait(&self, child_id: usize) -> Result<i32, WaitError> {
1598        if !self.children.read().contains(&child_id) {
1599            crate::println!("[Task {}] wait: No such child task: {}", self.id, child_id);
1600            return Err(WaitError::NoSuchChild("No such child task".to_string()));
1601        }
1602
1603        if let Some(child_task) = get_scheduler().get_task_by_id(child_id) {
1604            if child_task.get_state() == TaskState::Zombie {
1605                let status = child_task.get_exit_status().unwrap_or(-1);
1606                child_task.set_state(TaskState::Terminated);
1607                self.remove_child(child_id);
1608                Ok(status)
1609            } else {
1610                Err(WaitError::ChildNotExited(
1611                    "Child has not exited or is not a zombie".to_string(),
1612                ))
1613            }
1614        } else {
1615            Err(WaitError::ChildTaskNotFound(
1616                "Child task not found".to_string(),
1617            ))
1618        }
1619    }
1620
    /// Sleep the current task for the specified number of ticks.
    /// This blocks the task and registers a timer to wake it up.
    ///
    /// # Arguments
    /// * `trapframe` - The trapframe of the current CPU state
    /// * `ticks` - The number of ticks to sleep
    ///
    pub fn sleep(&self, trapframe: &mut Trapframe, ticks: u64) {
        // One-shot timer callback that wakes this task when the deadline fires.
        struct SleepWakerHandler {
            task_id: usize,
            // Tick at which the sleep started; kept for debugging only.
            _start_tick: u64,
        }

        impl TimerHandler for SleepWakerHandler {
            fn on_timer_expired(self: Arc<Self>, _context: usize) {
                // The task may have been torn down before the timer fired;
                // only act if the scheduler still knows it.
                if let Some(task) = get_scheduler().get_task_by_id(self.task_id) {
                    let handler: Arc<dyn TimerHandler> = self.clone();
                    task.remove_software_timer_handler(&handler);
                    // Memory barrier to ensure state change is visible
                    core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
                    // crate::println!("Task {} woke up after {} ticks", self.task_id, get_tick() - self.start_tick);
                    // The per-task waitpid waker doubles as the sleep wakeup channel.
                    let waker = get_waitpid_waker(self.task_id);
                    waker.wake_all();
                }
            }
        }

        let wake_tick = get_tick() + ticks;
        let handler: Arc<dyn crate::timer::TimerHandler> = Arc::new(SleepWakerHandler {
            task_id: self.id,
            _start_tick: get_tick(),
        });
        add_timer(wake_tick, &handler, 0);

        // Keep the handler registered on the task so it stays alive (and can
        // be removed) until the timer expires.
        self.add_software_timer_handler(handler);
        // Memory barrier to ensure timer handler registration is visible
        core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
        // Block on the per-task waker until the timer callback wakes us.
        let waker = get_waitpid_waker(self.id);
        waker.wait(self.get_id(), trapframe);
    }
1661
1662    // VFS Helper Methods
1663
1664    /// Set the VFS manager
1665    ///
1666    /// # Arguments
1667    /// * `vfs` - The VfsManager to set as the VFS
1668    pub fn set_vfs(&self, vfs: Arc<VfsManager>) {
1669        *self.vfs.write() = Some(vfs);
1670    }
1671
1672    /// Get a reference to the VFS
1673    pub fn get_vfs(&self) -> Option<Arc<VfsManager>> {
1674        self.vfs.read().clone()
1675    }
1676
1677    pub fn add_software_timer_handler(&self, timer: Arc<dyn TimerHandler>) {
1678        self.software_timers_handlers.write().push(timer);
1679    }
1680
1681    pub fn remove_software_timer_handler(&self, timer: &Arc<dyn TimerHandler>) {
1682        let mut handlers = self.software_timers_handlers.write();
1683        if let Some(pos) = handlers.iter().position(|x| Arc::ptr_eq(x, timer)) {
1684            handlers.remove(pos);
1685        }
1686    }
1687
1688    /// Enable event processing for this task (similar to enabling interrupts)
1689    pub fn enable_events(&self) {
1690        let mut enabled = self.events_enabled.lock();
1691        *enabled = true;
1692    }
1693
1694    /// Disable event processing for this task (similar to disabling interrupts)
1695    pub fn disable_events(&self) {
1696        let mut enabled = self.events_enabled.lock();
1697        *enabled = false;
1698    }
1699
1700    /// Check if events are enabled for this task
1701    pub fn events_enabled(&self) -> bool {
1702        *self.events_enabled.lock()
1703    }
1704
    /// Process pending events if events are enabled
    /// This should be called by the scheduler before resuming the task
    ///
    /// Following signal-like semantics:
    /// - Process a limited number of events per scheduler cycle to avoid starvation
    /// - Critical events (like KILL) are processed immediately
    /// - Normal events are batched and processed in priority order
    pub fn process_pending_events(&self) -> Result<(), &'static str> {
        // Check if events are enabled
        if !self.events_enabled() {
            return Ok(()); // Events disabled, skip processing
        }

        // Delegate to ABI module for event processing
        self.with_default_abi_mut(|abi, _| {
            const MAX_EVENTS_PER_CYCLE: usize = 8; // Prevent scheduler starvation
            let mut processed_count = 0;

            // Process events with limits to prevent infinite loops
            while processed_count < MAX_EVENTS_PER_CYCLE {
                // Dequeue inside a short scope so the queue lock is NOT held
                // while the ABI handler runs below.
                let event = {
                    let mut queue = self.event_queue.lock();
                    queue.dequeue()
                };

                match event {
                    Some(event) => {
                        processed_count += 1;

                        // Check if this is a critical event that requires immediate attention.
                        // Computed before handle_event, which consumes the event.
                        let is_critical = self.is_critical_event(&event);

                        // Let ABI handle the event
                        abi.handle_event(event, self.id as u32)?;

                        // Check if events were disabled during handling
                        if !self.events_enabled() {
                            break;
                        }

                        // If we processed a critical event, we can stop here
                        // to allow the ABI module to take appropriate action
                        if is_critical {
                            break;
                        }
                    }
                    None => break, // No more events
                }
            }

            // If we hit the limit and there are still events, the scheduler
            // will call us again on the next cycle
            if processed_count == MAX_EVENTS_PER_CYCLE {
                let queue = self.event_queue.lock();
                if !queue.is_empty() {
                    // Log that we're deferring events to next cycle
                    // crate::early_println!("Task {}: Deferring {} events to next scheduler cycle",
                    //                      self.id, queue.len());
                }
            }

            Ok(())
        })
    }
1769
1770    /// Check if an event is critical and should be processed immediately
1771    /// Critical events typically cannot be ignored and affect task state directly
1772    fn is_critical_event(&self, event: &crate::ipc::event::Event) -> bool {
1773        use crate::ipc::event::EventPriority;
1774
1775        // High/Critical priority events are always considered critical
1776        match event.metadata.priority {
1777            EventPriority::Critical => return true,
1778            EventPriority::High => {
1779                // Some high priority events are critical depending on content
1780                match &event.content {
1781                    EventContent::ProcessControl(ProcessControlType::Kill) => true,
1782                    EventContent::Custom { event_id, .. } => {
1783                        // Could map specific event IDs to critical signals
1784                        *event_id == 9 // SIGKILL-like event
1785                    }
1786                    _ => false,
1787                }
1788            }
1789            _ => false,
1790        }
1791    }
1792
1793    // /// Get a mutable reference to the kernel context for context switching
1794    // pub fn get_kernel_context_mut(&mut self) -> &mut KernelContext {
1795    //     self.kernel_context.lock().deref_mut()
1796    // }
1797
1798    // /// Get a reference to the kernel context
1799    // pub fn get_kernel_context(&self) -> &KernelContext {
1800    //     self.kernel_context.lock().deref()
1801    // }
1802
1803    pub fn with_kernel_context<R>(&self, f: impl FnOnce(&mut KernelContext) -> R) -> R {
1804        let mut kctx = self.kernel_context.lock();
1805        f(&mut kctx)
1806    }
1807
1808    /// Get the kernel stack bottom address for this task
1809    ///
1810    /// # Returns
1811    /// The kernel stack bottom address as u64, or 0 if no kernel stack is allocated
1812    pub fn get_kernel_stack_bottom_paddr(&self) -> u64 {
1813        self.kernel_context.lock().get_kernel_stack_bottom_paddr()
1814    }
1815
1816    /// Get the kernel stack memory area for this task
1817    ///
1818    /// # Returns
1819    /// The kernel stack memory area as a MemoryArea
1820    ///
1821    pub fn get_kernel_stack_memory_area_paddr(&self) -> MemoryArea {
1822        self.kernel_context
1823            .lock()
1824            .get_kernel_stack_memory_area_paddr()
1825    }
1826
    /// Get a mutable reference to the trapframe for this task
    ///
    /// The trapframe contains the user-space register state and is located
    /// at the top of the kernel stack. This provides access to modify the
    /// user context during system calls, interrupts, and context switches.
    ///
    /// If a kernel stack window is mapped (high-VA), this returns a reference
    /// via the mapped virtual address. Otherwise, it returns a reference via
    /// the physical address directly.
    ///
    /// # Returns
    /// A mutable reference to the Trapframe
    ///
    /// # Panics
    /// Panics if no kernel stack window is mapped for this task.
    pub fn get_trapframe(&self) -> &mut Trapframe {
        // If we have a kernel stack window mapped, use the high-VA address
        if let Some((_slot, base)) = *self.kernel_stack_window_base.lock() {
            // Trapframe sits at the top of the window:
            // base + PAGE_SIZE + kernel stack size - size of the frame itself.
            let trapframe_offset = crate::environment::PAGE_SIZE
                + crate::environment::TASK_KERNEL_STACK_SIZE
                - core::mem::size_of::<Trapframe>();
            let trapframe_vaddr = base + trapframe_offset;
            // NOTE(review): this hands out &mut from &self via a raw pointer;
            // callers are expected to ensure exclusive access — confirm.
            unsafe { &mut *(trapframe_vaddr as *mut Trapframe) }
        } else {
            // Fallback to physical address (should not happen for user tasks after init)
            // self.kernel_context.lock().get_trapframe()
            panic!("get_trapframe: No kernel stack window mapped");
        }
    }
1853
1854    /// Internal: set kernel stack window base (slot index and base vaddr)
1855    pub fn set_kernel_stack_window_base(&self, base: Option<(usize, usize)>) {
1856        *self.kernel_stack_window_base.lock() = base;
1857    }
1858
1859    /// Get kernel stack window base (slot index and base vaddr)
1860    pub fn get_kernel_stack_window_base(&self) -> Option<(usize, usize)> {
1861        *self.kernel_stack_window_base.lock()
1862    }
1863}
1864
/// Errors that can occur while waiting on a child task.
#[derive(Debug)]
pub enum WaitError {
    /// The given ID is not registered as a child of the calling task.
    NoSuchChild(String),
    /// The child exists but has not yet entered the Zombie state.
    ChildNotExited(String),
    /// The child ID was listed, but the scheduler no longer knows the task.
    ChildTaskNotFound(String),
}
1871
1872impl WaitError {
1873    pub fn message(&self) -> &str {
1874        match self {
1875            WaitError::NoSuchChild(msg) => msg,
1876            WaitError::ChildNotExited(msg) => msg,
1877            WaitError::ChildTaskNotFound(msg) => msg,
1878        }
1879    }
1880}
1881
// Task destruction: release per-task VM resources that would otherwise leak.
impl Drop for Task {
    fn drop(&mut self) {
        // Best-effort teardown of kernel stack window mapping
        crate::vm::teardown_trampoline_for_task_kstack_window(self);
    }
}
1888
1889/// Create a new kernel task.
1890///
1891/// # Arguments
1892/// * `name` - The name of the task
1893/// * `priority` - The priority of the task
1894/// * `func` - The function to run in the task
1895///
1896/// # Returns
1897/// The new task.
1898pub fn new_kernel_task(name: String, priority: u32, func: fn()) -> Task {
1899    let mut task = Task::new(name, priority, TaskType::Kernel);
1900    task.entry = func as usize;
1901    task
1902}
1903
1904/// Create a new user task.
1905///
1906/// # Arguments
1907/// * `name` - The name of the task
1908/// * `priority` - The priority of the task
1909///
1910/// # Returns
1911/// The new task.
1912pub fn new_user_task(name: String, priority: u32) -> Task {
1913    Task::new(name, priority, TaskType::User)
1914}
1915
1916#[cfg(test)]
1917static mut MOCK_CURRENT_TASK: Option<*mut Task> = None;
1918
1919#[cfg(test)]
1920/// Set a mock current task for testing purposes
1921///
1922/// This function allows tests to override the return value of mytask()
1923/// for controlled testing scenarios.
1924///
1925/// # Arguments
1926/// * `task` - The task to return from mytask()
1927///
1928/// # Safety
1929/// The caller must ensure the task pointer remains valid for the duration
1930/// of the test and that clear_mock_current_task() is called when done.
1931/// This function is only safe to call in single-threaded test environments.
1932pub unsafe fn set_mock_current_task(task: &'static mut Task) {
1933    unsafe {
1934        MOCK_CURRENT_TASK = Some(task as *mut Task);
1935    }
1936}
1937
1938#[cfg(test)]
1939/// Clear the mock current task, reverting to normal scheduler behavior
1940///
1941/// # Safety
1942/// This function is only safe to call in single-threaded test environments.
1943pub unsafe fn clear_mock_current_task() {
1944    unsafe {
1945        MOCK_CURRENT_TASK = None;
1946    }
1947}
1948
/// Get the current task.
///
/// In test builds, a mock task registered via `set_mock_current_task`
/// takes precedence over the scheduler's notion of the current task.
///
/// # Returns
/// The current task if it exists.
pub fn mytask() -> Option<&'static Task> {
    #[cfg(test)]
    {
        // SAFETY: tests are single-threaded; the mock pointer is kept valid
        // by the caller of set_mock_current_task.
        unsafe {
            if let Some(task_ptr) = MOCK_CURRENT_TASK {
                return Some(&*task_ptr);
            }
        }
    }

    // Normal path: ask the scheduler for this CPU's current task.
    let cpu = get_cpu();
    get_scheduler().get_current_task(cpu.get_cpuid())
}
1966
1967/// Set the current working directory for the current task via VfsManager
1968///
1969/// This function sets the current working directory of the calling task
1970/// using the VfsManager's path-based API.
1971///
1972/// # Arguments
1973/// * `path` - The new working directory path
1974///
1975/// # Returns
1976/// * `true` if successful, `false` if no current task or VfsManager
1977pub fn set_current_task_cwd(path: String) -> bool {
1978    if let Some(task) = mytask() {
1979        if let Some(vfs) = task.vfs.read().as_ref() {
1980            // Use VfsManager to set current working directory
1981            vfs.set_cwd_by_path(&path).is_ok()
1982        } else {
1983            false // No VfsManager available
1984        }
1985    } else {
1986        false
1987    }
1988}
1989
/// Internal function to perform kernel context switch between tasks
/// This function is called when a task is first scheduled.
///
/// Never returns: control transfers to user space via the architecture
/// trap-return path.
pub fn task_initial_kernel_entrypoint() -> ! {
    let cpu = get_cpu();
    // NOTE(review): the unwrap assumes a current task is installed for this
    // CPU whenever this entry point runs — confirm against the scheduler.
    let current_task = unsafe {
        get_scheduler()
            .get_current_task_mut(cpu.get_cpuid())
            .unwrap()
    };
    // Prepare execution state, then enter user mode via the task's trapframe.
    Scheduler::setup_task_execution(cpu, current_task);
    arch_switch_to_user_space(current_task.get_trapframe());
}
2002
2003#[cfg(test)]
2004mod tests {
2005    use alloc::string::ToString;
2006    use alloc::sync::Arc;
2007    use core::sync::atomic::Ordering;
2008
2009    use crate::task::CloneFlags;
2010
    #[test_case]
    fn test_set_brk() {
        let mut task = super::new_user_task("Task0".to_string(), 0);
        task.init();
        // brk starts at 0 and must track every adjustment exactly.
        assert_eq!(task.get_brk(), 0);
        task.set_brk(0x1000).unwrap();
        assert_eq!(task.get_brk(), 0x1000);
        task.set_brk(0x2000).unwrap();
        assert_eq!(task.get_brk(), 0x2000);
        // Shrinking below the previous break is also allowed.
        task.set_brk(0x1008).unwrap();
        assert_eq!(task.get_brk(), 0x1008);
        task.set_brk(0x1000).unwrap();
        assert_eq!(task.get_brk(), 0x1000);
    }
2025
    #[test_case]
    fn test_task_parent_child_relationship() {
        // Reset scheduler state before test
        let scheduler = crate::sched::scheduler::get_scheduler();
        scheduler.reset();

        let mut parent_task = super::new_user_task("ParentTask".to_string(), 0);
        parent_task.init();

        let mut child_task = super::new_user_task("ChildTask".to_string(), 0);
        child_task.init();

        // Add tasks to scheduler to allocate IDs
        let parent_id = scheduler.add_task(parent_task, 0);
        let child_id = scheduler.add_task(child_task, 0);

        // Set parent-child relationship using allocated IDs
        // We need to do this sequentially due to borrow checker
        {
            let child_task = scheduler.get_task_by_id(child_id).unwrap();
            child_task.set_parent_id(parent_id);
        }
        {
            let parent_task = scheduler.get_task_by_id(parent_id).unwrap();
            parent_task.add_child(child_id);
        }

        // Verify parent-child relationship (each lookup in its own scope)
        {
            let child_task = scheduler.get_task_by_id(child_id).unwrap();
            assert_eq!(child_task.get_parent_id(), Some(parent_id));
        }
        {
            let parent_task = scheduler.get_task_by_id(parent_id).unwrap();
            assert!(parent_task.get_children().contains(&child_id));
        }

        // Remove child and verify the link is gone from the parent side
        {
            let parent_task = scheduler.get_task_by_id(parent_id).unwrap();
            assert!(parent_task.remove_child(child_id));
            assert!(!parent_task.get_children().contains(&child_id));
        }
    }
2070
    #[test_case]
    fn test_task_exit_status() {
        let mut task = super::new_user_task("TaskWithExitStatus".to_string(), 0);
        task.init();

        // Verify initial exit status is None
        assert_eq!(task.get_exit_status(), None);

        // Set and verify exit status; the most recent store wins.
        task.set_exit_status(0);
        assert_eq!(task.get_exit_status(), Some(0));

        task.set_exit_status(1);
        assert_eq!(task.get_exit_status(), Some(1));
    }
2086
    #[test_case]
    fn test_clone_task_memory_copy() {
        // End-to-end check that clone_task() deep-copies private data pages:
        // same layout and contents, but physically separate memory.

        // Reset scheduler state before test
        let scheduler = crate::sched::scheduler::get_scheduler();
        scheduler.reset();

        let mut parent_task = super::new_user_task("ParentTask".to_string(), 0);
        parent_task.init();

        // Allocate some memory pages for the parent task
        let vaddr = 0x1000;
        let num_pages = 2;
        let mmap = parent_task.allocate_data_pages(vaddr, num_pages).unwrap();

        // Save the physical address and permissions before adding to scheduler
        let parent_paddr = mmap.pmarea.start;
        let parent_vaddr_start = mmap.vmarea.start;
        let parent_vaddr_end = mmap.vmarea.end;
        let parent_perms = mmap.permissions;

        // Write test data to parent's memory
        let test_data: [u8; 8] = [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0];
        unsafe {
            let dst_ptr = mmap.pmarea.start as *mut u8;
            core::ptr::copy_nonoverlapping(test_data.as_ptr(), dst_ptr, test_data.len());
        }

        // Get parent memory map count before cloning
        let parent_memmap_count = parent_task.vm_manager.memmap_len();

        // Clone the parent task
        let child_task = parent_task.clone_task(CloneFlags::default()).unwrap();

        // For fork-like clones (no CLONE_VM), brk must NOT be shared.
        assert!(
            !Arc::ptr_eq(&child_task.brk, &parent_task.brk),
            "Child should not share brk with parent unless CLONE_VM is set"
        );

        // Get child memory map count after cloning
        let child_memmap_count = child_task.vm_manager.memmap_len();

        // Verify that the number of memory maps are identical
        assert_eq!(
            child_memmap_count, parent_memmap_count,
            "Child should have the same number of memory maps as parent: child={}, parent={}",
            child_memmap_count, parent_memmap_count
        );

        // Save values that will be needed after add_task
        // (add_task consumes the Task, so snapshot everything first)
        let parent_pc = parent_task.vcpu.lock().get_pc();
        let parent_entry = parent_task.entry;
        let parent_state = parent_task.state.load(Ordering::SeqCst);
        let child_pc = child_task.vcpu.lock().get_pc();
        let child_entry = child_task.entry;
        let child_state = child_task.state.load(Ordering::SeqCst);
        let child_managed_pages_len = child_task.managed_pages.read().len();

        // Add both tasks to scheduler to establish parent-child relationship
        let scheduler = crate::sched::scheduler::get_scheduler();
        let parent_id = scheduler.add_task(parent_task, 0);
        let child_id = scheduler.add_task(child_task, 0);

        // Establish parent-child relationship
        {
            let child = scheduler.get_task_by_id(child_id).unwrap();
            child.set_parent_id(parent_id);
        }
        {
            let parent = scheduler.get_task_by_id(parent_id).unwrap();
            parent.add_child(child_id);
        }

        // Verify parent-child relationship was established (in separate scopes)
        {
            let child = scheduler.get_task_by_id(child_id).unwrap();
            assert_eq!(child.get_parent_id(), Some(parent_id));
        }
        {
            let parent = scheduler.get_task_by_id(parent_id).unwrap();
            assert!(parent.get_children().contains(&child_id));
        }

        // Get references for further verification (in separate scopes)
        let child_stack_size = {
            let child = scheduler.get_task_by_id(child_id).unwrap();
            child.stack_size.load(Ordering::SeqCst)
        };
        let child_data_size = {
            let child = scheduler.get_task_by_id(child_id).unwrap();
            child.data_size.load(Ordering::SeqCst)
        };
        let child_text_size = {
            let child = scheduler.get_task_by_id(child_id).unwrap();
            child.text_size.load(Ordering::SeqCst)
        };
        let parent_stack_size = {
            let parent = scheduler.get_task_by_id(parent_id).unwrap();
            parent.stack_size.load(Ordering::SeqCst)
        };
        let parent_data_size = {
            let parent = scheduler.get_task_by_id(parent_id).unwrap();
            parent.data_size.load(Ordering::SeqCst)
        };
        let parent_text_size = {
            let parent = scheduler.get_task_by_id(parent_id).unwrap();
            parent.text_size.load(Ordering::SeqCst)
        };

        // Verify memory sizes were copied
        assert_eq!(child_stack_size, parent_stack_size);
        assert_eq!(child_data_size, parent_data_size);
        assert_eq!(child_text_size, parent_text_size);

        // Find the corresponding memory map in child that matches our test allocation
        let child_mmap = {
            let mut found = None;
            let child = scheduler.get_task_by_id(child_id).unwrap();
            child.vm_manager.with_memmaps(|mm| {
                for m in mm.values() {
                    if m.vmarea.start == vaddr
                        && m.vmarea.end == vaddr + num_pages * crate::environment::PAGE_SIZE - 1
                    {
                        found = Some(m.clone());
                        break;
                    }
                }
            });
            found.expect("Test memory map not found in child task")
        };

        // Verify the virtual memory ranges match
        assert_eq!(child_mmap.vmarea.start, parent_vaddr_start);
        assert_eq!(child_mmap.vmarea.end, parent_vaddr_end);
        assert_eq!(child_mmap.permissions, parent_perms);

        // Verify the data was copied correctly
        unsafe {
            let parent_ptr = parent_paddr as *const u8;
            let child_ptr = child_mmap.pmarea.start as *const u8;

            // Check that physical addresses are different (separate memory)
            assert_ne!(
                parent_ptr, child_ptr,
                "Parent and child should have different physical memory"
            );

            // Check that the data content is identical
            for i in 0..test_data.len() {
                let parent_byte = *parent_ptr.offset(i as isize);
                let child_byte = *child_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte, "Data mismatch at offset {}", i);
            }
        }

        // Verify that modifying parent's memory doesn't affect child's memory
        unsafe {
            let parent_ptr = mmap.pmarea.start as *mut u8;
            let original_value = *parent_ptr;
            *parent_ptr = 0xFF; // Modify first byte in parent

            let child_ptr = child_mmap.pmarea.start as *const u8;
            let child_first_byte = *child_ptr;

            // Child's first byte should still be the original value
            assert_eq!(
                child_first_byte, original_value,
                "Child memory should be independent from parent"
            );
        }

        // Verify register states were copied
        assert_eq!(child_pc, parent_pc);

        // Verify entry point was copied
        assert_eq!(child_entry, parent_entry);

        // Verify state was copied
        assert_eq!(child_state, parent_state);

        // Verify that both tasks have the correct number of managed pages
        assert!(
            child_managed_pages_len >= num_pages,
            "Child should have at least the test pages in managed pages"
        );
    }
2273
    #[test_case]
    fn test_clone_task_stack_copy() {
        // Verifies that clone_task() copies the user stack into separate
        // physical pages with identical contents.

        // Reset scheduler state before test
        let scheduler = crate::sched::scheduler::get_scheduler();
        scheduler.reset();

        let mut parent_task = super::new_user_task("ParentWithStack".to_string(), 0);
        parent_task.init();

        // Find the stack memory map in parent
        let stack_mmap = {
            let mut found = None;
            parent_task.vm_manager.with_memmaps(|mm| {
                for mmap in mm.values() {
                    // Stack should be near USER_STACK_END and have stack permissions
                    use crate::vm::vmem::VirtualMemoryRegion;
                    if mmap.vmarea.end == crate::environment::USER_STACK_END - 1
                        && mmap.permissions == VirtualMemoryRegion::Stack.default_permissions()
                    {
                        found = Some(mmap.clone());
                        break;
                    }
                }
            });
            found.expect("Stack memory map not found in parent task")
        };

        // Write test data to parent's stack
        // (one page in, to avoid touching the very bottom of the region)
        let stack_test_data: [u8; 16] = [
            0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
            0x99, 0x00,
        ];
        unsafe {
            let stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *mut u8;
            core::ptr::copy_nonoverlapping(
                stack_test_data.as_ptr(),
                stack_ptr,
                stack_test_data.len(),
            );
        }

        // Clone the parent task
        let child_task = parent_task.clone_task(CloneFlags::default()).unwrap();

        // Find the corresponding stack memory map in child
        let child_stack_mmap = {
            let mut found = None;
            child_task.vm_manager.with_memmaps(|mm| {
                for mmap in mm.values() {
                    use crate::vm::vmem::VirtualMemoryRegion;
                    if mmap.vmarea.start == stack_mmap.vmarea.start
                        && mmap.vmarea.end == stack_mmap.vmarea.end
                        && mmap.permissions == VirtualMemoryRegion::Stack.default_permissions()
                    {
                        found = Some(mmap.clone());
                        break;
                    }
                }
            });
            found.expect("Stack memory map not found in child task")
        };

        // Verify that stack content was copied correctly
        unsafe {
            let parent_stack_ptr =
                (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;
            let child_stack_ptr =
                (child_stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;

            // Check that physical addresses are different (separate memory)
            assert_ne!(
                parent_stack_ptr, child_stack_ptr,
                "Parent and child should have different stack physical memory"
            );

            // Check that the stack data content is identical
            for i in 0..stack_test_data.len() {
                let parent_byte = *parent_stack_ptr.offset(i as isize);
                let child_byte = *child_stack_ptr.offset(i as isize);
                assert_eq!(
                    parent_byte, child_byte,
                    "Stack data mismatch at offset {}: parent={:#x}, child={:#x}",
                    i, parent_byte, child_byte
                );
            }
        }

        // Verify that modifying parent's stack doesn't affect child's stack
        unsafe {
            let parent_stack_ptr =
                (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *mut u8;
            let original_value = *parent_stack_ptr;
            *parent_stack_ptr = 0xFE; // Modify first byte in parent stack

            let child_stack_ptr =
                (child_stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;
            let child_first_byte = *child_stack_ptr;

            // Child's first byte should still be the original value
            assert_eq!(
                child_first_byte, original_value,
                "Child stack should be independent from parent stack"
            );
        }

        // Verify stack sizes match
        assert_eq!(
            child_task.stack_size.load(Ordering::SeqCst),
            parent_task.stack_size.load(Ordering::SeqCst),
            "Child and parent should have the same stack size"
        );
    }
2386
2387    #[test_case]
2388    fn test_clone_task_shared_memory() {
2389        // Reset scheduler state before test
2390        let scheduler = crate::sched::scheduler::get_scheduler();
2391        scheduler.reset();
2392
2393        use crate::environment::PAGE_SIZE;
2394        use crate::mem::page::allocate_raw_pages;
2395        use crate::vm::vmem::{MemoryArea, VirtualMemoryMap, VirtualMemoryPermission};
2396
2397        let mut parent_task = super::new_user_task("ParentWithShared".to_string(), 0);
2398        parent_task.init();
2399
2400        // Manually add a shared memory region to test sharing behavior
2401        let shared_vaddr = 0x5000;
2402        let num_pages = 1;
2403        let pages = allocate_raw_pages(num_pages);
2404        let paddr = pages as usize;
2405
2406        let shared_mmap = VirtualMemoryMap {
2407            pmarea: MemoryArea {
2408                start: paddr,
2409                end: paddr + PAGE_SIZE - 1,
2410            },
2411            vmarea: MemoryArea {
2412                start: shared_vaddr,
2413                end: shared_vaddr + PAGE_SIZE - 1,
2414            },
2415            permissions: VirtualMemoryPermission::Read as usize
2416                | VirtualMemoryPermission::Write as usize,
2417            is_shared: true, // This should be shared between parent and child
2418            owner: None,
2419        };
2420
2421        // Add shared memory map to parent
2422        parent_task
2423            .vm_manager
2424            .add_memory_map(shared_mmap.clone())
2425            .unwrap();
2426
2427        // Write test data to shared memory
2428        let test_data: [u8; 8] = [0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22];
2429        unsafe {
2430            let shared_ptr = paddr as *mut u8;
2431            core::ptr::copy_nonoverlapping(test_data.as_ptr(), shared_ptr, test_data.len());
2432        }
2433
2434        // Clone the parent task
2435        let child_task = parent_task.clone_task(CloneFlags::default()).unwrap();
2436
2437        // Find the shared memory map in child
2438        let child_shared_mmap = {
2439            let mut found = None;
2440            child_task.vm_manager.with_memmaps(|mm| {
2441                for mmap in mm.values() {
2442                    if mmap.vmarea.start == shared_vaddr && mmap.is_shared {
2443                        found = Some(mmap.clone());
2444                        break;
2445                    }
2446                }
2447            });
2448            found.expect("Shared memory map not found in child task")
2449        };
2450
2451        // Verify that the physical addresses are the same (shared memory)
2452        assert_eq!(
2453            child_shared_mmap.pmarea.start, shared_mmap.pmarea.start,
2454            "Shared memory should have the same physical address in parent and child"
2455        );
2456
2457        // Verify that the virtual addresses are the same
2458        assert_eq!(child_shared_mmap.vmarea.start, shared_mmap.vmarea.start);
2459        assert_eq!(child_shared_mmap.vmarea.end, shared_mmap.vmarea.end);
2460
2461        // Verify that is_shared flag is preserved
2462        assert!(
2463            child_shared_mmap.is_shared,
2464            "Shared memory should remain marked as shared"
2465        );
2466
2467        // Verify that modifying shared memory from child affects parent
2468        unsafe {
2469            let child_shared_ptr = child_shared_mmap.pmarea.start as *mut u8;
2470            let original_value = *child_shared_ptr;
2471            *child_shared_ptr = 0xFF; // Modify first byte through child reference
2472
2473            let parent_shared_ptr = shared_mmap.pmarea.start as *const u8;
2474            let parent_first_byte = *parent_shared_ptr;
2475
2476            // Parent should see the change made by child (shared memory)
2477            assert_eq!(
2478                parent_first_byte, 0xFF,
2479                "Parent should see changes made through child's shared memory reference"
2480            );
2481
2482            // Restore original value
2483            *child_shared_ptr = original_value;
2484        }
2485
2486        // Verify that the shared data content is accessible from both
2487        unsafe {
2488            let child_ptr = child_shared_mmap.pmarea.start as *const u8;
2489            let parent_ptr = shared_mmap.pmarea.start as *const u8;
2490
2491            // Check that the data content is identical and accessible from both
2492            for i in 0..test_data.len() {
2493                let parent_byte = *parent_ptr.offset(i as isize);
2494                let child_byte = *child_ptr.offset(i as isize);
2495                assert_eq!(
2496                    parent_byte, child_byte,
2497                    "Shared memory data should be identical from both parent and child views"
2498                );
2499            }
2500        }
2501    }
2502
2503    #[test_case]
2504    fn test_clone_task_with_clone_vm_shares_address_space() {
2505        // Reset scheduler state before test
2506        let scheduler = crate::sched::scheduler::get_scheduler();
2507        scheduler.reset();
2508
2509        use crate::environment::PAGE_SIZE;
2510
2511        let mut parent = super::new_user_task("ParentCloneVm".to_string(), 0);
2512        parent.init();
2513
2514        // Allocate one page initially in the parent
2515        let base_vaddr = 0x4000;
2516        parent.allocate_data_pages(base_vaddr, 1).unwrap();
2517        let parent_len_before = parent.vm_manager.memmap_len();
2518
2519        // Clone with CLONE_VM flag (share the address space only)
2520        let mut flags = super::CloneFlags::new();
2521        flags.set(super::CloneFlagsDef::Vm);
2522        let child = parent.clone_task(flags).unwrap();
2523
2524        // CLONE_VM: brk must be shared because heap lives in the shared address space.
2525        assert!(
2526            Arc::ptr_eq(&child.brk, &parent.brk),
2527            "CLONE_VM tasks must share brk"
2528        );
2529
2530        // Indirectly verify that both share the same ASID/address space
2531        assert_eq!(child.vm_manager.get_asid(), parent.vm_manager.get_asid());
2532        assert_eq!(child.vm_manager.memmap_len(), parent_len_before);
2533
2534        // Adding another page in the parent should be immediately visible to the child
2535        parent
2536            .allocate_data_pages(base_vaddr + PAGE_SIZE, 1)
2537            .unwrap();
2538        assert_eq!(
2539            child.vm_manager.memmap_len(),
2540            parent.vm_manager.memmap_len()
2541        );
2542
2543        // Managed pages are per-task; child should not acquire new managed pages
2544        // when sharing VM (physical memory isn't privately managed by the child)
2545        assert!(child.managed_pages.read().len() <= parent.managed_pages.read().len());
2546    }
2547
2548    #[test_case]
2549    fn test_task_namespace_creation() {
2550        // Reset scheduler state before test
2551        let scheduler = crate::sched::scheduler::get_scheduler();
2552        scheduler.reset();
2553
2554        use super::namespace;
2555
2556        // Create task in root namespace
2557        let task = super::new_user_task("TestTask".to_string(), 0);
2558        assert_eq!(task.get_namespace().get_name(), "root");
2559        assert!(task.get_namespace().is_root());
2560
2561        // Add task to scheduler to allocate namespace ID
2562        let task_id = scheduler.add_task(task, 0);
2563
2564        // Verify namespace-local ID was allocated
2565        let ns_id = scheduler
2566            .get_task_by_id(task_id)
2567            .unwrap()
2568            .get_namespace_id();
2569        assert!(ns_id >= 1); // Should start from 1
2570    }
2571
2572    #[test_case]
2573    fn test_task_namespace_inheritance() {
2574        // Reset scheduler state before test
2575        let scheduler = crate::sched::scheduler::get_scheduler();
2576        scheduler.reset();
2577
2578        use super::namespace;
2579
2580        let mut parent = super::new_user_task("Parent".to_string(), 0);
2581        parent.init();
2582
2583        // Clone should inherit parent's namespace
2584        let child = parent.clone_task(CloneFlags::default()).unwrap();
2585
2586        // Both should be in same namespace
2587        assert_eq!(
2588            parent.get_namespace().get_id(),
2589            child.get_namespace().get_id()
2590        );
2591
2592        // Add both to scheduler to allocate namespace IDs
2593        let scheduler = crate::sched::scheduler::get_scheduler();
2594        let parent_id = scheduler.add_task(parent, 0);
2595        let child_id = scheduler.add_task(child, 0);
2596
2597        // But should have different namespace-local IDs
2598        let parent_ns_id = scheduler
2599            .get_task_by_id(parent_id)
2600            .unwrap()
2601            .get_namespace_id();
2602        let child_ns_id = scheduler
2603            .get_task_by_id(child_id)
2604            .unwrap()
2605            .get_namespace_id();
2606        assert_ne!(parent_ns_id, child_ns_id);
2607    }
2608
2609    #[test_case]
2610    fn test_task_namespace_id_allocation() {
2611        // Reset scheduler state before test
2612        let scheduler = crate::sched::scheduler::get_scheduler();
2613        scheduler.reset();
2614
2615        use super::namespace;
2616
2617        // Create custom namespace
2618        let custom_ns = namespace::TaskNamespace::new_child(
2619            namespace::get_root_namespace().clone(),
2620            "test_ns".to_string(),
2621        );
2622
2623        // Create multiple tasks in the same namespace
2624        let mut task1 = super::Task::new_with_namespace(
2625            "Task1".to_string(),
2626            0,
2627            super::TaskType::User,
2628            custom_ns.clone(),
2629        );
2630        let mut task2 = super::Task::new_with_namespace(
2631            "Task2".to_string(),
2632            0,
2633            super::TaskType::User,
2634            custom_ns.clone(),
2635        );
2636        let mut task3 = super::Task::new_with_namespace(
2637            "Task3".to_string(),
2638            0,
2639            super::TaskType::User,
2640            custom_ns.clone(),
2641        );
2642
2643        // Initialize tasks before adding to scheduler
2644        task1.init();
2645        task2.init();
2646        task3.init();
2647
2648        // Add tasks to scheduler to allocate IDs
2649        let scheduler = crate::sched::scheduler::get_scheduler();
2650        let id1 = scheduler.add_task(task1, 0);
2651        let id2 = scheduler.add_task(task2, 0);
2652        let id3 = scheduler.add_task(task3, 0);
2653
2654        // All should have sequential namespace-local IDs
2655        let ns_id1 = scheduler.get_task_by_id(id1).unwrap().get_namespace_id();
2656        let ns_id2 = scheduler.get_task_by_id(id2).unwrap().get_namespace_id();
2657        let ns_id3 = scheduler.get_task_by_id(id3).unwrap().get_namespace_id();
2658        assert_eq!(ns_id1, 1);
2659        assert_eq!(ns_id2, 2);
2660        assert_eq!(ns_id3, 3);
2661
2662        // All should have different global IDs
2663        assert_ne!(id1, id2);
2664        assert_ne!(id2, id3);
2665        assert_ne!(id1, id3);
2666    }
2667
2668    #[test_case]
2669    fn test_namespace_hierarchy() {
2670        use super::namespace;
2671
2672        let root = namespace::get_root_namespace();
2673        let child_ns = namespace::TaskNamespace::new_child(root.clone(), "child".to_string());
2674        let grandchild_ns =
2675            namespace::TaskNamespace::new_child(child_ns.clone(), "grandchild".to_string());
2676
2677        // Verify hierarchy
2678        assert!(root.is_root());
2679        assert!(!child_ns.is_root());
2680        assert!(!grandchild_ns.is_root());
2681
2682        // Verify parent relationships
2683        assert!(child_ns.get_parent().is_some());
2684        assert_eq!(child_ns.get_parent().unwrap().get_id(), root.get_id());
2685        assert_eq!(
2686            grandchild_ns.get_parent().unwrap().get_id(),
2687            child_ns.get_id()
2688        );
2689    }
2690
2691    #[test_case]
2692    fn test_all_abis_share_root_namespace_by_default() {
2693        // Reset scheduler state before test
2694        let scheduler = crate::sched::scheduler::get_scheduler();
2695        scheduler.reset();
2696
2697        use super::namespace;
2698        use alloc::vec::Vec;
2699
2700        // Create tasks using default Task::new (which uses root namespace)
2701        let mut task1 = super::new_user_task("Task1".to_string(), 0);
2702        let mut task2 = super::new_user_task("Task2".to_string(), 0);
2703        let mut task3 = super::new_user_task("Task3".to_string(), 0);
2704
2705        // Initialize tasks before adding to scheduler
2706        task1.init();
2707        task2.init();
2708        task3.init();
2709
2710        // Add tasks to scheduler to allocate namespace IDs
2711        let id1 = scheduler.add_task(task1, 0);
2712        let id2 = scheduler.add_task(task2, 0);
2713        let id3 = scheduler.add_task(task3, 0);
2714
2715        // Verify all tasks have valid IDs after being added to scheduler
2716        assert_ne!(id1, 0, "Task ID should be non-zero after add_task");
2717        assert_ne!(id2, 0, "Task ID should be non-zero after add_task");
2718        assert_ne!(id3, 0, "Task ID should be non-zero after add_task");
2719
2720        // Get namespace IDs to verify (in separate scopes to avoid borrow issues)
2721        let ns_id1 = scheduler.get_task_by_id(id1).unwrap().get_namespace_id();
2722        assert_ne!(ns_id1, 0, "Namespace ID should be non-zero after add_task");
2723
2724        let ns_id2 = scheduler.get_task_by_id(id2).unwrap().get_namespace_id();
2725        assert_ne!(ns_id2, 0, "Namespace ID should be non-zero after add_task");
2726
2727        let ns_id3 = scheduler.get_task_by_id(id3).unwrap().get_namespace_id();
2728        assert_ne!(ns_id3, 0, "Namespace ID should be non-zero after add_task");
2729
2730        // Verify namespace IDs are unique
2731        assert_ne!(ns_id1, ns_id2, "Namespace IDs should be unique");
2732        assert_ne!(ns_id2, ns_id3, "Namespace IDs should be unique");
2733
2734        // Verify all tasks are in root namespace (in separate scopes)
2735        {
2736            let task1 = scheduler.get_task_by_id(id1).unwrap();
2737            assert_eq!(task1.get_namespace().get_name(), "root");
2738        }
2739        {
2740            let task2 = scheduler.get_task_by_id(id2).unwrap();
2741            assert_eq!(task2.get_namespace().get_name(), "root");
2742        }
2743        {
2744            let task3 = scheduler.get_task_by_id(id3).unwrap();
2745            assert_eq!(task3.get_namespace().get_name(), "root");
2746        }
2747
2748        // Verify all tasks share the same namespace instance
2749        let ns1_id = scheduler
2750            .get_task_by_id(id1)
2751            .unwrap()
2752            .get_namespace()
2753            .get_id();
2754        let ns2_id = scheduler
2755            .get_task_by_id(id2)
2756            .unwrap()
2757            .get_namespace()
2758            .get_id();
2759        let ns3_id = scheduler
2760            .get_task_by_id(id3)
2761            .unwrap()
2762            .get_namespace()
2763            .get_id();
2764        assert_eq!(ns1_id, ns2_id, "All tasks should share root namespace");
2765        assert_eq!(ns2_id, ns3_id, "All tasks should share root namespace");
2766    }
2767}