pub mod elf_loader;
pub mod namespace;
pub mod syscall;

extern crate alloc;

use alloc::{
    boxed::Box,
    string::{String, ToString},
    sync::Arc,
    vec::Vec,
};
use core::{cell::UnsafeCell, sync::atomic};
use spin::{Mutex, RwLock};

use crate::abi::{AbiModule, scarlet::ScarletAbi};
use crate::sync::waker::Waker;
use crate::{
    arch::{
        KernelContext, Trapframe, get_cpu, trap::user::arch_switch_to_user_space, vcpu::Vcpu,
        vm::alloc_virtual_address_space,
    },
    environment::{
        DEAFAULT_MAX_TASK_DATA_SIZE, DEAFAULT_MAX_TASK_STACK_SIZE, DEAFAULT_MAX_TASK_TEXT_SIZE,
        KERNEL_VM_STACK_END, PAGE_SIZE, USER_STACK_END,
    },
    fs::VfsManager,
    ipc::{EventContent, event::ProcessControlType},
    mem::page::{Page, allocate_raw_pages, free_boxed_page},
    object::handle::HandleTable,
    sched::scheduler::{Scheduler, get_scheduler},
    timer::{TimerHandler, add_timer, get_tick},
    vm::{
        manager::VirtualMemoryManager,
        user_kernel_vm_init, user_vm_init,
        vmem::{MemoryArea, VirtualMemoryMap, VirtualMemoryRegion},
    },
};
use alloc::collections::BTreeMap;
use core::ops::Range;
use core::sync::atomic::{AtomicI32, AtomicU8, AtomicU32, AtomicUsize, Ordering};
use spin::Once;

/// Wakers for tasks blocked in waitpid-style waits, keyed by task id.
static WAITPID_WAKERS: Once<Mutex<BTreeMap<usize, Waker>>> = Once::new();

/// Wakers for parents blocked waiting on any of their children, keyed by parent id.
static PARENT_WAITPID_WAKERS: Once<Mutex<BTreeMap<usize, Waker>>> = Once::new();

fn init_waitpid_wakers() -> Mutex<BTreeMap<usize, Waker>> {
    Mutex::new(BTreeMap::new())
}

fn init_parent_waitpid_wakers() -> Mutex<BTreeMap<usize, Waker>> {
    Mutex::new(BTreeMap::new())
}

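/// Returns the waker used to block on task `task_id`, creating it (and
/// leaking a name string for it) on first use.
///
/// A minimal usage sketch, assuming a task id 42 that is registered with the
/// scheduler (illustrative only):
///
/// ```rust,ignore
/// let waker = get_waitpid_waker(42);
/// // A waiter blocks on it (e.g. from Task::sleep or a waitpid path) ...
/// // ... and whoever makes progress on task 42 later wakes everyone:
/// wake_task_waiters(42);
/// ```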
pub fn get_waitpid_waker(task_id: usize) -> &'static Waker {
    let wakers_mutex = WAITPID_WAKERS.call_once(init_waitpid_wakers);
    let mut wakers = wakers_mutex.lock();
    if !wakers.contains_key(&task_id) {
        let waker_name = alloc::format!("task_{}", task_id);
        let static_name = Box::leak(waker_name.into_boxed_str());
        wakers.insert(task_id, Waker::new_interruptible(static_name));
    }
    // SAFETY: the entry stays in the map until cleanup_task_waker() is called,
    // so the returned reference remains valid while waiters use it.
    unsafe {
        let waker_ptr = wakers.get(&task_id).unwrap() as *const Waker;
        &*waker_ptr
    }
}

pub fn get_parent_waitpid_waker(parent_id: usize) -> &'static Waker {
    let wakers_mutex = PARENT_WAITPID_WAKERS.call_once(init_parent_waitpid_wakers);
    let mut wakers = wakers_mutex.lock();

    if !wakers.contains_key(&parent_id) {
        let waker_name = alloc::format!("parent_waker_{}", parent_id);
        let static_name = alloc::boxed::Box::leak(waker_name.into_boxed_str());
        wakers.insert(parent_id, Waker::new_interruptible(static_name));
    }

    // SAFETY: as above, entries are only removed via cleanup_parent_waker().
    unsafe {
        let waker_ptr = wakers.get(&parent_id).unwrap() as *const Waker;
        &*waker_ptr
    }
}

/// Wakes every task blocked on `task_id`'s waker, if one exists.
pub fn wake_task_waiters(task_id: usize) {
    let wakers_mutex = WAITPID_WAKERS.call_once(init_waitpid_wakers);
    let wakers = wakers_mutex.lock();
    if let Some(waker) = wakers.get(&task_id) {
        waker.wake_all();
    }
}

/// Wakes every task blocked on `parent_id`'s parent waker, if one exists.
pub fn wake_parent_waiters(parent_id: usize) {
    let wakers_mutex = PARENT_WAITPID_WAKERS.call_once(init_parent_waitpid_wakers);
    let wakers = wakers_mutex.lock();
    if let Some(waker) = wakers.get(&parent_id) {
        waker.wake_all();
    }
}

/// Drops the waker registered for `task_id`, typically after the task is reaped.
pub fn cleanup_task_waker(task_id: usize) {
    let wakers_mutex = WAITPID_WAKERS.call_once(init_waitpid_wakers);
    let mut wakers = wakers_mutex.lock();
    wakers.remove(&task_id);
}

/// Drops the waker registered for `parent_id`.
pub fn cleanup_parent_waker(parent_id: usize) {
    let wakers_mutex = PARENT_WAITPID_WAKERS.call_once(init_parent_waitpid_wakers);
    let mut wakers = wakers_mutex.lock();
    wakers.remove(&parent_id);
}

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum BlockedType {
    Interruptible,
    Uninterruptible,
}

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum TaskState {
    NotInitialized,
    Ready,
    Running,
    Blocked(BlockedType),
    Zombie,
    Terminated,
}

impl TaskState {
    pub const fn to_u8(self) -> u8 {
        match self {
            TaskState::NotInitialized => 0,
            TaskState::Ready => 1,
            TaskState::Running => 2,
            TaskState::Blocked(bt) => match bt {
                BlockedType::Interruptible => 3,
                BlockedType::Uninterruptible => 4,
            },
            TaskState::Zombie => 5,
            TaskState::Terminated => 6,
        }
    }

    pub const fn from_u8(val: u8) -> Option<Self> {
        match val {
            0 => Some(TaskState::NotInitialized),
            1 => Some(TaskState::Ready),
            2 => Some(TaskState::Running),
            3 => Some(TaskState::Blocked(BlockedType::Interruptible)),
            4 => Some(TaskState::Blocked(BlockedType::Uninterruptible)),
            5 => Some(TaskState::Zombie),
            6 => Some(TaskState::Terminated),
            _ => None,
        }
    }
}

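/// Lock-free `TaskState` cell backed by an `AtomicU8`.
///
/// A sketch of the intended compare-and-swap usage (types and orderings are
/// from this module; the surrounding scenario is illustrative):
///
/// ```rust,ignore
/// let state = AtomicTaskState::new(TaskState::Ready);
/// // Claim the Ready -> Running transition exactly once, even when raced.
/// if state
///     .compare_exchange(TaskState::Ready, TaskState::Running, Ordering::SeqCst, Ordering::SeqCst)
///     .is_ok()
/// {
///     // This CPU owns the transition.
/// }
/// ```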
pub struct AtomicTaskState {
    inner: AtomicU8,
}

impl AtomicTaskState {
    pub const fn new(state: TaskState) -> Self {
        Self {
            inner: AtomicU8::new(state.to_u8()),
        }
    }

    pub fn load(&self, ordering: Ordering) -> TaskState {
        TaskState::from_u8(self.inner.load(ordering)).unwrap_or(TaskState::NotInitialized)
    }

    pub fn store(&self, state: TaskState, ordering: Ordering) {
        self.inner.store(state.to_u8(), ordering);
    }

    pub fn compare_exchange(
        &self,
        current: TaskState,
        new: TaskState,
        success: Ordering,
        failure: Ordering,
    ) -> Result<TaskState, TaskState> {
        match self
            .inner
            .compare_exchange(current.to_u8(), new.to_u8(), success, failure)
        {
            Ok(_) => Ok(new),
            Err(actual) => Err(TaskState::from_u8(actual).unwrap_or(TaskState::NotInitialized)),
        }
    }
}

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum TaskType {
    Kernel,
    User,
}

/// Associates a virtual address range with the ABI module that services it.
pub struct AbiZone {
    pub range: Range<usize>,
    pub abi: Box<dyn AbiModule + Send + Sync>,
}

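/// A cell for data that is only ever accessed from its owning task's context.
///
/// `Sync` is asserted manually below; the accessors are `unsafe` because the
/// caller must guarantee single-context access. A minimal sketch (the
/// invariant, not the API surface, is the interesting part):
///
/// ```rust,ignore
/// let counter = TaskLocal::new(0usize);
/// // SAFETY: only ever touched from the owning task's context.
/// unsafe { *counter.get_mut() += 1 };
/// assert_eq!(unsafe { *counter.get() }, 1);
/// ```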
pub struct TaskLocal<T> {
    inner: UnsafeCell<T>,
}

// SAFETY: callers promise that a TaskLocal is only accessed from its owning
// task's context, so no cross-thread aliasing occurs.
unsafe impl<T> Sync for TaskLocal<T> {}

impl<T> TaskLocal<T> {
    pub fn new(value: T) -> Self {
        Self {
            inner: UnsafeCell::new(value),
        }
    }

    /// # Safety
    /// Must only be called from the owning task's context.
    #[inline]
    pub unsafe fn get(&self) -> &T {
        &*self.inner.get()
    }

    /// # Safety
    /// Must only be called from the owning task's context, with no other
    /// outstanding reference obtained through `get`/`get_mut`.
    #[inline]
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn get_mut(&self) -> &mut T {
        &mut *self.inner.get()
    }
}

pub struct Task {
    id: usize,
    namespace_id: atomic::AtomicUsize,
    namespace: RwLock<Arc<namespace::TaskNamespace>>,
    pub task_type: TaskType,
    pub entry: usize,
    parent_id: Option<usize>,
    tgid: usize,
    pub max_stack_size: usize,
    pub max_data_size: usize,
    pub max_text_size: usize,

    pub state: AtomicTaskState,
    pub priority: AtomicU32,
    pub time_slice: AtomicU32,
    pub stack_size: AtomicUsize,
    pub data_size: AtomicUsize,
    pub text_size: AtomicUsize,
    pub exit_status: AtomicI32,
    pub brk: Arc<AtomicUsize>,

    pub name: RwLock<String>,
    pub children: RwLock<Vec<usize>>,
    pub managed_pages: RwLock<Vec<ManagedPage>>,
    pub vfs: RwLock<Option<Arc<VfsManager>>>,
    pub software_timers_handlers: RwLock<Vec<Arc<dyn TimerHandler>>>,

    pub vcpu: Mutex<Vcpu>,
    pub kernel_context: Mutex<KernelContext>,
    pub vm_manager: VirtualMemoryManager,
    pub default_abi: TaskLocal<Option<Box<dyn AbiModule + Send + Sync>>>,
    pub abi_zones: TaskLocal<BTreeMap<usize, AbiZone>>,
    pub handle_table: HandleTable,
    pub sleep_waker: Waker,
    pub kernel_stack_window_base: Mutex<Option<(usize, usize)>>,

    pub event_queue: Mutex<crate::ipc::event::TaskEventQueue>,
    pub events_enabled: Mutex<bool>,
}

#[derive(Debug, Clone)]
pub struct ManagedPage {
    pub vaddr: usize,
    pub page: Box<Page>,
}

pub enum CloneFlagsDef {
    Vm = 0b00000001,
    Fs = 0b00000010,
    Files = 0b00000100,
    Thread = 0b00001000,
    SetTls = 0b00010000,
}

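/// Bit-flag set controlling what `Task::clone_task` shares with the child.
///
/// A small sketch of composing flags (flag names are from `CloneFlagsDef`
/// above):
///
/// ```rust,ignore
/// let mut flags = CloneFlags::new();
/// flags.set(CloneFlagsDef::Vm);     // share the address space
/// flags.set(CloneFlagsDef::Files);  // share the handle table
/// assert!(flags.is_set(CloneFlagsDef::Vm));
/// assert!(!flags.is_set(CloneFlagsDef::Thread));
/// ```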
#[derive(Debug, Clone, Copy)]
pub struct CloneFlags {
    raw: u64,
}

impl CloneFlags {
    pub fn new() -> Self {
        CloneFlags { raw: 0 }
    }

    pub fn from_raw(raw: u64) -> Self {
        CloneFlags { raw }
    }

    pub fn set(&mut self, flag: CloneFlagsDef) {
        self.raw |= flag as u64;
    }

    pub fn clear(&mut self, flag: CloneFlagsDef) {
        self.raw &= !(flag as u64);
    }

    pub fn is_set(&self, flag: CloneFlagsDef) -> bool {
        (self.raw & (flag as u64)) != 0
    }

    pub fn get_raw(&self) -> u64 {
        self.raw
    }
}

impl Default for CloneFlags {
    fn default() -> Self {
        // Fork-like default: share filesystem state and file handles.
        let raw = CloneFlagsDef::Fs as u64 | CloneFlagsDef::Files as u64;
        CloneFlags { raw }
    }
}

impl Task {
    pub fn new(name: String, priority: u32, task_type: TaskType) -> Self {
        Self::new_with_namespace(
            name,
            priority,
            task_type,
            namespace::get_root_namespace().clone(),
        )
    }

    pub fn new_with_namespace(
        name: String,
        priority: u32,
        task_type: TaskType,
        ns: Arc<namespace::TaskNamespace>,
    ) -> Self {
        Task {
            id: 0,
            namespace_id: AtomicUsize::new(0),
            namespace: RwLock::new(ns),
            task_type,
            entry: 0,
            parent_id: None,
            tgid: 0,
            max_stack_size: DEAFAULT_MAX_TASK_STACK_SIZE,
            max_data_size: DEAFAULT_MAX_TASK_DATA_SIZE,
            max_text_size: DEAFAULT_MAX_TASK_TEXT_SIZE,
            state: AtomicTaskState::new(TaskState::NotInitialized),
            priority: AtomicU32::new(priority),
            time_slice: AtomicU32::new(10),
            stack_size: AtomicUsize::new(0),
            data_size: AtomicUsize::new(0),
            text_size: AtomicUsize::new(0),
            // i32::MIN is the "no exit status yet" sentinel (see get_exit_status).
            exit_status: AtomicI32::new(i32::MIN),
            // usize::MAX is the "brk not initialized" sentinel (see get_brk).
            brk: Arc::new(AtomicUsize::new(usize::MAX)),
            name: RwLock::new(name),
            children: RwLock::new(Vec::new()),
            managed_pages: RwLock::new(Vec::new()),
            vfs: RwLock::new(None),
            software_timers_handlers: RwLock::new(Vec::new()),
            vcpu: Mutex::new(Vcpu::new(match task_type {
                TaskType::Kernel => crate::arch::vcpu::Mode::Kernel,
                TaskType::User => crate::arch::vcpu::Mode::User,
            })),
            kernel_context: Mutex::new(KernelContext::new()),
            vm_manager: VirtualMemoryManager::new(),
            default_abi: TaskLocal::new(Some(Box::new(ScarletAbi::default()))),
            abi_zones: TaskLocal::new(BTreeMap::new()),
            handle_table: HandleTable::new(),
            sleep_waker: Waker::new_interruptible("task_sleep_waker"),
            kernel_stack_window_base: Mutex::new(None),
            event_queue: Mutex::new(crate::ipc::event::TaskEventQueue::new()),
            events_enabled: Mutex::new(true),
        }
    }

    pub fn init(&self) {
        *self.kernel_context.lock() = KernelContext::new();

        match self.task_type {
            TaskType::Kernel => {
                user_kernel_vm_init(self);
                self.vcpu.lock().set_sp(KERNEL_VM_STACK_END + 1);
                self.vcpu.lock().set_pc(self.entry as u64);
            }
            TaskType::User => {
                user_vm_init(self);
                self.vcpu.lock().set_sp(USER_STACK_END);
            }
        }

        self.state.store(TaskState::Ready, Ordering::SeqCst);
        self.time_slice.store(1, Ordering::SeqCst);
    }

    pub fn get_id(&self) -> usize {
        assert!(
            self.id != 0,
            "Task ID is 0 - task may not have been added to scheduler yet"
        );
        self.id
    }

    pub fn set_id(&mut self, id: usize) {
        self.id = id;
        // A task is its own thread-group leader until set_tgid says otherwise.
        if self.tgid == 0 {
            self.tgid = id;
        }
    }

    pub fn set_namespace_id(&self, namespace_id: usize) {
        self.namespace_id
            .store(namespace_id, atomic::Ordering::SeqCst);
    }

    pub fn get_namespace_id(&self) -> usize {
        let namespace_id = self.namespace_id.load(atomic::Ordering::SeqCst);
        assert!(
            namespace_id != 0,
            "Task namespace_id is 0 - task may not have been added to scheduler yet"
        );
        namespace_id
    }

    pub fn get_namespace(&self) -> Arc<namespace::TaskNamespace> {
        self.namespace.read().clone()
    }

    pub fn set_namespace(&self, ns: Arc<namespace::TaskNamespace>) {
        *self.namespace.write() = ns;
        self.namespace_id.store(
            self.namespace.write().allocate_task_id_for(self.id),
            atomic::Ordering::SeqCst,
        );
    }

    pub fn get_tgid(&self) -> usize {
        self.tgid
    }

    pub fn set_tgid(&mut self, tgid: usize) {
        self.tgid = tgid;
    }

    pub fn set_state(&self, state: TaskState) {
        self.state.store(state, Ordering::SeqCst);
    }

    pub fn get_state(&self) -> TaskState {
        self.state.load(Ordering::SeqCst)
    }

    pub fn get_size(&self) -> usize {
        self.stack_size.load(Ordering::SeqCst)
            + self.text_size.load(Ordering::SeqCst)
            + self.data_size.load(Ordering::SeqCst)
    }

    pub fn get_brk(&self) -> usize {
        let brk = self.brk.load(Ordering::SeqCst);
        // Before the first set_brk, the break sits at the end of text + data.
        if brk == usize::MAX {
            self.text_size.load(Ordering::SeqCst) + self.data_size.load(Ordering::SeqCst)
        } else {
            brk
        }
    }

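    /// Moves the program break, allocating or freeing whole data pages as the
    /// page-aligned break crosses page boundaries.
    ///
    /// A sketch of growing the heap by one page (error handling elided;
    /// illustrative only):
    ///
    /// ```rust,ignore
    /// let old_brk = task.get_brk();
    /// task.set_brk(old_brk + PAGE_SIZE)?; // may allocate one data page
    /// assert_eq!(task.get_brk(), old_brk + PAGE_SIZE);
    /// ```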
    pub fn set_brk(&self, brk: usize) -> Result<(), &'static str> {
        let prev_brk = self.get_brk();
        if brk < prev_brk {
            // Shrinking: free the whole pages between the two aligned breaks.
            let prev_addr = (prev_brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let addr = (brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let num_of_pages = (prev_addr - addr) / PAGE_SIZE;
            self.free_data_pages(addr, num_of_pages);
        } else if brk > prev_brk {
            // Growing: map any new whole pages the break now covers.
            let prev_addr = (prev_brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let addr = (brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let num_of_pages = (addr - prev_addr) / PAGE_SIZE;

            if num_of_pages > 0 {
                match self.vm_manager.search_memory_map(prev_addr) {
                    Some(_existing_map) => {
                        // The target range is already mapped; nothing to allocate.
                    }
                    None => {
                        match self.allocate_data_pages(prev_addr, num_of_pages) {
                            Ok(_) => {}
                            Err(_e) => {
                                return Err("Failed to allocate pages");
                            }
                        }
                    }
                }
            }
        }
        self.brk.store(brk, Ordering::SeqCst);
        Ok(())
    }

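    /// Allocates `num_of_pages` physical pages, maps them at the page-aligned
    /// `vaddr`, and registers each page as task-managed so it is freed with
    /// the task.
    ///
    /// A sketch of a raw mapping with data-region permissions (the typed
    /// helpers below, e.g. `allocate_data_pages`, are the usual entry points):
    ///
    /// ```rust,ignore
    /// let perms = VirtualMemoryRegion::Data.default_permissions();
    /// let mmap = task.allocate_pages(0x4000, 2, perms)?;
    /// assert_eq!(mmap.vmarea.start, 0x4000);
    /// ```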
    pub fn allocate_pages(
        &self,
        vaddr: usize,
        num_of_pages: usize,
        permissions: usize,
    ) -> Result<VirtualMemoryMap, &'static str> {
        if vaddr % PAGE_SIZE != 0 {
            return Err("Address is not page aligned");
        }

        let pages = allocate_raw_pages(num_of_pages);
        let size = num_of_pages * PAGE_SIZE;
        let paddr = pages as usize;
        let mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: paddr,
                end: paddr + size - 1,
            },
            vmarea: MemoryArea {
                start: vaddr,
                end: vaddr + size - 1,
            },
            permissions,
            is_shared: false,
            owner: None,
        };
        self.vm_manager
            .add_memory_map(mmap.clone())
            .map_err(|e| panic!("Failed to add memory map: {}", e))?;

        for i in 0..num_of_pages {
            let page = unsafe { Box::from_raw(pages.wrapping_add(i)) };
            let vaddr = mmap.vmarea.start + i * PAGE_SIZE;
            self.add_managed_page(ManagedPage { vaddr, page });
        }

        Ok(mmap)
    }

    pub fn free_pages(&self, vaddr: usize, num_of_pages: usize) {
        let page = vaddr / PAGE_SIZE;
        for p in 0..num_of_pages {
            let vaddr = (page + p) * PAGE_SIZE;
            match self.vm_manager.remove_memory_map_by_addr(vaddr) {
                Some(mmap) => {
                    // If the freed range starts inside the map, keep the head.
                    if p == 0 && mmap.vmarea.start < vaddr {
                        let size = vaddr - mmap.vmarea.start;
                        let paddr = mmap.pmarea.start;
                        let mmap1 = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + size - 1,
                            },
                            vmarea: MemoryArea {
                                start: mmap.vmarea.start,
                                end: vaddr - 1,
                            },
                            permissions: mmap.permissions,
                            is_shared: mmap.is_shared,
                            owner: mmap.owner.clone(),
                        };
                        self.vm_manager
                            .add_memory_map(mmap1)
                            .map_err(|e| panic!("Failed to add memory map: {}", e))
                            .unwrap();
                    }
                    // If the freed range ends inside the map, keep the tail.
                    if p == num_of_pages - 1 && mmap.vmarea.end > vaddr + PAGE_SIZE - 1 {
                        let size = mmap.vmarea.end - (vaddr + PAGE_SIZE) + 1;
                        let paddr = mmap.pmarea.start + (vaddr + PAGE_SIZE - mmap.vmarea.start);
                        let mmap2 = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + size - 1,
                            },
                            vmarea: MemoryArea {
                                start: vaddr + PAGE_SIZE,
                                end: mmap.vmarea.end,
                            },
                            permissions: mmap.permissions,
                            is_shared: mmap.is_shared,
                            owner: mmap.owner.clone(),
                        };
                        self.vm_manager
                            .add_memory_map(mmap2)
                            .map_err(|e| panic!("Failed to add memory map: {}", e))
                            .unwrap();
                    }
                    // Return the backing page to the allocator if we own it.
                    if let Some(free_page) = self.remove_managed_page(vaddr) {
                        free_boxed_page(free_page.page);
                    }
                }
                None => {}
            }
        }
        let asid = self.vm_manager.get_asid();
        let root_pagetable = self.vm_manager.get_root_page_table().unwrap();
        for p in 0..num_of_pages {
            let vaddr = (page + p) * PAGE_SIZE;
            root_pagetable.unmap(asid, vaddr);
        }
    }

    pub fn allocate_text_pages(
        &self,
        vaddr: usize,
        num_of_pages: usize,
    ) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Text.default_permissions();
        let res = self.allocate_pages(vaddr, num_of_pages, permissions);
        if res.is_ok() {
            self.text_size
                .fetch_add(num_of_pages * PAGE_SIZE, Ordering::SeqCst);
        }
        res
    }

    pub fn free_text_pages(&self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.text_size
            .fetch_sub(num_of_pages * PAGE_SIZE, Ordering::SeqCst);
    }

    pub fn allocate_stack_pages(
        &self,
        vaddr: usize,
        num_of_pages: usize,
    ) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Stack.default_permissions();
        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
        self.stack_size
            .fetch_add(num_of_pages * PAGE_SIZE, Ordering::SeqCst);
        Ok(res)
    }

    pub fn free_stack_pages(&self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.stack_size
            .fetch_sub(num_of_pages * PAGE_SIZE, Ordering::SeqCst);
    }

    pub fn allocate_data_pages(
        &self,
        vaddr: usize,
        num_of_pages: usize,
    ) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Data.default_permissions();
        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
        self.data_size
            .fetch_add(num_of_pages * PAGE_SIZE, Ordering::SeqCst);
        Ok(res)
    }

    pub fn free_data_pages(&self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.data_size
            .fetch_sub(num_of_pages * PAGE_SIZE, Ordering::SeqCst);
    }

    pub fn allocate_guard_pages(
        &self,
        vaddr: usize,
        num_of_pages: usize,
    ) -> Result<VirtualMemoryMap, &'static str> {
        // Guard pages have no physical backing; the map is returned to the
        // caller but not registered with the VM manager here.
        let permissions = VirtualMemoryRegion::Guard.default_permissions();
        let mmap = VirtualMemoryMap {
            pmarea: MemoryArea { start: 0, end: 0 },
            vmarea: MemoryArea {
                start: vaddr,
                end: vaddr + num_of_pages * PAGE_SIZE - 1,
            },
            permissions,
            is_shared: VirtualMemoryRegion::Guard.is_shareable(),
            owner: None,
        };
        Ok(mmap)
    }

    pub fn add_managed_page(&self, pages: ManagedPage) {
        self.managed_pages.write().push(pages);
    }

    fn get_managed_page(&self, vaddr: usize) -> Option<ManagedPage> {
        let pages = self.managed_pages.read();
        for page in pages.iter() {
            if page.vaddr == vaddr {
                return Some(ManagedPage {
                    vaddr: page.vaddr,
                    page: page.page.clone(),
                });
            }
        }
        None
    }

    pub fn remove_managed_page(&self, vaddr: usize) -> Option<crate::task::ManagedPage> {
        let mut pages = self.managed_pages.write();
        for i in 0..pages.len() {
            if pages[i].vaddr == vaddr {
                let page = pages.remove(i);
                return Some(page);
            }
        }
        None
    }

    pub fn set_entry_point(&self, entry: usize) {
        self.vcpu.lock().set_pc(entry as u64);
    }

    pub fn get_parent_id(&self) -> Option<usize> {
        self.parent_id
    }

    pub fn set_parent_id(&mut self, parent_id: usize) {
        self.parent_id = Some(parent_id);
    }

    pub fn add_child(&self, child_id: usize) {
        let mut children = self.children.write();
        if !children.contains(&child_id) {
            children.push(child_id);
        }
    }

    pub fn remove_child(&self, child_id: usize) -> bool {
        let mut children = self.children.write();
        if let Some(pos) = children.iter().position(|&id| id == child_id) {
            children.remove(pos);
            true
        } else {
            false
        }
    }

    pub fn get_children(&self) -> Vec<usize> {
        self.children.read().clone()
    }

    pub fn set_exit_status(&self, status: i32) {
        self.exit_status.store(status, Ordering::SeqCst);
    }

    pub fn get_exit_status(&self) -> Option<i32> {
        let status = self.exit_status.load(Ordering::SeqCst);
        if status == i32::MIN {
            None
        } else {
            Some(status)
        }
    }

    pub fn with_resolve_abi_mut<R, F>(&self, addr: usize, f: F) -> R
    where
        F: FnOnce(&mut (dyn AbiModule + Send + Sync)) -> R,
    {
        let abi_zones = unsafe { self.abi_zones.get_mut() };
        // Find the nearest zone starting at or below `addr` and check that it
        // actually covers the address; otherwise fall back to the default ABI.
        if let Some((_start, zone)) = abi_zones.range_mut(..=addr).next_back() {
            if zone.range.contains(&addr) {
                return f(zone.abi.as_mut());
            }
        }
        let abi = unsafe { self.default_abi.get_mut() };
        f(abi.as_deref_mut().expect("default_abi not set"))
    }

    pub fn with_default_abi<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&(dyn AbiModule + Send + Sync)) -> R,
    {
        let abi = unsafe { self.default_abi.get() };
        f(abi.as_deref().expect("default_abi not set"))
    }

    pub fn with_default_abi_mut<R, F>(&self, f: F) -> R
    where
        F: FnOnce(&mut (dyn AbiModule + Send + Sync), &Task) -> R,
    {
        let abi = unsafe { self.default_abi.get_mut() };
        let abi_ref = abi.as_deref_mut().expect("default_abi not set");
        f(abi_ref, self)
    }

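    /// Creates a child task from this task, honoring `CloneFlags`: with
    /// `Vm` set, the address space (and `brk`) is shared; otherwise private
    /// mappings are deep-copied page by page, while shared mappings always
    /// alias the same physical memory.
    ///
    /// A fork-like sketch mirroring how the tests wire the result up
    /// (`parent_id` is assumed to come from an earlier `add_task`):
    ///
    /// ```rust,ignore
    /// let child = parent.clone_task(CloneFlags::default())?;
    /// let child_id = get_scheduler().add_task(child, 0);
    /// get_scheduler().get_task_by_id(child_id).unwrap().set_parent_id(parent_id);
    /// get_scheduler().get_task_by_id(parent_id).unwrap().add_child(child_id);
    /// ```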
    pub fn clone_task(&self, flags: CloneFlags) -> Result<Task, &'static str> {
        let mut child = Task::new_with_namespace(
            self.name.read().clone(),
            self.priority.load(Ordering::SeqCst),
            self.task_type,
            self.namespace.read().clone(),
        );

        match self.task_type {
            TaskType::Kernel => {
                child.init();
            }
            TaskType::User => {
                if !flags.is_set(CloneFlagsDef::Vm) {
                    // Private address space: give the child its own ASID.
                    let asid = alloc_virtual_address_space();
                    child.vm_manager.set_asid(asid);
                } else {
                    child.vm_manager = self.vm_manager.clone();
                }
            }
        }

        if !flags.is_set(CloneFlagsDef::Vm) {
            self.vm_manager.memmaps_iter_with(|iter| {
                for mmap in iter {
                    let num_pages =
                        (mmap.vmarea.end - mmap.vmarea.start + 1 + PAGE_SIZE - 1) / PAGE_SIZE;
                    if num_pages == 0 {
                        continue;
                    }

                    let vaddr = mmap.vmarea.start;
                    if mmap.is_shared {
                        // Shared mappings alias the parent's physical pages.
                        let shared_mmap = VirtualMemoryMap {
                            pmarea: mmap.pmarea,
                            vmarea: mmap.vmarea,
                            permissions: mmap.permissions,
                            is_shared: true,
                            owner: mmap.owner.clone(),
                        };
                        child
                            .vm_manager
                            .add_memory_map(shared_mmap.clone())
                            .map_err(|_| "Failed to add shared memory map to child task")?;

                        // The trampoline page at the top of the address space
                        // must be mapped eagerly in the child's page table.
                        if mmap.vmarea.start == 0xffff_ffff_ffff_f000 {
                            if let Some(root_pagetable) = child.vm_manager.get_root_page_table() {
                                root_pagetable
                                    .map_memory_area(
                                        child.vm_manager.get_asid(),
                                        shared_mmap,
                                        true,
                                        true,
                                    )
                                    .map_err(|_| "Failed to map trampoline page")?;
                            }
                        }
                    } else {
                        // Private mappings get fresh pages, copied byte for byte.
                        let permissions = mmap.permissions;
                        let pages = allocate_raw_pages(num_pages);
                        let size = num_pages * PAGE_SIZE;
                        let paddr = pages as usize;
                        let new_mmap = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + (size - 1),
                            },
                            vmarea: MemoryArea {
                                start: vaddr,
                                end: vaddr + (size - 1),
                            },
                            permissions,
                            is_shared: false,
                            owner: mmap.owner.clone(),
                        };

                        for i in 0..num_pages {
                            let src_page_addr = mmap.pmarea.start + i * PAGE_SIZE;
                            let dst_page_addr = new_mmap.pmarea.start + i * PAGE_SIZE;
                            unsafe {
                                core::ptr::copy_nonoverlapping(
                                    src_page_addr as *const u8,
                                    dst_page_addr as *mut u8,
                                    PAGE_SIZE,
                                );
                            }
                            child.add_managed_page(ManagedPage {
                                vaddr: new_mmap.vmarea.start + i * PAGE_SIZE,
                                page: unsafe { Box::from_raw(pages.wrapping_add(i)) },
                            });
                        }

                        child
                            .vm_manager
                            .add_memory_map(new_mmap)
                            .map_err(|_| "Failed to add memory map to child task")?;
                    }
                }
                Ok::<(), &'static str>(())
            })?;
        }

        self.vcpu.lock().clone_to(&mut child.vcpu.lock());

        unsafe {
            *child.default_abi.get_mut() = Some(
                self.default_abi
                    .get()
                    .as_ref()
                    .expect("default_abi not set")
                    .clone_boxed(),
            );
            for (start, zone) in self.abi_zones.get().iter() {
                let new_zone = AbiZone {
                    range: zone.range.clone(),
                    abi: zone.abi.clone_boxed(),
                };
                child.abi_zones.get_mut().insert(*start, new_zone);
            }
            if let Some(abi_boxed) = child.default_abi.get_mut().as_mut() {
                let _ = abi_boxed.on_task_cloned(self, &child, flags);
            }
        }

        child
            .stack_size
            .store(self.stack_size.load(Ordering::SeqCst), Ordering::SeqCst);
        child
            .data_size
            .store(self.data_size.load(Ordering::SeqCst), Ordering::SeqCst);
        child
            .text_size
            .store(self.text_size.load(Ordering::SeqCst), Ordering::SeqCst);
        child.max_stack_size = self.max_stack_size;
        child.max_data_size = self.max_data_size;
        child.max_text_size = self.max_text_size;
        if flags.is_set(CloneFlagsDef::Vm) {
            // Shared address space: the break must be shared too.
            child.brk = self.brk.clone();
        } else {
            let parent_brk = self.brk.load(Ordering::SeqCst);
            child.brk = Arc::new(AtomicUsize::new(parent_brk));
        }

        child
            .time_slice
            .store(self.time_slice.load(Ordering::SeqCst), Ordering::SeqCst);
        child.entry = self.entry;

        if flags.is_set(CloneFlagsDef::Files) {
            child.handle_table = self.handle_table.clone();
        } else {
            child.handle_table = self.handle_table.deep_clone();
        }

        if flags.is_set(CloneFlagsDef::Fs) {
            if let Some(vfs) = self.vfs.read().clone() {
                *child.vfs.write() = Some(vfs.clone());
            } else {
                *child.vfs.write() = None;
            }
        }

        if child.get_kernel_stack_window_base().is_none() {
            crate::vm::setup_trampoline_for_task_kstack_window(&mut child)?;
        }
        child
            .state
            .store(self.state.load(Ordering::SeqCst), Ordering::SeqCst);

        if flags.is_set(CloneFlagsDef::Thread) {
            child.tgid = self.tgid;
        }

        Ok(child)
    }

    pub fn exit(&self, status: i32) {
        if self.handle_table.is_sole_owner() {
            self.handle_table.close_all();
        }
        self.with_default_abi_mut(|abi, task| abi.on_task_exit(task));

        match self.parent_id {
            Some(parent_id) => {
                if get_scheduler().get_task_by_id(parent_id).is_none() {
                    // Parent already gone: no one will reap us, terminate directly.
                    self.state.store(TaskState::Terminated, Ordering::SeqCst);
                    return;
                }
                self.set_exit_status(status);
                self.state.store(TaskState::Zombie, Ordering::SeqCst);
            }
            None => {
                self.state.store(TaskState::Terminated, Ordering::SeqCst);
            }
        }

        // Only reschedule if we are exiting the currently running task.
        if mytask().is_none() || mytask().unwrap().get_id() != self.id {
            return;
        }

        if let Some(current_task) = mytask() {
            get_scheduler().schedule(current_task.get_trapframe());
        }
    }

    pub fn exit_group(&self, status: i32) {
        let tgid = self.tgid;
        let my_id = self.id;

        let scheduler = get_scheduler();
        let all_task_ids = scheduler.get_all_task_ids();

        for task_id in all_task_ids {
            if task_id == my_id {
                continue;
            }

            if let Some(task) = scheduler.get_task_by_id(task_id) {
                if task.get_tgid() == tgid {
                    crate::println!(
                        "[exit_group] Task {} terminating sibling task {} (TGID={})",
                        my_id,
                        task_id,
                        tgid
                    );
                    // Mutate the sibling in place to mark it terminated.
                    let task_ptr = task as *const Task as *mut Task;
                    unsafe {
                        (*task_ptr)
                            .state
                            .store(TaskState::Terminated, Ordering::SeqCst);
                        (*task_ptr).exit_status.store(status, Ordering::SeqCst);
                        (*task_ptr).handle_table.close_all();
                    }
                }
            }
        }

        self.exit(status);
    }

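    /// Reaps a zombie child: returns its exit status and marks it
    /// `Terminated`, or reports why it cannot be reaped yet.
    ///
    /// A non-blocking wait sketch (ids are illustrative):
    ///
    /// ```rust,ignore
    /// match parent.wait(child_id) {
    ///     Ok(status) => { /* child reaped; status available */ }
    ///     Err(WaitError::ChildNotExited(_)) => { /* still running; retry later */ }
    ///     Err(_) => { /* no such child, or child task not found */ }
    /// }
    /// ```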
    pub fn wait(&self, child_id: usize) -> Result<i32, WaitError> {
        if !self.children.read().contains(&child_id) {
            crate::println!("[Task {}] wait: No such child task: {}", self.id, child_id);
            return Err(WaitError::NoSuchChild("No such child task".to_string()));
        }

        if let Some(child_task) = get_scheduler().get_task_by_id(child_id) {
            if child_task.get_state() == TaskState::Zombie {
                let status = child_task.get_exit_status().unwrap_or(-1);
                child_task.set_state(TaskState::Terminated);
                self.remove_child(child_id);
                Ok(status)
            } else {
                Err(WaitError::ChildNotExited(
                    "Child has not exited or is not a zombie".to_string(),
                ))
            }
        } else {
            Err(WaitError::ChildTaskNotFound(
                "Child task not found".to_string(),
            ))
        }
    }

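    /// Blocks the current task for `ticks` timer ticks by arming a one-shot
    /// timer whose handler wakes this task's waitpid waker.
    ///
    /// A sketch from a syscall path, assuming `trapframe` is the current trap
    /// frame (illustrative):
    ///
    /// ```rust,ignore
    /// let task = mytask().expect("no current task");
    /// task.sleep(trapframe, 100); // parks until ~100 ticks elapse
    /// ```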
    pub fn sleep(&self, trapframe: &mut Trapframe, ticks: u64) {
        struct SleepWakerHandler {
            task_id: usize,
            _start_tick: u64,
        }

        impl TimerHandler for SleepWakerHandler {
            fn on_timer_expired(self: Arc<Self>, _context: usize) {
                if let Some(task) = get_scheduler().get_task_by_id(self.task_id) {
                    let handler: Arc<dyn TimerHandler> = self.clone();
                    task.remove_software_timer_handler(&handler);
                    // Make the handler removal visible before waking the sleeper.
                    core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
                    let waker = get_waitpid_waker(self.task_id);
                    waker.wake_all();
                }
            }
        }

        let wake_tick = get_tick() + ticks;
        let handler: Arc<dyn crate::timer::TimerHandler> = Arc::new(SleepWakerHandler {
            task_id: self.id,
            _start_tick: get_tick(),
        });
        add_timer(wake_tick, &handler, 0);

        self.add_software_timer_handler(handler);
        // Make the handler registration visible before blocking.
        core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
        let waker = get_waitpid_waker(self.id);
        waker.wait(self.get_id(), trapframe);
    }

    pub fn set_vfs(&self, vfs: Arc<VfsManager>) {
        *self.vfs.write() = Some(vfs);
    }

    pub fn get_vfs(&self) -> Option<Arc<VfsManager>> {
        self.vfs.read().clone()
    }

    pub fn add_software_timer_handler(&self, timer: Arc<dyn TimerHandler>) {
        self.software_timers_handlers.write().push(timer);
    }

    pub fn remove_software_timer_handler(&self, timer: &Arc<dyn TimerHandler>) {
        let mut handlers = self.software_timers_handlers.write();
        if let Some(pos) = handlers.iter().position(|x| Arc::ptr_eq(x, timer)) {
            handlers.remove(pos);
        }
    }

    pub fn enable_events(&self) {
        let mut enabled = self.events_enabled.lock();
        *enabled = true;
    }

    pub fn disable_events(&self) {
        let mut enabled = self.events_enabled.lock();
        *enabled = false;
    }

    pub fn events_enabled(&self) -> bool {
        *self.events_enabled.lock()
    }

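    /// Drains up to `MAX_EVENTS_PER_CYCLE` queued IPC events through the
    /// task's default ABI, stopping early on a critical event or if event
    /// delivery is disabled mid-drain.
    ///
    /// A sketch of the delivery loop a caller might run at a safe point
    /// (illustrative):
    ///
    /// ```rust,ignore
    /// if task.events_enabled() {
    ///     task.process_pending_events()?;
    /// }
    /// ```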
    pub fn process_pending_events(&self) -> Result<(), &'static str> {
        if !self.events_enabled() {
            return Ok(());
        }

        self.with_default_abi_mut(|abi, _| {
            // Bound the work done per call so a flood of events cannot
            // monopolize this task's kernel time.
            const MAX_EVENTS_PER_CYCLE: usize = 8;
            let mut processed_count = 0;

            while processed_count < MAX_EVENTS_PER_CYCLE {
                let event = {
                    let mut queue = self.event_queue.lock();
                    queue.dequeue()
                };

                match event {
                    Some(event) => {
                        processed_count += 1;

                        let is_critical = self.is_critical_event(&event);

                        abi.handle_event(event, self.id as u32)?;

                        // The handler may have disabled further delivery.
                        if !self.events_enabled() {
                            break;
                        }

                        // Critical events end the cycle immediately.
                        if is_critical {
                            break;
                        }
                    }
                    None => break,
                }
            }

            if processed_count == MAX_EVENTS_PER_CYCLE {
                let queue = self.event_queue.lock();
                if !queue.is_empty() {
                    // Events remain queued; they are handled on the next cycle.
                }
            }

            Ok(())
        })
    }

    fn is_critical_event(&self, event: &crate::ipc::event::Event) -> bool {
        use crate::ipc::event::EventPriority;

        match event.metadata.priority {
            EventPriority::Critical => return true,
            EventPriority::High => {
                match &event.content {
                    EventContent::ProcessControl(ProcessControlType::Kill) => true,
                    EventContent::Custom { event_id, .. } => {
                        // Custom event id 9 is treated as a kill-style request.
                        *event_id == 9
                    }
                    _ => false,
                }
            }
            _ => false,
        }
    }

    pub fn with_kernel_context<R>(&self, f: impl FnOnce(&mut KernelContext) -> R) -> R {
        let mut kctx = self.kernel_context.lock();
        f(&mut kctx)
    }

    pub fn get_kernel_stack_bottom_paddr(&self) -> u64 {
        self.kernel_context.lock().get_kernel_stack_bottom_paddr()
    }

    pub fn get_kernel_stack_memory_area_paddr(&self) -> MemoryArea {
        self.kernel_context
            .lock()
            .get_kernel_stack_memory_area_paddr()
    }

    pub fn get_trapframe(&self) -> &mut Trapframe {
        if let Some((_slot, base)) = *self.kernel_stack_window_base.lock() {
            // The trapframe lives at the very top of the kernel stack window.
            let trapframe_offset = crate::environment::PAGE_SIZE
                + crate::environment::TASK_KERNEL_STACK_SIZE
                - core::mem::size_of::<Trapframe>();
            let trapframe_vaddr = base + trapframe_offset;
            unsafe { &mut *(trapframe_vaddr as *mut Trapframe) }
        } else {
            panic!("get_trapframe: No kernel stack window mapped");
        }
    }

    pub fn set_kernel_stack_window_base(&self, base: Option<(usize, usize)>) {
        *self.kernel_stack_window_base.lock() = base;
    }

    pub fn get_kernel_stack_window_base(&self) -> Option<(usize, usize)> {
        *self.kernel_stack_window_base.lock()
    }
}

#[derive(Debug)]
pub enum WaitError {
    NoSuchChild(String),
    ChildNotExited(String),
    ChildTaskNotFound(String),
}

impl WaitError {
    pub fn message(&self) -> &str {
        match self {
            WaitError::NoSuchChild(msg) => msg,
            WaitError::ChildNotExited(msg) => msg,
            WaitError::ChildTaskNotFound(msg) => msg,
        }
    }
}

impl Drop for Task {
    fn drop(&mut self) {
        crate::vm::teardown_trampoline_for_task_kstack_window(self);
    }
}

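/// Builds a kernel task whose entry point is the given function. The task
/// still has to be initialized and registered with the scheduler.
///
/// A sketch of spawning a kernel worker (wiring is illustrative):
///
/// ```rust,ignore
/// fn worker() { /* kernel-mode loop */ }
///
/// let task = new_kernel_task("worker".to_string(), 0, worker);
/// task.init(); // sets up the kernel VM and points the vcpu at `worker`
/// let id = get_scheduler().add_task(task, 0);
/// ```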
pub fn new_kernel_task(name: String, priority: u32, func: fn()) -> Task {
    let mut task = Task::new(name, priority, TaskType::Kernel);
    task.entry = func as usize;
    task
}

pub fn new_user_task(name: String, priority: u32) -> Task {
    Task::new(name, priority, TaskType::User)
}

#[cfg(test)]
static mut MOCK_CURRENT_TASK: Option<*mut Task> = None;

#[cfg(test)]
pub unsafe fn set_mock_current_task(task: &'static mut Task) {
    unsafe {
        MOCK_CURRENT_TASK = Some(task as *mut Task);
    }
}

#[cfg(test)]
pub unsafe fn clear_mock_current_task() {
    unsafe {
        MOCK_CURRENT_TASK = None;
    }
}

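/// Returns the task currently running on this CPU, if any (tests may inject
/// a mock via `set_mock_current_task`).
///
/// A typical syscall-side sketch (illustrative):
///
/// ```rust,ignore
/// let task = match mytask() {
///     Some(task) => task,
///     None => return Err("no current task"),
/// };
/// let vfs = task.get_vfs().ok_or("task has no VFS")?;
/// ```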
pub fn mytask() -> Option<&'static Task> {
    #[cfg(test)]
    {
        unsafe {
            if let Some(task_ptr) = MOCK_CURRENT_TASK {
                return Some(&*task_ptr);
            }
        }
    }

    let cpu = get_cpu();
    get_scheduler().get_current_task(cpu.get_cpuid())
}

pub fn set_current_task_cwd(path: String) -> bool {
    if let Some(task) = mytask() {
        if let Some(vfs) = task.vfs.read().as_ref() {
            vfs.set_cwd_by_path(&path).is_ok()
        } else {
            false
        }
    } else {
        false
    }
}

pub fn task_initial_kernel_entrypoint() -> ! {
    let cpu = get_cpu();
    let current_task = unsafe {
        get_scheduler()
            .get_current_task_mut(cpu.get_cpuid())
            .unwrap()
    };
    Scheduler::setup_task_execution(cpu, current_task);
    arch_switch_to_user_space(current_task.get_trapframe());
}

#[cfg(test)]
mod tests {
    use alloc::string::ToString;
    use alloc::sync::Arc;
    use core::sync::atomic::Ordering;

    use crate::task::CloneFlags;

    #[test_case]
    fn test_set_brk() {
        let mut task = super::new_user_task("Task0".to_string(), 0);
        task.init();
        assert_eq!(task.get_brk(), 0);
        task.set_brk(0x1000).unwrap();
        assert_eq!(task.get_brk(), 0x1000);
        task.set_brk(0x2000).unwrap();
        assert_eq!(task.get_brk(), 0x2000);
        task.set_brk(0x1008).unwrap();
        assert_eq!(task.get_brk(), 0x1008);
        task.set_brk(0x1000).unwrap();
        assert_eq!(task.get_brk(), 0x1000);
    }

    #[test_case]
    fn test_task_parent_child_relationship() {
        let scheduler = crate::sched::scheduler::get_scheduler();
        scheduler.reset();

        let mut parent_task = super::new_user_task("ParentTask".to_string(), 0);
        parent_task.init();

        let mut child_task = super::new_user_task("ChildTask".to_string(), 0);
        child_task.init();

        let parent_id = scheduler.add_task(parent_task, 0);
        let child_id = scheduler.add_task(child_task, 0);

        {
            let child_task = scheduler.get_task_by_id(child_id).unwrap();
            child_task.set_parent_id(parent_id);
        }
        {
            let parent_task = scheduler.get_task_by_id(parent_id).unwrap();
            parent_task.add_child(child_id);
        }

        {
            let child_task = scheduler.get_task_by_id(child_id).unwrap();
            assert_eq!(child_task.get_parent_id(), Some(parent_id));
        }
        {
            let parent_task = scheduler.get_task_by_id(parent_id).unwrap();
            assert!(parent_task.get_children().contains(&child_id));
        }

        {
            let parent_task = scheduler.get_task_by_id(parent_id).unwrap();
            assert!(parent_task.remove_child(child_id));
            assert!(!parent_task.get_children().contains(&child_id));
        }
    }

    #[test_case]
    fn test_task_exit_status() {
        let mut task = super::new_user_task("TaskWithExitStatus".to_string(), 0);
        task.init();

        assert_eq!(task.get_exit_status(), None);

        task.set_exit_status(0);
        assert_eq!(task.get_exit_status(), Some(0));

        task.set_exit_status(1);
        assert_eq!(task.get_exit_status(), Some(1));
    }

    #[test_case]
    fn test_clone_task_memory_copy() {
        let scheduler = crate::sched::scheduler::get_scheduler();
        scheduler.reset();

        let mut parent_task = super::new_user_task("ParentTask".to_string(), 0);
        parent_task.init();

        let vaddr = 0x1000;
        let num_pages = 2;
        let mmap = parent_task.allocate_data_pages(vaddr, num_pages).unwrap();

        let parent_paddr = mmap.pmarea.start;
        let parent_vaddr_start = mmap.vmarea.start;
        let parent_vaddr_end = mmap.vmarea.end;
        let parent_perms = mmap.permissions;

        let test_data: [u8; 8] = [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0];
        unsafe {
            let dst_ptr = mmap.pmarea.start as *mut u8;
            core::ptr::copy_nonoverlapping(test_data.as_ptr(), dst_ptr, test_data.len());
        }

        let parent_memmap_count = parent_task.vm_manager.memmap_len();

        let child_task = parent_task.clone_task(CloneFlags::default()).unwrap();

        assert!(
            !Arc::ptr_eq(&child_task.brk, &parent_task.brk),
            "Child should not share brk with parent unless CLONE_VM is set"
        );

        let child_memmap_count = child_task.vm_manager.memmap_len();

        assert_eq!(
            child_memmap_count, parent_memmap_count,
            "Child should have the same number of memory maps as parent: child={}, parent={}",
            child_memmap_count, parent_memmap_count
        );

        let parent_pc = parent_task.vcpu.lock().get_pc();
        let parent_entry = parent_task.entry;
        let parent_state = parent_task.state.load(Ordering::SeqCst);
        let child_pc = child_task.vcpu.lock().get_pc();
        let child_entry = child_task.entry;
        let child_state = child_task.state.load(Ordering::SeqCst);
        let child_managed_pages_len = child_task.managed_pages.read().len();

        let scheduler = crate::sched::scheduler::get_scheduler();
        let parent_id = scheduler.add_task(parent_task, 0);
        let child_id = scheduler.add_task(child_task, 0);

        {
            let child = scheduler.get_task_by_id(child_id).unwrap();
            child.set_parent_id(parent_id);
        }
        {
            let parent = scheduler.get_task_by_id(parent_id).unwrap();
            parent.add_child(child_id);
        }

        {
            let child = scheduler.get_task_by_id(child_id).unwrap();
            assert_eq!(child.get_parent_id(), Some(parent_id));
        }
        {
            let parent = scheduler.get_task_by_id(parent_id).unwrap();
            assert!(parent.get_children().contains(&child_id));
        }

        let child_stack_size = {
            let child = scheduler.get_task_by_id(child_id).unwrap();
            child.stack_size.load(Ordering::SeqCst)
        };
        let child_data_size = {
            let child = scheduler.get_task_by_id(child_id).unwrap();
            child.data_size.load(Ordering::SeqCst)
        };
        let child_text_size = {
            let child = scheduler.get_task_by_id(child_id).unwrap();
            child.text_size.load(Ordering::SeqCst)
        };
        let parent_stack_size = {
            let parent = scheduler.get_task_by_id(parent_id).unwrap();
            parent.stack_size.load(Ordering::SeqCst)
        };
        let parent_data_size = {
            let parent = scheduler.get_task_by_id(parent_id).unwrap();
            parent.data_size.load(Ordering::SeqCst)
        };
        let parent_text_size = {
            let parent = scheduler.get_task_by_id(parent_id).unwrap();
            parent.text_size.load(Ordering::SeqCst)
        };

        assert_eq!(child_stack_size, parent_stack_size);
        assert_eq!(child_data_size, parent_data_size);
        assert_eq!(child_text_size, parent_text_size);

        let child_mmap = {
            let mut found = None;
            let child = scheduler.get_task_by_id(child_id).unwrap();
            child.vm_manager.with_memmaps(|mm| {
                for m in mm.values() {
                    if m.vmarea.start == vaddr
                        && m.vmarea.end == vaddr + num_pages * crate::environment::PAGE_SIZE - 1
                    {
                        found = Some(m.clone());
                        break;
                    }
                }
            });
            found.expect("Test memory map not found in child task")
        };

        assert_eq!(child_mmap.vmarea.start, parent_vaddr_start);
        assert_eq!(child_mmap.vmarea.end, parent_vaddr_end);
        assert_eq!(child_mmap.permissions, parent_perms);

        unsafe {
            let parent_ptr = parent_paddr as *const u8;
            let child_ptr = child_mmap.pmarea.start as *const u8;

            assert_ne!(
                parent_ptr, child_ptr,
                "Parent and child should have different physical memory"
            );

            for i in 0..test_data.len() {
                let parent_byte = *parent_ptr.offset(i as isize);
                let child_byte = *child_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte, "Data mismatch at offset {}", i);
            }
        }

        unsafe {
            let parent_ptr = mmap.pmarea.start as *mut u8;
            let original_value = *parent_ptr;
            *parent_ptr = 0xFF; // mutate the parent's page

            let child_ptr = child_mmap.pmarea.start as *const u8;
            let child_first_byte = *child_ptr;

            assert_eq!(
                child_first_byte, original_value,
                "Child memory should be independent from parent"
            );
        }

        assert_eq!(child_pc, parent_pc);

        assert_eq!(child_entry, parent_entry);

        assert_eq!(child_state, parent_state);

        assert!(
            child_managed_pages_len >= num_pages,
            "Child should have at least the test pages in managed pages"
        );
    }

    #[test_case]
    fn test_clone_task_stack_copy() {
        let scheduler = crate::sched::scheduler::get_scheduler();
        scheduler.reset();

        let mut parent_task = super::new_user_task("ParentWithStack".to_string(), 0);
        parent_task.init();

        let stack_mmap = {
            let mut found = None;
            parent_task.vm_manager.with_memmaps(|mm| {
                for mmap in mm.values() {
                    use crate::vm::vmem::VirtualMemoryRegion;
                    if mmap.vmarea.end == crate::environment::USER_STACK_END - 1
                        && mmap.permissions == VirtualMemoryRegion::Stack.default_permissions()
                    {
                        found = Some(mmap.clone());
                        break;
                    }
                }
            });
            found.expect("Stack memory map not found in parent task")
        };

        let stack_test_data: [u8; 16] = [
            0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
            0x99, 0x00,
        ];
        unsafe {
            let stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *mut u8;
            core::ptr::copy_nonoverlapping(
                stack_test_data.as_ptr(),
                stack_ptr,
                stack_test_data.len(),
            );
        }

        let child_task = parent_task.clone_task(CloneFlags::default()).unwrap();

        let child_stack_mmap = {
            let mut found = None;
            child_task.vm_manager.with_memmaps(|mm| {
                for mmap in mm.values() {
                    use crate::vm::vmem::VirtualMemoryRegion;
                    if mmap.vmarea.start == stack_mmap.vmarea.start
                        && mmap.vmarea.end == stack_mmap.vmarea.end
                        && mmap.permissions == VirtualMemoryRegion::Stack.default_permissions()
                    {
                        found = Some(mmap.clone());
                        break;
                    }
                }
            });
            found.expect("Stack memory map not found in child task")
        };

        unsafe {
            let parent_stack_ptr =
                (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;
            let child_stack_ptr =
                (child_stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;

            assert_ne!(
                parent_stack_ptr, child_stack_ptr,
                "Parent and child should have different stack physical memory"
            );

            for i in 0..stack_test_data.len() {
                let parent_byte = *parent_stack_ptr.offset(i as isize);
                let child_byte = *child_stack_ptr.offset(i as isize);
                assert_eq!(
                    parent_byte, child_byte,
                    "Stack data mismatch at offset {}: parent={:#x}, child={:#x}",
                    i, parent_byte, child_byte
                );
            }
        }

        unsafe {
            let parent_stack_ptr =
                (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *mut u8;
            let original_value = *parent_stack_ptr;
            *parent_stack_ptr = 0xFE; // mutate the parent's stack page

            let child_stack_ptr =
                (child_stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;
            let child_first_byte = *child_stack_ptr;

            assert_eq!(
                child_first_byte, original_value,
                "Child stack should be independent from parent stack"
            );
        }

        assert_eq!(
            child_task.stack_size.load(Ordering::SeqCst),
            parent_task.stack_size.load(Ordering::SeqCst),
            "Child and parent should have the same stack size"
        );
    }

    #[test_case]
    fn test_clone_task_shared_memory() {
        let scheduler = crate::sched::scheduler::get_scheduler();
        scheduler.reset();

        use crate::environment::PAGE_SIZE;
        use crate::mem::page::allocate_raw_pages;
        use crate::vm::vmem::{MemoryArea, VirtualMemoryMap, VirtualMemoryPermission};

        let mut parent_task = super::new_user_task("ParentWithShared".to_string(), 0);
        parent_task.init();

        let shared_vaddr = 0x5000;
        let num_pages = 1;
        let pages = allocate_raw_pages(num_pages);
        let paddr = pages as usize;

        let shared_mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: paddr,
                end: paddr + PAGE_SIZE - 1,
            },
            vmarea: MemoryArea {
                start: shared_vaddr,
                end: shared_vaddr + PAGE_SIZE - 1,
            },
            permissions: VirtualMemoryPermission::Read as usize
                | VirtualMemoryPermission::Write as usize,
            is_shared: true,
            owner: None,
        };

        parent_task
            .vm_manager
            .add_memory_map(shared_mmap.clone())
            .unwrap();

        let test_data: [u8; 8] = [0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22];
        unsafe {
            let shared_ptr = paddr as *mut u8;
            core::ptr::copy_nonoverlapping(test_data.as_ptr(), shared_ptr, test_data.len());
        }

        let child_task = parent_task.clone_task(CloneFlags::default()).unwrap();

        let child_shared_mmap = {
            let mut found = None;
            child_task.vm_manager.with_memmaps(|mm| {
                for mmap in mm.values() {
                    if mmap.vmarea.start == shared_vaddr && mmap.is_shared {
                        found = Some(mmap.clone());
                        break;
                    }
                }
            });
            found.expect("Shared memory map not found in child task")
        };

        assert_eq!(
            child_shared_mmap.pmarea.start, shared_mmap.pmarea.start,
            "Shared memory should have the same physical address in parent and child"
        );

        assert_eq!(child_shared_mmap.vmarea.start, shared_mmap.vmarea.start);
        assert_eq!(child_shared_mmap.vmarea.end, shared_mmap.vmarea.end);

        assert!(
            child_shared_mmap.is_shared,
            "Shared memory should remain marked as shared"
        );

        unsafe {
            let child_shared_ptr = child_shared_mmap.pmarea.start as *mut u8;
            let original_value = *child_shared_ptr;
            *child_shared_ptr = 0xFF; // write through the child's view

            let parent_shared_ptr = shared_mmap.pmarea.start as *const u8;
            let parent_first_byte = *parent_shared_ptr;

            assert_eq!(
                parent_first_byte, 0xFF,
                "Parent should see changes made through child's shared memory reference"
            );

            *child_shared_ptr = original_value;
        }

        unsafe {
            let child_ptr = child_shared_mmap.pmarea.start as *const u8;
            let parent_ptr = shared_mmap.pmarea.start as *const u8;

            for i in 0..test_data.len() {
                let parent_byte = *parent_ptr.offset(i as isize);
                let child_byte = *child_ptr.offset(i as isize);
                assert_eq!(
                    parent_byte, child_byte,
                    "Shared memory data should be identical from both parent and child views"
                );
            }
        }
    }

    #[test_case]
    fn test_clone_task_with_clone_vm_shares_address_space() {
        let scheduler = crate::sched::scheduler::get_scheduler();
        scheduler.reset();

        use crate::environment::PAGE_SIZE;

        let mut parent = super::new_user_task("ParentCloneVm".to_string(), 0);
        parent.init();

        let base_vaddr = 0x4000;
        parent.allocate_data_pages(base_vaddr, 1).unwrap();
        let parent_len_before = parent.vm_manager.memmap_len();

        let mut flags = super::CloneFlags::new();
        flags.set(super::CloneFlagsDef::Vm);
        let child = parent.clone_task(flags).unwrap();

        assert!(
            Arc::ptr_eq(&child.brk, &parent.brk),
            "CLONE_VM tasks must share brk"
        );

        assert_eq!(child.vm_manager.get_asid(), parent.vm_manager.get_asid());
        assert_eq!(child.vm_manager.memmap_len(), parent_len_before);

        parent
            .allocate_data_pages(base_vaddr + PAGE_SIZE, 1)
            .unwrap();
        assert_eq!(
            child.vm_manager.memmap_len(),
            parent.vm_manager.memmap_len()
        );

        assert!(child.managed_pages.read().len() <= parent.managed_pages.read().len());
    }

    #[test_case]
    fn test_task_namespace_creation() {
        let scheduler = crate::sched::scheduler::get_scheduler();
        scheduler.reset();

        use super::namespace;

        let task = super::new_user_task("TestTask".to_string(), 0);
        assert_eq!(task.get_namespace().get_name(), "root");
        assert!(task.get_namespace().is_root());

        let task_id = scheduler.add_task(task, 0);

        let ns_id = scheduler
            .get_task_by_id(task_id)
            .unwrap()
            .get_namespace_id();
        assert!(ns_id >= 1);
    }

    #[test_case]
    fn test_task_namespace_inheritance() {
        let scheduler = crate::sched::scheduler::get_scheduler();
        scheduler.reset();

        use super::namespace;

        let mut parent = super::new_user_task("Parent".to_string(), 0);
        parent.init();

        let child = parent.clone_task(CloneFlags::default()).unwrap();

        assert_eq!(
            parent.get_namespace().get_id(),
            child.get_namespace().get_id()
        );

        let scheduler = crate::sched::scheduler::get_scheduler();
        let parent_id = scheduler.add_task(parent, 0);
        let child_id = scheduler.add_task(child, 0);

        let parent_ns_id = scheduler
            .get_task_by_id(parent_id)
            .unwrap()
            .get_namespace_id();
        let child_ns_id = scheduler
            .get_task_by_id(child_id)
            .unwrap()
            .get_namespace_id();
        assert_ne!(parent_ns_id, child_ns_id);
    }

    #[test_case]
    fn test_task_namespace_id_allocation() {
        let scheduler = crate::sched::scheduler::get_scheduler();
        scheduler.reset();

        use super::namespace;

        let custom_ns = namespace::TaskNamespace::new_child(
            namespace::get_root_namespace().clone(),
            "test_ns".to_string(),
        );

        let mut task1 = super::Task::new_with_namespace(
            "Task1".to_string(),
            0,
            super::TaskType::User,
            custom_ns.clone(),
        );
        let mut task2 = super::Task::new_with_namespace(
            "Task2".to_string(),
            0,
            super::TaskType::User,
            custom_ns.clone(),
        );
        let mut task3 = super::Task::new_with_namespace(
            "Task3".to_string(),
            0,
            super::TaskType::User,
            custom_ns.clone(),
        );

        task1.init();
        task2.init();
        task3.init();

        let scheduler = crate::sched::scheduler::get_scheduler();
        let id1 = scheduler.add_task(task1, 0);
        let id2 = scheduler.add_task(task2, 0);
        let id3 = scheduler.add_task(task3, 0);

        let ns_id1 = scheduler.get_task_by_id(id1).unwrap().get_namespace_id();
        let ns_id2 = scheduler.get_task_by_id(id2).unwrap().get_namespace_id();
        let ns_id3 = scheduler.get_task_by_id(id3).unwrap().get_namespace_id();
        assert_eq!(ns_id1, 1);
        assert_eq!(ns_id2, 2);
        assert_eq!(ns_id3, 3);

        assert_ne!(id1, id2);
        assert_ne!(id2, id3);
        assert_ne!(id1, id3);
    }

    #[test_case]
    fn test_namespace_hierarchy() {
        use super::namespace;

        let root = namespace::get_root_namespace();
        let child_ns = namespace::TaskNamespace::new_child(root.clone(), "child".to_string());
        let grandchild_ns =
            namespace::TaskNamespace::new_child(child_ns.clone(), "grandchild".to_string());

        assert!(root.is_root());
        assert!(!child_ns.is_root());
        assert!(!grandchild_ns.is_root());

        assert!(child_ns.get_parent().is_some());
        assert_eq!(child_ns.get_parent().unwrap().get_id(), root.get_id());
        assert_eq!(
            grandchild_ns.get_parent().unwrap().get_id(),
            child_ns.get_id()
        );
    }

    #[test_case]
    fn test_all_abis_share_root_namespace_by_default() {
        let scheduler = crate::sched::scheduler::get_scheduler();
        scheduler.reset();

        use super::namespace;
        use alloc::vec::Vec;

        let mut task1 = super::new_user_task("Task1".to_string(), 0);
        let mut task2 = super::new_user_task("Task2".to_string(), 0);
        let mut task3 = super::new_user_task("Task3".to_string(), 0);

        task1.init();
        task2.init();
        task3.init();

        let id1 = scheduler.add_task(task1, 0);
        let id2 = scheduler.add_task(task2, 0);
        let id3 = scheduler.add_task(task3, 0);

        assert_ne!(id1, 0, "Task ID should be non-zero after add_task");
        assert_ne!(id2, 0, "Task ID should be non-zero after add_task");
        assert_ne!(id3, 0, "Task ID should be non-zero after add_task");

        let ns_id1 = scheduler.get_task_by_id(id1).unwrap().get_namespace_id();
        assert_ne!(ns_id1, 0, "Namespace ID should be non-zero after add_task");

        let ns_id2 = scheduler.get_task_by_id(id2).unwrap().get_namespace_id();
        assert_ne!(ns_id2, 0, "Namespace ID should be non-zero after add_task");

        let ns_id3 = scheduler.get_task_by_id(id3).unwrap().get_namespace_id();
        assert_ne!(ns_id3, 0, "Namespace ID should be non-zero after add_task");

        assert_ne!(ns_id1, ns_id2, "Namespace IDs should be unique");
        assert_ne!(ns_id2, ns_id3, "Namespace IDs should be unique");

        {
            let task1 = scheduler.get_task_by_id(id1).unwrap();
            assert_eq!(task1.get_namespace().get_name(), "root");
        }
        {
            let task2 = scheduler.get_task_by_id(id2).unwrap();
            assert_eq!(task2.get_namespace().get_name(), "root");
        }
        {
            let task3 = scheduler.get_task_by_id(id3).unwrap();
            assert_eq!(task3.get_namespace().get_name(), "root");
        }

        let ns1_id = scheduler
            .get_task_by_id(id1)
            .unwrap()
            .get_namespace()
            .get_id();
        let ns2_id = scheduler
            .get_task_by_id(id2)
            .unwrap()
            .get_namespace()
            .get_id();
        let ns3_id = scheduler
            .get_task_by_id(id3)
            .unwrap()
            .get_namespace()
            .get_id();
        assert_eq!(ns1_id, ns2_id, "All tasks should share root namespace");
        assert_eq!(ns2_id, ns3_id, "All tasks should share root namespace");
    }
}