// kernel/arch/riscv64/trap/user.rs

1use core::arch::naked_asm;
2use core::{arch::asm, mem::transmute};
3
4use super::exception::arch_exception_handler;
5use super::interrupt::arch_interrupt_handler;
6
7use crate::arch::{Trapframe, get_kernel_trapvector_paddr, set_trapvector};
8
/// Trampoline entry point for traps taken while executing in user mode.
///
/// Machine state expected on entry (set up before the previous `sret`):
/// - `sscratch` holds a pointer to the per-hart `Riscv64` struct, whose
///   fields are addressed by byte offset below: 0 = scratch slot,
///   16 = satp, 24 = kernel_stack, 32 = kernel_trap (handler fn pointer).
/// - This code is placed in `.trampoline.text`; it begins executing with
///   the *user* address space still active, so the trampoline page — and,
///   presumably, the `Riscv64` struct it dereferences — must be mapped at
///   the same virtual address in both address spaces.
///   NOTE(review): confirm the `Riscv64` struct is mapped in user space.
///
/// Sequence: disable S-mode interrupts, stash user a0 (in sscratch) and
/// sp (in `Riscv64.scratch`), swap satp to the kernel space while saving
/// the user satp back into `Riscv64.satp`, switch to the kernel stack,
/// save the full register file plus `sepc` into a 272-byte `Trapframe`
/// carved out on that stack, restore `sscratch`, then tail-jump to
/// `Riscv64.kernel_trap` with the trapframe pointer in a0.
#[unsafe(link_section = ".trampoline.text")]
#[unsafe(export_name = "_user_trap_entry")]
#[unsafe(naked)]
pub extern "C" fn _user_trap_entry() {
    unsafe {
        naked_asm!(
            "
        .option norvc
        .option norelax
        .align 8
                /* Disable the interrupt */
                csrci   sstatus, 0x2

                /* Save a0 to sscratch and load the Riscv64 struct pointer */
                csrrw   a0, sscratch, a0
                /* Store sp to Riscv64.scratch */
                sd      sp, 0(a0)

                /* Load the satp for the kernel space from Riscv64.satp */
                ld      sp, 16(a0) // sp = Riscv64.satp
                /* Switch to kernel memory space */
                csrrw   sp, satp, sp
                sfence.vma zero, zero
                /* Store the user memory space */
                sd      sp, 16(a0) // Riscv64.satp = sp
                
                /* Load kernel stack pointer from Riscv64.kernel_stack */
                ld      sp, 24(a0)

                /* Allocate space on the kernel stack for saving user context */
                addi    sp, sp, -272 /* sizeof(Trapframe) = 272 bytes */

                /* Save the context of the current hart */
                sd      x0, 0(sp)
                sd      x1, 8(sp)
                // sd      x2, 16(sp) (x2 is sp, which we are modifying)
                sd      x3, 24(sp)
                sd      x4, 32(sp)
                sd      x5, 40(sp)
                sd      x6, 48(sp)
                sd      x7, 56(sp)
                sd      x8, 64(sp)
                sd      x9, 72(sp)
                // sd      x10, 80(sp) (x10 is a0, which we are modifying)
                sd      x11, 88(sp)
                sd      x12, 96(sp)
                sd      x13, 104(sp)
                sd      x14, 112(sp)
                sd      x15, 120(sp)
                sd      x16, 128(sp)
                sd      x17, 136(sp)
                sd      x18, 144(sp)
                sd      x19, 152(sp)
                sd      x20, 160(sp)
                sd      x21, 168(sp)
                sd      x22, 176(sp)
                sd      x23, 184(sp)
                sd      x24, 192(sp)
                sd      x25, 200(sp)
                sd      x26, 208(sp)
                sd      x27, 216(sp)
                sd      x28, 224(sp)
                sd      x29, 232(sp)
                sd      x30, 240(sp)
                sd      x31, 248(sp)
                csrr    t0, sepc
                sd      t0, 256(sp)

                // Load sp from Riscv64.scratch and store sp to trapframe
                ld      t0, 0(a0)  // t0 = Riscv64.scratch (old sp)
                sd      t0, 16(sp) // trapframe.sp = t0

                // Save original a0 (currently in sscratch) to trapframe
                csrr    t0, sscratch  // t0 = original a0 value
                sd      t0, 80(sp)    // trapframe.a0 = original a0

                // Restore sscratch to Riscv64 pointer
                csrw   sscratch, a0

                /* Call the user trap handler */
                /* Load the function pointer from Riscv64.kernel_trap */
                ld      ra, 32(a0)

                /* Pass the trapframe pointer as the first argument */
                mv      a0, sp
                jr      ra // Riscv64.kernel_trap(a0: &mut Trapframe)
            "
        );
    }
}
99
/// Trampoline exit path: restore the user context from the `Trapframe`
/// pointed to by a0, switch back to the user address space, and `sret`.
///
/// Mirrors `_user_trap_entry`: `sepc` is restored from offset 256, the
/// register file from offsets 0..248, and a0 itself last from offset 80.
/// t0 (x5) is restored early (offset 40), then temporarily spilled into
/// `Riscv64.scratch` while it serves as the scratch register for the satp
/// swap, and reloaded afterwards — so the final register state is complete.
///
/// NOTE(review): the inline comment "restore all except sp and a0" is
/// stale — x2 (sp) IS restored from offset 16 below.
/// NOTE(review): the stores/loads on the `Riscv64` struct at offsets 0 and
/// 16 after the `csrrw .. satp ..` execute with the *user* address space
/// active; this assumes the struct is mapped at the same VA in user space
/// — confirm.
#[unsafe(link_section = ".trampoline.text")]
#[unsafe(export_name = "_user_trap_exit")]
#[unsafe(naked)]
pub extern "C" fn _user_trap_exit(trapframe: &mut Trapframe) -> ! {
    unsafe {
        naked_asm!(
            "
        .option norvc
        .option norelax
        .align 8
                /* Restore the context of the current hart from trapframe first */ 
                /* epc */
                ld     t0, 256(a0)
                csrw   sepc, t0
                
                /* Register - restore all except sp and a0 */
                ld     x0, 0(a0)
                ld     x1, 8(a0)
                ld     x2, 16(a0)
                ld     x3, 24(a0)
                ld     x4, 32(a0)
                ld     x5, 40(a0)
                ld     x6, 48(a0)
                ld     x7, 56(a0)
                ld     x8, 64(a0)
                ld     x9, 72(a0)
                // ld     x10, 80(a0) (a0 will be restored last)
                ld     x11, 88(a0)
                ld     x12, 96(a0)
                ld     x13, 104(a0)
                ld     x14, 112(a0)
                ld     x15, 120(a0)
                ld     x16, 128(a0)
                ld     x17, 136(a0)
                ld     x18, 144(a0)
                ld     x19, 152(a0)
                ld     x20, 160(a0)
                ld     x21, 168(a0)
                ld     x22, 176(a0)
                ld     x23, 184(a0)
                ld     x24, 192(a0)
                ld     x25, 200(a0)
                ld     x26, 208(a0)
                ld     x27, 216(a0)
                ld     x28, 224(a0)
                ld     x29, 232(a0)
                ld     x30, 240(a0)
                ld     x31, 248(a0)

                /* Restore a0 from trapframe */
                ld     a0, 80(a0)

                /* Swap a0 with sscratch to get Riscv64 pointer */
                csrrw  a0, sscratch, a0  // a0 = Riscv64 pointer, sscratch = original a0

                /* Store original t0 in Riscv64.scratch temporarily */
                sd     t0, 0(a0)        // Riscv64.scratch = original t0

                /* Restore the user memory space using t0 as temp */
                ld     t0, 16(a0)       // t0 = Riscv64.satp (user satp)
                csrrw  t0, satp, t0
                /* Store back the kernel memory space */
                sd     t0, 16(a0)       // Riscv64.satp = t0
                sfence.vma zero, zero

                /* Restore trapframe t0 from Riscv64.scratch */
                ld     t0, 0(a0)        // t0 = original t0

                /* Swap back sscratch to original a0 */
                csrrw   a0, sscratch, a0     // a0 = original a0, sscratch = Riscv64 pointer

                sret
            "
        );
    }
}
176
177#[unsafe(export_name = "arch_user_trap_handler")]
178pub extern "C" fn arch_user_trap_handler(addr: usize) -> ! {
179    let trapframe: &mut Trapframe = unsafe { transmute(addr) };
180    set_trapvector(get_kernel_trapvector_paddr());
181
182    // let cpu = crate::arch::get_cpu();
183    // crate::early_println!("CPU: {:#x?}", cpu);
184
185    let cause: usize;
186    unsafe {
187        asm!(
188            "csrr {0}, scause",
189            out(reg) cause,
190        );
191    }
192
193    let interrupt = cause & 0x8000000000000000 != 0;
194    if interrupt {
195        arch_interrupt_handler(trapframe, cause & !0x8000000000000000);
196    } else {
197        // crate::println!("Entering exception handler for cause: {}", cause);
198        arch_exception_handler(trapframe, cause);
199        // crate::println!("Exiting exception handler for cause: {}", cause);
200    }
201    // Jump directly to user trap exit via trampoline
202    arch_switch_to_user_space(trapframe);
203}
204
205/// Switch to user space using the trampoline mechanism
206///
207/// This function prepares the trapframe for user space execution
208/// and jumps to the user trap exit handler using a trampoline.
209///
210/// # Arguments
211/// * `trapframe` - A mutable reference to the trapframe that contains the state to switch to user space.
212///
213/// This function is marked as `noreturn` because it will not return to the caller.
214/// It will jump to the user trap exit handler, which will then return to user space.
215#[unsafe(export_name = "arch_switch_to_user_space")]
216pub fn arch_switch_to_user_space(trapframe: &mut Trapframe) -> ! {
217    let addr = trapframe as *mut Trapframe as usize;
218
219    // Configure the upcoming user return. This affects sstatus.SPIE, not the current kernel SIE.
220    crate::arch::configure_user_entry(
221        trapframe,
222        crate::arch::UserEntryOptions {
223            irq_policy: crate::arch::UserReturnIrqPolicy::Enable,
224        },
225    );
226
227    // Get the trampoline address for _user_trap_exit
228    let trap_exit_offset = _user_trap_exit as usize - _user_trap_entry as usize;
229    // crate::early_println!("_user_trap_entry: {:#x}, _user_trap_exit: {:#x}, offset: {:#x}", _user_trap_entry as usize, _user_trap_exit as usize, trap_exit_offset);
230    let trampoline_base = crate::vm::get_trampoline_trap_vector();
231    let trap_exit_addr = trampoline_base + trap_exit_offset;
232    set_trapvector(trampoline_base);
233
234    // crate::early_println!("trap_exit_addr: {:#x}, trapframe: {:#x}", trap_exit_addr, addr);
235
236    unsafe {
237        asm!(
238            "mv t0, {trap_exit_addr}",    // Load jump target into t0 first
239            "mv a0, {trapframe_addr}",    // Load trapframe addr into a0
240            "jr t0",                      // Jump using t0 (preserves a0)
241            trapframe_addr = in(reg) addr,
242            trap_exit_addr = in(reg) trap_exit_addr,
243            options(noreturn, nostack)
244        );
245    }
246}