1use alloc::{format, string::String, sync::Arc, vec::Vec};
8use spin::RwLock;
9
10use crate::mem::page::{allocate_raw_pages, free_raw_pages};
11use crate::object::capability::memory_mapping::{
12 AccessKind, MemoryMappingOps, ResolveFaultError, ResolveFaultResult,
13};
14use crate::vm::vmem::VirtualMemoryMap;
15
16const LOG_SHARED_MEMORY_RESIZE: bool = false;
17
/// Abstraction over a shareable memory object that can be mapped into
/// address spaces through the [`MemoryMappingOps`] interface.
pub trait SharedMemoryObject: MemoryMappingOps + Send + Sync {
    /// Current logical size of the object in bytes.
    fn size(&self) -> usize;

    /// Resize the object to `new_size` bytes (rounding behavior is
    /// implementation-defined; see the concrete impl).
    fn resize(&self, new_size: usize) -> Result<(), &'static str>;

    /// Identifier string for this object (used e.g. as the mmap owner name).
    fn id(&self) -> String;

    /// Whether the object is still valid (i.e. has not been invalidated).
    fn is_valid(&self) -> bool;
}
34
/// Lock-protected internal state of a [`SharedMemory`] object.
struct SharedMemoryState {
    // Physical base address of the current backing pages.
    paddr: usize,
    // Current logical size in bytes (page-aligned by the constructors/resize).
    size: usize,
    // Allocated backing capacity in bytes; invariant: size <= capacity.
    capacity: usize,
    // Permission bits handed back by get_mapping_info (opaque flag word).
    permissions: usize,
    // Cleared by invalidate(); gates mapping and fault resolution.
    valid: bool,
    // Number of live mappings, maintained by on_mapped/on_unmapped.
    mapping_count: usize,
    // Superseded (paddr, page_count) allocations from resize that must stay
    // alive until the last mapping is removed; freed in on_unmapped/Drop.
    stale_pages: Vec<(usize, usize)>,
    // True when this object allocated the pages itself and must free them.
    owns_memory: bool,
}
54
55impl SharedMemoryState {
56 fn new(paddr: usize, size: usize, permissions: usize, owns_memory: bool) -> Self {
57 Self {
58 paddr,
59 size,
60 capacity: size,
61 permissions,
62 valid: true,
63 mapping_count: 0,
64 stale_pages: Vec::new(),
65 owns_memory,
66 }
67 }
68}
69
/// A region of physical memory that can be shared between address spaces
/// via the [`MemoryMappingOps`] interface.
pub struct SharedMemory {
    // Shared mutable state; Arc + RwLock so mapping callbacks taking &self
    // can still update counters and stale-page bookkeeping.
    state: Arc<RwLock<SharedMemoryState>>,
    // Identifier derived from the initial physical address ("shmem_0x...").
    id: String,
}
80
81impl SharedMemory {
82 pub fn new(size: usize, permissions: usize) -> Result<Self, &'static str> {
91 use crate::environment::PAGE_SIZE;
92
93 if size == 0 {
94 return Err("Size must be greater than 0");
95 }
96
97 let num_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
99 let aligned_size = num_pages * PAGE_SIZE;
100
101 let pages = allocate_raw_pages(num_pages);
103 if pages.is_null() {
104 return Err("Failed to allocate physical memory for shared memory");
105 }
106 let paddr = pages as usize;
107
108 let state = SharedMemoryState::new(paddr, aligned_size, permissions, true);
109 let id = format!("shmem_{:#x}", paddr);
110
111 Ok(Self {
112 state: Arc::new(RwLock::new(state)),
113 id,
114 })
115 }
116
117 pub unsafe fn from_paddr(paddr: usize, size: usize, permissions: usize) -> Self {
133 let state = SharedMemoryState::new(paddr, size, permissions, false);
134 let id = format!("shmem_{:#x}", paddr);
135
136 Self {
137 state: Arc::new(RwLock::new(state)),
138 id,
139 }
140 }
141
142 pub fn invalidate(&self) {
147 let mut state = self.state.write();
148 state.valid = false;
149 }
150}
151
152impl SharedMemoryObject for SharedMemory {
153 fn size(&self) -> usize {
154 self.state.read().size
155 }
156
157 fn resize(&self, new_size: usize) -> Result<(), &'static str> {
158 use crate::environment::PAGE_SIZE;
159
160 let mut state = self.state.write();
161 let aligned_size = if new_size == 0 {
162 0
163 } else {
164 let num_pages = (new_size + PAGE_SIZE - 1) / PAGE_SIZE;
165 num_pages * PAGE_SIZE
166 };
167
168 if aligned_size <= state.capacity {
170 state.size = aligned_size;
171 return Ok(());
172 }
173
174 if !state.owns_memory {
175 return Err("Shared memory resize not supported for external memory");
176 }
177
178 let num_pages = aligned_size / PAGE_SIZE;
180 let pages = allocate_raw_pages(num_pages);
181 if pages.is_null() {
182 return Err("Failed to allocate physical memory for shared memory resize");
183 }
184
185 let old_paddr = state.paddr;
186 let copy_size = state.size;
187
188 if copy_size > 0 {
190 unsafe {
191 core::ptr::copy_nonoverlapping(
192 state.paddr as *const u8,
193 pages as *mut u8,
194 copy_size,
195 );
196 }
197 }
198
199 let old_pages = state.capacity / PAGE_SIZE;
201 if old_pages > 0 {
202 if state.mapping_count > 0 {
203 state.stale_pages.push((old_paddr, old_pages));
204 } else {
205 let old_ptr = old_paddr as *mut crate::mem::page::Page;
206 free_raw_pages(old_ptr, old_pages);
207 }
208 }
209
210 state.paddr = pages as usize;
211 state.size = aligned_size;
212 state.capacity = aligned_size;
213
214 Ok(())
233 }
234
235 fn id(&self) -> String {
236 self.id.clone()
237 }
238
239 fn is_valid(&self) -> bool {
240 self.state.read().valid
241 }
242}
243
impl MemoryMappingOps for SharedMemory {
    /// Translates an `(offset, length)` request into `(paddr, permissions,
    /// is_shared)`; `is_shared` is always `true` for shared memory.
    ///
    /// Fails when the object has been invalidated, when the request runs
    /// past the current size (or overflows), or when the physical address
    /// computation overflows.
    fn get_mapping_info(
        &self,
        offset: usize,
        length: usize,
    ) -> Result<(usize, usize, bool), &'static str> {
        let state = self.state.read();

        if !state.valid {
            return Err("Shared memory object is not valid");
        }

        // checked_add: an end that overflows usize is treated the same as
        // one that merely exceeds the object's size.
        let end = match offset.checked_add(length) {
            Some(end) => end,
            None => {
                return Err("Mapping request exceeds shared memory size");
            }
        };

        if end > state.size {
            return Err("Mapping request exceeds shared memory size");
        }

        let paddr = state
            .paddr
            .checked_add(offset)
            .ok_or("Physical address overflow in shared memory mapping")?;

        Ok((paddr, state.permissions, true))
    }

    /// Records a new live mapping; pairs with `on_unmapped`.
    fn on_mapped(&self, _vaddr: usize, _paddr: usize, _length: usize, _offset: usize) {
        let mut state = self.state.write();
        state.mapping_count += 1;
    }

    /// Removes a live mapping and, once the count reaches zero, frees any
    /// stale page generations that `resize` left behind.
    fn on_unmapped(&self, _vaddr: usize, _length: usize) {
        let mut state = self.state.write();
        // Saturating decrement: tolerate an unmatched unmap rather than
        // underflowing the counter.
        if state.mapping_count > 0 {
            state.mapping_count -= 1;
        }
        if state.mapping_count == 0 && !state.stale_pages.is_empty() {
            // take() empties the list while holding the lock; the frees
            // below run on the extracted copy.
            let stale = core::mem::take(&mut state.stale_pages);
            for (paddr, pages) in stale {
                if pages == 0 {
                    continue;
                }
                let ptr = paddr as *mut crate::mem::page::Page;
                free_raw_pages(ptr, pages);
            }
        }
    }

    /// Mapping is supported only while the object is valid.
    fn supports_mmap(&self) -> bool {
        self.state.read().valid
    }

    fn mmap_owner_name(&self) -> String {
        self.id.clone()
    }

    /// Resolves a page fault inside an existing mapping by computing the
    /// physical page that backs the faulting virtual page.
    fn resolve_fault(
        &self,
        access: &AccessKind,
        map: &VirtualMemoryMap,
    ) -> Result<ResolveFaultResult, ResolveFaultError> {
        let state = self.state.read();

        if !state.valid {
            return Err(ResolveFaultError::Invalid);
        }

        // Align the faulting address down to its page base.
        let page_vaddr = access.vaddr & !(crate::environment::PAGE_SIZE - 1);

        if page_vaddr < map.vmarea.start {
            return Err(ResolveFaultError::Unmapped);
        }

        let offset_in_mapping = page_vaddr - map.vmarea.start;

        // NOTE(review): this assumes the mapping covers the object from
        // offset 0; a mapping created at a nonzero object offset would not
        // be accounted for here — confirm against the mmap setup path.
        if offset_in_mapping >= state.size {
            return Err(ResolveFaultError::Unmapped);
        }

        let paddr_page_base = state
            .paddr
            .checked_add(offset_in_mapping)
            .ok_or(ResolveFaultError::Invalid)?;

        Ok(ResolveFaultResult {
            paddr_page_base,
            is_tail: false,
        })
    }
}
347
348impl Drop for SharedMemory {
349 fn drop(&mut self) {
350 use crate::environment::PAGE_SIZE;
351
352 let state = self.state.read();
353 if state.mapping_count > 0 {
354 }
358
359 if state.owns_memory {
361 let num_pages = (state.capacity + PAGE_SIZE - 1) / PAGE_SIZE;
362 let pages_ptr = state.paddr as *mut crate::mem::page::Page;
363 free_raw_pages(pages_ptr, num_pages);
364 for (paddr, pages) in &state.stale_pages {
365 if *pages == 0 {
366 continue;
367 }
368 let pages_ptr = *paddr as *mut crate::mem::page::Page;
369 free_raw_pages(pages_ptr, *pages);
370 }
371 }
372 }
373}
374
#[cfg(test)]
mod tests {
    // Removed unused `use alloc::vec::Vec;` — nothing in this module uses Vec.
    use super::*;

    #[test_case]
    fn test_shared_memory_creation() {
        let permissions = 0x3;
        let size = 4096;

        match SharedMemory::new(size, permissions) {
            Ok(shmem) => {
                // Size may be rounded up to a whole page multiple.
                assert!(shmem.size() >= size);
                assert!(shmem.is_valid());
                assert!(shmem.supports_mmap());
            }
            Err(e) => {
                // Allocation can legitimately fail in a constrained test
                // environment; report rather than panic.
                crate::println!("SharedMemory::new failed: {}", e);
            }
        }
    }

    #[test_case]
    fn test_shared_memory_from_paddr() {
        let paddr = 0x80000000;
        let size = 8192;
        let permissions = 0x3;

        let shmem = unsafe { SharedMemory::from_paddr(paddr, size, permissions) };

        assert_eq!(shmem.size(), size);
        assert!(shmem.is_valid());
        assert!(shmem.supports_mmap());
    }

    #[test_case]
    fn test_shared_memory_mapping_info() {
        let paddr = 0x80000000;
        let size = 4096;
        let permissions = 0x3;

        let shmem = unsafe { SharedMemory::from_paddr(paddr, size, permissions) };

        // Full-range request maps to the region's base address.
        match shmem.get_mapping_info(0, 4096) {
            Ok((mapped_paddr, mapped_perms, is_shared)) => {
                assert_eq!(mapped_paddr, paddr);
                assert_eq!(mapped_perms, permissions);
                assert!(is_shared);
            }
            Err(e) => panic!("Mapping info failed: {}", e),
        }

        // Offset request shifts the returned physical address.
        match shmem.get_mapping_info(1024, 2048) {
            Ok((mapped_paddr, mapped_perms, is_shared)) => {
                assert_eq!(mapped_paddr, paddr + 1024);
                assert_eq!(mapped_perms, permissions);
                assert!(is_shared);
            }
            Err(e) => panic!("Mapping info with offset failed: {}", e),
        }

        // Requests past the end of the object must fail.
        assert!(shmem.get_mapping_info(0, 8192).is_err());
        assert!(shmem.get_mapping_info(4096, 1).is_err());
    }

    #[test_case]
    fn test_shared_memory_invalidation() {
        let paddr = 0x80000000;
        let size = 4096;
        let permissions = 0x3;

        let shmem = unsafe { SharedMemory::from_paddr(paddr, size, permissions) };

        assert!(shmem.is_valid());
        assert!(shmem.supports_mmap());

        shmem.invalidate();

        // After invalidation everything mapping-related must refuse.
        assert!(!shmem.is_valid());
        assert!(!shmem.supports_mmap());
        assert!(shmem.get_mapping_info(0, 4096).is_err());
    }

    #[test_case]
    fn test_shared_memory_mapping_tracking() {
        let paddr = 0x80000000;
        let size = 4096;
        let permissions = 0x3;

        let shmem = unsafe { SharedMemory::from_paddr(paddr, size, permissions) };

        assert_eq!(shmem.state.read().mapping_count, 0);

        shmem.on_mapped(0x10000000, paddr, 4096, 0);
        assert_eq!(shmem.state.read().mapping_count, 1);

        shmem.on_mapped(0x20000000, paddr, 4096, 0);
        assert_eq!(shmem.state.read().mapping_count, 2);

        shmem.on_unmapped(0x10000000, 4096);
        assert_eq!(shmem.state.read().mapping_count, 1);

        shmem.on_unmapped(0x20000000, 4096);
        assert_eq!(shmem.state.read().mapping_count, 0);
    }

    #[test_case]
    fn test_shared_memory_mmap_owner_name() {
        let paddr = 0x80000000;
        let size = 4096;
        let permissions = 0x3;

        let shmem = unsafe { SharedMemory::from_paddr(paddr, size, permissions) };

        let owner_name = shmem.mmap_owner_name();
        assert!(owner_name.contains("shmem"));
        assert!(owner_name.contains(&format!("{:#x}", paddr)));
    }
}