use alloc::vec;
use alloc::{boxed::Box, collections::VecDeque, vec::Vec};
use spin::{Mutex, RwLock};

use core::{mem, ptr};

use crate::defer;
use crate::device::{Device, DeviceType};
use crate::drivers::virtio::features::{
    VIRTIO_F_ANY_LAYOUT, VIRTIO_RING_F_EVENT_IDX, VIRTIO_RING_F_INDIRECT_DESC,
};
use crate::object::capability::{MemoryMappingOps, Selectable};
use crate::{
    device::block::{
        BlockDevice,
        request::{BlockIORequest, BlockIORequestType, BlockIOResult},
    },
    drivers::virtio::{
        device::VirtioDevice,
        queue::{DescriptorFlag, VirtQueue},
    },
    object::capability::ControlOps,
};

// Virtio block request types.
const VIRTIO_BLK_T_IN: u32 = 0;
const VIRTIO_BLK_T_OUT: u32 = 1;

// Status bytes written by the device into the last descriptor of a request.
const VIRTIO_BLK_S_OK: u8 = 0;
const VIRTIO_BLK_S_IOERR: u8 = 1;
const VIRTIO_BLK_S_UNSUPP: u8 = 2;

// virtio-blk feature bits.
const VIRTIO_BLK_F_RO: u32 = 5;
const VIRTIO_BLK_F_BLK_SIZE: u32 = 6;
const VIRTIO_BLK_F_SCSI: u32 = 7;
const VIRTIO_BLK_F_CONFIG_WCE: u32 = 11;
const VIRTIO_BLK_F_MQ: u32 = 12;

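/// Layout of the virtio-blk device configuration space. This driver reads
/// `capacity` (offset 0) and `blk_size` (offset 20) directly via `read_config`.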
#[repr(C)]
pub struct VirtioBlkConfig {
    pub capacity: u64,
    pub size_max: u32,
    pub seg_max: u32,
    pub geometry: VirtioBlkGeometry,
    pub blk_size: u32,
    pub topology: VirtioBlkTopology,
    pub writeback: u8,
}

#[repr(C)]
pub struct VirtioBlkGeometry {
    pub cylinders: u16,
    pub heads: u8,
    pub sectors: u8,
}

#[repr(C)]
pub struct VirtioBlkTopology {
    pub physical_block_exp: u8,
    pub alignment_offset: u8,
    pub min_io_size: u16,
    pub opt_io_size: u32,
}

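/// Request header placed first in every descriptor chain: the request type
/// (`VIRTIO_BLK_T_IN`/`VIRTIO_BLK_T_OUT`), a reserved word, and the start sector.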
#[repr(C)]
pub struct VirtioBlkReqHeader {
    pub type_: u32,
    pub reserved: u32,
    pub sector: u64,
}

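/// VirtIO block device driver backed by a single 32-entry virtqueue.
/// Requests are queued via `enqueue_request` and completed synchronously by
/// polling in `process_requests`.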
pub struct VirtioBlockDevice {
    base_addr: usize,
    virtqueues: Mutex<[VirtQueue<'static>; 1]>,
    capacity: RwLock<u64>,
    sector_size: RwLock<u32>,
    features: RwLock<u32>,
    read_only: RwLock<bool>,
    request_queue: Mutex<VecDeque<Box<BlockIORequest>>>,
}

impl VirtioBlockDevice {
    pub fn new(base_addr: usize) -> Self {
        let mut device = Self {
            base_addr,
            virtqueues: Mutex::new([VirtQueue::new(32)]),
            capacity: RwLock::new(0),
            sector_size: RwLock::new(512),
            features: RwLock::new(0),
            read_only: RwLock::new(false),
            request_queue: Mutex::new(VecDeque::new()),
        };

        let negotiated_features = match device.init() {
            Ok(features) => features,
            Err(e) => panic!("Failed to initialize Virtio Block Device: {}", e),
        };

        // Capacity lives at offset 0 of the device configuration space.
        *device.capacity.write() = device.read_config::<u64>(0);
        *device.features.write() = negotiated_features;

        #[cfg(test)]
        {
            use crate::early_println;
            early_println!(
                "[virtio-blk] Final negotiated features (after init): 0x{:x}",
                negotiated_features
            );
        }

        // If the device reports a block size (config offset 20), use it;
        // otherwise keep the 512-byte default.
        if negotiated_features & (1 << VIRTIO_BLK_F_BLK_SIZE) != 0 {
            *device.sector_size.write() = device.read_config::<u32>(20);
        }

        *device.read_only.write() = negotiated_features & (1 << VIRTIO_BLK_F_RO) != 0;

        device
    }

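    /// Processes a single request synchronously: builds a three-descriptor
    /// chain (header, data, status), submits it to the virtqueue, then
    /// busy-waits for the device and translates the status byte into a result.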
    fn process_request(&self, req: &mut BlockIORequest) -> Result<(), &'static str> {
        crate::profile_scope!("virtio_blk::process_request");
        let header = Box::new(VirtioBlkReqHeader {
            type_: match req.request_type {
                BlockIORequestType::Read => VIRTIO_BLK_T_IN,
                BlockIORequestType::Write => VIRTIO_BLK_T_OUT,
            },
            reserved: 0,
            sector: req.sector as u64,
        });
        let data = vec![0u8; req.buffer.len()].into_boxed_slice();
        let status = Box::new(0u8);

        // Hand ownership to raw pointers so the buffers stay alive while the
        // device owns the descriptors; the defer below reclaims them on exit.
        let header_ptr = Box::into_raw(header);
        let data_ptr = Box::into_raw(data) as *mut [u8];
        let status_ptr = Box::into_raw(status);

        defer! {
            unsafe {
                drop(Box::from_raw(header_ptr));
                drop(Box::from_raw(data_ptr));
                drop(Box::from_raw(status_ptr));
            }
        }

        // For writes, copy the caller's buffer into the staging buffer.
        unsafe {
            if let BlockIORequestType::Write = req.request_type {
                ptr::copy_nonoverlapping(
                    req.buffer.as_ptr(),
                    data_ptr as *mut u8,
                    req.buffer.len(),
                );
            }
        }

        let mut virtqueues = self.virtqueues.lock();

        // Allocate the three descriptors, rolling back on partial failure.
        let header_desc = virtqueues[0]
            .alloc_desc()
            .ok_or("Failed to allocate descriptor")?;
        let data_desc = match virtqueues[0].alloc_desc() {
            Some(desc) => desc,
            None => {
                virtqueues[0].free_desc(header_desc);
                return Err("Failed to allocate descriptor");
            }
        };
        let status_desc = match virtqueues[0].alloc_desc() {
            Some(desc) => desc,
            None => {
                virtqueues[0].free_desc(data_desc);
                virtqueues[0].free_desc(header_desc);
                return Err("Failed to allocate descriptor");
            }
        };

        // Header descriptor: device-readable request header, chained to the data descriptor.
        let header_phys = crate::vm::get_kernel_vm_manager()
            .translate_vaddr(header_ptr as usize)
            .ok_or("Failed to translate header vaddr")?;
        virtqueues[0].desc[header_desc].addr = header_phys as u64;
        virtqueues[0].desc[header_desc].len = mem::size_of::<VirtioBlkReqHeader>() as u32;
        virtqueues[0].desc[header_desc].flags = DescriptorFlag::Next as u16;
        virtqueues[0].desc[header_desc].next = data_desc as u16;

        // Data descriptor: device-writable for reads, device-readable for writes.
        let data_phys = crate::vm::get_kernel_vm_manager()
            .translate_vaddr(data_ptr as *mut u8 as usize)
            .ok_or("Failed to translate data vaddr")?;
        virtqueues[0].desc[data_desc].addr = data_phys as u64;
        virtqueues[0].desc[data_desc].len = req.buffer.len() as u32;

        match req.request_type {
            BlockIORequestType::Read => {
                DescriptorFlag::Next.set(&mut virtqueues[0].desc[data_desc].flags);
                DescriptorFlag::Write.set(&mut virtqueues[0].desc[data_desc].flags);
            }
            BlockIORequestType::Write => {
                DescriptorFlag::Next.set(&mut virtqueues[0].desc[data_desc].flags);
            }
        }

        virtqueues[0].desc[data_desc].next = status_desc as u16;

        // Status descriptor: a single device-writable byte holding the completion status.
        let status_phys = crate::vm::get_kernel_vm_manager()
            .translate_vaddr(status_ptr as usize)
            .ok_or("Failed to translate status vaddr")?;
        virtqueues[0].desc[status_desc].addr = status_phys as u64;
        virtqueues[0].desc[status_desc].len = 1;
        virtqueues[0].desc[status_desc].flags |= DescriptorFlag::Write as u16;

        if let Err(e) = virtqueues[0].push(header_desc) {
            virtqueues[0].free_desc(status_desc);
            virtqueues[0].free_desc(data_desc);
            virtqueues[0].free_desc(header_desc);
            return Err(e);
        }

        self.notify(0);

        // Busy-wait until the device has consumed the request.
        while virtqueues[0].is_busy() {}

        let desc_idx = match virtqueues[0].pop() {
            Some(idx) => idx,
            None => {
                virtqueues[0].free_desc(status_desc);
                virtqueues[0].free_desc(data_desc);
                virtqueues[0].free_desc(header_desc);
                return Err("No response from device");
            }
        };

        if desc_idx != header_desc {
            virtqueues[0].free_desc(status_desc);
            virtqueues[0].free_desc(data_desc);
            virtqueues[0].free_desc(header_desc);
            return Err("Invalid descriptor index");
        }

        // Read the status byte written by the device and translate it into a result.
        let status_val = unsafe { core::ptr::read_volatile(status_ptr) };
        let result = match status_val {
            VIRTIO_BLK_S_OK => {
                if let BlockIORequestType::Read = req.request_type {
                    unsafe {
                        req.buffer.clear();
                        req.buffer.extend_from_slice(core::slice::from_raw_parts(
                            data_ptr as *const u8,
                            virtqueues[0].desc[data_desc].len as usize,
                        ));
                    }
                }
                Ok(())
            }
            VIRTIO_BLK_S_IOERR => Err("I/O error"),
            VIRTIO_BLK_S_UNSUPP => Err("Unsupported request"),
            _ => Err("Unknown error"),
        };

        virtqueues[0].free_desc(status_desc);
        virtqueues[0].free_desc(data_desc);
        virtqueues[0].free_desc(header_desc);

        result
    }

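    /// Processes a batch of requests: stages one descriptor chain per request,
    /// notifies the device once, then polls until every submitted chain has
    /// completed. Oversized batches are split into `MAX_BATCH_SIZE` chunks.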
    fn process_requests_batch(
        &self,
        requests: &mut [Box<BlockIORequest>],
    ) -> Vec<Result<(), &'static str>> {
        crate::profile_scope!("virtio_blk::process_requests_batch");

        if requests.is_empty() {
            return Vec::new();
        }

        // Keep batches small so a single batch cannot exhaust the 32-entry
        // virtqueue (each request consumes three descriptors).
        const MAX_BATCH_SIZE: usize = 10;

        if requests.len() > MAX_BATCH_SIZE {
            crate::early_println!(
                "[virtio_blk] WARNING: Batch size {} exceeds safe limit {}, processing in chunks",
                requests.len(),
                MAX_BATCH_SIZE
            );

            let mut all_results = Vec::with_capacity(requests.len());
            let chunks = requests.chunks_mut(MAX_BATCH_SIZE);

            for chunk in chunks {
                let mut chunk_results = self.process_requests_batch(chunk);
                all_results.append(&mut chunk_results);
            }

            return all_results;
        }

        let read_count = requests
            .iter()
            .filter(|r| matches!(r.request_type, BlockIORequestType::Read))
            .count();
        let write_count = requests
            .iter()
            .filter(|r| matches!(r.request_type, BlockIORequestType::Write))
            .count();

        #[cfg(test)]
        {
            // Track batch-size statistics during tests and print a summary every 100 calls.
            static BATCH_SIZES: spin::Mutex<alloc::vec::Vec<usize>> =
                spin::Mutex::new(alloc::vec::Vec::new());
            static CALL_COUNT: spin::Mutex<usize> = spin::Mutex::new(0);
            let mut sizes = BATCH_SIZES.lock();
            let mut count = CALL_COUNT.lock();
            sizes.push(requests.len());
            *count += 1;

            if *count % 100 == 0 {
                let total_requests: usize = sizes.iter().sum();
                let avg_batch_size = total_requests as f64 / sizes.len() as f64;
                let single_requests = sizes.iter().filter(|&&size| size == 1).count();
                crate::early_println!(
                    "[virtio_blk] Batch stats: {} calls, avg_batch={:.2}, single_req={}/{} ({:.1}%)",
                    sizes.len(),
                    avg_batch_size,
                    single_requests,
                    sizes.len(),
                    (single_requests as f64 / sizes.len() as f64) * 100.0
                );
            }
        }

        let batch_size = requests.len();
        let mut results = vec![Err("Not processed"); batch_size];
        let mut request_data = Vec::new();

        let mut virtqueues = self.virtqueues.lock();

        // Stage each request: allocate header/data/status buffers and build its descriptor chain.
        for (idx, req) in requests.iter_mut().enumerate() {
            let header = Box::new(VirtioBlkReqHeader {
                type_: match req.request_type {
                    BlockIORequestType::Read => VIRTIO_BLK_T_IN,
                    BlockIORequestType::Write => VIRTIO_BLK_T_OUT,
                },
                reserved: 0,
                sector: req.sector as u64,
            });
            let data = vec![0u8; req.buffer.len()].into_boxed_slice();
            let status = Box::new(0u8);

            let header_ptr = Box::into_raw(header);
            let data_ptr = Box::into_raw(data) as *mut [u8];
            let status_ptr = Box::into_raw(status);

            if let BlockIORequestType::Write = req.request_type {
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        req.buffer.as_ptr(),
                        data_ptr as *mut u8,
                        req.buffer.len(),
                    );
                }
            }

            if let (Some(header_desc), Some(data_desc), Some(status_desc)) = (
                virtqueues[0].alloc_desc(),
                virtqueues[0].alloc_desc(),
                virtqueues[0].alloc_desc(),
            ) {
                let header_phys =
                    match crate::vm::get_kernel_vm_manager().translate_vaddr(header_ptr as usize) {
                        Some(phys) => phys,
                        None => {
                            virtqueues[0].free_desc(status_desc);
                            virtqueues[0].free_desc(data_desc);
                            virtqueues[0].free_desc(header_desc);
                            // Nothing was submitted, so reclaim the staging buffers.
                            unsafe {
                                drop(Box::from_raw(header_ptr));
                                drop(Box::from_raw(data_ptr));
                                drop(Box::from_raw(status_ptr));
                            }
                            results[idx] = Err("Failed to translate header vaddr");
                            continue;
                        }
                    };
                virtqueues[0].desc[header_desc].addr = header_phys as u64;
                virtqueues[0].desc[header_desc].len = mem::size_of::<VirtioBlkReqHeader>() as u32;
                virtqueues[0].desc[header_desc].flags = DescriptorFlag::Next as u16;
                virtqueues[0].desc[header_desc].next = data_desc as u16;

                let data_phys = match crate::vm::get_kernel_vm_manager()
                    .translate_vaddr(data_ptr as *mut u8 as usize)
                {
                    Some(phys) => phys,
                    None => {
                        virtqueues[0].free_desc(status_desc);
                        virtqueues[0].free_desc(data_desc);
                        virtqueues[0].free_desc(header_desc);
                        unsafe {
                            drop(Box::from_raw(header_ptr));
                            drop(Box::from_raw(data_ptr));
                            drop(Box::from_raw(status_ptr));
                        }
                        results[idx] = Err("Failed to translate data vaddr");
                        continue;
                    }
                };
                virtqueues[0].desc[data_desc].addr = data_phys as u64;
                virtqueues[0].desc[data_desc].len = req.buffer.len() as u32;

                match req.request_type {
                    BlockIORequestType::Read => {
                        DescriptorFlag::Next.set(&mut virtqueues[0].desc[data_desc].flags);
                        DescriptorFlag::Write.set(&mut virtqueues[0].desc[data_desc].flags);
                    }
                    BlockIORequestType::Write => {
                        DescriptorFlag::Next.set(&mut virtqueues[0].desc[data_desc].flags);
                    }
                }

                virtqueues[0].desc[data_desc].next = status_desc as u16;

                let status_phys =
                    match crate::vm::get_kernel_vm_manager().translate_vaddr(status_ptr as usize) {
                        Some(phys) => phys,
                        None => {
                            virtqueues[0].free_desc(status_desc);
                            virtqueues[0].free_desc(data_desc);
                            virtqueues[0].free_desc(header_desc);
                            unsafe {
                                drop(Box::from_raw(header_ptr));
                                drop(Box::from_raw(data_ptr));
                                drop(Box::from_raw(status_ptr));
                            }
                            results[idx] = Err("Failed to translate status vaddr");
                            continue;
                        }
                    };
                virtqueues[0].desc[status_desc].addr = status_phys as u64;
                virtqueues[0].desc[status_desc].len = 1;
                virtqueues[0].desc[status_desc].flags |= DescriptorFlag::Write as u16;

                if virtqueues[0].push(header_desc).is_ok() {
                    request_data.push((
                        idx,
                        header_desc,
                        data_desc,
                        status_desc,
                        header_ptr,
                        data_ptr,
                        status_ptr,
                    ));
                } else {
                    virtqueues[0].free_desc(status_desc);
                    virtqueues[0].free_desc(data_desc);
                    virtqueues[0].free_desc(header_desc);
                    unsafe {
                        drop(Box::from_raw(header_ptr));
                        drop(Box::from_raw(data_ptr));
                        drop(Box::from_raw(status_ptr));
                    }
                    results[idx] = Err("Failed to submit request");
                }
            } else {
                crate::early_println!(
                    "[virtio_blk] ERROR: Failed to allocate descriptors for request {} (batch size: {})",
                    idx,
                    batch_size
                );

                unsafe {
                    drop(Box::from_raw(header_ptr));
                    drop(Box::from_raw(data_ptr));
                    drop(Box::from_raw(status_ptr));
                }
                results[idx] = Err("Virtqueue descriptor allocation failed - queue may be full");
            }
        }

        // Notify the device once for all submitted requests.
        if !request_data.is_empty() {
            self.notify(0);
        }

        // Map header descriptor index -> (request index, data/status descriptors, raw buffers)
        // so completions can be matched back to their originating request.
        use alloc::collections::BTreeMap;
        let mut pending_requests: BTreeMap<
            usize,
            (
                usize,
                usize,
                usize,
                *mut VirtioBlkReqHeader,
                *mut [u8],
                *mut u8,
            ),
        > = BTreeMap::new();

        for (req_idx, header_desc, data_desc, status_desc, header_ptr, data_ptr, status_ptr) in
            request_data
        {
            pending_requests.insert(
                header_desc,
                (
                    req_idx,
                    data_desc,
                    status_desc,
                    header_ptr,
                    data_ptr,
                    status_ptr,
                ),
            );
        }

        // Poll for completions until every submitted request has been observed.
        while !pending_requests.is_empty() {
            let status = self.read32_register(crate::drivers::virtio::device::Register::Status);
            while virtqueues[0].is_busy() {
                let status = self.read32_register(crate::drivers::virtio::device::Register::Status);
                if crate::drivers::virtio::device::DeviceStatus::DeviceNeedReset.is_set(status) {
                    crate::early_println!(
                        "[virtio-blk] ERROR: Device entered NEEDS_RESET state during poll. Aborting. Status=0x{:x}",
                        status
                    );
                    break;
                }
                if crate::drivers::virtio::device::DeviceStatus::Failed.is_set(status) {
                    crate::early_println!(
                        "[virtio-blk] ERROR: Device entered FAILED state during poll. Aborting. Status=0x{:x}",
                        status
                    );
                    break;
                }
            }

            // Drain completed chains, copy read data back, and release resources.
            while let Some(desc_idx) = virtqueues[0].pop() {
                if let Some((req_idx, data_desc, status_desc, header_ptr, data_ptr, status_ptr)) =
                    pending_requests.remove(&desc_idx)
                {
                    let status_val = unsafe { core::ptr::read_volatile(status_ptr) };
                    results[req_idx] = match status_val {
                        VIRTIO_BLK_S_OK => {
                            if let BlockIORequestType::Read = requests[req_idx].request_type {
                                unsafe {
                                    requests[req_idx].buffer.clear();
                                    requests[req_idx].buffer.extend_from_slice(
                                        core::slice::from_raw_parts(
                                            data_ptr as *const u8,
                                            virtqueues[0].desc[data_desc].len as usize,
                                        ),
                                    );
                                }
                            }
                            Ok(())
                        }
                        VIRTIO_BLK_S_IOERR => Err("I/O error"),
                        VIRTIO_BLK_S_UNSUPP => Err("Unsupported request"),
                        _ => Err("Unknown error"),
                    };

                    virtqueues[0].free_desc(status_desc);
                    virtqueues[0].free_desc(data_desc);
                    virtqueues[0].free_desc(desc_idx);
                    unsafe {
                        drop(Box::from_raw(header_ptr));
                        drop(Box::from_raw(data_ptr));
                        drop(Box::from_raw(status_ptr));
                    }
                } else {
                    crate::early_println!(
                        "[virtio-blk] Warning: Unexpected descriptor completion: {}",
                        desc_idx
                    );
                }
            }
        }

        results
    }
}

impl MemoryMappingOps for VirtioBlockDevice {
    fn get_mapping_info(
        &self,
        _offset: usize,
        _length: usize,
    ) -> Result<(usize, usize, bool), &'static str> {
        Err("Memory mapping not supported by VirtIO block device")
    }

    fn on_mapped(&self, _vaddr: usize, _paddr: usize, _length: usize, _offset: usize) {}

    fn on_unmapped(&self, _vaddr: usize, _length: usize) {}

    fn supports_mmap(&self) -> bool {
        false
    }
}

impl Device for VirtioBlockDevice {
    fn device_type(&self) -> DeviceType {
        DeviceType::Block
    }

    fn name(&self) -> &'static str {
        "virtio-blk"
    }

    fn as_any(&self) -> &dyn core::any::Any {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn core::any::Any {
        self
    }

    fn as_block_device(&self) -> Option<&dyn crate::device::block::BlockDevice> {
        Some(self)
    }

    fn into_block_device(
        self: alloc::sync::Arc<Self>,
    ) -> Option<alloc::sync::Arc<dyn crate::device::block::BlockDevice>> {
        Some(self)
    }
}

impl Selectable for VirtioBlockDevice {
    fn wait_until_ready(
        &self,
        _interest: crate::object::capability::selectable::ReadyInterest,
        _trapframe: &mut crate::arch::Trapframe,
        _timeout_ticks: Option<u64>,
    ) -> crate::object::capability::selectable::SelectWaitOutcome {
        crate::object::capability::selectable::SelectWaitOutcome::Ready
    }
}

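/// Transport-level glue for the generic virtio code: exposes the MMIO base
/// address, the single virtqueue, and the feature mask (read-only, SCSI,
/// config-WCE, multi-queue, and ANY_LAYOUT are never negotiated).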
impl VirtioDevice for VirtioBlockDevice {
    fn get_base_addr(&self) -> usize {
        self.base_addr
    }

    fn get_virtqueue_count(&self) -> usize {
        1
    }

    fn get_virtqueue_size(&self, queue_idx: usize) -> usize {
        if queue_idx >= 1 {
            panic!("Invalid queue index for VirtIO block device: {}", queue_idx);
        }

        let virtqueues = self.virtqueues.lock();
        virtqueues[queue_idx].get_queue_size()
    }

    fn get_supported_features(&self, device_features: u32) -> u32 {
        // Mask off the features this driver does not implement.
        let mut result = device_features
            & !(1 << VIRTIO_BLK_F_RO
                | 1 << VIRTIO_BLK_F_SCSI
                | 1 << VIRTIO_BLK_F_CONFIG_WCE
                | 1 << VIRTIO_BLK_F_MQ
                | 1 << VIRTIO_F_ANY_LAYOUT);

        if !self.allow_ring_features() {
            result &= !(1 << VIRTIO_RING_F_EVENT_IDX | 1 << VIRTIO_RING_F_INDIRECT_DESC);
        }

        result
    }

    fn get_queue_desc_addr(&self, queue_idx: usize) -> Option<u64> {
        if queue_idx >= 1 {
            return None;
        }

        let virtqueues = self.virtqueues.lock();
        Some(virtqueues[queue_idx].get_raw_ptr() as u64)
    }

    fn get_queue_driver_addr(&self, queue_idx: usize) -> Option<u64> {
        if queue_idx >= 1 {
            return None;
        }

        let virtqueues = self.virtqueues.lock();
        Some(virtqueues[queue_idx].avail.flags as *const _ as u64)
    }

    fn get_queue_device_addr(&self, queue_idx: usize) -> Option<u64> {
        if queue_idx >= 1 {
            return None;
        }

        let virtqueues = self.virtqueues.lock();
        Some(virtqueues[queue_idx].used.flags as *const _ as u64)
    }
}

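/// BlockDevice implementation: disk size is derived from the device-reported
/// capacity and sector size; queued requests are drained and processed as a batch.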
impl BlockDevice for VirtioBlockDevice {
    fn get_disk_name(&self) -> &'static str {
        "virtio-blk"
    }

    fn get_disk_size(&self) -> usize {
        let capacity = *self.capacity.read();
        let sector_size = *self.sector_size.read();
        (capacity * sector_size as u64) as usize
    }

    fn enqueue_request(&self, request: Box<BlockIORequest>) {
        self.request_queue.lock().push_back(request);
    }

    fn process_requests(&self) -> Vec<BlockIOResult> {
        crate::profile_scope!("virtio_blk::process_requests");
        let mut queue = self.request_queue.lock();

        // Drain the pending queue, then release the lock before touching the virtqueue.
        let mut requests = Vec::new();
        while let Some(request) = queue.pop_front() {
            requests.push(request);
        }
        drop(queue);

        if requests.is_empty() {
            return Vec::new();
        }

        let batch_results = self.process_requests_batch(&mut requests);

        requests
            .into_iter()
            .zip(batch_results.into_iter())
            .map(|(request, result)| BlockIOResult { request, result })
            .collect()
    }
}

impl ControlOps for VirtioBlockDevice {
    fn control(&self, _command: u32, _arg: usize) -> Result<i32, &'static str> {
        Err("Control operations not supported")
    }
}

#[cfg(all(test, target_arch = "riscv64"))]
pub mod tests {
    use super::*;
    use alloc::vec;

    #[test_case]
    fn test_virtio_block_device_init() {
        // Base address of the first virtio-mmio device on QEMU's RISC-V `virt` machine.
        let base_addr = 0x10001000;
        let device = VirtioBlockDevice::new(base_addr);

        assert_eq!(device.get_disk_name(), "virtio-blk");
        assert_eq!(
            device.get_disk_size(),
            (*device.capacity.read() * *device.sector_size.read() as u64) as usize
        );
    }

    #[test_case]
    fn test_virtio_block_device() {
        let base_addr = 0x10001000;
        let device = VirtioBlockDevice::new(base_addr);

        assert_eq!(device.get_disk_name(), "virtio-blk");
        assert_eq!(
            device.get_disk_size(),
            (*device.capacity.read() * *device.sector_size.read() as u64) as usize
        );

        // Read the first sector and check that the request completes successfully.
        let sector_size = *device.sector_size.read();
        let request = BlockIORequest {
            request_type: BlockIORequestType::Read,
            sector: 0,
            sector_count: 1,
            head: 0,
            cylinder: 0,
            buffer: vec![0; sector_size as usize],
        };
        device.enqueue_request(Box::new(request));

        let results = device.process_requests();
        assert_eq!(results.len(), 1);

        let result = &results[0];
        assert!(result.result.is_ok());

        let buffer = &result.request.buffer;
        assert_eq!(buffer.len(), sector_size as usize);

        // Sector 0 is expected to end with the 0x55AA MBR boot signature.
        if buffer.len() >= 512 {
            assert_eq!(buffer[510], 0x55);
            assert_eq!(buffer[511], 0xAA);
        }
    }
}