// kernel/drivers/virtio/queue.rs

1//! Virtio Queue module.
2//!
3//! This module provides the implementation of the Virtio Queue.
4//! It includes the data structures and methods to manage the Virtio Queue.
5//!
6
7use alloc::{alloc::alloc_zeroed, vec::Vec};
8use core::{
9    alloc::Layout,
10    mem::{self},
11    sync::atomic::compiler_fence,
12};
13
14// struct RawVirtQueue {
15//     pub desc: [Descriptor; 0], /* Flexible array member */
16//     pub avail: RawAvailableRing,
17//     pub padding: [u8; 0], /* Padding to align the used ring */
18//     pub used: RawUsedRing,
19// }
20
/// VirtQueue structure
///
/// This structure represents the wrapper of the virtqueue.
/// It contains the descriptor table, available ring, and used ring, all
/// carved out of one contiguous allocation owned by `ptr`/`layout`.
///
/// # Fields
///
/// * `desc`: A mutable slice of descriptors (the descriptor table).
/// * `avail`: The available ring (driver -> device).
/// * `used`: The used ring (device -> driver).
/// * `free_descriptors`: Indices of descriptors not currently in use.
/// * `last_used_idx`: The used-ring index up to which the driver has consumed entries.
/// * `ptr`: A raw pointer to the start of the virtqueue memory.
/// * `layout`: The layout of the virtqueue memory (used for deallocation in `Drop`).
pub struct VirtQueue<'a> {
    pub desc: &'a mut [Descriptor],
    pub avail: AvailableRing<'a>,
    pub used: UsedRing<'a>,
    pub free_descriptors: Vec<usize>,
    pub last_used_idx: u16,
    ptr: *mut u8,
    layout: Layout,
}
45
// SAFETY: the queue exclusively owns its backing allocation (`ptr` is never
// shared with other VirtQueue instances), so moving it across threads is sound.
// NOTE(review): `Sync` additionally assumes callers externally serialize all
// device interaction through `&mut self` / higher-level locking — confirm at
// the call sites, since the device writes this memory concurrently.
unsafe impl<'a> Send for VirtQueue<'a> {}
unsafe impl<'a> Sync for VirtQueue<'a> {}
48
49impl<'a> VirtQueue<'a> {
    /// Allocate and construct a new virtqueue with `queue_size` descriptors.
    ///
    /// A single zeroed allocation holds, in order: the descriptor table, the
    /// available ring, padding to a 4-byte boundary, and the used ring
    /// (matching the layout sketched in the `RawVirtQueue` comment above).
    ///
    /// # Arguments
    ///
    /// * `queue_size` - The number of descriptors in the queue.
    ///
    /// # Panics
    ///
    /// Panics if the backing memory allocation fails.
    pub fn new(queue_size: usize) -> Self {
        /* Calculate the size of each ring */
        let desc_size = queue_size * mem::size_of::<Descriptor>();
        let avail_size = mem::size_of::<RawAvailableRing>() + queue_size * mem::size_of::<u16>();
        let used_size =
            mem::size_of::<RawUsedRing>() + queue_size * mem::size_of::<RawUsedRingEntry>();

        /* Round the sum of desc_size and avail_size UP to the next multiple of 4 */
        let align_size = (desc_size + avail_size + 3) & !3;
        /* Calculate the size of the padding placed before the used ring */
        let padding_size = align_size - (desc_size + avail_size);

        /* Make layout for the virtqueue */
        /* The size is the sum of the sizes of the descriptor table, available ring, and used ring */
        let layout = Layout::from_size_align(
            desc_size + avail_size + padding_size + used_size,
            mem::align_of::<Descriptor>(),
        )
        .unwrap();

        /* Allocate memory for the virtqueue (zeroed, so all rings start cleared) */
        let ptr = unsafe { alloc_zeroed(layout) };
        if ptr.is_null() {
            panic!("Memory allocation failed");
        }

        /* Create the descriptor table at the start of the allocation */
        let desc_ptr = ptr as *mut Descriptor;
        let desc = unsafe { core::slice::from_raw_parts_mut(desc_ptr, queue_size) };

        /* Create the available ring, which directly follows the descriptor table */
        let avail_ptr = unsafe { desc_ptr.add(queue_size) as *mut RawAvailableRing };
        let avail = unsafe { AvailableRing::new(queue_size, avail_ptr) };

        /* Create the used ring: skip the avail header, its `queue_size` u16
         * entries (the trailing `used_event` lives inside RawAvailableRing's
         * size), and the alignment padding computed above. */
        let used_ptr = unsafe {
            (avail_ptr as *mut u8).add(
                mem::size_of::<RawAvailableRing>()
                    + queue_size * mem::size_of::<u16>()
                    + padding_size,
            ) as *mut RawUsedRing
        };
        let used = unsafe { UsedRing::new(queue_size, used_ptr) };

        /* Create the virtqueue; initially every descriptor is free */
        let mut free_descriptors = Vec::new();
        for i in 0..queue_size {
            free_descriptors.push(i);
        }
        let last_used_idx = 0;
        Self {
            desc,
            avail,
            used,
            free_descriptors,
            last_used_idx,
            ptr,
            layout,
        }
    }
110
111    /// Initialize the virtqueue
112    ///
113    /// This function initializes the descriptor table, available ring, and used ring.
114    /// It sets the next pointer of each descriptor to point to the next descriptor in the table.
115    ///
116    pub fn init(&mut self) {
117        // Initialize the descriptor table
118        for i in 0..self.desc.len() {
119            self.desc[i].addr = 0;
120            self.desc[i].len = 0;
121            self.desc[i].flags = 0;
122            self.desc[i].next = (i as u16 + 1) % self.desc.len() as u16;
123        }
124
125        *(self.avail.flags) = 0;
126        *(self.avail.idx) = 0;
127        *(self.avail.used_event) = 0;
128        *(self.used.flags) = 0;
129        *(self.used.idx) = 0;
130        *(self.used.avail_event) = 0;
131    }
132
133    /// Get the raw pointer to the virtqueue
134    ///
135    /// This function returns a raw pointer to the start of the virtqueue memory.
136    /// It can be used to access the memory directly.
137    ///
138    /// # Returns
139    ///
140    /// *const u8: A raw pointer to the start of the virtqueue memory.
141    pub fn get_raw_ptr(&self) -> *const u8 {
142        self.desc.as_ptr() as *const u8
143    }
144
145    /// Get the size of the raw virtqueue
146    ///
147    /// This function returns the size of the virtqueue in bytes.
148    /// It is calculated as the sum of the sizes of the descriptor table, available ring, and used ring.
149    ///
150    /// # Returns
151    ///
152    /// usize: The size of the virtqueue in bytes.
153    pub fn get_raw_size(&self) -> usize {
154        let desc_size = self.desc.len() * mem::size_of::<Descriptor>();
155        let avail_size =
156            mem::size_of::<RawAvailableRing>() + self.desc.len() * mem::size_of::<u16>();
157        let used_size =
158            mem::size_of::<RawUsedRing>() + self.desc.len() * mem::size_of::<RawUsedRingEntry>();
159        let align_size = (desc_size + avail_size + 3) & !3;
160        let padding_size = align_size - (desc_size + avail_size);
161        desc_size + avail_size + used_size + padding_size
162    }
163
    /// Get the queue size, i.e. the number of descriptors in the descriptor table.
    pub fn get_queue_size(&self) -> usize {
        self.desc.len()
    }
167
168    /// Allocate a descriptor
169    ///
170    /// This function allocates a descriptor from the free list.
171    ///
172    /// # Returns
173    ///
174    /// Option<usize>: The index of the allocated descriptor, or None if no descriptors are available.
175    ///
176    pub fn alloc_desc(&mut self) -> Option<usize> {
177        let desc = self.free_descriptors.pop();
178        if let Some(desc_idx) = desc {
179            self.desc[desc_idx].next = 0;
180            self.desc[desc_idx].addr = 0;
181            self.desc[desc_idx].len = 0;
182            self.desc[desc_idx].flags = 0;
183            Some(desc_idx)
184        } else {
185            None
186        }
187    }
188
189    /// Free a descriptor
190    ///
191    /// This function frees a descriptor and adds it back to the free list.
192    ///
193    /// # Arguments
194    ///
195    /// * `desc_idx` - The index of the descriptor to free.
196    ///
197    pub fn free_desc(&mut self, desc_idx: usize) {
198        if desc_idx < self.desc.len() {
199            self.desc[desc_idx].next = 0;
200            self.free_descriptors.push(desc_idx);
201        } else {
202            panic!("Invalid descriptor index");
203        }
204    }
205
206    /// Allocate a chain of descriptors
207    ///
208    /// This function allocates a chain of descriptors of the specified length.
209    ///
210    /// # Arguments
211    ///
212    /// * `length` - The length of the chain to allocate.
213    ///
214    /// # Returns
215    ///
216    /// Option<usize>: The index of the first descriptor in the chain, or None if no descriptors are available.
217    ///
218    pub fn alloc_desc_chain(&mut self, length: usize) -> Option<usize> {
219        let desc_idx = self.alloc_desc();
220        if desc_idx.is_none() {
221            return None;
222        }
223        let desc_idx = desc_idx.unwrap();
224        let mut prev_idx = desc_idx;
225
226        for _ in 1..length {
227            let next_idx = self.alloc_desc();
228            if next_idx.is_none() {
229                self.free_desc_chain(desc_idx);
230                return None;
231            }
232            let next_idx = next_idx.unwrap();
233            self.desc[prev_idx].next = next_idx as u16;
234            self.desc[prev_idx].flags = DescriptorFlag::Next as u16;
235            prev_idx = next_idx;
236        }
237
238        self.desc[prev_idx].next = 0;
239        Some(desc_idx)
240    }
241
242    /// Free a chain of descriptors
243    ///
244    /// This function frees a chain of descriptors starting from the given index.
245    ///
246    /// # Arguments
247    ///
248    /// * `desc_idx` - The index of the first descriptor in the chain.
249    ///
250    pub fn free_desc_chain(&mut self, desc_idx: usize) {
251        let mut idx = desc_idx;
252        loop {
253            if idx >= self.desc.len() {
254                break;
255            }
256            let next = self.desc[idx].next;
257            let flags = self.desc[idx].flags;
258
259            compiler_fence(core::sync::atomic::Ordering::SeqCst);
260
261            self.free_desc(idx);
262
263            if !DescriptorFlag::Next.is_set(flags) {
264                break;
265            }
266            idx = next as usize;
267        }
268    }
269
    /// Check if the virtqueue is busy
    ///
    /// The queue is considered busy while the device has not published any
    /// used entries beyond those the driver has already consumed — i.e.
    /// while `last_used_idx` still equals the device-written `used.idx`.
    ///
    /// # Returns
    ///
    /// bool: True if the virtqueue is busy, false otherwise.
    pub fn is_busy(&self) -> bool {
        // A memory fence is needed to ensure that we see the latest value of `used.idx`
        // written by the device.
        core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
        // Volatile read to ensure we get the latest value
        let used_idx = unsafe { core::ptr::read_volatile(self.used.idx) };
        self.last_used_idx == used_idx
    }
285
    /// Push a descriptor index to the available ring
    ///
    /// This function publishes a descriptor index to the available ring so
    /// the device can pick it up. The write order matters: the ring entry is
    /// made visible before `avail.idx` is bumped, each step separated by an
    /// I/O barrier.
    ///
    /// # Arguments
    ///
    /// * `desc_idx` - The index of the descriptor to push.
    /// If you want to push a chain of descriptors, you should pass the first descriptor index.
    ///
    /// # Returns
    ///
    /// Result<(), &'static str>: Ok if the push was successful, or an error message if it failed.
    pub fn push(&mut self, desc_idx: usize) -> Result<(), &'static str> {
        if desc_idx >= self.desc.len() {
            return Err("Invalid descriptor index");
        }

        // Ensure all descriptor writes are visible before publishing the descriptor index.
        // Using the architecture-provided I/O barrier is conservative but safe for virtio.
        crate::arch::io_mb();

        // `avail.idx` is only ever written by the driver, so a plain read of it
        // here (to pick the ring slot) is fine.
        let ring_ptr =
            &mut self.avail.ring[(*self.avail.idx as usize) % self.avail.size] as *mut u16;

        unsafe {
            core::ptr::write_volatile(ring_ptr, desc_idx as u16);
        }

        // Ensure the ring entry is visible before updating idx.
        crate::arch::io_mb();

        // Publish the new index with a volatile write; it wraps naturally at u16::MAX.
        let new_idx = self.avail.idx.wrapping_add(1);
        unsafe {
            core::ptr::write_volatile(self.avail.idx, new_idx);
        }

        // Ensure idx is visible before any subsequent device notification.
        crate::arch::io_mb();

        Ok(())
    }
329
    /// Pop a buffer from the used ring and return descriptor index + used length.
    ///
    /// This function retrieves a buffer from the used ring when the device has finished processing it.
    /// The caller is responsible for freeing the descriptor when it's done with the buffer.
    /// Both `used.idx` and the ring entry are device-written, hence the fence
    /// and volatile reads below.
    ///
    /// # Returns
    ///
    /// Option<(usize, u32)>: The descriptor index and the used length, or None if no descriptors are available.
    pub fn pop_used(&mut self) -> Option<(usize, u32)> {
        // A memory fence is needed to ensure that we see the latest value of `used.idx`
        // written by the device.
        core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);

        let used_idx = unsafe { core::ptr::read_volatile(self.used.idx) };

        // Check if there are any used buffers available
        if self.last_used_idx == used_idx {
            return None;
        }

        // Calculate the index in the used ring (the ring wraps at queue size)
        let used_ring_idx = self.last_used_idx as usize % self.desc.len();

        // Retrieve the descriptor index from the used ring
        let used_entry_ptr = self.used.ring.as_ptr().wrapping_add(used_ring_idx);
        let used_entry = unsafe { core::ptr::read_volatile(used_entry_ptr) };
        let desc_idx = used_entry.id as usize;

        // Update the last used index
        self.last_used_idx = self.last_used_idx.wrapping_add(1);

        Some((desc_idx, used_entry.len))
    }
363
364    /// Pop a buffer from the used ring
365    ///
366    /// This function retrieves a buffer from the used ring when the device has finished processing it.
367    /// The caller is responsible for freeing the descriptor when it's done with the buffer.
368    ///
369    /// # Returns
370    ///
371    /// Option<usize>: The index of the descriptor that was used, or None if no descriptors are available.
372    ///
373    pub fn pop(&mut self) -> Option<usize> {
374        self.pop_used().map(|(desc_idx, _)| desc_idx)
375    }
376}
377
impl<'a> Drop for VirtQueue<'a> {
    fn drop(&mut self) {
        // SAFETY: `ptr` and `layout` are exactly the pointer and layout
        // produced by the `alloc_zeroed` call in `VirtQueue::new`, and this
        // is the only place they are deallocated.
        unsafe {
            alloc::alloc::dealloc(self.ptr, self.layout);
        }
    }
}
385
/// Descriptor structure
///
/// This structure represents a descriptor in the descriptor table.
/// It contains the address, length, flags, and next pointer.
/// This structure is located in the physical memory directly.
#[repr(C)]
pub struct Descriptor {
    /// Address of the buffer this descriptor refers to.
    pub addr: u64,
    /// Length of the buffer in bytes.
    pub len: u32,
    /// Flag bits for this descriptor (see `DescriptorFlag`).
    pub flags: u16,
    /// Index of the next descriptor in the chain, meaningful only when the
    /// `Next` flag is set.
    pub next: u16,
}
398
/// Descriptor flags
///
/// Bit flags carried in `Descriptor::flags`: `Next` marks a chained
/// descriptor, `Write` marks a device-writable buffer, and `Indirect`
/// marks an indirect descriptor table.
#[derive(Clone, Copy)]
pub enum DescriptorFlag {
    Next = 0x1,
    Write = 0x2,
    Indirect = 0x4,
}

impl DescriptorFlag {
    /// Check if the flag is set
    ///
    /// Returns true when this flag's bit is present in `flags`.
    ///
    /// # Arguments
    ///
    /// * `flags` - The flags to check.
    pub fn is_set(&self, flags: u16) -> bool {
        let mask = *self as u16;
        flags & mask == mask
    }

    /// Set the flag
    ///
    /// Turns this flag's bit on in `flags`.
    ///
    /// # Arguments
    ///
    /// * `flags` - A mutable reference to the flags to modify.
    pub fn set(&self, flags: &mut u16) {
        let mask = *self as u16;
        *flags |= mask;
    }

    /// Clear the flag
    ///
    /// Turns this flag's bit off in `flags`.
    ///
    /// # Arguments
    ///
    /// * `flags` - A mutable reference to the flags to modify.
    pub fn clear(&self, flags: &mut u16) {
        let mask = *self as u16;
        *flags &= !mask;
    }

    /// Toggle the flag
    ///
    /// Flips this flag's bit in `flags`.
    ///
    /// # Arguments
    ///
    /// * `flags` - A mutable reference to the flags to modify.
    pub fn toggle(&self, flags: &mut u16) {
        let mask = *self as u16;
        *flags ^= mask;
    }
}
463
/// Raw available ring structure
///
/// This structure represents the raw available ring.
/// It contains the flags, idx, ring buffer, and used event.
/// This structure is located in the physical memory directly.
#[repr(C, align(2))]
pub struct RawAvailableRing {
    /// Ring flags (zeroed by `VirtQueue::init`).
    flags: u16,
    /// Next slot the driver will write in `ring` (free-running, wraps at u16::MAX).
    idx: u16,
    ring: [u16; 0],  /* Flexible array member: `queue_size` u16 entries follow */
    used_event: u16, /* Locate after ring */
}
476
/// Available ring structure
///
/// This structure is wrapped around the `RawAvailableRing` structure.
/// It provides a safe interface to access the available ring entries.
#[repr(C)]
pub struct AvailableRing<'a> {
    /// Number of entries in `ring` (the queue size).
    size: usize,
    pub flags: &'a mut u16,
    pub idx: &'a mut u16,
    pub ring: &'a mut [u16],
    /// Event-suppression field located immediately after the ring entries.
    pub used_event: &'a mut u16,
}
489
impl<'a> AvailableRing<'a> {
    /// Create a new `AvailableRing` instance
    ///
    /// This function creates a new `AvailableRing` instance from a raw pointer to a `RawAvailableRing`.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it dereferences raw pointers and assumes that the memory layout is correct.
    /// The caller must ensure that the pointer is valid and points to a properly initialized `RawAvailableRing`,
    /// with room for `size` ring entries (plus the trailing `used_event`) after the header.
    ///
    /// # Arguments
    ///
    /// * `size` - The size of the ring.
    /// * `ptr` - A raw pointer to a `RawAvailableRing`.
    ///
    /// # Returns
    ///
    /// `AvailableRing` - A new `AvailableRing` instance.
    pub unsafe fn new(size: usize, ptr: *mut RawAvailableRing) -> Self {
        let flags = unsafe { &mut (*ptr).flags };
        let idx = unsafe { &mut (*ptr).idx };
        // The flexible `ring` member spans `size` u16 slots right after `idx`.
        let ring = unsafe { core::slice::from_raw_parts_mut((*ptr).ring.as_mut_ptr(), size) };
        // `used_event` lives immediately after the last ring entry.
        let used_event = unsafe { &mut *((*ptr).ring.as_mut_ptr().add(size) as *mut u16) };

        Self {
            size,
            flags,
            idx,
            ring,
            used_event,
        }
    }
}
523
/// Raw used ring structure
///
/// This structure represents the raw used ring.
/// It contains the flags, idx, ring buffer, and available event.
/// This structure is located in the physical memory directly.
#[repr(C, align(4))]
pub struct RawUsedRing {
    /// Ring flags (zeroed by `VirtQueue::init`).
    flags: u16,
    /// Next slot the device will write in `ring` (free-running, wraps at u16::MAX).
    idx: u16,
    ring: [RawUsedRingEntry; 0], /* Flexible array member: `queue_size` entries follow */
    avail_event: u16,
}
536
/// Raw used ring entry structure
///
/// This structure represents a single entry in the used ring.
/// It contains the ID and length of the used buffer.
///
/// This structure is located in the physical memory directly.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct RawUsedRingEntry {
    /// Index of the head descriptor of the completed chain.
    pub id: u32,
    /// Number of bytes the device wrote into the buffer(s).
    pub len: u32,
}
549
impl Default for RawUsedRingEntry {
    /// A zeroed entry: `id == 0`, `len == 0`.
    fn default() -> Self {
        Self { id: 0, len: 0 }
    }
}
555
/// Used ring structure
///
/// This structure is wrapped around the `RawUsedRing` structure.
/// It provides a safe interface to access the used ring entries.
pub struct UsedRing<'a> {
    pub flags: &'a mut u16,
    pub idx: &'a mut u16,
    pub ring: &'a mut [RawUsedRingEntry],
    /// Event-suppression field located immediately after the ring entries.
    pub avail_event: &'a mut u16,
}
566
impl<'a> UsedRing<'a> {
    /// Create a new `UsedRing` instance
    ///
    /// This function creates a new `UsedRing` instance from a raw pointer to a `RawUsedRing`.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it dereferences raw pointers and assumes that the memory layout is correct.
    /// The caller must ensure that the pointer is valid and points to a properly initialized `RawUsedRing`,
    /// with room for `size` ring entries (plus the trailing `avail_event`) after the header.
    ///
    /// # Arguments
    ///
    /// * `size` - The size of the ring.
    /// * `ptr` - A raw pointer to a `RawUsedRing`.
    ///
    /// # Returns
    ///
    /// `UsedRing` - A new `UsedRing` instance.
    pub unsafe fn new(size: usize, ptr: *mut RawUsedRing) -> Self {
        let flags = unsafe { &mut (*ptr).flags };
        let idx = unsafe { &mut (*ptr).idx };
        // The flexible `ring` member spans `size` entries right after `idx`.
        let ring_ptr = unsafe { (*ptr).ring.as_mut_ptr() };
        let ring = unsafe { core::slice::from_raw_parts_mut(ring_ptr, size) };
        // `avail_event` lives immediately after the last ring entry.
        let avail_event = unsafe { &mut *((*ptr).ring.as_mut_ptr().add(size) as *mut u16) };

        Self {
            flags,
            idx,
            ring,
            avail_event,
        }
    }
}
600
#[cfg(test)]
mod tests {
    //! Unit tests for the virtqueue wrapper types, run by the kernel's
    //! custom test harness (`#[test_case]`).

    use super::*;

    #[test_case]
    fn test_used_ring_flags_update() {
        let mut raw = RawUsedRing {
            flags: 0,
            idx: 0,
            ring: [RawUsedRingEntry { id: 0, len: 0 }; 0],
            avail_event: 0,
        };

        let used_ring = unsafe { UsedRing::new(0, &mut raw) };

        // NOTE(review): `raw` is read directly while `used_ring` still holds
        // &mut references into it — acceptable for this test, but technically
        // aliasing; worth verifying under Miri if this ever runs hostside.

        // Verify initial values
        assert_eq!(raw.flags, 0);
        assert_eq!(*used_ring.flags, 0);

        // Modify flags
        *used_ring.flags = 42;

        // Verify the modification is reflected
        assert_eq!(raw.flags, 42);
        assert_eq!(*used_ring.flags, 42);
    }

    #[test_case]
    fn test_raw_used_ring_direct_access() {
        let queue_size = 2;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // 1. Write values to UsedRing via VirtQueue
        *virtqueue.used.flags = 42;
        *virtqueue.used.idx = 1;
        for i in 0..queue_size {
            virtqueue.used.ring[i].id = i as u32;
            virtqueue.used.ring[i].len = 456;
        }

        // 2. Get a pointer to RawUsedRing
        // (`flags` is the first field of RawUsedRing, so its address is the
        // address of the whole raw struct)
        let raw_used_ptr = virtqueue.used.flags as *mut u16 as *mut RawUsedRing;

        // 3. Directly access RawUsedRing and verify values
        let raw_used = unsafe { &*raw_used_ptr };
        assert_eq!(raw_used.flags, 42, "flags mismatch");
        assert_eq!(raw_used.idx, 1, "idx mismatch");

        // 4. Verify the contents of the ring
        unsafe {
            let used_ring = &mut *virtqueue.used.ring.as_mut_ptr();
            let ring = core::slice::from_raw_parts_mut(used_ring, queue_size);

            for i in 0..queue_size {
                assert_eq!(ring[i].id, i as u32, "ring[{}].id mismatch", i);
                assert_eq!(ring[i].len, 456, "ring[{}].len mismatch", i);
            }
        }
    }

    #[test_case]
    fn test_raw_available_ring_direct_access() {
        let queue_size = 16;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // 1. Write values to AvailableRing via VirtQueue
        *virtqueue.avail.flags = 24;
        *virtqueue.avail.idx = 1;
        for i in 0..queue_size {
            virtqueue.avail.ring[i] = i as u16;
        }

        // 2. Get a pointer to RawAvailableRing
        // (`flags` is the first field of RawAvailableRing)
        let raw_avail_ptr = virtqueue.avail.flags as *mut u16 as *mut RawAvailableRing;

        // 3. Directly access RawAvailableRing and verify values
        let raw_avail = unsafe { &*raw_avail_ptr };
        assert_eq!(raw_avail.flags, 24, "flags mismatch");
        assert_eq!(raw_avail.idx, 1, "idx mismatch");

        // 4. Verify the contents of the ring
        unsafe {
            let avail_ring = &mut *virtqueue.avail.ring.as_mut_ptr();
            let ring = core::slice::from_raw_parts_mut(avail_ring, queue_size);

            for i in 0..queue_size {
                assert_eq!(ring[i], i as u16, "ring[{}] mismatch", i);
            }
        }
    }

    #[test_case]
    fn test_initialize_virtqueue() {
        let queue_size = 2;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // Expected: 2 descriptors (32 B) + avail header/entries (10 B),
        // padded to 44, + used header/entries (24 B) = 68 — see get_raw_size.
        let total = 68;

        assert_eq!(virtqueue.desc.len(), queue_size);
        assert_eq!(*virtqueue.avail.idx, 0);
        assert_eq!(*virtqueue.used.idx, 0);

        // Check the size of the allocated memory
        let allocated_size = virtqueue.get_raw_size();
        assert_eq!(allocated_size, total);

        // Check the next index of each descriptor
        for i in 0..queue_size {
            assert_eq!(virtqueue.desc[i].next, (i as u16 + 1) % queue_size as u16);
            assert_eq!(virtqueue.avail.ring[i], 0);
            assert_eq!(virtqueue.used.ring[i].len, 0);
        }
    }

    #[test_case]
    fn test_alloc_free_desc() {
        let queue_size = 1;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // Allocate a descriptor
        let desc_idx = virtqueue.alloc_desc().unwrap();
        assert_eq!(desc_idx, 0);

        // Free the descriptor
        virtqueue.free_desc(desc_idx);
        assert_eq!(virtqueue.free_descriptors.len(), 1);
    }

    #[test_case]
    fn test_alloc_free_desc_chain() {
        let queue_size = 2;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // Allocate a chain of descriptors
        let desc_idx = virtqueue.alloc_desc_chain(2).unwrap();

        // Free the chain of descriptors
        virtqueue.free_desc_chain(desc_idx);
        assert_eq!(virtqueue.free_descriptors.len(), 2);
    }

    #[test_case]
    fn test_alloc_desc_chain_too_long() {
        let queue_size = 2;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // Allocate a chain of descriptors that is too long
        let desc_idx = virtqueue.alloc_desc_chain(3);
        assert!(desc_idx.is_none());
    }

    #[test_case]
    fn test_push_pop() {
        let queue_size = 2;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // 1. Allocate and configure a descriptor
        let desc_idx = virtqueue.alloc_desc().unwrap();
        virtqueue.desc[desc_idx].addr = 0x1000;
        virtqueue.desc[desc_idx].len = 100;

        // 2. Push to the queue
        assert!(virtqueue.push(desc_idx).is_ok());

        // 3. Simulate device processing the buffer
        *virtqueue.used.idx = 1;
        virtqueue.used.ring[0].id = desc_idx as u32;

        // 4. Pop the buffer
        let popped = virtqueue.pop();
        assert!(popped.is_some());
        assert_eq!(popped.unwrap(), desc_idx);

        // 5. Verify no more buffers are available
        assert!(virtqueue.pop().is_none());
    }

    #[test_case]
    fn test_push_pop_chain() {
        let queue_size = 4;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // 1. Allocate a chain of descriptors
        let chain_len = 3;
        let desc_idx = virtqueue.alloc_desc_chain(chain_len).unwrap();

        // 2. Configure the descriptors in the chain
        let mut current_idx = desc_idx;
        for i in 0..chain_len {
            virtqueue.desc[current_idx].addr = 0x1000 + (i * 0x100) as u64;
            virtqueue.desc[current_idx].len = 100;

            // Set appropriate flags (except for the last one)
            if i < chain_len - 1 {
                DescriptorFlag::Next.set(&mut virtqueue.desc[current_idx].flags);
                current_idx = virtqueue.desc[current_idx].next as usize;
            }
        }

        // 3. Push the chain to the queue
        assert!(virtqueue.push(desc_idx).is_ok());

        // 4. Simulate device processing the chain
        *virtqueue.used.idx = 1;
        virtqueue.used.ring[0].id = desc_idx as u32;
        virtqueue.used.ring[0].len = 300; // Total bytes processed (100 per descriptor)

        // 5. Pop the buffer
        let popped = virtqueue.pop();
        assert!(popped.is_some());
        assert_eq!(popped.unwrap(), desc_idx);

        // 6. Verify the chain is intact
        let mut current_idx = desc_idx;
        for i in 0..chain_len {
            // Check each descriptor in the chain
            assert_eq!(
                virtqueue.desc[current_idx].addr,
                0x1000 + (i * 0x100) as u64
            );
            assert_eq!(virtqueue.desc[current_idx].len, 100);

            if i < chain_len - 1 {
                assert!(DescriptorFlag::Next.is_set(virtqueue.desc[current_idx].flags));
                current_idx = virtqueue.desc[current_idx].next as usize;
            } else {
                // Last descriptor should not have NEXT flag
                assert!(!DescriptorFlag::Next.is_set(virtqueue.desc[current_idx].flags));
            }
        }

        // 7. Free the chain after processing
        virtqueue.free_desc_chain(desc_idx);
        assert_eq!(virtqueue.free_descriptors.len(), queue_size);

        // 8. Verify no more buffers are available
        assert!(virtqueue.pop().is_none());
    }
}