use core::mem::size_of;
use core::slice;
use core::sync::atomic::{fence, Ordering};

use super::*;
use crate::transport::Transport;
use bitflags::*;
use volatile::Volatile;

/// The mechanism for bulk data transport on virtio devices.
///
/// Each device can have zero or more virtqueues.
#[derive(Debug)]
pub struct VirtQueue<'a, H: Hal> {
    /// DMA guard
    dma: DMA<H>,
    /// Descriptor table
    desc: &'a mut [Descriptor],
    /// Available ring
    avail: &'a mut AvailRing,
    /// Used ring
    used: &'a mut UsedRing,

    /// The index of the queue.
    queue_idx: u32,
    /// The size of the queue.
    ///
    /// This is both the number of descriptors, and the number of slots in the available and used
    /// rings.
    queue_size: u16,
    /// The number of descriptors currently in use.
    num_used: u16,
    /// The head descriptor index of the free list.
    free_head: u16,
    /// Our shadow copy of the available ring's index.
    avail_idx: u16,
    /// The used ring index up to which we have processed completions.
    last_used_idx: u16,
}

impl<H: Hal> VirtQueue<'_, H> {
    /// Create a new VirtQueue.
    pub fn new<T: Transport>(transport: &mut T, idx: usize, size: u16) -> Result<Self> {
        if transport.queue_used(idx as u32) {
            return Err(Error::AlreadyUsed);
        }
        if !size.is_power_of_two() || transport.max_queue_size() < size as u32 {
            return Err(Error::InvalidParam);
        }
        let layout = VirtQueueLayout::new(size);
        // Allocate contiguous pages.
        let dma = DMA::new(layout.size / PAGE_SIZE)?;

        transport.queue_set(
            idx as u32,
            size as u32,
            dma.paddr(),
            dma.paddr() + layout.avail_offset,
            dma.paddr() + layout.used_offset,
        );

        let desc =
            unsafe { slice::from_raw_parts_mut(dma.vaddr() as *mut Descriptor, size as usize) };
        let avail = unsafe { &mut *((dma.vaddr() + layout.avail_offset) as *mut AvailRing) };
        let used = unsafe { &mut *((dma.vaddr() + layout.used_offset) as *mut UsedRing) };

        // Link the descriptors together into a free list.
        for i in 0..(size - 1) {
            desc[i as usize].next.write(i + 1);
        }

        Ok(VirtQueue {
            dma,
            desc,
            avail,
            used,
            queue_size: size,
            queue_idx: idx as u32,
            num_used: 0,
            free_head: 0,
            avail_idx: 0,
            last_used_idx: 0,
        })
    }

    /// Add buffers to the virtqueue, return a token.
    ///
    /// Ref: linux virtio_ring.c virtqueue_add
    pub fn add(&mut self, inputs: &[&[u8]], outputs: &[&mut [u8]]) -> Result<u16> {
        if inputs.is_empty() && outputs.is_empty() {
            return Err(Error::InvalidParam);
        }
        if inputs.len() + outputs.len() + self.num_used as usize > self.queue_size as usize {
            return Err(Error::BufferTooSmall);
        }

        // Allocate descriptors from the free list.
        let head = self.free_head;
        let mut last = self.free_head;
        // Device-readable buffers come first...
        for input in inputs.iter() {
            let desc = &mut self.desc[self.free_head as usize];
            desc.set_buf::<H>(input);
            desc.flags.write(DescFlags::NEXT);
            last = self.free_head;
            self.free_head = desc.next.read();
        }
        // ...followed by device-writable buffers.
        for output in outputs.iter() {
            let desc = &mut self.desc[self.free_head as usize];
            desc.set_buf::<H>(output);
            desc.flags.write(DescFlags::NEXT | DescFlags::WRITE);
            last = self.free_head;
            self.free_head = desc.next.read();
        }
        // Clear the NEXT flag on the last descriptor to terminate the chain.
        {
            let desc = &mut self.desc[last as usize];
            let mut flags = desc.flags.read();
            flags.remove(DescFlags::NEXT);
            desc.flags.write(flags);
        }
        self.num_used += (inputs.len() + outputs.len()) as u16;

        let avail_slot = self.avail_idx & (self.queue_size - 1);
        self.avail.ring[avail_slot as usize].write(head);

        // Write barrier so that the device sees the descriptor chain and the ring entry
        // before it sees the updated available index.
        fence(Ordering::SeqCst);

        // Increase the head of the available ring.
        self.avail_idx = self.avail_idx.wrapping_add(1);
        self.avail.idx.write(self.avail_idx);
        Ok(head)
    }

    /// Whether there is a used element that can be popped.
    pub fn can_pop(&self) -> bool {
        self.last_used_idx != self.used.idx.read()
    }

    /// The number of free descriptors.
    pub fn available_desc(&self) -> usize {
        (self.queue_size - self.num_used) as usize
    }

    /// Recycle descriptors in the list specified by head.
    ///
    /// This will push all linked descriptors at the front of the free list.
    fn recycle_descriptors(&mut self, mut head: u16) {
        let origin_free_head = self.free_head;
        self.free_head = head;
        loop {
            let desc = &mut self.desc[head as usize];
            let flags = desc.flags.read();
            self.num_used -= 1;
            if flags.contains(DescFlags::NEXT) {
                head = desc.next.read();
            } else {
                // Last descriptor of the chain: splice the chain onto the old free list.
                desc.next.write(origin_free_head);
                return;
            }
        }
    }

    /// Get a token from device used buffers, return (token, len).
    ///
    /// Ref: linux virtio_ring.c virtqueue_get_buf_ctx
    pub fn pop_used(&mut self) -> Result<(u16, u32)> {
        if !self.can_pop() {
            return Err(Error::NotReady);
        }
        // Read barrier so the used element is read only after the updated used index.
        fence(Ordering::SeqCst);

        let last_used_slot = self.last_used_idx & (self.queue_size - 1);
        let index = self.used.ring[last_used_slot as usize].id.read() as u16;
        let len = self.used.ring[last_used_slot as usize].len.read();

        self.recycle_descriptors(index);
        self.last_used_idx = self.last_used_idx.wrapping_add(1);

        Ok((index, len))
    }

    /// Return the size of the queue.
    pub fn size(&self) -> u16 {
        self.queue_size
    }
}
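
// The split-virtqueue flow implemented above, in brief (a descriptive sketch,
// not normative; see the virtio specification for the full protocol):
//
// 1. The driver builds a descriptor chain with `add` and publishes its head in
//    the available ring, bumping `avail.idx` after a write barrier.
// 2. The driver notifies the device that the queue has new buffers (e.g. via
//    the transport's queue notification mechanism).
// 3. The device reads the chain, fills any device-writable buffers, and
//    reports the head index and the number of bytes written in the used ring,
//    bumping `used.idx`.
// 4. The driver observes `used.idx != last_used_idx` (`can_pop`), reads the
//    used element, and recycles the chain's descriptors (`pop_used`).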
/// The inner layout of a VirtQueue.
///
/// Ref: 2.6.2 Legacy Interfaces: A Note on Virtqueue Layout
struct VirtQueueLayout {
    avail_offset: usize,
    used_offset: usize,
    size: usize,
}

impl VirtQueueLayout {
    fn new(queue_size: u16) -> Self {
        assert!(
            queue_size.is_power_of_two(),
            "queue size should be a power of 2"
        );
        let queue_size = queue_size as usize;
        let desc = size_of::<Descriptor>() * queue_size;
        let avail = size_of::<u16>() * (3 + queue_size);
        let used = size_of::<u16>() * 3 + size_of::<UsedElem>() * queue_size;
        VirtQueueLayout {
            avail_offset: desc,
            used_offset: align_up(desc + avail),
            size: align_up(desc + avail) + align_up(used),
        }
    }
}

#[repr(C, align(16))]
#[derive(Debug)]
pub(crate) struct Descriptor {
    addr: Volatile<u64>,
    len: Volatile<u32>,
    flags: Volatile<DescFlags>,
    next: Volatile<u16>,
}

impl Descriptor {
    fn set_buf<H: Hal>(&mut self, buf: &[u8]) {
        self.addr
            .write(H::virt_to_phys(buf.as_ptr() as usize) as u64);
        self.len.write(buf.len() as u32);
    }
}

bitflags! {
    /// Descriptor flags
    struct DescFlags: u16 {
        const NEXT = 1;
        const WRITE = 2;
        const INDIRECT = 4;
    }
}

/// The driver uses the available ring to offer buffers to the device:
/// each ring entry refers to the head of a descriptor chain.
/// It is only written by the driver and read by the device.
#[repr(C)]
#[derive(Debug)]
struct AvailRing {
    flags: Volatile<u16>,
    /// A driver MUST NOT decrement the idx.
    idx: Volatile<u16>,
    ring: [Volatile<u16>; 32], // actual size: queue_size
    used_event: Volatile<u16>, // unused
}

/// The used ring is where the device returns buffers once it is done with them:
/// it is only written to by the device, and read by the driver.
#[repr(C)]
#[derive(Debug)]
struct UsedRing {
    flags: Volatile<u16>,
    idx: Volatile<u16>,
    ring: [UsedElem; 32],       // actual size: queue_size
    avail_event: Volatile<u16>, // unused
}

#[repr(C)]
#[derive(Debug)]
struct UsedElem {
    id: Volatile<u32>,
    len: Volatile<u32>,
}
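
// A minimal sanity-check sketch of the legacy layout arithmetic above. It
// assumes `align_up` rounds up to a PAGE_SIZE boundary, which `VirtQueue::new`
// relies on when it allocates `layout.size / PAGE_SIZE` pages. The concrete
// numbers are illustrative, not normative.
#[cfg(test)]
mod layout_sketch {
    use super::*;
    use core::mem::size_of;

    #[test]
    fn small_queue_layout() {
        let layout = VirtQueueLayout::new(4);
        // The descriptor table comes first: 4 descriptors of 16 bytes each.
        assert_eq!(layout.avail_offset, 4 * size_of::<Descriptor>());
        // The used ring is pushed up to the next page boundary.
        assert_eq!(layout.used_offset % PAGE_SIZE, 0);
        // The whole queue occupies a whole number of pages.
        assert_eq!(layout.size % PAGE_SIZE, 0);
    }
}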
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        hal::fake::FakeHal,
        transport::mmio::{MmioTransport, VirtIOHeader, MODERN_VERSION},
    };
    use core::ptr::NonNull;

    #[test]
    fn invalid_queue_size() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        // Size not a power of 2.
        assert_eq!(
            VirtQueue::<FakeHal>::new(&mut transport, 0, 3).unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn queue_too_big() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        // Size is a power of 2 but exceeds the transport's maximum queue size of 4.
        assert_eq!(
            VirtQueue::<FakeHal>::new(&mut transport, 0, 8).unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn queue_already_used() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap();
        assert_eq!(
            VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap_err(),
            Error::AlreadyUsed
        );
    }

    #[test]
    fn add_empty() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap();
        assert_eq!(queue.add(&[], &[]).unwrap_err(), Error::InvalidParam);
    }

    #[test]
    fn add_too_big() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap();
        assert_eq!(queue.available_desc(), 4);
        // Five buffers need five descriptors, but the queue only has four.
        assert_eq!(
            queue
                .add(&[&[], &[], &[]], &[&mut [], &mut []])
                .unwrap_err(),
            Error::BufferTooSmall
        );
    }

    #[test]
    fn add_buffers() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap();
        assert_eq!(queue.size(), 4);
        assert_eq!(queue.available_desc(), 4);

        // Add a buffer chain consisting of two device-readable parts followed by two
        // device-writable parts.
        let token = queue
            .add(&[&[1, 2], &[3]], &[&mut [0, 0], &mut [0]])
            .unwrap();

        assert_eq!(queue.available_desc(), 0);
        assert!(!queue.can_pop());

        let first_descriptor_index = queue.avail.ring[0].read();
        assert_eq!(first_descriptor_index, token);
        assert_eq!(queue.desc[first_descriptor_index as usize].len.read(), 2);
        assert_eq!(
            queue.desc[first_descriptor_index as usize].flags.read(),
            DescFlags::NEXT
        );
        let second_descriptor_index = queue.desc[first_descriptor_index as usize].next.read();
        assert_eq!(queue.desc[second_descriptor_index as usize].len.read(), 1);
        assert_eq!(
            queue.desc[second_descriptor_index as usize].flags.read(),
            DescFlags::NEXT
        );
        let third_descriptor_index = queue.desc[second_descriptor_index as usize].next.read();
        assert_eq!(queue.desc[third_descriptor_index as usize].len.read(), 2);
        assert_eq!(
            queue.desc[third_descriptor_index as usize].flags.read(),
            DescFlags::NEXT | DescFlags::WRITE
        );
        let fourth_descriptor_index = queue.desc[third_descriptor_index as usize].next.read();
        assert_eq!(queue.desc[fourth_descriptor_index as usize].len.read(), 1);
        assert_eq!(
            queue.desc[fourth_descriptor_index as usize].flags.read(),
            DescFlags::WRITE
        );
    }
}
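
// A hedged end-to-end sketch of the request lifecycle described near the top
// of this file. The test plays both sides: it stands in for the device by
// writing the used ring directly (the private fields are accessible because
// this module is a child of the queue module). Illustrative only; a real
// device fills the used ring over DMA after being notified via the transport.
#[cfg(test)]
mod lifecycle_sketch {
    use super::*;
    use crate::{
        hal::fake::FakeHal,
        transport::mmio::{MmioTransport, VirtIOHeader, MODERN_VERSION},
    };
    use core::ptr::NonNull;

    #[test]
    fn add_then_pop() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap();

        // Driver side: offer one device-readable and one device-writable buffer.
        // (A real driver would notify the device via the transport here.)
        let token = queue.add(&[&[42]], &[&mut [0u8; 2]]).unwrap();
        assert!(!queue.can_pop());

        // "Device" side: mark the chain as used, reporting 2 bytes written.
        queue.used.ring[0].id.write(token as u32);
        queue.used.ring[0].len.write(2);
        queue.used.idx.write(1);

        // Driver side: pop the completed chain; its descriptors go back on the
        // free list.
        assert!(queue.can_pop());
        assert_eq!(queue.pop_used().unwrap(), (token, 2));
        assert_eq!(queue.available_desc(), 4);
    }
}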