// queue.rs
use core::mem::size_of;
use core::slice;
use core::sync::atomic::{fence, Ordering};

use super::*;
use crate::transport::Transport;
use bitflags::*;
use volatile::Volatile;

/// The mechanism for bulk data transport on virtio devices.
///
/// Each device can have zero or more virtqueues.
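///
/// # Example
///
/// A minimal sketch of the request lifecycle (`transport`, `HalImpl` and the
/// queue index are placeholders, not defined in this file):
///
/// ```ignore
/// let mut queue = VirtQueue::<HalImpl>::new(&mut transport, 0, 16)?;
/// // Offer one device-readable and one device-writable buffer.
/// let mut response = [0u8; 32];
/// let token = queue.add(&[&[1, 2, 3]], &[&mut response])?;
/// transport.notify(0);
/// // Poll until the device reports a used element, then reclaim the buffers.
/// while !queue.can_pop() {
///     core::hint::spin_loop();
/// }
/// let (popped, len) = queue.pop_used()?;
/// assert_eq!(popped, token);
/// ```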
#[derive(Debug)]
pub struct VirtQueue<'a, H: Hal> {
    /// DMA guard
    dma: DMA<H>,
    /// Descriptor table
    desc: &'a mut [Descriptor],
    /// Available ring
    avail: &'a mut AvailRing,
    /// Used ring
    used: &'a mut UsedRing,
    /// The index of the queue.
    queue_idx: u32,
    /// The size of the queue.
    ///
    /// This is both the number of descriptors, and the number of slots in the available and used
    /// rings.
    queue_size: u16,
    /// The number of descriptors currently in use.
    num_used: u16,
    /// The head desc index of the free list.
    free_head: u16,
    /// Our free-running shadow of the available ring's `idx` field.
    avail_idx: u16,
    /// The used ring index up to which we have processed used elements.
    last_used_idx: u16,
}

impl<H: Hal> VirtQueue<'_, H> {
    /// Create a new VirtQueue.
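    ///
    /// # Example
    ///
    /// A minimal sketch mirroring the tests below (the MMIO header is fake, so
    /// no real device is behind it):
    ///
    /// ```ignore
    /// let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
    /// let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
    /// let queue = VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap();
    /// assert_eq!(queue.size(), 4);
    /// ```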
    pub fn new<T: Transport>(transport: &mut T, idx: usize, size: u16) -> Result<Self> {
        if transport.queue_used(idx as u32) {
            return Err(Error::AlreadyUsed);
        }
        if !size.is_power_of_two() || transport.max_queue_size() < size as u32 {
            return Err(Error::InvalidParam);
        }
        let layout = VirtQueueLayout::new(size);
        // Allocate contiguous pages.
        let dma = DMA::new(layout.size / PAGE_SIZE)?;
        transport.queue_set(
            idx as u32,
            size as u32,
            dma.paddr(),
            dma.paddr() + layout.avail_offset,
            dma.paddr() + layout.used_offset,
        );
        let desc =
            unsafe { slice::from_raw_parts_mut(dma.vaddr() as *mut Descriptor, size as usize) };
        let avail = unsafe { &mut *((dma.vaddr() + layout.avail_offset) as *mut AvailRing) };
        let used = unsafe { &mut *((dma.vaddr() + layout.used_offset) as *mut UsedRing) };

        // Link descriptors together.
        for i in 0..(size - 1) {
            desc[i as usize].next.write(i + 1);
        }

        Ok(VirtQueue {
            dma,
            desc,
            avail,
            used,
            queue_size: size,
            queue_idx: idx as u32,
            num_used: 0,
            free_head: 0,
            avail_idx: 0,
            last_used_idx: 0,
        })
    }

    /// Add buffers to the virtqueue, return a token.
    ///
    /// Ref: linux virtio_ring.c virtqueue_add
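    ///
    /// # Example
    ///
    /// A sketch of a two-part request, assuming `queue` has at least two free
    /// descriptors: the device reads the first slice and writes the second.
    ///
    /// ```ignore
    /// let mut response = [0u8; 8];
    /// let token = queue.add(&[&[0xde, 0xad]], &[&mut response])?;
    /// // `token` is the head descriptor index; `pop_used` hands it back once
    /// // the device has finished with the chain.
    /// ```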
    pub fn add(&mut self, inputs: &[&[u8]], outputs: &[&mut [u8]]) -> Result<u16> {
        if inputs.is_empty() && outputs.is_empty() {
            return Err(Error::InvalidParam);
        }
        if inputs.len() + outputs.len() + self.num_used as usize > self.queue_size as usize {
            return Err(Error::BufferTooSmall);
        }

        // Allocate descriptors from the free list.
        let head = self.free_head;
        let mut last = self.free_head;
        for input in inputs.iter() {
            let desc = &mut self.desc[self.free_head as usize];
            desc.set_buf::<H>(input);
            desc.flags.write(DescFlags::NEXT);
            last = self.free_head;
            self.free_head = desc.next.read();
        }
        for output in outputs.iter() {
            let desc = &mut self.desc[self.free_head as usize];
            desc.set_buf::<H>(output);
            desc.flags.write(DescFlags::NEXT | DescFlags::WRITE);
            last = self.free_head;
            self.free_head = desc.next.read();
        }

        // Clear the NEXT flag on the last descriptor to terminate the chain.
        {
            let desc = &mut self.desc[last as usize];
            let mut flags = desc.flags.read();
            flags.remove(DescFlags::NEXT);
            desc.flags.write(flags);
        }
        self.num_used += (inputs.len() + outputs.len()) as u16;
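
        // The available index is free-running and wraps at 2^16. Because the
        // queue size is a power of two, masking it with (queue_size - 1) maps
        // it onto a ring slot. The slot is written before the index is bumped
        // (below, after a barrier) so the device never observes an index that
        // points at a half-written entry.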
        let avail_slot = self.avail_idx & (self.queue_size - 1);
        self.avail.ring[avail_slot as usize].write(head);

        // Write barrier: the device must see the descriptor and ring writes
        // before the new index.
        fence(Ordering::SeqCst);

        // Increase the head of the avail ring.
        self.avail_idx = self.avail_idx.wrapping_add(1);
        self.avail.idx.write(self.avail_idx);
        Ok(head)
    }

    /// Whether there is a used element that can be popped.
    pub fn can_pop(&self) -> bool {
        self.last_used_idx != self.used.idx.read()
    }

    /// The number of free descriptors.
    pub fn available_desc(&self) -> usize {
        (self.queue_size - self.num_used) as usize
    }

    /// Recycle descriptors in the chain specified by `head`.
    ///
    /// This will push all linked descriptors to the front of the free list.
    fn recycle_descriptors(&mut self, mut head: u16) {
        let origin_free_head = self.free_head;
        self.free_head = head;
        loop {
            let desc = &mut self.desc[head as usize];
            let flags = desc.flags.read();
            self.num_used -= 1;
            if flags.contains(DescFlags::NEXT) {
                head = desc.next.read();
            } else {
                desc.next.write(origin_free_head);
                return;
            }
        }
    }

    /// Get a token from device used buffers, return (token, len).
    ///
    /// Ref: linux virtio_ring.c virtqueue_get_buf_ctx
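    ///
    /// # Example
    ///
    /// A polling sketch (the notification step via the transport is omitted):
    ///
    /// ```ignore
    /// while !queue.can_pop() {
    ///     core::hint::spin_loop();
    /// }
    /// let (token, len) = queue.pop_used()?;
    /// // `len` is the number of bytes the device wrote into the
    /// // device-writable buffers of the chain identified by `token`.
    /// ```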
    pub fn pop_used(&mut self) -> Result<(u16, u32)> {
        if !self.can_pop() {
            return Err(Error::NotReady);
        }
        // Read barrier: the used ring entry must be read after the index.
        fence(Ordering::SeqCst);

        let last_used_slot = self.last_used_idx & (self.queue_size - 1);
        let index = self.used.ring[last_used_slot as usize].id.read() as u16;
        let len = self.used.ring[last_used_slot as usize].len.read();

        self.recycle_descriptors(index);
        self.last_used_idx = self.last_used_idx.wrapping_add(1);
        Ok((index, len))
    }

    /// Return the size of the queue.
    pub fn size(&self) -> u16 {
        self.queue_size
    }
}

/// The inner layout of a VirtQueue.
///
/// Ref: 2.6.2 Legacy Interfaces: A Note on Virtqueue Layout
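///
/// For example, with `queue_size = 4` and 4 KiB pages (assuming `align_up`
/// rounds up to `PAGE_SIZE`, which the page-count DMA allocation in `new`
/// suggests):
///
/// - descriptor table: 16 * 4 = 64 bytes
/// - available ring: 2 * (3 + 4) = 14 bytes
/// - used ring: 2 * 3 + 8 * 4 = 38 bytes
///
/// giving `avail_offset = 64`, `used_offset = align_up(78) = 4096`, and
/// `size = 4096 + align_up(38) = 8192` bytes, i.e. two pages.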
struct VirtQueueLayout {
    avail_offset: usize,
    used_offset: usize,
    size: usize,
}

impl VirtQueueLayout {
    fn new(queue_size: u16) -> Self {
        assert!(
            queue_size.is_power_of_two(),
            "queue size should be a power of 2"
        );
        let queue_size = queue_size as usize;
        let desc = size_of::<Descriptor>() * queue_size;
        let avail = size_of::<u16>() * (3 + queue_size);
        let used = size_of::<u16>() * 3 + size_of::<UsedElem>() * queue_size;
        VirtQueueLayout {
            avail_offset: desc,
            used_offset: align_up(desc + avail),
            size: align_up(desc + avail) + align_up(used),
        }
    }
}

#[repr(C, align(16))]
#[derive(Debug)]
pub(crate) struct Descriptor {
    /// Guest-physical address of the buffer.
    addr: Volatile<u64>,
    /// Length of the buffer in bytes.
    len: Volatile<u32>,
    /// Descriptor flags; see `DescFlags`.
    flags: Volatile<DescFlags>,
    /// Index of the next descriptor in the chain, valid if NEXT is set.
    next: Volatile<u16>,
}

impl Descriptor {
    fn set_buf<H: Hal>(&mut self, buf: &[u8]) {
        self.addr
            .write(H::virt_to_phys(buf.as_ptr() as usize) as u64);
        self.len.write(buf.len() as u32);
    }
}

bitflags! {
    /// Descriptor flags
    struct DescFlags: u16 {
        const NEXT = 1;
        const WRITE = 2;
        const INDIRECT = 4;
    }
}
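
// Within a chain, every descriptor except the last has NEXT set, and every
// device-writable descriptor also has WRITE set. A chain of one readable part
// followed by one writable part therefore carries the flags [NEXT], [WRITE];
// the `add_buffers` test below checks the four-part case.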

/// The driver uses the available ring to offer buffers to the device:
/// each ring entry refers to the head of a descriptor chain.
/// It is only written by the driver and read by the device.
#[repr(C)]
#[derive(Debug)]
struct AvailRing {
    flags: Volatile<u16>,
    /// A driver MUST NOT decrement the idx.
    idx: Volatile<u16>,
    ring: [Volatile<u16>; 32], // actual size: queue_size
    used_event: Volatile<u16>, // unused
}

/// The used ring is where the device returns buffers once it is done with them:
/// it is only written to by the device, and read by the driver.
#[repr(C)]
#[derive(Debug)]
struct UsedRing {
    flags: Volatile<u16>,
    idx: Volatile<u16>,
    ring: [UsedElem; 32],       // actual size: queue_size
    avail_event: Volatile<u16>, // unused
}

#[repr(C)]
#[derive(Debug)]
struct UsedElem {
    /// Index of the head of the completed descriptor chain.
    id: Volatile<u32>,
    /// Total number of bytes the device wrote into the chain's
    /// device-writable buffers.
    len: Volatile<u32>,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{hal::fake::FakeHal, transport::mmio::MODERN_VERSION};
    use core::ptr::NonNull;

    #[test]
    fn invalid_queue_size() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        // Size not a power of 2.
        assert_eq!(
            VirtQueue::<FakeHal>::new(&mut transport, 0, 3).unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn queue_too_big() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        assert_eq!(
            VirtQueue::<FakeHal>::new(&mut transport, 0, 5).unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn queue_already_used() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap();
        assert_eq!(
            VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap_err(),
            Error::AlreadyUsed
        );
    }

    #[test]
    fn add_empty() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap();
        assert_eq!(queue.add(&[], &[]).unwrap_err(), Error::InvalidParam);
    }

    #[test]
    fn add_too_big() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap();
        assert_eq!(queue.available_desc(), 4);
        assert_eq!(
            queue
                .add(&[&[], &[], &[]], &[&mut [], &mut []])
                .unwrap_err(),
            Error::BufferTooSmall
        );
    }

    #[test]
    fn add_buffers() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal>::new(&mut transport, 0, 4).unwrap();
        assert_eq!(queue.size(), 4);
        assert_eq!(queue.available_desc(), 4);

        // Add a buffer chain consisting of two device-readable parts followed by two
        // device-writable parts.
        let token = queue
            .add(&[&[1, 2], &[3]], &[&mut [0, 0], &mut [0]])
            .unwrap();

        assert_eq!(queue.available_desc(), 0);
        assert!(!queue.can_pop());

        let first_descriptor_index = queue.avail.ring[0].read();
        assert_eq!(first_descriptor_index, token);
        assert_eq!(queue.desc[first_descriptor_index as usize].len.read(), 2);
        assert_eq!(
            queue.desc[first_descriptor_index as usize].flags.read(),
            DescFlags::NEXT
        );
        let second_descriptor_index = queue.desc[first_descriptor_index as usize].next.read();
        assert_eq!(queue.desc[second_descriptor_index as usize].len.read(), 1);
        assert_eq!(
            queue.desc[second_descriptor_index as usize].flags.read(),
            DescFlags::NEXT
        );
        let third_descriptor_index = queue.desc[second_descriptor_index as usize].next.read();
        assert_eq!(queue.desc[third_descriptor_index as usize].len.read(), 2);
        assert_eq!(
            queue.desc[third_descriptor_index as usize].flags.read(),
            DescFlags::NEXT | DescFlags::WRITE
        );
        let fourth_descriptor_index = queue.desc[third_descriptor_index as usize].next.read();
        assert_eq!(queue.desc[fourth_descriptor_index as usize].len.read(), 1);
        assert_eq!(
            queue.desc[fourth_descriptor_index as usize].flags.read(),
            DescFlags::WRITE
        );
    }
}