#[cfg(test)]
use crate::hal::VirtAddr;
use crate::hal::{BufferDirection, Dma, Hal, PhysAddr};
use crate::transport::Transport;
use crate::{align_up, nonnull_slice_from_raw_parts, pages, Error, Result, PAGE_SIZE};
use bitflags::bitflags;
#[cfg(test)]
use core::cmp::min;
use core::hint::spin_loop;
use core::mem::size_of;
#[cfg(test)]
use core::ptr;
use core::ptr::{addr_of_mut, NonNull};
use core::sync::atomic::{fence, Ordering};

/// The mechanism for bulk data transport on virtio devices.
///
/// Each device can have zero or more virtqueues.
///
/// * `SIZE`: The size of the queue. This is both the number of descriptors, and the number of
///   slots in the available and used rings.
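///
/// # Example
///
/// A minimal usage sketch, not compiled as a doctest; `SomeHal` stands in for a `Hal`
/// implementation and `transport` for an already-initialised transport:
///
/// ```ignore
/// // Queue 0, with 16 descriptors.
/// let mut queue = VirtQueue::<SomeHal, 16>::new(&mut transport, 0)?;
/// let request = [0x01u8; 8];
/// let mut response = [0u8; 8];
/// // Share the buffers with the device, notify it, and block until it uses them.
/// let used_len = queue.add_notify_wait_pop(&[&request], &[&mut response], &mut transport)?;
/// ```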
#[derive(Debug)]
pub struct VirtQueue<H: Hal, const SIZE: usize> {
    /// DMA guard
    layout: VirtQueueLayout<H>,
    /// Descriptor table
    desc: NonNull<[Descriptor]>,
    /// Available ring
    avail: NonNull<AvailRing<SIZE>>,
    /// Used ring
    used: NonNull<UsedRing<SIZE>>,
    /// The index of the queue.
    queue_idx: u16,
    /// The number of descriptors currently in use.
    num_used: u16,
    /// The head desc index of the free list.
    free_head: u16,
    avail_idx: u16,
    last_used_idx: u16,
}

impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
    /// Create a new VirtQueue.
    pub fn new<T: Transport>(transport: &mut T, idx: u16) -> Result<Self> {
        if transport.queue_used(idx) {
            return Err(Error::AlreadyUsed);
        }
        if !SIZE.is_power_of_two()
            || SIZE > u16::MAX.into()
            || transport.max_queue_size() < SIZE as u32
        {
            return Err(Error::InvalidParam);
        }
        let size = SIZE as u16;

        let layout = if transport.requires_legacy_layout() {
            VirtQueueLayout::allocate_legacy(size)?
        } else {
            VirtQueueLayout::allocate_flexible(size)?
        };

        transport.queue_set(
            idx,
            size.into(),
            layout.descriptors_paddr(),
            layout.driver_area_paddr(),
            layout.device_area_paddr(),
        );

        let desc =
            nonnull_slice_from_raw_parts(layout.descriptors_vaddr().cast::<Descriptor>(), SIZE);
        let avail = layout.avail_vaddr().cast();
        let used = layout.used_vaddr().cast();

        // Link descriptors together.
        for i in 0..(size - 1) {
            // Safe because `desc` is properly aligned, dereferenceable, initialised, and the device
            // won't access the descriptors for the duration of this unsafe block.
            unsafe {
                (*desc.as_ptr())[i as usize].next = i + 1;
            }
        }

        Ok(VirtQueue {
            layout,
            desc,
            avail,
            used,
            queue_idx: idx,
            num_used: 0,
            free_head: 0,
            avail_idx: 0,
            last_used_idx: 0,
        })
    }

    /// Add buffers to the virtqueue, return a token.
    ///
    /// Ref: linux virtio_ring.c virtqueue_add
    ///
    /// # Safety
    ///
    /// The input and output buffers must remain valid until the token is returned by `pop_used`.
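    ///
    /// For example, the buffers must stay alive across this whole sequence (a sketch, not
    /// compiled as a doctest; `queue`, `transport`, `request` and `response` are assumed to be
    /// set up as in the type-level example):
    ///
    /// ```ignore
    /// let token = unsafe { queue.add(&[&request], &[&mut response]) }?;
    /// transport.notify(0);
    /// while !queue.can_pop() {
    ///     core::hint::spin_loop();
    /// }
    /// // `pop_used` must be given the same buffers so they can be unshared.
    /// let used_len = queue.pop_used(token, &[&request], &[&mut response])?;
    /// ```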
    pub unsafe fn add(&mut self, inputs: &[*const [u8]], outputs: &[*mut [u8]]) -> Result<u16> {
        if inputs.is_empty() && outputs.is_empty() {
            return Err(Error::InvalidParam);
        }
        if inputs.len() + outputs.len() + self.num_used as usize > SIZE {
            return Err(Error::QueueFull);
        }

        // allocate descriptors from free list
        let head = self.free_head;
        let mut last = self.free_head;

        // Safe because self.desc is properly aligned, dereferenceable and initialised, and nothing
        // else reads or writes the free descriptors during this block.
        unsafe {
            for (buffer, direction) in input_output_iter(inputs, outputs) {
                let desc = self.desc_ptr(self.free_head);
                (*desc).set_buf::<H>(buffer, direction, DescFlags::NEXT);
                last = self.free_head;
                self.free_head = (*desc).next;
            }

            // set last_elem.next = NULL
            (*self.desc_ptr(last)).flags.remove(DescFlags::NEXT);
        }

        self.num_used += (inputs.len() + outputs.len()) as u16;

        let avail_slot = self.avail_idx & (SIZE as u16 - 1);
        // Safe because self.avail is properly aligned, dereferenceable and initialised.
        unsafe {
            (*self.avail.as_ptr()).ring[avail_slot as usize] = head;
        }

        // Write barrier so that device sees changes to descriptor table and available ring before
        // change to available index.
        fence(Ordering::SeqCst);

        // increase head of avail ring
        self.avail_idx = self.avail_idx.wrapping_add(1);
        // Safe because self.avail is properly aligned, dereferenceable and initialised.
        unsafe {
            (*self.avail.as_ptr()).idx = self.avail_idx;
        }

        // Write barrier so that device can see change to available index after this method returns.
        fence(Ordering::SeqCst);

        Ok(head)
    }

    /// Adds the given buffers to the virtqueue, notifies the device, blocks until the device uses
    /// them, then pops them.
    ///
    /// This assumes that the device isn't processing any other buffers at the same time.
    pub fn add_notify_wait_pop(
        &mut self,
        inputs: &[*const [u8]],
        outputs: &[*mut [u8]],
        transport: &mut impl Transport,
    ) -> Result<u32> {
        // Safe because we don't return until the same token has been popped, so the buffers remain
        // valid until then.
        let token = unsafe { self.add(inputs, outputs) }?;

        // Notify the queue.
        transport.notify(self.queue_idx);

        // Wait until there is at least one element in the used ring.
        while !self.can_pop() {
            spin_loop();
        }

        self.pop_used(token, inputs, outputs)
    }

    /// Returns a non-null pointer to the descriptor at the given index.
    fn desc_ptr(&mut self, index: u16) -> *mut Descriptor {
        // Safe because self.desc is properly aligned and dereferenceable.
        unsafe { addr_of_mut!((*self.desc.as_ptr())[index as usize]) }
    }

    /// Returns whether there is a used element that can be popped.
    pub fn can_pop(&self) -> bool {
        // Read barrier, so we read a fresh value from the device.
        fence(Ordering::SeqCst);

        // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
        // instance of UsedRing.
        self.last_used_idx != unsafe { (*self.used.as_ptr()).idx }
    }

    /// Returns the descriptor index (a.k.a. token) of the next used element without popping it, or
    /// `None` if the used ring is empty.
    pub fn peek_used(&self) -> Option<u16> {
        if self.can_pop() {
            let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
            // Safe because self.used points to a valid, aligned, initialised, dereferenceable,
            // readable instance of UsedRing.
            Some(unsafe { (*self.used.as_ptr()).ring[last_used_slot as usize].id as u16 })
        } else {
            None
        }
    }

    /// Returns the number of free descriptors.
    pub fn available_desc(&self) -> usize {
        SIZE - self.num_used as usize
    }

    /// Unshares buffers in the list starting at descriptor index `head` and adds them to the free
    /// list. Unsharing may involve copying data back to the original buffers, so they must be
    /// passed in too.
    ///
    /// This will push all linked descriptors at the front of the free list.
    fn recycle_descriptors(&mut self, head: u16, inputs: &[*const [u8]], outputs: &[*mut [u8]]) {
        let original_free_head = self.free_head;
        self.free_head = head;
        let mut next = Some(head);

        for (buffer, direction) in input_output_iter(inputs, outputs) {
            let desc = self.desc_ptr(next.expect("Descriptor chain was shorter than expected."));

            // Safe because self.desc is properly aligned, dereferenceable and initialised, and
            // nothing else reads or writes the descriptor during this block.
            let paddr = unsafe {
                let paddr = (*desc).addr;
                (*desc).unset_buf();
                self.num_used -= 1;
                next = (*desc).next();
                if next.is_none() {
                    (*desc).next = original_free_head;
                }
                paddr
            };

            // Unshare the buffer (and perhaps copy its contents back to the original buffer).
            H::unshare(paddr as usize, buffer, direction);
        }

        if next.is_some() {
            panic!("Descriptor chain was longer than expected.");
        }
    }

    /// If the given token is next on the device used queue, pops it and returns the total buffer
    /// length which was used (written) by the device.
    ///
    /// Ref: linux virtio_ring.c virtqueue_get_buf_ctx
    pub fn pop_used(
        &mut self,
        token: u16,
        inputs: &[*const [u8]],
        outputs: &[*mut [u8]],
    ) -> Result<u32> {
        if !self.can_pop() {
            return Err(Error::NotReady);
        }
        // Read barrier not necessary, as can_pop already has one.

        // Get the index of the start of the descriptor chain for the next element in the used ring.
        let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
        let index;
        let len;
        // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
        // instance of UsedRing.
        unsafe {
            index = (*self.used.as_ptr()).ring[last_used_slot as usize].id as u16;
            len = (*self.used.as_ptr()).ring[last_used_slot as usize].len;
        }

        if index != token {
            // The device used a different descriptor chain to the one we were expecting.
            return Err(Error::WrongToken);
        }

        self.recycle_descriptors(index, inputs, outputs);
        self.last_used_idx = self.last_used_idx.wrapping_add(1);

        Ok(len)
    }
}

/// The inner layout of a VirtQueue.
///
/// Ref: 2.6 Split Virtqueues
#[derive(Debug)]
enum VirtQueueLayout<H: Hal> {
    Legacy {
        dma: Dma<H>,
        avail_offset: usize,
        used_offset: usize,
    },
    Modern {
        /// The region used for the descriptor area and driver area.
        driver_to_device_dma: Dma<H>,
        /// The region used for the device area.
        device_to_driver_dma: Dma<H>,
        /// The offset from the start of the `driver_to_device_dma` region to the driver area
        /// (available ring).
        avail_offset: usize,
    },
}

impl<H: Hal> VirtQueueLayout<H> {
    /// Allocates a single DMA region containing all parts of the virtqueue, following the layout
    /// required by legacy interfaces.
    ///
    /// Ref: 2.6.2 Legacy Interfaces: A Note on Virtqueue Layout
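    ///
    /// For example (assuming a 4 KiB `PAGE_SIZE`), a queue of 16 entries needs 256 bytes of
    /// descriptor table plus 38 bytes of available ring, rounded up to one page, followed by the
    /// 134-byte used ring rounded up to another page, i.e. two contiguous pages in total.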
    fn allocate_legacy(queue_size: u16) -> Result<Self> {
        let (desc, avail, used) = queue_part_sizes(queue_size);
        let size = align_up(desc + avail) + align_up(used);
        // Allocate contiguous pages.
        let dma = Dma::new(size / PAGE_SIZE, BufferDirection::Both)?;
        Ok(Self::Legacy {
            dma,
            avail_offset: desc,
            used_offset: align_up(desc + avail),
        })
    }

    /// Allocates separate DMA regions for the different parts of the virtqueue, as supported by
    /// non-legacy interfaces.
    ///
    /// This is preferred over `allocate_legacy` where possible as it reduces memory fragmentation
    /// and allows the HAL to know which DMA regions are used in which direction.
    fn allocate_flexible(queue_size: u16) -> Result<Self> {
        let (desc, avail, used) = queue_part_sizes(queue_size);
        let driver_to_device_dma = Dma::new(pages(desc + avail), BufferDirection::DriverToDevice)?;
        let device_to_driver_dma = Dma::new(pages(used), BufferDirection::DeviceToDriver)?;
        Ok(Self::Modern {
            driver_to_device_dma,
            device_to_driver_dma,
            avail_offset: desc,
        })
    }

    /// Returns the physical address of the descriptor area.
    fn descriptors_paddr(&self) -> PhysAddr {
        match self {
            Self::Legacy { dma, .. } => dma.paddr(),
            Self::Modern {
                driver_to_device_dma,
                ..
            } => driver_to_device_dma.paddr(),
        }
    }

    /// Returns a pointer to the descriptor table (in the descriptor area).
    fn descriptors_vaddr(&self) -> NonNull<u8> {
        match self {
            Self::Legacy { dma, .. } => dma.vaddr(0),
            Self::Modern {
                driver_to_device_dma,
                ..
            } => driver_to_device_dma.vaddr(0),
        }
    }

    /// Returns the physical address of the driver area.
    fn driver_area_paddr(&self) -> PhysAddr {
        match self {
            Self::Legacy {
                dma, avail_offset, ..
            } => dma.paddr() + avail_offset,
            Self::Modern {
                driver_to_device_dma,
                avail_offset,
                ..
            } => driver_to_device_dma.paddr() + avail_offset,
        }
    }

    /// Returns a pointer to the available ring (in the driver area).
    fn avail_vaddr(&self) -> NonNull<u8> {
        match self {
            Self::Legacy {
                dma, avail_offset, ..
            } => dma.vaddr(*avail_offset),
            Self::Modern {
                driver_to_device_dma,
                avail_offset,
                ..
            } => driver_to_device_dma.vaddr(*avail_offset),
        }
    }

    /// Returns the physical address of the device area.
    fn device_area_paddr(&self) -> PhysAddr {
        match self {
            Self::Legacy {
                used_offset, dma, ..
            } => dma.paddr() + used_offset,
            Self::Modern {
                device_to_driver_dma,
                ..
            } => device_to_driver_dma.paddr(),
        }
    }

    /// Returns a pointer to the used ring (in the device area).
    fn used_vaddr(&self) -> NonNull<u8> {
        match self {
            Self::Legacy {
                dma, used_offset, ..
            } => dma.vaddr(*used_offset),
            Self::Modern {
                device_to_driver_dma,
                ..
            } => device_to_driver_dma.vaddr(0),
        }
    }
}

/// Returns the size in bytes of the descriptor table, available ring and used ring for a given
/// queue size.
///
/// Ref: 2.6 Split Virtqueues
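///
/// For example, with `queue_size = 16` the descriptor table takes `16 * 16 = 256` bytes, the
/// available ring `2 * (3 + 16) = 38` bytes, and the used ring `2 * 3 + 8 * 16 = 134` bytes
/// (each `Descriptor` is 16 bytes and each `UsedElem` is 8 bytes).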
fn queue_part_sizes(queue_size: u16) -> (usize, usize, usize) {
    assert!(
        queue_size.is_power_of_two(),
        "queue size should be a power of 2"
    );
    let queue_size = queue_size as usize;
    let desc = size_of::<Descriptor>() * queue_size;
    let avail = size_of::<u16>() * (3 + queue_size);
    let used = size_of::<u16>() * 3 + size_of::<UsedElem>() * queue_size;
    (desc, avail, used)
}

#[repr(C, align(16))]
#[derive(Debug)]
pub(crate) struct Descriptor {
    addr: u64,
    len: u32,
    flags: DescFlags,
    next: u16,
}

impl Descriptor {
    /// Sets the buffer address, length and flags, and shares it with the device.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the buffer lives at least as long as the descriptor is active.
    unsafe fn set_buf<H: Hal>(
        &mut self,
        buf: NonNull<[u8]>,
        direction: BufferDirection,
        extra_flags: DescFlags,
    ) {
        self.addr = H::share(buf, direction) as u64;
        self.len = buf.len() as u32;
        self.flags = extra_flags
            | match direction {
                BufferDirection::DeviceToDriver => DescFlags::WRITE,
                BufferDirection::DriverToDevice => DescFlags::empty(),
                BufferDirection::Both => {
                    panic!("Buffer passed to device should never use BufferDirection::Both.")
                }
            };
    }

    /// Sets the buffer address and length to 0.
    ///
    /// This must only be called once the device has finished using the descriptor.
    fn unset_buf(&mut self) {
        self.addr = 0;
        self.len = 0;
    }

    /// Returns the index of the next descriptor in the chain if the `NEXT` flag is set, or `None`
    /// if it is not (and thus this descriptor is the end of the chain).
    fn next(&self) -> Option<u16> {
        if self.flags.contains(DescFlags::NEXT) {
            Some(self.next)
        } else {
            None
        }
    }
}

bitflags! {
    /// Descriptor flags
    struct DescFlags: u16 {
        const NEXT = 1;
        const WRITE = 2;
        const INDIRECT = 4;
    }
}

/// The driver uses the available ring to offer buffers to the device:
/// each ring entry refers to the head of a descriptor chain.
/// It is only written by the driver and read by the device.
#[repr(C)]
#[derive(Debug)]
struct AvailRing<const SIZE: usize> {
    flags: u16,
    /// A driver MUST NOT decrement the idx.
    idx: u16,
    ring: [u16; SIZE],
    used_event: u16, // unused
}

/// The used ring is where the device returns buffers once it is done with them:
/// it is only written to by the device, and read by the driver.
#[repr(C)]
#[derive(Debug)]
struct UsedRing<const SIZE: usize> {
    flags: u16,
    idx: u16,
    ring: [UsedElem; SIZE],
    avail_event: u16, // unused
}

#[repr(C)]
#[derive(Debug)]
struct UsedElem {
    id: u32,
    len: u32,
}

/// Simulates the device writing to a VirtIO queue, for use in tests.
///
/// The fake device always uses descriptors in order.
#[cfg(test)]
pub(crate) fn fake_write_to_queue<const QUEUE_SIZE: usize>(
    receive_queue_descriptors: *const Descriptor,
    receive_queue_driver_area: VirtAddr,
    receive_queue_device_area: VirtAddr,
    data: &[u8],
) {
    let descriptors = ptr::slice_from_raw_parts(receive_queue_descriptors, QUEUE_SIZE);
    let available_ring = receive_queue_driver_area as *const AvailRing<QUEUE_SIZE>;
    let used_ring = receive_queue_device_area as *mut UsedRing<QUEUE_SIZE>;
    // Safe because the various pointers are properly aligned, dereferenceable, initialised, and
    // nothing else accesses them during this block.
    unsafe {
        // Make sure there is actually at least one descriptor available to write to.
        assert_ne!((*available_ring).idx, (*used_ring).idx);
        // The fake device always uses descriptors in order, like VIRTIO_F_IN_ORDER, so
        // `used_ring.idx` marks the next descriptor we should take from the available ring.
        let next_slot = (*used_ring).idx & (QUEUE_SIZE as u16 - 1);
        let head_descriptor_index = (*available_ring).ring[next_slot as usize];
        let mut descriptor = &(*descriptors)[head_descriptor_index as usize];

        // Loop through all descriptors in the chain, writing data to them.
        let mut remaining_data = data;
        loop {
            // Check the buffer and write to it.
            let flags = descriptor.flags;
            assert!(flags.contains(DescFlags::WRITE));
            let buffer_length = descriptor.len as usize;
            let length_to_write = min(remaining_data.len(), buffer_length);
            ptr::copy(
                remaining_data.as_ptr(),
                descriptor.addr as *mut u8,
                length_to_write,
            );
            remaining_data = &remaining_data[length_to_write..];

            if let Some(next) = descriptor.next() {
                descriptor = &(*descriptors)[next as usize];
            } else {
                assert_eq!(remaining_data.len(), 0);
                break;
            }
        }

        // Mark the buffer as used.
        (*used_ring).ring[next_slot as usize].id = head_descriptor_index as u32;
        (*used_ring).ring[next_slot as usize].len = data.len() as u32;
        (*used_ring).idx += 1;
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        hal::fake::FakeHal,
        transport::mmio::{MmioTransport, VirtIOHeader, MODERN_VERSION},
    };
    use core::ptr::NonNull;

    #[test]
    fn invalid_queue_size() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        // Size not a power of 2.
        assert_eq!(
            VirtQueue::<FakeHal, 3>::new(&mut transport, 0).unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn queue_too_big() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        assert_eq!(
            VirtQueue::<FakeHal, 8>::new(&mut transport, 0).unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn queue_already_used() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap();
        assert_eq!(
            VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap_err(),
            Error::AlreadyUsed
        );
    }

    #[test]
    fn add_empty() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap();
        assert_eq!(
            unsafe { queue.add(&[], &[]) }.unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn add_too_many() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap();
        assert_eq!(queue.available_desc(), 4);
        assert_eq!(
            unsafe { queue.add(&[&[], &[], &[]], &[&mut [], &mut []]) }.unwrap_err(),
            Error::QueueFull
        );
    }

    #[test]
    fn add_buffers() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap();
        assert_eq!(queue.available_desc(), 4);

        // Add a buffer chain consisting of two device-readable parts followed by two
        // device-writable parts.
        let token = unsafe { queue.add(&[&[1, 2], &[3]], &[&mut [0, 0], &mut [0]]) }.unwrap();

        assert_eq!(queue.available_desc(), 0);
        assert!(!queue.can_pop());

        // Safe because the various parts of the queue are properly aligned, dereferenceable and
        // initialised, and nothing else is accessing them at the same time.
        unsafe {
            let first_descriptor_index = (*queue.avail.as_ptr()).ring[0];
            assert_eq!(first_descriptor_index, token);
            assert_eq!(
                (*queue.desc.as_ptr())[first_descriptor_index as usize].len,
                2
            );
            assert_eq!(
                (*queue.desc.as_ptr())[first_descriptor_index as usize].flags,
                DescFlags::NEXT
            );
            let second_descriptor_index =
                (*queue.desc.as_ptr())[first_descriptor_index as usize].next;
            assert_eq!(
                (*queue.desc.as_ptr())[second_descriptor_index as usize].len,
                1
            );
            assert_eq!(
                (*queue.desc.as_ptr())[second_descriptor_index as usize].flags,
                DescFlags::NEXT
            );
            let third_descriptor_index =
                (*queue.desc.as_ptr())[second_descriptor_index as usize].next;
            assert_eq!(
                (*queue.desc.as_ptr())[third_descriptor_index as usize].len,
                2
            );
            assert_eq!(
                (*queue.desc.as_ptr())[third_descriptor_index as usize].flags,
                DescFlags::NEXT | DescFlags::WRITE
            );
            let fourth_descriptor_index =
                (*queue.desc.as_ptr())[third_descriptor_index as usize].next;
            assert_eq!(
                (*queue.desc.as_ptr())[fourth_descriptor_index as usize].len,
                1
            );
            assert_eq!(
                (*queue.desc.as_ptr())[fourth_descriptor_index as usize].flags,
                DescFlags::WRITE
            );
        }
    }
}

/// Returns an iterator over the buffers of first `inputs` and then `outputs`, paired with the
/// corresponding `BufferDirection`.
///
/// Panics if any of the buffer pointers is null.
fn input_output_iter<'a>(
    inputs: &'a [*const [u8]],
    outputs: &'a [*mut [u8]],
) -> impl Iterator<Item = (NonNull<[u8]>, BufferDirection)> + 'a {
    inputs
        .iter()
        .map(|input| {
            (
                NonNull::new(*input as *mut [u8]).unwrap(),
                BufferDirection::DriverToDevice,
            )
        })
        .chain(outputs.iter().map(|output| {
            (
                NonNull::new(*output).unwrap(),
                BufferDirection::DeviceToDriver,
            )
        }))
}