#![deny(unsafe_op_in_unsafe_fn)]

use crate::hal::{BufferDirection, Dma, Hal, PhysAddr};
use crate::transport::Transport;
use crate::{align_up, nonnull_slice_from_raw_parts, pages, Error, Result, PAGE_SIZE};
use bitflags::bitflags;
#[cfg(test)]
use core::cmp::min;
use core::hint::spin_loop;
use core::mem::{size_of, take};
#[cfg(test)]
use core::ptr;
use core::ptr::NonNull;
use core::sync::atomic::{fence, Ordering};
use zerocopy::FromBytes;

/// The mechanism for bulk data transport on virtio devices.
///
/// Each device can have zero or more virtqueues.
///
/// * `SIZE`: The size of the queue. This is both the number of descriptors, and the number of
///   slots in the available and used rings.
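///
/// # Example
///
/// A minimal usage sketch, assuming a `Hal` implementation called `MyHal` and an initialised
/// `transport` (both placeholders, not provided by this module):
///
/// ```ignore
/// // Queue index 0 with 16 descriptors. SIZE must be a power of two and no larger than the
/// // maximum queue size the device reports for this index.
/// let mut queue = VirtQueue::<MyHal, 16>::new(&mut transport, 0)?;
/// ```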
#[derive(Debug)]
pub struct VirtQueue<H: Hal, const SIZE: usize> {
    /// DMA guard
    layout: VirtQueueLayout<H>,
    /// Descriptor table
    ///
    /// The device may be able to modify this, even though it's not supposed to, so we shouldn't
    /// trust values read back from it. Use `desc_shadow` instead to keep track of what we wrote to
    /// it.
    desc: NonNull<[Descriptor]>,
    /// Available ring
    ///
    /// The device may be able to modify this, even though it's not supposed to, so we shouldn't
    /// trust values read back from it. The only field we need to read currently is `idx`, so we
    /// have `avail_idx` below to use instead.
    avail: NonNull<AvailRing<SIZE>>,
    /// Used ring
    used: NonNull<UsedRing<SIZE>>,
    /// The index of the queue.
    queue_idx: u16,
    /// The number of descriptors currently in use.
    num_used: u16,
    /// The head desc index of the free list.
    free_head: u16,
    /// Our trusted copy of `desc` that the device can't access.
    desc_shadow: [Descriptor; SIZE],
    /// Our trusted copy of `avail.idx`.
    avail_idx: u16,
    last_used_idx: u16,
}

impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
    /// Create a new VirtQueue.
    pub fn new<T: Transport>(transport: &mut T, idx: u16) -> Result<Self> {
        if transport.queue_used(idx) {
            return Err(Error::AlreadyUsed);
        }
        if !SIZE.is_power_of_two()
            || SIZE > u16::MAX.into()
            || transport.max_queue_size(idx) < SIZE as u32
        {
            return Err(Error::InvalidParam);
        }
        let size = SIZE as u16;

        let layout = if transport.requires_legacy_layout() {
            VirtQueueLayout::allocate_legacy(size)?
        } else {
            VirtQueueLayout::allocate_flexible(size)?
        };

        transport.queue_set(
            idx,
            size.into(),
            layout.descriptors_paddr(),
            layout.driver_area_paddr(),
            layout.device_area_paddr(),
        );

        let desc =
            nonnull_slice_from_raw_parts(layout.descriptors_vaddr().cast::<Descriptor>(), SIZE);
        let avail = layout.avail_vaddr().cast();
        let used = layout.used_vaddr().cast();

        let mut desc_shadow: [Descriptor; SIZE] = FromBytes::new_zeroed();
        // Link descriptors together.
        for i in 0..(size - 1) {
            desc_shadow[i as usize].next = i + 1;
            // Safe because `desc` is properly aligned, dereferenceable, initialised, and the device
            // won't access the descriptors for the duration of this unsafe block.
            unsafe {
                (*desc.as_ptr())[i as usize].next = i + 1;
            }
        }

        Ok(VirtQueue {
            layout,
            desc,
            avail,
            used,
            queue_idx: idx,
            num_used: 0,
            free_head: 0,
            desc_shadow,
            avail_idx: 0,
            last_used_idx: 0,
        })
    }

    /// Add buffers to the virtqueue, return a token.
    ///
    /// The buffers must not be empty.
    ///
    /// Ref: linux virtio_ring.c virtqueue_add
    ///
    /// # Safety
    ///
    /// The input and output buffers must remain valid and not be accessed until a call to
    /// `pop_used` with the returned token succeeds.
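    ///
    /// A hedged sketch of the intended pairing with `pop_used` (`queue`, `request` and `response`
    /// are placeholders, not defined here):
    ///
    /// ```ignore
    /// // SAFETY: the buffers stay alive and untouched until the matching `pop_used` succeeds.
    /// let token = unsafe { queue.add(&[&request], &mut [&mut response]) }?;
    /// // ... notify the device and wait until `queue.can_pop()` ...
    /// // SAFETY: same buffers as passed to `add` for this token.
    /// let len = unsafe { queue.pop_used(token, &[&request], &mut [&mut response]) }?;
    /// ```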
    pub unsafe fn add<'a, 'b>(
        &mut self,
        inputs: &'a [&'b [u8]],
        outputs: &'a mut [&'b mut [u8]],
    ) -> Result<u16> {
        if inputs.is_empty() && outputs.is_empty() {
            return Err(Error::InvalidParam);
        }
        if inputs.len() + outputs.len() + self.num_used as usize > SIZE {
            return Err(Error::QueueFull);
        }

        // Allocate descriptors from the free list.
        let head = self.free_head;
        let mut last = self.free_head;

        for (buffer, direction) in InputOutputIter::new(inputs, outputs) {
            assert_ne!(buffer.len(), 0);

            // Write to `desc_shadow` first, then copy it to the real descriptor table.
            let desc = &mut self.desc_shadow[usize::from(self.free_head)];
            // Safe because our caller promises that the buffers live at least until `pop_used`
            // returns them.
            unsafe {
                desc.set_buf::<H>(buffer, direction, DescFlags::NEXT);
            }
            last = self.free_head;
            self.free_head = desc.next;

            self.write_desc(last);
        }

        // Clear the NEXT flag on the last descriptor, marking the end of the chain.
        self.desc_shadow[usize::from(last)]
            .flags
            .remove(DescFlags::NEXT);
        self.write_desc(last);

        self.num_used += (inputs.len() + outputs.len()) as u16;

        let avail_slot = self.avail_idx & (SIZE as u16 - 1);
        // Safe because self.avail is properly aligned, dereferenceable and initialised.
        unsafe {
            (*self.avail.as_ptr()).ring[avail_slot as usize] = head;
        }

        // Write barrier so that the device sees changes to the descriptor table and available
        // ring before the change to the available index.
        fence(Ordering::SeqCst);

        // Increase the head of the available ring.
        self.avail_idx = self.avail_idx.wrapping_add(1);
        // Safe because self.avail is properly aligned, dereferenceable and initialised.
        unsafe {
            (*self.avail.as_ptr()).idx = self.avail_idx;
        }

        // Write barrier so that the device can see the change to the available index after this
        // method returns.
        fence(Ordering::SeqCst);

        Ok(head)
    }

    /// Adds the given buffers to the virtqueue, notifies the device, blocks until the device uses
    /// them, and then pops them.
    ///
    /// This assumes that the device isn't processing any other buffers at the same time.
    ///
    /// The buffers must not be empty.
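    ///
    /// A minimal sketch, assuming `queue` and `transport` have already been set up (the names and
    /// buffer contents are placeholders):
    ///
    /// ```ignore
    /// let request = [1u8, 2, 3, 4];
    /// let mut response = [0u8; 32];
    /// // Spins until the device has used the buffers; returns the number of bytes written.
    /// let written = queue.add_notify_wait_pop(&[&request], &mut [&mut response], &mut transport)?;
    /// ```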
    pub fn add_notify_wait_pop<'a>(
        &mut self,
        inputs: &'a [&'a [u8]],
        outputs: &'a mut [&'a mut [u8]],
        transport: &mut impl Transport,
    ) -> Result<u32> {
        // Safe because we don't return until the same token has been popped, so the buffers remain
        // valid and are not otherwise accessed until then.
        let token = unsafe { self.add(inputs, outputs) }?;

        // Notify the queue.
        if self.should_notify() {
            transport.notify(self.queue_idx);
        }

        // Wait until there is at least one element in the used ring.
        while !self.can_pop() {
            spin_loop();
        }

        // Safe because these are the same buffers as we passed to `add` above and they are still
        // valid.
        unsafe { self.pop_used(token, inputs, outputs) }
    }

    /// Returns whether the driver should notify the device after adding a new buffer to the
    /// virtqueue.
    ///
    /// This will be false if the device has suppressed notifications.
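    ///
    /// Typical use right after `add`, sketched with placeholder `queue`, `transport` and
    /// `queue_index` values:
    ///
    /// ```ignore
    /// if queue.should_notify() {
    ///     transport.notify(queue_index);
    /// }
    /// ```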
    pub fn should_notify(&self) -> bool {
        // Read barrier, so we read a fresh value from the device.
        fence(Ordering::SeqCst);

        // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
        // instance of UsedRing.
        unsafe { (*self.used.as_ptr()).flags & 0x0001 == 0 }
    }

    /// Copies the descriptor at the given index from `desc_shadow` to `desc`, so it can be seen by
    /// the device.
    fn write_desc(&mut self, index: u16) {
        let index = usize::from(index);
        // Safe because self.desc is properly aligned, dereferenceable and initialised, and nothing
        // else reads or writes the descriptor during this block.
        unsafe {
            (*self.desc.as_ptr())[index] = self.desc_shadow[index].clone();
        }
    }

    /// Returns whether there is a used element that can be popped.
    pub fn can_pop(&self) -> bool {
        // Read barrier, so we read a fresh value from the device.
        fence(Ordering::SeqCst);

        // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
        // instance of UsedRing.
        self.last_used_idx != unsafe { (*self.used.as_ptr()).idx }
    }

    /// Returns the descriptor index (a.k.a. token) of the next used element without popping it, or
    /// `None` if the used ring is empty.
    pub fn peek_used(&self) -> Option<u16> {
        if self.can_pop() {
            let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
            // Safe because self.used points to a valid, aligned, initialised, dereferenceable,
            // readable instance of UsedRing.
            Some(unsafe { (*self.used.as_ptr()).ring[last_used_slot as usize].id as u16 })
        } else {
            None
        }
    }

    /// Returns the number of free descriptors.
    pub fn available_desc(&self) -> usize {
        SIZE - self.num_used as usize
    }

    /// Unshares buffers in the list starting at descriptor index `head` and adds them to the free
    /// list. Unsharing may involve copying data back to the original buffers, so they must be
    /// passed in too.
    ///
    /// This will push all linked descriptors at the front of the free list.
    ///
    /// # Safety
    ///
    /// The buffers in `inputs` and `outputs` must match the set of buffers originally added to the
    /// queue by `add`.
    unsafe fn recycle_descriptors<'a>(
        &mut self,
        head: u16,
        inputs: &'a [&'a [u8]],
        outputs: &'a mut [&'a mut [u8]],
    ) {
        let original_free_head = self.free_head;
        self.free_head = head;
        let mut next = Some(head);

        for (buffer, direction) in InputOutputIter::new(inputs, outputs) {
            assert_ne!(buffer.len(), 0);

            let desc_index = next.expect("Descriptor chain was shorter than expected.");
            let desc = &mut self.desc_shadow[usize::from(desc_index)];

            let paddr = desc.addr;
            desc.unset_buf();
            self.num_used -= 1;
            next = desc.next();
            if next.is_none() {
                desc.next = original_free_head;
            }

            self.write_desc(desc_index);

            // Safe because the caller ensures that the buffer is valid and matches the descriptor
            // from which we got `paddr`.
            unsafe {
                // Unshare the buffer (and perhaps copy its contents back to the original buffer).
                H::unshare(paddr as usize, buffer, direction);
            }
        }

        if next.is_some() {
            panic!("Descriptor chain was longer than expected.");
        }
    }

    /// If the given token is next on the device used queue, pops it and returns the total buffer
    /// length which was used (written) by the device.
    ///
    /// Ref: linux virtio_ring.c virtqueue_get_buf_ctx
    ///
    /// # Safety
    ///
    /// The buffers in `inputs` and `outputs` must match the set of buffers originally added to the
    /// queue by `add` when it returned the token being passed in here.
    pub unsafe fn pop_used<'a>(
        &mut self,
        token: u16,
        inputs: &'a [&'a [u8]],
        outputs: &'a mut [&'a mut [u8]],
    ) -> Result<u32> {
        if !self.can_pop() {
            return Err(Error::NotReady);
        }
        // Read barrier not necessary, as can_pop already has one.

        // Get the index of the start of the descriptor chain for the next element in the used ring.
        let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
        let index;
        let len;
        // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
        // instance of UsedRing.
        unsafe {
            index = (*self.used.as_ptr()).ring[last_used_slot as usize].id as u16;
            len = (*self.used.as_ptr()).ring[last_used_slot as usize].len;
        }

        if index != token {
            // The device used a different descriptor chain to the one we were expecting.
            return Err(Error::WrongToken);
        }

        // Safe because the caller ensures the buffers are valid and match the descriptor.
        unsafe {
            self.recycle_descriptors(index, inputs, outputs);
        }
        self.last_used_idx = self.last_used_idx.wrapping_add(1);

        Ok(len)
    }
}

/// The inner layout of a VirtQueue.
///
/// Ref: 2.6 Split Virtqueues
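///
/// A rough sketch of the two layouts, inferred from the offsets computed below (not to scale):
///
/// ```text
/// Legacy:  one DMA region:       [ descriptor table | avail ring | padding | used ring ]
/// Modern:  driver_to_device_dma: [ descriptor table | avail ring ]
///          device_to_driver_dma: [ used ring ]
/// ```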
#[derive(Debug)]
enum VirtQueueLayout<H: Hal> {
    Legacy {
        dma: Dma<H>,
        avail_offset: usize,
        used_offset: usize,
    },
    Modern {
        /// The region used for the descriptor area and driver area.
        driver_to_device_dma: Dma<H>,
        /// The region used for the device area.
        device_to_driver_dma: Dma<H>,
        /// The offset from the start of the `driver_to_device_dma` region to the driver area
        /// (available ring).
        avail_offset: usize,
    },
}

impl<H: Hal> VirtQueueLayout<H> {
    /// Allocates a single DMA region containing all parts of the virtqueue, following the layout
    /// required by legacy interfaces.
    ///
    /// Ref: 2.6.2 Legacy Interfaces: A Note on Virtqueue Layout
    fn allocate_legacy(queue_size: u16) -> Result<Self> {
        let (desc, avail, used) = queue_part_sizes(queue_size);
        let size = align_up(desc + avail) + align_up(used);
        // Allocate contiguous pages.
        let dma = Dma::new(size / PAGE_SIZE, BufferDirection::Both)?;
        Ok(Self::Legacy {
            dma,
            avail_offset: desc,
            used_offset: align_up(desc + avail),
        })
    }

    /// Allocates separate DMA regions for the different parts of the virtqueue, as supported by
    /// non-legacy interfaces.
    ///
    /// This is preferred over `allocate_legacy` where possible as it reduces memory fragmentation
    /// and allows the HAL to know which DMA regions are used in which direction.
    fn allocate_flexible(queue_size: u16) -> Result<Self> {
        let (desc, avail, used) = queue_part_sizes(queue_size);
        let driver_to_device_dma = Dma::new(pages(desc + avail), BufferDirection::DriverToDevice)?;
        let device_to_driver_dma = Dma::new(pages(used), BufferDirection::DeviceToDriver)?;
        Ok(Self::Modern {
            driver_to_device_dma,
            device_to_driver_dma,
            avail_offset: desc,
        })
    }

    /// Returns the physical address of the descriptor area.
    fn descriptors_paddr(&self) -> PhysAddr {
        match self {
            Self::Legacy { dma, .. } => dma.paddr(),
            Self::Modern {
                driver_to_device_dma,
                ..
            } => driver_to_device_dma.paddr(),
        }
    }

    /// Returns a pointer to the descriptor table (in the descriptor area).
    fn descriptors_vaddr(&self) -> NonNull<u8> {
        match self {
            Self::Legacy { dma, .. } => dma.vaddr(0),
            Self::Modern {
                driver_to_device_dma,
                ..
            } => driver_to_device_dma.vaddr(0),
        }
    }

    /// Returns the physical address of the driver area.
    fn driver_area_paddr(&self) -> PhysAddr {
        match self {
            Self::Legacy {
                dma, avail_offset, ..
            } => dma.paddr() + avail_offset,
            Self::Modern {
                driver_to_device_dma,
                avail_offset,
                ..
            } => driver_to_device_dma.paddr() + avail_offset,
        }
    }

    /// Returns a pointer to the available ring (in the driver area).
    fn avail_vaddr(&self) -> NonNull<u8> {
        match self {
            Self::Legacy {
                dma, avail_offset, ..
            } => dma.vaddr(*avail_offset),
            Self::Modern {
                driver_to_device_dma,
                avail_offset,
                ..
            } => driver_to_device_dma.vaddr(*avail_offset),
        }
    }

    /// Returns the physical address of the device area.
    fn device_area_paddr(&self) -> PhysAddr {
        match self {
            Self::Legacy {
                used_offset, dma, ..
            } => dma.paddr() + used_offset,
            Self::Modern {
                device_to_driver_dma,
                ..
            } => device_to_driver_dma.paddr(),
        }
    }

    /// Returns a pointer to the used ring (in the device area).
    fn used_vaddr(&self) -> NonNull<u8> {
        match self {
            Self::Legacy {
                dma, used_offset, ..
            } => dma.vaddr(*used_offset),
            Self::Modern {
                device_to_driver_dma,
                ..
            } => device_to_driver_dma.vaddr(0),
        }
    }
}

/// Returns the size in bytes of the descriptor table, available ring and used ring for a given
/// queue size.
///
/// Ref: 2.6 Split Virtqueues
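///
/// For example, with `queue_size = 16`, the 16-byte `Descriptor` and the 8-byte `UsedElem`
/// defined below give `(256, 38, 134)`: 16 * 16 bytes of descriptors, 2 * (3 + 16) bytes for the
/// available ring, and 2 * 3 + 8 * 16 bytes for the used ring.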
fn queue_part_sizes(queue_size: u16) -> (usize, usize, usize) {
    assert!(
        queue_size.is_power_of_two(),
        "queue size should be a power of 2"
    );
    let queue_size = queue_size as usize;
    let desc = size_of::<Descriptor>() * queue_size;
    let avail = size_of::<u16>() * (3 + queue_size);
    let used = size_of::<u16>() * 3 + size_of::<UsedElem>() * queue_size;
    (desc, avail, used)
}

#[repr(C, align(16))]
#[derive(Clone, Debug, FromBytes)]
pub(crate) struct Descriptor {
    addr: u64,
    len: u32,
    flags: DescFlags,
    next: u16,
}

impl Descriptor {
    /// Sets the buffer address, length and flags, and shares it with the device.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the buffer lives at least as long as the descriptor is active.
    unsafe fn set_buf<H: Hal>(
        &mut self,
        buf: NonNull<[u8]>,
        direction: BufferDirection,
        extra_flags: DescFlags,
    ) {
        // Safe because our caller promises that the buffer is valid.
        unsafe {
            self.addr = H::share(buf, direction) as u64;
        }
        self.len = buf.len() as u32;
        self.flags = extra_flags
            | match direction {
                BufferDirection::DeviceToDriver => DescFlags::WRITE,
                BufferDirection::DriverToDevice => DescFlags::empty(),
                BufferDirection::Both => {
                    panic!("Buffer passed to device should never use BufferDirection::Both.")
                }
            };
    }

    /// Sets the buffer address and length to 0.
    ///
    /// This must only be called once the device has finished using the descriptor.
    fn unset_buf(&mut self) {
        self.addr = 0;
        self.len = 0;
    }

    /// Returns the index of the next descriptor in the chain if the `NEXT` flag is set, or `None`
    /// if it is not (and thus this descriptor is the end of the chain).
    fn next(&self) -> Option<u16> {
        if self.flags.contains(DescFlags::NEXT) {
            Some(self.next)
        } else {
            None
        }
    }
}

/// Descriptor flags
#[derive(Copy, Clone, Debug, Default, Eq, FromBytes, PartialEq)]
#[repr(transparent)]
struct DescFlags(u16);

bitflags! {
    impl DescFlags: u16 {
        const NEXT = 1;
        const WRITE = 2;
        const INDIRECT = 4;
    }
}

/// The driver uses the available ring to offer buffers to the device:
/// each ring entry refers to the head of a descriptor chain.
/// It is only written by the driver and read by the device.
#[repr(C)]
#[derive(Debug)]
struct AvailRing<const SIZE: usize> {
    flags: u16,
    /// A driver MUST NOT decrement the idx.
    idx: u16,
    ring: [u16; SIZE],
    used_event: u16, // unused
}

/// The used ring is where the device returns buffers once it is done with them:
/// it is only written to by the device, and read by the driver.
#[repr(C)]
#[derive(Debug)]
struct UsedRing<const SIZE: usize> {
    flags: u16,
    idx: u16,
    ring: [UsedElem; SIZE],
    avail_event: u16, // unused
}

#[repr(C)]
#[derive(Debug)]
struct UsedElem {
    id: u32,
    len: u32,
}

struct InputOutputIter<'a, 'b> {
    inputs: &'a [&'b [u8]],
    outputs: &'a mut [&'b mut [u8]],
}

impl<'a, 'b> InputOutputIter<'a, 'b> {
    fn new(inputs: &'a [&'b [u8]], outputs: &'a mut [&'b mut [u8]]) -> Self {
        Self { inputs, outputs }
    }
}

impl<'a, 'b> Iterator for InputOutputIter<'a, 'b> {
    type Item = (NonNull<[u8]>, BufferDirection);

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(input) = take_first(&mut self.inputs) {
            Some(((*input).into(), BufferDirection::DriverToDevice))
        } else {
            let output = take_first_mut(&mut self.outputs)?;
            Some(((*output).into(), BufferDirection::DeviceToDriver))
        }
    }
}

// TODO: Use `slice::take_first` once it is stable
// (https://github.com/rust-lang/rust/issues/62280).
fn take_first<'a, T>(slice: &mut &'a [T]) -> Option<&'a T> {
    let (first, rem) = slice.split_first()?;
    *slice = rem;
    Some(first)
}

// TODO: Use `slice::take_first_mut` once it is stable
// (https://github.com/rust-lang/rust/issues/62280).
fn take_first_mut<'a, T>(slice: &mut &'a mut [T]) -> Option<&'a mut T> {
    let (first, rem) = take(slice).split_first_mut()?;
    *slice = rem;
    Some(first)
}

/// Simulates the device reading from a VirtIO queue and writing a response back, for use in tests.
///
/// The fake device always uses descriptors in order.
#[cfg(test)]
pub(crate) fn fake_read_write_queue<const QUEUE_SIZE: usize>(
    descriptors: *const [Descriptor; QUEUE_SIZE],
    queue_driver_area: *const u8,
    queue_device_area: *mut u8,
    handler: impl FnOnce(Vec<u8>) -> Vec<u8>,
) {
    use core::{ops::Deref, slice};

    let available_ring = queue_driver_area as *const AvailRing<QUEUE_SIZE>;
    let used_ring = queue_device_area as *mut UsedRing<QUEUE_SIZE>;

    // Safe because the various pointers are properly aligned, dereferenceable, initialised, and
    // nothing else accesses them during this block.
    unsafe {
        // Make sure there is actually at least one descriptor available to read from.
        assert_ne!((*available_ring).idx, (*used_ring).idx);
        // The fake device always uses descriptors in order, like VIRTIO_F_IN_ORDER, so
        // `used_ring.idx` marks the next descriptor we should take from the available ring.
        let next_slot = (*used_ring).idx & (QUEUE_SIZE as u16 - 1);
        let head_descriptor_index = (*available_ring).ring[next_slot as usize];
        let mut descriptor = &(*descriptors)[head_descriptor_index as usize];

        // Loop through all input descriptors in the chain, reading data from them.
        let mut input = Vec::new();
        while !descriptor.flags.contains(DescFlags::WRITE) {
            input.extend_from_slice(slice::from_raw_parts(
                descriptor.addr as *const u8,
                descriptor.len as usize,
            ));

            if let Some(next) = descriptor.next() {
                descriptor = &(*descriptors)[next as usize];
            } else {
                break;
            }
        }
        let input_length = input.len();

        // Let the test handle the request.
        let output = handler(input);

        // Write the response to the remaining descriptors.
        let mut remaining_output = output.deref();
        if descriptor.flags.contains(DescFlags::WRITE) {
            loop {
                assert!(descriptor.flags.contains(DescFlags::WRITE));

                let length_to_write = min(remaining_output.len(), descriptor.len as usize);
                ptr::copy(
                    remaining_output.as_ptr(),
                    descriptor.addr as *mut u8,
                    length_to_write,
                );
                remaining_output = &remaining_output[length_to_write..];

                if let Some(next) = descriptor.next() {
                    descriptor = &(*descriptors)[next as usize];
                } else {
                    break;
                }
            }
        }
        assert_eq!(remaining_output.len(), 0);

        // Mark the buffer as used.
        (*used_ring).ring[next_slot as usize].id = head_descriptor_index as u32;
        (*used_ring).ring[next_slot as usize].len = (input_length + output.len()) as u32;
        (*used_ring).idx += 1;
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        hal::fake::FakeHal,
        transport::mmio::{MmioTransport, VirtIOHeader, MODERN_VERSION},
    };
    use core::ptr::NonNull;

    #[test]
    fn invalid_queue_size() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        // Size not a power of 2.
        assert_eq!(
            VirtQueue::<FakeHal, 3>::new(&mut transport, 0).unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn queue_too_big() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        assert_eq!(
            VirtQueue::<FakeHal, 8>::new(&mut transport, 0).unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn queue_already_used() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap();
        assert_eq!(
            VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap_err(),
            Error::AlreadyUsed
        );
    }

    #[test]
    fn add_empty() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap();
        assert_eq!(
            unsafe { queue.add(&[], &mut []) }.unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn add_too_many() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap();
        assert_eq!(queue.available_desc(), 4);
        assert_eq!(
            unsafe { queue.add(&[&[], &[], &[]], &mut [&mut [], &mut []]) }.unwrap_err(),
            Error::QueueFull
        );
    }

    #[test]
    fn add_buffers() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap();
        assert_eq!(queue.available_desc(), 4);

        // Add a buffer chain consisting of two device-readable parts followed by two
        // device-writable parts.
        let token = unsafe { queue.add(&[&[1, 2], &[3]], &mut [&mut [0, 0], &mut [0]]) }.unwrap();

        assert_eq!(queue.available_desc(), 0);
        assert!(!queue.can_pop());

        // Safe because the various parts of the queue are properly aligned, dereferenceable and
        // initialised, and nothing else is accessing them at the same time.
        unsafe {
            let first_descriptor_index = (*queue.avail.as_ptr()).ring[0];
            assert_eq!(first_descriptor_index, token);
            assert_eq!(
                (*queue.desc.as_ptr())[first_descriptor_index as usize].len,
                2
            );
            assert_eq!(
                (*queue.desc.as_ptr())[first_descriptor_index as usize].flags,
                DescFlags::NEXT
            );
            let second_descriptor_index =
                (*queue.desc.as_ptr())[first_descriptor_index as usize].next;
            assert_eq!(
                (*queue.desc.as_ptr())[second_descriptor_index as usize].len,
                1
            );
            assert_eq!(
                (*queue.desc.as_ptr())[second_descriptor_index as usize].flags,
                DescFlags::NEXT
            );
            let third_descriptor_index =
                (*queue.desc.as_ptr())[second_descriptor_index as usize].next;
            assert_eq!(
                (*queue.desc.as_ptr())[third_descriptor_index as usize].len,
                2
            );
            assert_eq!(
                (*queue.desc.as_ptr())[third_descriptor_index as usize].flags,
                DescFlags::NEXT | DescFlags::WRITE
            );
            let fourth_descriptor_index =
                (*queue.desc.as_ptr())[third_descriptor_index as usize].next;
            assert_eq!(
                (*queue.desc.as_ptr())[fourth_descriptor_index as usize].len,
                1
            );
            assert_eq!(
                (*queue.desc.as_ptr())[fourth_descriptor_index as usize].flags,
                DescFlags::WRITE
            );
        }
    }
}