// dev_raw.rs

use super::{Config, EthernetAddress, Features, VirtioNetHdr};
use super::{MIN_BUFFER_LEN, NET_HDR_SIZE, QUEUE_RECEIVE, QUEUE_TRANSMIT, SUPPORTED_FEATURES};
use crate::hal::Hal;
use crate::queue::VirtQueue;
use crate::transport::Transport;
use crate::volatile::volread;
use crate::{Error, Result};
use log::{debug, info, warn};
use zerocopy::AsBytes;

/// Raw driver for a VirtIO network device.
///
/// This is a raw version of the VirtIONet driver. It provides non-blocking
/// methods for transmitting and receiving raw slices, without the buffer
/// management. For higher-level functionality such as receive buffer backing,
/// see [`VirtIONet`].
///
/// [`VirtIONet`]: super::VirtIONet
pub struct VirtIONetRaw<H: Hal, T: Transport, const QUEUE_SIZE: usize> {
    transport: T,
    mac: EthernetAddress,
    recv_queue: VirtQueue<H, QUEUE_SIZE>,
    send_queue: VirtQueue<H, QUEUE_SIZE>,
}

impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> VirtIONetRaw<H, T, QUEUE_SIZE> {
    /// Create a new VirtIO-Net driver.
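    ///
    /// A minimal construction sketch, assuming `transport` is an already-probed
    /// [`Transport`] and `HalImpl` is a hypothetical [`Hal`] implementation
    /// provided by the caller:
    ///
    /// ```ignore
    /// let mut net = VirtIONetRaw::<HalImpl, _, 16>::new(transport)?;
    /// let mac = net.mac_address();
    /// ```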
    pub fn new(mut transport: T) -> Result<Self> {
        let negotiated_features = transport.begin_init(SUPPORTED_FEATURES);
        info!("negotiated_features {:?}", negotiated_features);
        // Read the configuration space.
        let config = transport.config_space::<Config>()?;
        let mac;
        // Safe because `config` points to a valid MMIO region for the config space.
        unsafe {
            mac = volread!(config, mac);
            debug!(
                "Got MAC={:02x?}, status={:?}",
                mac,
                volread!(config, status)
            );
        }
        let send_queue = VirtQueue::new(
            &mut transport,
            QUEUE_TRANSMIT,
            false,
            negotiated_features.contains(Features::RING_EVENT_IDX),
        )?;
        let recv_queue = VirtQueue::new(
            &mut transport,
            QUEUE_RECEIVE,
            false,
            negotiated_features.contains(Features::RING_EVENT_IDX),
        )?;
        transport.finish_init();
        Ok(VirtIONetRaw {
            transport,
            mac,
            recv_queue,
            send_queue,
        })
    }

    /// Acknowledge interrupt.
    pub fn ack_interrupt(&mut self) -> bool {
        self.transport.ack_interrupt()
    }

    /// Disable interrupts.
    pub fn disable_interrupts(&mut self) {
        self.send_queue.set_dev_notify(false);
        self.recv_queue.set_dev_notify(false);
    }

    /// Enable interrupts.
    pub fn enable_interrupts(&mut self) {
        self.send_queue.set_dev_notify(true);
        self.recv_queue.set_dev_notify(true);
    }

    /// Get MAC address.
    pub fn mac_address(&self) -> EthernetAddress {
        self.mac
    }

    /// Whether a packet can currently be sent.
    pub fn can_send(&self) -> bool {
        // A blocking send uses up to two descriptors: one for the header and
        // one for the packet payload.
        self.send_queue.available_desc() >= 2
    }

    /// Whether the length of the receive buffer is valid.
    fn check_rx_buf_len(rx_buf: &[u8]) -> Result<()> {
        if rx_buf.len() < MIN_BUFFER_LEN {
            warn!("Receive buffer len {} is too small", rx_buf.len());
            Err(Error::InvalidParam)
        } else {
            Ok(())
        }
    }

    /// Whether the length of the transmit buffer is valid.
    fn check_tx_buf_len(tx_buf: &[u8]) -> Result<()> {
        if tx_buf.len() < NET_HDR_SIZE {
            warn!("Transmit buffer len {} is too small", tx_buf.len());
            Err(Error::InvalidParam)
        } else {
            Ok(())
        }
    }

    /// Fill the header of the `buffer` with [`VirtioNetHdr`].
    ///
    /// If the `buffer` is not large enough, it returns [`Error::InvalidParam`].
    pub fn fill_buffer_header(&self, buffer: &mut [u8]) -> Result<usize> {
        if buffer.len() < NET_HDR_SIZE {
            return Err(Error::InvalidParam);
        }
        let header = VirtioNetHdr::default();
        buffer[..NET_HDR_SIZE].copy_from_slice(header.as_bytes());
        Ok(NET_HDR_SIZE)
    }

    /// Submits a request to transmit a buffer immediately without waiting for
    /// the transmission to complete.
    ///
    /// It will submit a request to the VirtIO net device and return a token
    /// identifying the position of the first descriptor in the chain. If there
    /// are not enough descriptors to allocate, then it returns
    /// [`Error::QueueFull`].
    ///
    /// The caller needs to fill the `tx_buf` with a header by calling
    /// [`fill_buffer_header`] before transmission. Then it calls [`poll_transmit`]
    /// with the returned token to check whether the device has finished handling
    /// the request. Once it has, the caller must call [`transmit_complete`] with
    /// the same buffer before reading the result (transmitted length).
    ///
    /// # Safety
    ///
    /// `tx_buf` is still borrowed by the underlying VirtIO net device even after
    /// this method returns. Thus, it is the caller's responsibility to guarantee
    /// that it is not accessed before the request is completed in order to
    /// avoid data races.
    ///
    /// [`fill_buffer_header`]: Self::fill_buffer_header
    /// [`poll_transmit`]: Self::poll_transmit
    /// [`transmit_complete`]: Self::transmit_complete
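    ///
    /// # Examples
    ///
    /// A minimal sketch of the non-blocking transmit cycle, assuming `net` is an
    /// initialised `VirtIONetRaw` and `frame` is the Ethernet frame to send
    /// (both hypothetical names):
    ///
    /// ```ignore
    /// let mut tx_buf = [0u8; 2048];
    /// let hdr_len = net.fill_buffer_header(&mut tx_buf)?;
    /// tx_buf[hdr_len..hdr_len + frame.len()].copy_from_slice(frame);
    /// let end = hdr_len + frame.len();
    /// // SAFETY: `tx_buf` is not accessed again until `transmit_complete` returns.
    /// let token = unsafe { net.transmit_begin(&tx_buf[..end])? };
    /// while net.poll_transmit().is_none() {
    ///     core::hint::spin_loop();
    /// }
    /// let sent = unsafe { net.transmit_complete(token, &tx_buf[..end])? };
    /// ```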
    pub unsafe fn transmit_begin(&mut self, tx_buf: &[u8]) -> Result<u16> {
        Self::check_tx_buf_len(tx_buf)?;
        let token = self.send_queue.add(&[tx_buf], &mut [])?;
        if self.send_queue.should_notify() {
            self.transport.notify(QUEUE_TRANSMIT);
        }
        Ok(token)
    }

    /// Fetches the token of the next completed transmission request from the
    /// used ring and returns it, without removing it from the used ring. If
    /// there are no pending completed requests it returns [`None`].
    pub fn poll_transmit(&mut self) -> Option<u16> {
        self.send_queue.peek_used()
    }

    /// Completes a transmission operation which was started by [`transmit_begin`].
    /// Returns the number of bytes transmitted.
    ///
    /// # Safety
    ///
    /// The same buffer must be passed in again as was passed to
    /// [`transmit_begin`] when it returned the token.
    ///
    /// [`transmit_begin`]: Self::transmit_begin
    pub unsafe fn transmit_complete(&mut self, token: u16, tx_buf: &[u8]) -> Result<usize> {
        let len = self.send_queue.pop_used(token, &[tx_buf], &mut [])?;
        Ok(len as usize)
    }

    /// Submits a request to receive a buffer immediately without waiting for
    /// the reception to complete.
    ///
    /// It will submit a request to the VirtIO net device and return a token
    /// identifying the position of the first descriptor in the chain. If there
    /// are not enough descriptors to allocate, then it returns
    /// [`Error::QueueFull`].
    ///
    /// The caller can then call [`poll_receive`] with the returned token to
    /// check whether the device has finished handling the request. Once it has,
    /// the caller must call [`receive_complete`] with the same buffer before
    /// reading the response.
    ///
    /// # Safety
    ///
    /// `rx_buf` is still borrowed by the underlying VirtIO net device even after
    /// this method returns. Thus, it is the caller's responsibility to guarantee
    /// that it is not accessed before the request is completed in order to
    /// avoid data races.
    ///
    /// [`poll_receive`]: Self::poll_receive
    /// [`receive_complete`]: Self::receive_complete
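    ///
    /// # Examples
    ///
    /// A minimal sketch of the non-blocking receive cycle, assuming `net` is an
    /// initialised `VirtIONetRaw` (hypothetical name):
    ///
    /// ```ignore
    /// let mut rx_buf = [0u8; 2048];
    /// // SAFETY: `rx_buf` is not accessed again until `receive_complete` returns.
    /// let token = unsafe { net.receive_begin(&mut rx_buf)? };
    /// while net.poll_receive().is_none() {
    ///     core::hint::spin_loop();
    /// }
    /// let (hdr_len, pkt_len) = unsafe { net.receive_complete(token, &mut rx_buf)? };
    /// let packet = &rx_buf[hdr_len..hdr_len + pkt_len];
    /// ```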
    pub unsafe fn receive_begin(&mut self, rx_buf: &mut [u8]) -> Result<u16> {
        Self::check_rx_buf_len(rx_buf)?;
        let token = self.recv_queue.add(&[], &mut [rx_buf])?;
        if self.recv_queue.should_notify() {
            self.transport.notify(QUEUE_RECEIVE);
        }
        Ok(token)
    }

    /// Fetches the token of the next completed reception request from the
    /// used ring and returns it, without removing it from the used ring. If
    /// there are no pending completed requests it returns [`None`].
    pub fn poll_receive(&self) -> Option<u16> {
        self.recv_queue.peek_used()
    }

    /// Completes a reception operation which was started by [`receive_begin`].
    ///
    /// After completion, the `rx_buf` will contain a header followed by the
    /// received packet. It returns the length of the header and the length of
    /// the packet.
    ///
    /// # Safety
    ///
    /// The same buffer must be passed in again as was passed to
    /// [`receive_begin`] when it returned the token.
    ///
    /// [`receive_begin`]: Self::receive_begin
    pub unsafe fn receive_complete(
        &mut self,
        token: u16,
        rx_buf: &mut [u8],
    ) -> Result<(usize, usize)> {
        let len = self.recv_queue.pop_used(token, &[], &mut [rx_buf])? as usize;
        let packet_len = len.checked_sub(NET_HDR_SIZE).ok_or(Error::IoError)?;
        Ok((NET_HDR_SIZE, packet_len))
    }

    /// Sends a packet to the network, and blocks until the request has completed.
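    ///
    /// Unlike [`transmit_begin`], this method prepends the [`VirtioNetHdr`]
    /// itself, so `tx_buf` should contain only the packet. A minimal usage
    /// sketch, assuming `frame` is an Ethernet frame (hypothetical name):
    ///
    /// ```ignore
    /// net.send(&frame)?;
    /// ```
    ///
    /// [`transmit_begin`]: Self::transmit_begin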
    pub fn send(&mut self, tx_buf: &[u8]) -> Result {
        let header = VirtioNetHdr::default();
        if tx_buf.is_empty() {
            // Special case sending an empty packet, to avoid adding an empty buffer to the
            // virtqueue.
            self.send_queue.add_notify_wait_pop(
                &[header.as_bytes()],
                &mut [],
                &mut self.transport,
            )?;
        } else {
            self.send_queue.add_notify_wait_pop(
                &[header.as_bytes(), tx_buf],
                &mut [],
                &mut self.transport,
            )?;
        }
        Ok(())
    }

    /// Blocks and waits for a packet to be received.
    ///
    /// After completion, the `rx_buf` will contain a header followed by the
    /// received packet. It returns the length of the header and the length of
    /// the packet.
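    ///
    /// A minimal usage sketch, assuming `net` is an initialised `VirtIONetRaw`
    /// (hypothetical name):
    ///
    /// ```ignore
    /// let mut rx_buf = [0u8; 2048];
    /// let (hdr_len, pkt_len) = net.receive_wait(&mut rx_buf)?;
    /// let packet = &rx_buf[hdr_len..hdr_len + pkt_len];
    /// ```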
    pub fn receive_wait(&mut self, rx_buf: &mut [u8]) -> Result<(usize, usize)> {
        let token = unsafe { self.receive_begin(rx_buf)? };
        while self.poll_receive().is_none() {
            core::hint::spin_loop();
        }
        unsafe { self.receive_complete(token, rx_buf) }
    }
}

impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> Drop for VirtIONetRaw<H, T, QUEUE_SIZE> {
    fn drop(&mut self) {
        // Clear any pointers pointing to DMA regions, so the device doesn't try to access them
        // after they have been freed.
        self.transport.queue_unset(QUEUE_RECEIVE);
        self.transport.queue_unset(QUEUE_TRANSMIT);
    }
}