
//! Driver for VirtIO network devices.
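//!
//! # Example
//!
//! A minimal usage sketch. It assumes a `HalImpl` type implementing [`Hal`] and an
//! already-initialised `transport` implementing [`Transport`] are supplied by the
//! caller; `HalImpl`, `transport`, `process_frame` and `frame` are placeholders, not
//! items defined in this module.
//!
//! ```ignore
//! // Queue size and receive-buffer length below are example values only.
//! let mut net = VirtIONet::<HalImpl, _, 16>::new(transport, 2048)?;
//! let mac = net.mac_address();
//!
//! // Receive a packet, if one is pending, then hand the buffer back to the device.
//! if net.can_recv() {
//!     let rx_buf = net.receive()?;
//!     process_frame(rx_buf.packet());
//!     net.recycle_rx_buffer(rx_buf)?;
//! }
//!
//! // Send an Ethernet frame, blocking until the device has consumed it.
//! let mut tx_buf = net.new_tx_buffer(frame.len());
//! tx_buf.packet_mut().copy_from_slice(&frame);
//! net.send(tx_buf)?;
//! ```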

use crate::hal::Hal;
use crate::queue::VirtQueue;
use crate::transport::Transport;
use crate::volatile::{volread, ReadOnly};
use crate::{Error, Result};
use alloc::{vec, vec::Vec};
use bitflags::bitflags;
use core::mem::size_of;
use log::{debug, info, warn};
use zerocopy::{AsBytes, FromBytes};

const MAX_BUFFER_LEN: usize = 65535;
const MIN_BUFFER_LEN: usize = 1526;
const NET_HDR_SIZE: usize = size_of::<VirtioNetHdr>();

/// A buffer used for transmitting.
pub struct TxBuffer(Vec<u8>);

/// A buffer used for receiving.
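///
/// The buffer is backed by a `Vec<usize>` so that it is suitably aligned for the
/// [`VirtioNetHdr`] placed at its start; the packet payload follows the header,
/// starting at offset `NET_HDR_SIZE`.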
pub struct RxBuffer {
    buf: Vec<usize>, // for alignment
    packet_len: usize,
    idx: usize,
}

impl TxBuffer {
    /// Constructs the buffer from the given slice.
    pub fn from(buf: &[u8]) -> Self {
        Self(Vec::from(buf))
    }

    /// Returns the network packet length.
    pub fn packet_len(&self) -> usize {
        self.0.len()
    }

    /// Returns the network packet as a slice.
    pub fn packet(&self) -> &[u8] {
        self.0.as_slice()
    }

    /// Returns the network packet as a mutable slice.
    pub fn packet_mut(&mut self) -> &mut [u8] {
        self.0.as_mut_slice()
    }
}

impl RxBuffer {
    /// Allocates a new buffer with length `buf_len`.
    fn new(idx: usize, buf_len: usize) -> Self {
        Self {
            buf: vec![0; buf_len / size_of::<usize>()],
            packet_len: 0,
            idx,
        }
    }

    /// Set the network packet length.
    fn set_packet_len(&mut self, packet_len: usize) {
        self.packet_len = packet_len
    }

    /// Returns the network packet length (without the header).
    pub const fn packet_len(&self) -> usize {
        self.packet_len
    }

    /// Returns all data in the buffer, including both the header and the packet.
    pub fn as_bytes(&self) -> &[u8] {
        self.buf.as_bytes()
    }

    /// Returns all data in the buffer as a mutable slice, including both the
    /// header and the packet.
    pub fn as_bytes_mut(&mut self) -> &mut [u8] {
        self.buf.as_bytes_mut()
    }

    /// Returns a reference to the header.
    pub fn header(&self) -> &VirtioNetHdr {
        // Safe because the buffer is backed by `usize`s, so it is sufficiently
        // aligned for `VirtioNetHdr`, and it is always at least `NET_HDR_SIZE`
        // bytes long.
        unsafe { &*(self.buf.as_ptr() as *const VirtioNetHdr) }
    }

    /// Returns the network packet as a slice.
    pub fn packet(&self) -> &[u8] {
        &self.buf.as_bytes()[NET_HDR_SIZE..NET_HDR_SIZE + self.packet_len]
    }

    /// Returns the network packet as a mutable slice.
    pub fn packet_mut(&mut self) -> &mut [u8] {
        &mut self.buf.as_bytes_mut()[NET_HDR_SIZE..NET_HDR_SIZE + self.packet_len]
    }
}

/// The virtio network device is a virtual ethernet card.
///
/// It has been enhanced rapidly and demonstrates clearly how support for new
/// features is added to an existing device.
///
/// Empty buffers are placed in one virtqueue for receiving packets, and
/// outgoing packets are enqueued into another for transmission in that order.
/// A third command queue is used to control advanced filtering features.
pub struct VirtIONet<H: Hal, T: Transport, const QUEUE_SIZE: usize> {
    transport: T,
    mac: EthernetAddress,
    recv_queue: VirtQueue<H, QUEUE_SIZE>,
    send_queue: VirtQueue<H, QUEUE_SIZE>,
    rx_buffers: [Option<RxBuffer>; QUEUE_SIZE],
}

impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> VirtIONet<H, T, QUEUE_SIZE> {
    /// Create a new VirtIO-Net driver.
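    ///
    /// `buf_len` is the length of each receive buffer (header plus packet); it must be
    /// within `MIN_BUFFER_LEN..=MAX_BUFFER_LEN`, otherwise [`Error::InvalidParam`] is
    /// returned.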
    pub fn new(mut transport: T, buf_len: usize) -> Result<Self> {
        transport.begin_init(|features| {
            let features = Features::from_bits_truncate(features);
            info!("Device features {:?}", features);
            let supported_features = Features::MAC | Features::STATUS;
            (features & supported_features).bits()
        });

        // read configuration space
        let config = transport.config_space::<Config>()?;
        let mac;
        // Safe because config points to a valid MMIO region for the config space.
        unsafe {
            mac = volread!(config, mac);
            debug!(
                "Got MAC={:02x?}, status={:?}",
                mac,
                volread!(config, status)
            );
        }

        if !(MIN_BUFFER_LEN..=MAX_BUFFER_LEN).contains(&buf_len) {
            warn!(
                "Receive buffer len {} is not in range [{}, {}]",
                buf_len, MIN_BUFFER_LEN, MAX_BUFFER_LEN
            );
            return Err(Error::InvalidParam);
        }

        let send_queue = VirtQueue::new(&mut transport, QUEUE_TRANSMIT)?;
        let mut recv_queue = VirtQueue::new(&mut transport, QUEUE_RECEIVE)?;

        const NONE_BUF: Option<RxBuffer> = None;
        let mut rx_buffers = [NONE_BUF; QUEUE_SIZE];
        for (i, rx_buf_place) in rx_buffers.iter_mut().enumerate() {
            let mut rx_buf = RxBuffer::new(i, buf_len);
            // Safe because the buffer lives as long as the queue.
            let token = unsafe { recv_queue.add(&[], &mut [rx_buf.as_bytes_mut()])? };
            assert_eq!(token, i as u16);
            *rx_buf_place = Some(rx_buf);
        }

        if recv_queue.should_notify() {
            transport.notify(QUEUE_RECEIVE);
        }
        transport.finish_init();

        Ok(VirtIONet {
            transport,
            mac,
            recv_queue,
            send_queue,
            rx_buffers,
        })
    }

    /// Acknowledges an interrupt.
    pub fn ack_interrupt(&mut self) -> bool {
        self.transport.ack_interrupt()
    }

    /// Returns the MAC address.
    pub fn mac_address(&self) -> EthernetAddress {
        self.mac
    }

    /// Returns whether a packet can currently be sent.
    ///
    /// Sending needs two free descriptors: one for the header and one for the packet.
    pub fn can_send(&self) -> bool {
        self.send_queue.available_desc() >= 2
    }

    /// Returns whether a received packet is waiting to be popped.
    pub fn can_recv(&self) -> bool {
        self.recv_queue.can_pop()
    }

    /// Receives a [`RxBuffer`] from the network. If no data is currently
    /// available, returns an error of type [`Error::NotReady`].
    ///
    /// It tries to pop a buffer whose data reception has completed from the
    /// NIC's receive queue.
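    ///
    /// A typical polling loop might look like the sketch below (`net` is a
    /// `VirtIONet` instance and `handle_frame` is a placeholder for caller code):
    ///
    /// ```ignore
    /// while net.can_recv() {
    ///     let rx_buf = net.receive()?;
    ///     handle_frame(rx_buf.packet());
    ///     // Give the buffer back so the device can reuse it for a later packet.
    ///     net.recycle_rx_buffer(rx_buf)?;
    /// }
    /// ```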
    pub fn receive(&mut self) -> Result<RxBuffer> {
        if let Some(token) = self.recv_queue.peek_used() {
            let mut rx_buf = self.rx_buffers[token as usize]
                .take()
                .ok_or(Error::WrongToken)?;
            if token as usize != rx_buf.idx {
                return Err(Error::WrongToken);
            }

            // Safe because `token` == `rx_buf.idx`, we are passing the same
            // buffer as we passed to `VirtQueue::add` and it is still valid.
            let len = unsafe {
                self.recv_queue
                    .pop_used(token, &[], &mut [rx_buf.as_bytes_mut()])?
            };
            if (len as usize) < NET_HDR_SIZE {
                Err(Error::IoError)
            } else {
                rx_buf.set_packet_len(len as usize - NET_HDR_SIZE);
                Ok(rx_buf)
            }
        } else {
            Err(Error::NotReady)
        }
    }

    /// Gives back ownership of `rx_buf` and recycles it for the next use.
    ///
    /// It will add the buffer back to the NIC's receive queue.
    pub fn recycle_rx_buffer(&mut self, mut rx_buf: RxBuffer) -> Result {
        let old_token = rx_buf.idx;
        // Safe because we take ownership of `rx_buf` back into `rx_buffers`,
        // so it lives as long as the queue.
        let new_token = unsafe { self.recv_queue.add(&[], &mut [rx_buf.as_bytes_mut()]) }?;
        if new_token as usize != old_token {
            return Err(Error::WrongToken);
        }
        self.rx_buffers[old_token] = Some(rx_buf);
        if self.recv_queue.should_notify() {
            self.transport.notify(QUEUE_RECEIVE);
        }
        Ok(())
    }

    /// Allocates a new buffer for transmitting.
    pub fn new_tx_buffer(&self, buf_len: usize) -> TxBuffer {
        TxBuffer(vec![0; buf_len])
    }

    /// Sends a [`TxBuffer`] to the network, and blocks until the request is
    /// completed.
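    ///
    /// For example (a sketch; `payload` stands in for the caller's Ethernet frame bytes):
    ///
    /// ```ignore
    /// let mut tx_buf = net.new_tx_buffer(payload.len());
    /// tx_buf.packet_mut().copy_from_slice(payload);
    /// net.send(tx_buf)?;
    /// ```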
    pub fn send(&mut self, tx_buf: TxBuffer) -> Result {
        let header = VirtioNetHdr::default();
        self.send_queue.add_notify_wait_pop(
            &[header.as_bytes(), tx_buf.packet()],
            &mut [],
            &mut self.transport,
        )?;
        Ok(())
    }
}

impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> Drop for VirtIONet<H, T, QUEUE_SIZE> {
    fn drop(&mut self) {
        // Clear any pointers pointing to DMA regions, so the device doesn't try to access them
        // after they have been freed.
        self.transport.queue_unset(QUEUE_RECEIVE);
        self.transport.queue_unset(QUEUE_TRANSMIT);
    }
}

bitflags! {
    struct Features: u64 {
        /// Device handles packets with partial checksum.
        /// This "checksum offload" is a common feature on modern network cards.
        const CSUM = 1 << 0;
        /// Driver handles packets with partial checksum.
        const GUEST_CSUM = 1 << 1;
        /// Control channel offloads reconfiguration support.
        const CTRL_GUEST_OFFLOADS = 1 << 2;
        /// Device maximum MTU reporting is supported.
        ///
        /// If offered by the device, device advises driver about the value of
        /// its maximum MTU. If negotiated, the driver uses mtu as the maximum
        /// MTU value.
        const MTU = 1 << 3;
        /// Device has given MAC address.
        const MAC = 1 << 5;
        /// Device handles packets with any GSO type. (legacy)
        const GSO = 1 << 6;
        /// Driver can receive TSOv4.
        const GUEST_TSO4 = 1 << 7;
        /// Driver can receive TSOv6.
        const GUEST_TSO6 = 1 << 8;
        /// Driver can receive TSO with ECN.
        const GUEST_ECN = 1 << 9;
        /// Driver can receive UFO.
        const GUEST_UFO = 1 << 10;
        /// Device can receive TSOv4.
        const HOST_TSO4 = 1 << 11;
        /// Device can receive TSOv6.
        const HOST_TSO6 = 1 << 12;
        /// Device can receive TSO with ECN.
        const HOST_ECN = 1 << 13;
        /// Device can receive UFO.
        const HOST_UFO = 1 << 14;
        /// Driver can merge receive buffers.
        const MRG_RXBUF = 1 << 15;
        /// Configuration status field is available.
        const STATUS = 1 << 16;
        /// Control channel is available.
        const CTRL_VQ = 1 << 17;
        /// Control channel RX mode support.
        const CTRL_RX = 1 << 18;
        /// Control channel VLAN filtering.
        const CTRL_VLAN = 1 << 19;
        /// Extra RX mode control support.
        const CTRL_RX_EXTRA = 1 << 20;
        /// Driver can send gratuitous packets.
        const GUEST_ANNOUNCE = 1 << 21;
        /// Device supports multiqueue with automatic receive steering.
        const MQ = 1 << 22;
        /// Set MAC address through control channel.
        const CTL_MAC_ADDR = 1 << 23;

        // device independent
        const RING_INDIRECT_DESC = 1 << 28;
        const RING_EVENT_IDX = 1 << 29;
        const VERSION_1 = 1 << 32; // legacy
    }
}

bitflags! {
    struct Status: u16 {
        const LINK_UP = 1;
        const ANNOUNCE = 2;
    }
}

bitflags! {
    struct InterruptStatus: u32 {
        const USED_RING_UPDATE = 1 << 0;
        const CONFIGURATION_CHANGE = 1 << 1;
    }
}

#[repr(C)]
struct Config {
    mac: ReadOnly<EthernetAddress>,
    status: ReadOnly<Status>,
    max_virtqueue_pairs: ReadOnly<u16>,
    mtu: ReadOnly<u16>,
}

type EthernetAddress = [u8; 6];

/// VirtIO 5.1.6 Device Operation:
///
/// Packets are transmitted by placing them in the transmitq1...transmitqN,
/// and buffers for incoming packets are placed in the receiveq1...receiveqN.
/// In each case, the packet itself is preceded by a header.
#[repr(C)]
#[derive(AsBytes, Debug, Default, FromBytes)]
pub struct VirtioNetHdr {
    flags: Flags,
    gso_type: GsoType,
    hdr_len: u16, // cannot rely on this
    gso_size: u16,
    csum_start: u16,
    csum_offset: u16,
    // num_buffers: u16, // only available when the feature MRG_RXBUF is negotiated.
    // payload starts from here
}

bitflags! {
    #[repr(transparent)]
    #[derive(AsBytes, Default, FromBytes)]
    struct Flags: u8 {
        const NEEDS_CSUM = 1;
        const DATA_VALID = 2;
        const RSC_INFO = 4;
    }
}

#[repr(transparent)]
#[derive(AsBytes, Debug, Copy, Clone, Default, Eq, FromBytes, PartialEq)]
struct GsoType(u8);

impl GsoType {
    const NONE: GsoType = GsoType(0);
    const TCPV4: GsoType = GsoType(1);
    const UDP: GsoType = GsoType(3);
    const TCPV6: GsoType = GsoType(4);
    const ECN: GsoType = GsoType(0x80);
}

const QUEUE_RECEIVE: u16 = 0;
const QUEUE_TRANSMIT: u16 = 1;