Sfoglia il codice sorgente

Use u16 for queue indices.

This is what the PCI transport uses, and there isn't any need for more.
Using u16 everywhere avoids some casts.
Andrew Walbran 2 anni fa
parent
commit
6d94dc7746
8 file modificati con 31 aggiunte e 31 eliminazioni
  1. src/console.rs (+3 −3)
  2. src/gpu.rs (+4 −4)
  3. src/input.rs (+2 −2)
  4. src/net.rs (+4 −4)
  5. src/queue.rs (+3 −3)
  6. src/transport/fake.rs (+5 −5)
  7. src/transport/mmio.rs (+7 −7)
  8. src/transport/mod.rs (+3 −3)

+ 3 - 3
src/console.rs

@@ -6,8 +6,8 @@ use bitflags::*;
 use core::{fmt, hint::spin_loop};
 use log::*;
 
-const QUEUE_RECEIVEQ_PORT_0: usize = 0;
-const QUEUE_TRANSMITQ_PORT_0: usize = 1;
+const QUEUE_RECEIVEQ_PORT_0: u16 = 0;
+const QUEUE_TRANSMITQ_PORT_0: u16 = 1;
 const QUEUE_SIZE: u16 = 2;
 
 /// Virtio console. Only one single port is allowed since ``alloc'' is disabled.
@@ -93,7 +93,7 @@ impl<H: Hal, T: Transport> VirtIOConsole<'_, H, T> {
     pub fn send(&mut self, chr: u8) -> Result<()> {
         let buf: [u8; 1] = [chr];
         self.transmitq.add(&[&buf], &[])?;
-        self.transport.notify(QUEUE_TRANSMITQ_PORT_0 as u32);
+        self.transport.notify(QUEUE_TRANSMITQ_PORT_0);
         while !self.transmitq.can_pop() {
             spin_loop();
         }

+ 4 - 4
src/gpu.rs

@@ -165,7 +165,7 @@ impl<H: Hal, T: Transport> VirtIOGpu<'_, H, T> {
         }
         self.control_queue
             .add(&[self.queue_buf_send], &[self.queue_buf_recv])?;
-        self.transport.notify(QUEUE_TRANSMIT as u32);
+        self.transport.notify(QUEUE_TRANSMIT);
         while !self.control_queue.can_pop() {
             spin_loop();
         }
@@ -179,7 +179,7 @@ impl<H: Hal, T: Transport> VirtIOGpu<'_, H, T> {
             (self.queue_buf_send.as_mut_ptr() as *mut Req).write(req);
         }
         self.cursor_queue.add(&[self.queue_buf_send], &[])?;
-        self.transport.notify(QUEUE_CURSOR as u32);
+        self.transport.notify(QUEUE_CURSOR);
         while !self.cursor_queue.can_pop() {
             spin_loop();
         }
@@ -483,8 +483,8 @@ struct UpdateCursor {
     _padding: u32,
 }
 
-const QUEUE_TRANSMIT: usize = 0;
-const QUEUE_CURSOR: usize = 1;
+const QUEUE_TRANSMIT: u16 = 0;
+const QUEUE_CURSOR: u16 = 1;
 
 const SCANOUT_ID: u32 = 0;
 const RESOURCE_ID_FB: u32 = 0xbabe;

+ 2 - 2
src/input.rs

@@ -178,8 +178,8 @@ bitflags! {
     }
 }
 
-const QUEUE_EVENT: usize = 0;
-const QUEUE_STATUS: usize = 1;
+const QUEUE_EVENT: u16 = 0;
+const QUEUE_STATUS: u16 = 1;
 
 // a parameter that can change
 const QUEUE_SIZE: usize = 32;

+ 4 - 4
src/net.rs

@@ -78,7 +78,7 @@ impl<H: Hal, T: Transport> VirtIONet<H, T> {
         let mut header = MaybeUninit::<Header>::uninit();
         let header_buf = unsafe { (*header.as_mut_ptr()).as_buf_mut() };
         self.recv_queue.add(&[], &[header_buf, buf])?;
-        self.transport.notify(QUEUE_RECEIVE as u32);
+        self.transport.notify(QUEUE_RECEIVE);
         while !self.recv_queue.can_pop() {
             spin_loop();
         }
@@ -92,7 +92,7 @@ impl<H: Hal, T: Transport> VirtIONet<H, T> {
     pub fn send(&mut self, buf: &[u8]) -> Result {
         let header = unsafe { MaybeUninit::<Header>::zeroed().assume_init() };
         self.send_queue.add(&[header.as_buf(), buf], &[])?;
-        self.transport.notify(QUEUE_TRANSMIT as u32);
+        self.transport.notify(QUEUE_TRANSMIT);
         while !self.send_queue.can_pop() {
             spin_loop();
         }
@@ -218,5 +218,5 @@ enum GsoType {
     ECN = 0x80,
 }
 
-const QUEUE_RECEIVE: usize = 0;
-const QUEUE_TRANSMIT: usize = 1;
+const QUEUE_RECEIVE: u16 = 0;
+const QUEUE_TRANSMIT: u16 = 1;

+ 3 - 3
src/queue.rs

@@ -39,8 +39,8 @@ pub struct VirtQueue<H: Hal> {
 
 impl<H: Hal> VirtQueue<H> {
     /// Create a new VirtQueue.
-    pub fn new<T: Transport>(transport: &mut T, idx: usize, size: u16) -> Result<Self> {
-        if transport.queue_used(idx as u32) {
+    pub fn new<T: Transport>(transport: &mut T, idx: u16, size: u16) -> Result<Self> {
+        if transport.queue_used(idx) {
             return Err(Error::AlreadyUsed);
         }
         if !size.is_power_of_two() || transport.max_queue_size() < size as u32 {
@@ -51,7 +51,7 @@ impl<H: Hal> VirtQueue<H> {
         let dma = DMA::new(layout.size / PAGE_SIZE)?;
 
         transport.queue_set(
-            idx as u32,
+            idx,
             size as u32,
             dma.paddr(),
             dma.paddr() + layout.avail_offset,

+ 5 - 5
src/transport/fake.rs

@@ -34,7 +34,7 @@ impl Transport for FakeTransport {
         self.max_queue_size
     }
 
-    fn notify(&mut self, queue: u32) {
+    fn notify(&mut self, queue: u16) {
         self.state.lock().unwrap().queues[queue as usize].notified = true;
     }
 
@@ -48,7 +48,7 @@ impl Transport for FakeTransport {
 
     fn queue_set(
         &mut self,
-        queue: u32,
+        queue: u16,
         size: u32,
         descriptors: PhysAddr,
         driver_area: PhysAddr,
@@ -61,7 +61,7 @@ impl Transport for FakeTransport {
         state.queues[queue as usize].device_area = device_area;
     }
 
-    fn queue_used(&mut self, queue: u32) -> bool {
+    fn queue_used(&mut self, queue: u16) -> bool {
         self.state.lock().unwrap().queues[queue as usize].descriptors != 0
     }
 
@@ -92,8 +92,8 @@ impl State {
     /// Simulates the device writing to the given queue.
     ///
     /// The fake device always uses descriptors in order.
-    pub fn write_to_queue(&mut self, queue_size: u16, queue_index: usize, data: &[u8]) {
-        let receive_queue = &self.queues[queue_index];
+    pub fn write_to_queue(&mut self, queue_size: u16, queue_index: u16, data: &[u8]) {
+        let receive_queue = &self.queues[queue_index as usize];
         assert_ne!(receive_queue.descriptors, 0);
         fake_write_to_queue(
             queue_size,

+ 7 - 7
src/transport/mmio.rs

@@ -341,10 +341,10 @@ impl Transport for MmioTransport {
         unsafe { volread!(self.header, queue_num_max) }
     }
 
-    fn notify(&mut self, queue: u32) {
+    fn notify(&mut self, queue: u16) {
         // Safe because self.header points to a valid VirtIO MMIO region.
         unsafe {
-            volwrite!(self.header, queue_notify, queue);
+            volwrite!(self.header, queue_notify, queue.into());
         }
     }
 
@@ -371,7 +371,7 @@ impl Transport for MmioTransport {
 
     fn queue_set(
         &mut self,
-        queue: u32,
+        queue: u16,
         size: u32,
         descriptors: PhysAddr,
         driver_area: PhysAddr,
@@ -395,7 +395,7 @@ impl Transport for MmioTransport {
                 assert_eq!(pfn as usize * PAGE_SIZE, descriptors);
                 // Safe because self.header points to a valid VirtIO MMIO region.
                 unsafe {
-                    volwrite!(self.header, queue_sel, queue);
+                    volwrite!(self.header, queue_sel, queue.into());
                     volwrite!(self.header, queue_num, size);
                     volwrite!(self.header, legacy_queue_align, align);
                     volwrite!(self.header, legacy_queue_pfn, pfn);
@@ -404,7 +404,7 @@ impl Transport for MmioTransport {
             MmioVersion::Modern => {
                 // Safe because self.header points to a valid VirtIO MMIO region.
                 unsafe {
-                    volwrite!(self.header, queue_sel, queue);
+                    volwrite!(self.header, queue_sel, queue.into());
                     volwrite!(self.header, queue_num, size);
                     volwrite!(self.header, queue_desc_low, descriptors as u32);
                     volwrite!(self.header, queue_desc_high, (descriptors >> 32) as u32);
@@ -418,10 +418,10 @@ impl Transport for MmioTransport {
         }
     }
 
-    fn queue_used(&mut self, queue: u32) -> bool {
+    fn queue_used(&mut self, queue: u16) -> bool {
         // Safe because self.header points to a valid VirtIO MMIO region.
         unsafe {
-            volwrite!(self.header, queue_sel, queue);
+            volwrite!(self.header, queue_sel, queue.into());
             match self.version {
                 MmioVersion::Legacy => volread!(self.header, legacy_queue_pfn) != 0,
                 MmioVersion::Modern => volread!(self.header, queue_ready) != 0,

+ 3 - 3
src/transport/mod.rs

@@ -22,7 +22,7 @@ pub trait Transport {
     fn max_queue_size(&self) -> u32;
 
     /// Notifies the given queue on the device.
-    fn notify(&mut self, queue: u32);
+    fn notify(&mut self, queue: u16);
 
     /// Sets the device status.
     fn set_status(&mut self, status: DeviceStatus);
@@ -33,7 +33,7 @@ pub trait Transport {
     /// Sets up the given queue.
     fn queue_set(
         &mut self,
-        queue: u32,
+        queue: u16,
         size: u32,
         descriptors: PhysAddr,
         driver_area: PhysAddr,
@@ -41,7 +41,7 @@ pub trait Transport {
     );
 
     /// Returns whether the queue is in use, i.e. has a nonzero PFN or is marked as ready.
-    fn queue_used(&mut self, queue: u32) -> bool;
+    fn queue_used(&mut self, queue: u16) -> bool;
 
     /// Acknowledges an interrupt.
     ///