Merge branch YdrMaster-main into main

luojia65 · 2 years ago
commit ef6550a9e7
7 files changed, 361 insertions and 3 deletions
  1. CHANGELOG.md       +2   -0
  2. Cargo.toml         +10  -0
  3. src/ecall.rs       +244 -0
  4. src/hart_mask.rs   +6   -3
  5. src/lib.rs         +1   -0
  6. src/reset.rs       +1   -0
  7. src/util.rs        +97  -0

+ 2 - 0
CHANGELOG.md

@@ -8,8 +8,10 @@ to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 ## Unreleased
 
 ### Added
+- Feature `legacy` to gate SBI legacy extension
 
 ### Modified
+- Update dependency embedded-hal to 1.0.0-alpha.8
 
 ### Fixed
 

+ 10 - 0
Cargo.toml

@@ -16,16 +16,26 @@ categories = ["os", "embedded", "hardware-support", "no-std"]
 edition = "2021"
 
 [dependencies]
+<<<<<<< HEAD
 riscv = "0.8"
 sbi-spec = { git = "https://github.com/rustsbi/sbi-spec.git", rev = "9d728bb" }
 # The following two dependencies are used to support legacy console feature
 embedded-hal = { version = "1.0.0-alpha.8", optional = true }
 nb = { version = "1.0", optional = true }
+=======
+embedded-hal = { version = "1.0.0-alpha.8", optional = true }
+nb = "1.0"
+riscv = "0.7"
+>>>>>>> a577447 (gate SBI legacy extension under `legacy` feature)
 
 [features]
 default = []
 # Support legacy extension; this feature is not included by default.
+<<<<<<< HEAD
 legacy = ["embedded-hal", "nb"]
+=======
+legacy = ["embedded-hal"]
+>>>>>>> a577447 (gate SBI legacy extension under `legacy` feature)
 # Dynamic pointer widths on SBI implementations; useful for developing hypervisors
 guest = []
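
For orientation, code gated by the new `legacy` feature is compiled in or out with ordinary `cfg` attributes. A minimal sketch of the pattern, assuming a downstream crate that declares its own `legacy` feature (the module and function names below are made up, not part of this crate):

```rust
// Only compiled when the crate is built with `--features legacy`.
#[cfg(feature = "legacy")]
mod legacy_support {
    pub fn legacy_available() -> bool {
        true
    }
}

fn main() {
    #[cfg(feature = "legacy")]
    println!("legacy SBI support: {}", legacy_support::legacy_available());
    #[cfg(not(feature = "legacy"))]
    println!("built without the legacy extension");
}
```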
 

+ 244 - 0
src/ecall.rs

@@ -0,0 +1,244 @@
+//! This module handles all SBI call traps.
+// You should call into this module from riscv-rt or another trap/interrupt handler.
+
+mod base;
+mod hsm;
+mod ipi;
+#[cfg(feature = "legacy")]
+mod legacy;
+mod pmu;
+mod rfence;
+mod srst;
+mod timer;
+
+pub const EXTENSION_BASE: u32 = 0x10;
+pub const EXTENSION_TIMER: u32 = 0x54494D45;
+pub const EXTENSION_IPI: u32 = 0x735049;
+pub const EXTENSION_RFENCE: u32 = 0x52464E43;
+pub const EXTENSION_HSM: u32 = 0x48534D;
+pub const EXTENSION_SRST: u32 = 0x53525354;
+pub const EXTENSION_PMU: u32 = 0x504D55;
+
+#[cfg(feature = "legacy")]
+const LEGACY_SET_TIMER: u32 = 0x0;
+#[cfg(feature = "legacy")]
+const LEGACY_CONSOLE_PUTCHAR: u32 = 0x01;
+#[cfg(feature = "legacy")]
+const LEGACY_CONSOLE_GETCHAR: u32 = 0x02;
+// const LEGACY_CLEAR_IPI: u32 = 0x03;
+#[cfg(feature = "legacy")]
+const LEGACY_SEND_IPI: u32 = 0x04;
+// const LEGACY_REMOTE_FENCE_I: u32 = 0x05;
+// const LEGACY_REMOTE_SFENCE_VMA: u32 = 0x06;
+// const LEGACY_REMOTE_SFENCE_VMA_ASID: u32 = 0x07;
+#[cfg(feature = "legacy")]
+const LEGACY_SHUTDOWN: u32 = 0x08;
+
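
As an aside, the extension IDs above, apart from the Base extension's fixed 0x10, are simply the ASCII spellings of the extension names from the SBI specification ("TIME", "sPI", "RFNC", "HSM", "SRST", "PMU") read as big-endian integers; a quick standalone check:

```rust
fn main() {
    assert_eq!(&0x54494D45u32.to_be_bytes(), b"TIME"); // EXTENSION_TIMER
    assert_eq!(&0x52464E43u32.to_be_bytes(), b"RFNC"); // EXTENSION_RFENCE
    assert_eq!(&0x53525354u32.to_be_bytes(), b"SRST"); // EXTENSION_SRST
    // Shorter names only occupy the low bytes.
    assert_eq!(&0x735049u32.to_be_bytes()[1..], b"sPI"); // EXTENSION_IPI
    assert_eq!(&0x48534Du32.to_be_bytes()[1..], b"HSM"); // EXTENSION_HSM
    assert_eq!(&0x504D55u32.to_be_bytes()[1..], b"PMU"); // EXTENSION_PMU
}
```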
+/// Supervisor environment call handler function
+///
+/// This function is used by the platform runtime to handle the environment call (`ecall`) instruction.
+///
+/// You should call this function in your runtime's exception handler.
+/// If the incoming exception is caused by a supervisor-level `ecall`,
+/// call this function with the parameters extracted from the trap frame.
+/// After this function returns, store the returned values back into the `a0` and `a1` registers.
+///
+/// This function also adapts to the legacy functions.
+/// If the supervisor calls any legacy function, the `a0` and `a1` parameters
+/// are carried over into the `error` and `value` fields of `SbiRet`, respectively.
+/// In this case, implementations should still store the result back into `a0` and `a1`
+/// for every environment call, including the legacy functions.
+///
+/// # Example
+///
+/// A typical usage:
+///
+/// ```no_run
+/// # use riscv::register::{mepc, mcause::{self, Trap, Exception}};
+/// # struct TrapFrame { a0: usize, a1: usize, a2: usize, a3: usize,
+/// # a4: usize, a5: usize, a6: usize, a7: usize }
+/// extern "C" fn rust_handle_exception(ctx: &mut TrapFrame) {
+///     if mcause::read().cause() == Trap::Exception(Exception::SupervisorEnvCall) {
+///         let params = [ctx.a0, ctx.a1, ctx.a2, ctx.a3, ctx.a4, ctx.a5];
+///         let ans = rustsbi::ecall(ctx.a7, ctx.a6, params);
+///         ctx.a0 = ans.error;
+///         ctx.a1 = ans.value;
+///         mepc::write(mepc::read().wrapping_add(4));
+///     }
+///     // other conditions..
+/// }
+/// ```
+///
+/// Do not forget to advance `mepc` by 4 after an ecall is handled.
+/// This skips over the `ecall` instruction itself, which is always 4 bytes long (it has no compressed form).
+#[inline]
+pub fn handle_ecall(extension: usize, function: usize, param: [usize; 6]) -> SbiRet {
+    // RISC-V SBI requires SBI extension IDs (EIDs) and SBI function IDs (FIDs)
+    // are encoded as signed 32-bit integers
+    #[cfg(not(target_pointer_width = "32"))]
+    if extension > u32::MAX as usize || function > u32::MAX as usize {
+        return SbiRet::not_supported();
+    }
+    let (extension, function) = (extension as u32, function as u32);
+    // process actual environment calls
+    match extension {
+        EXTENSION_RFENCE => {
+            rfence::handle_ecall_rfence(function, param[0], param[1], param[2], param[3], param[4])
+        }
+        EXTENSION_TIMER => match () {
+            #[cfg(target_pointer_width = "64")]
+            () => timer::handle_ecall_timer_64(function, param[0]),
+            #[cfg(target_pointer_width = "32")]
+            () => timer::handle_ecall_timer_32(function, param[0], param[1]),
+        },
+        EXTENSION_IPI => ipi::handle_ecall_ipi(function, param[0], param[1]),
+        EXTENSION_BASE => base::handle_ecall_base(function, param[0]),
+        EXTENSION_HSM => hsm::handle_ecall_hsm(function, param[0], param[1], param[2]),
+        EXTENSION_SRST => srst::handle_ecall_srst(function, param[0], param[1]),
+        EXTENSION_PMU => match () {
+            #[cfg(target_pointer_width = "64")]
+            () => {
+                pmu::handle_ecall_pmu_64(function, param[0], param[1], param[2], param[3], param[4])
+            }
+            #[cfg(target_pointer_width = "32")]
+            () => pmu::handle_ecall_pmu_32(
+                function, param[0], param[1], param[2], param[3], param[4], param[5],
+            ),
+        },
+        #[cfg(feature = "legacy")]
+        LEGACY_SET_TIMER => match () {
+            #[cfg(target_pointer_width = "64")]
+            () => legacy::set_timer_64(param[0]),
+            #[cfg(target_pointer_width = "32")]
+            () => legacy::set_timer_32(param[0], param[1]),
+        }
+        .legacy_void(param[0], param[1]),
+        #[cfg(feature = "legacy")]
+        LEGACY_CONSOLE_PUTCHAR => legacy::console_putchar(param[0]).legacy_void(param[0], param[1]),
+        #[cfg(feature = "legacy")]
+        LEGACY_CONSOLE_GETCHAR => legacy::console_getchar().legacy_return(param[1]),
+        #[cfg(feature = "legacy")]
+        LEGACY_SEND_IPI => legacy::send_ipi(param[0]).legacy_void(param[0], param[1]),
+        #[cfg(feature = "legacy")]
+        LEGACY_SHUTDOWN => legacy::shutdown().legacy_void(param[0], param[1]),
+        _ => SbiRet::not_supported(),
+    }
+}
+
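
For context, the supervisor-side counterpart of this handler looks roughly like the sketch below: EID in `a7`, FID in `a6`, arguments in `a0`..`a5`, with `error` returned in `a0` and `value` in `a1`. This is illustrative only (it assumes a RISC-V target and Rust 1.59+ inline assembly); the `sbi_call_2` helper is not part of this crate.

```rust
use core::arch::asm;

const EXTENSION_TIMER: usize = 0x54494D45;
const FUNCTION_TIMER_SET_TIMER: usize = 0;

/// Issue an SBI call with two arguments (sketch; RISC-V targets only).
#[inline]
unsafe fn sbi_call_2(eid: usize, fid: usize, arg0: usize, arg1: usize) -> (usize, usize) {
    let (error, value);
    asm!(
        "ecall",
        in("a7") eid,                  // SBI extension ID
        in("a6") fid,                  // SBI function ID
        inlateout("a0") arg0 => error, // argument 0 in, error code out
        inlateout("a1") arg1 => value, // argument 1 in, return value out
    );
    (error, value)
}

// For example, to program the next timer interrupt on a 64-bit platform:
// let (error, _) = unsafe { sbi_call_2(EXTENSION_TIMER, FUNCTION_TIMER_SET_TIMER, stime_value, 0) };
```

The dispatch in `handle_ecall` above is the mirror image: it reads the EID and FID, routes to the matching extension module, and the runtime stores the resulting `SbiRet` back into `a0` and `a1`.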
+/// Call result returned by SBI
+///
+/// After `handle_ecall` finishes, you should save the returned `error` in `a0` and `value` in `a1`.
+#[repr(C)] // ensure that return value follows RISC-V SBI calling convention
+pub struct SbiRet {
+    /// Error number
+    pub error: usize,
+    /// Result value
+    pub value: usize,
+}
+
+const SBI_SUCCESS: usize = 0;
+const SBI_ERR_FAILED: usize = usize::from_ne_bytes(isize::to_ne_bytes(-1));
+const SBI_ERR_NOT_SUPPORTED: usize = usize::from_ne_bytes(isize::to_ne_bytes(-2));
+const SBI_ERR_INVALID_PARAM: usize = usize::from_ne_bytes(isize::to_ne_bytes(-3));
+// const SBI_ERR_DENIED: usize = usize::from_ne_bytes(isize::to_ne_bytes(-4));
+const SBI_ERR_INVALID_ADDRESS: usize = usize::from_ne_bytes(isize::to_ne_bytes(-5));
+const SBI_ERR_ALREADY_AVAILABLE: usize = usize::from_ne_bytes(isize::to_ne_bytes(-6));
+const SBI_ERR_ALREADY_STARTED: usize = usize::from_ne_bytes(isize::to_ne_bytes(-7));
+const SBI_ERR_ALREADY_STOPPED: usize = usize::from_ne_bytes(isize::to_ne_bytes(-8));
+
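
The constants above pack negative SBI error codes into a `usize` by reinterpreting the bytes of the corresponding `isize`; the result is the same bit pattern as a plain `as` cast, and a caller can recover the signed code the same way. A small standalone check:

```rust
fn main() {
    const SBI_ERR_NOT_SUPPORTED: usize = usize::from_ne_bytes(isize::to_ne_bytes(-2));

    // Same bit pattern as a sign-extending cast.
    assert_eq!(SBI_ERR_NOT_SUPPORTED, -2isize as usize);

    // A caller can turn the `error` field back into the signed code.
    let error: usize = SBI_ERR_NOT_SUPPORTED;
    assert_eq!(error as isize, -2);
}
```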
+impl SbiRet {
+    /// Return success SBI state with given value.
+    #[inline]
+    pub fn ok(value: usize) -> SbiRet {
+        SbiRet {
+            error: SBI_SUCCESS,
+            value,
+        }
+    }
+    /// The SBI call request failed for unknown reasons.
+    #[inline]
+    pub fn failed() -> SbiRet {
+        SbiRet {
+            error: SBI_ERR_FAILED,
+            value: 0,
+        }
+    }
+    /// SBI call failed because the operation is not supported by the target ISA, the operation
+    /// type is not supported, or the operation is intentionally left unimplemented.
+    #[inline]
+    pub fn not_supported() -> SbiRet {
+        SbiRet {
+            error: SBI_ERR_NOT_SUPPORTED,
+            value: 0,
+        }
+    }
+    /// SBI call failed due to invalid hart mask parameter, invalid target hart id, invalid operation type
+    /// or invalid resource index.
+    #[inline]
+    pub fn invalid_param() -> SbiRet {
+        SbiRet {
+            error: SBI_ERR_INVALID_PARAM,
+            value: 0,
+        }
+    }
+    /// SBI call failed because the mask start address is invalid, the parameter is not a valid
+    /// physical address, or the target address is prohibited by PMP from running in supervisor mode.
+    #[inline]
+    pub fn invalid_address() -> SbiRet {
+        SbiRet {
+            error: SBI_ERR_INVALID_ADDRESS,
+            value: 0,
+        }
+    }
+    /// SBI call failed because the target resource is already available, e.g. the target hart is
+    /// already started when the caller requests to start it.
+    #[inline]
+    pub fn already_available() -> SbiRet {
+        SbiRet {
+            error: SBI_ERR_ALREADY_AVAILABLE,
+            value: 0,
+        }
+    }
+    /// SBI call failed because the target resource is already started, e.g. the target performance counter is already started.
+    #[inline]
+    pub fn already_started() -> SbiRet {
+        SbiRet {
+            error: SBI_ERR_ALREADY_STARTED,
+            value: 0,
+        }
+    }
+    /// SBI call failed because the target resource is already stopped, e.g. the target performance counter is already stopped.
+    #[inline]
+    pub fn already_stopped() -> SbiRet {
+        SbiRet {
+            error: SBI_ERR_ALREADY_STOPPED,
+            value: 0,
+        }
+    }
+    #[cfg(feature = "legacy")]
+    #[inline]
+    pub(crate) fn legacy_ok(legacy_value: usize) -> SbiRet {
+        SbiRet {
+            error: legacy_value,
+            value: 0,
+        }
+    }
+    // only used for legacy functions, where the a0 and a1 return values are not modified
+    #[cfg(feature = "legacy")]
+    #[inline]
+    pub(crate) fn legacy_void(self, a0: usize, a1: usize) -> SbiRet {
+        SbiRet {
+            error: a0,
+            value: a1,
+        }
+    }
+    #[cfg(feature = "legacy")]
+    #[inline]
+    pub(crate) fn legacy_return(self, a1: usize) -> SbiRet {
+        SbiRet {
+            error: self.error,
+            value: a1,
+        }
+    }
+}
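
To show how these constructors are meant to be combined, here is a made-up handler in the style of the per-extension modules dispatched by `handle_ecall` (for example `hsm::handle_ecall_hsm`). The function ID, hart-count limit, and all names below are invented for illustration, and `SbiRet` is re-declared locally so the snippet stands alone:

```rust
#[repr(C)]
pub struct SbiRet {
    pub error: usize,
    pub value: usize,
}

const SBI_SUCCESS: usize = 0;
const SBI_ERR_NOT_SUPPORTED: usize = -2isize as usize;
const SBI_ERR_INVALID_PARAM: usize = -3isize as usize;

impl SbiRet {
    pub fn ok(value: usize) -> SbiRet {
        SbiRet { error: SBI_SUCCESS, value }
    }
    pub fn invalid_param() -> SbiRet {
        SbiRet { error: SBI_ERR_INVALID_PARAM, value: 0 }
    }
    pub fn not_supported() -> SbiRet {
        SbiRet { error: SBI_ERR_NOT_SUPPORTED, value: 0 }
    }
}

const MAX_HART_ID: usize = 3; // platform detail, assumed for the example

/// A hypothetical "start hart" handler sketch.
fn handle_ecall_example(function: u32, hart_id: usize, start_addr: usize) -> SbiRet {
    match function {
        0 => {
            if hart_id > MAX_HART_ID {
                return SbiRet::invalid_param();
            }
            // ... ask the platform to start `hart_id` at `start_addr` ...
            let _ = start_addr;
            SbiRet::ok(0)
        }
        _ => SbiRet::not_supported(),
    }
}

fn main() {
    assert_eq!(handle_ecall_example(0, 7, 0x8020_0000).error, SBI_ERR_INVALID_PARAM);
    assert_eq!(handle_ecall_example(0, 1, 0x8020_0000).error, SBI_SUCCESS);
    assert_eq!(handle_ecall_example(9, 0, 0).error, SBI_ERR_NOT_SUPPORTED);
}
```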

+ 6 - 3
src/hart_mask.rs

@@ -41,6 +41,7 @@ impl HartMask {
                 }
                 hart_mask & (1 << idx) != 0
             }
+            #[cfg(feature = "legacy")]
             MaskInner::Legacy { legacy_bit_vector } => {
                 slow_legacy_has_bit(legacy_bit_vector, hart_id)
             }
@@ -51,6 +52,7 @@ impl HartMask {
     /// from S level, it would result in machine level load access or load misaligned exception.*
     ///
 /// Construct a hart mask from a legacy bit vector and the number of harts on the current platform.
+    #[cfg(feature = "legacy")]
     #[inline]
     pub(crate) unsafe fn legacy_from_addr(vaddr: usize) -> HartMask {
         HartMask {
@@ -67,12 +69,12 @@ enum MaskInner {
         hart_mask: usize,
         hart_mask_base: usize,
     },
-    Legacy {
-        legacy_bit_vector: *const usize,
-    },
+    #[cfg(feature = "legacy")]
+    Legacy { legacy_bit_vector: *const usize },
 }
 
 // deliberately not #[inline], so the fast path for the new-style bit vector stays small
+#[cfg(feature = "legacy")]
 fn slow_legacy_has_bit(legacy_bit_vector: *const usize, hart_id: usize) -> bool {
     fn split_index_usize(index: usize) -> (usize, usize) {
         let bits_in_usize = usize::BITS as usize;
@@ -83,6 +85,7 @@ fn slow_legacy_has_bit(legacy_bit_vector: *const usize, hart_id: usize) -> bool
     cur_vector & (1 << j) != 0
 }
 
+#[cfg(feature = "legacy")]
 #[inline]
 unsafe fn get_vaddr_usize(vaddr_ptr: *const usize) -> usize {
     match () {
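
For reference, the SBI hart-mask rule that the `hart_mask`/`hart_mask_base` check above follows is: hart `hart_id` is selected when `hart_id >= hart_mask_base` and bit `hart_id - hart_mask_base` of `hart_mask` is set; per the SBI specification, a base of -1 (`usize::MAX`) selects all available harts. A standalone sketch of that rule (not this crate's API):

```rust
fn hart_is_selected(hart_mask: usize, hart_mask_base: usize, hart_id: usize) -> bool {
    if hart_mask_base == usize::MAX {
        // Per the SBI spec, a base of -1 means the mask covers all available harts.
        return true;
    }
    match hart_id.checked_sub(hart_mask_base) {
        // Only indices that fit into one `usize` worth of bits can be selected.
        Some(idx) if idx < usize::BITS as usize => hart_mask & (1 << idx) != 0,
        _ => false,
    }
}

fn main() {
    // base = 3, mask 0b101: harts 3 and 5 are selected.
    assert!(hart_is_selected(0b101, 3, 3));
    assert!(!hart_is_selected(0b101, 3, 4));
    assert!(hart_is_selected(0b101, 3, 5));
    assert!(!hart_is_selected(0b101, 3, 2));
}
```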

+ 1 - 0
src/lib.rs

@@ -160,6 +160,7 @@
 #![feature(ptr_metadata)]
 #![deny(warnings)] // cancel this line for developing
 
+#[cfg(feature = "legacy")]
 #[doc(hidden)]
 #[macro_use]
 pub mod legacy_stdio;

+ 1 - 0
src/reset.rs

@@ -73,6 +73,7 @@ pub(crate) fn system_reset(reset_type: u32, reset_reason: u32) -> SbiRet {
     SbiRet::not_supported()
 }
 
+#[cfg(feature = "legacy")]
 #[inline]
 pub(crate) fn legacy_reset() -> ! {
     if let Some(obj) = RESET.get() {
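
`legacy_reset` is typed `-> !` because a successful reset never returns to the caller; a minimal sketch of such a diverging reset path, with a placeholder where the actual platform reset device would be driven (nothing below is this crate's API):

```rust
fn platform_reset() -> ! {
    // ... drive the platform's reset or power-off device here ...

    // If the device did not take effect, never return to the caller.
    loop {
        core::hint::spin_loop();
    }
}
```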

+ 97 - 0
src/util.rs

@@ -2,6 +2,7 @@
 
 use core::{arch::asm, cell::UnsafeCell, marker::PhantomData, mem::MaybeUninit, ptr::Pointee};
 
+<<<<<<< HEAD
 /// A once-initialized reference store that uses only AMO instructions.
 pub struct AmoOnceRef<'a, T: ?Sized> {
     /// As atomic bool, to check if it is the first time to set `ptr`.
@@ -9,6 +10,26 @@ pub struct AmoOnceRef<'a, T: ?Sized> {
     ptr: UnsafeCell<*const ()>,
     meta: UnsafeCell<MaybeUninit<<T as Pointee>::Metadata>>,
     _lifetime: PhantomData<&'a ()>,
+=======
+use alloc::boxed::Box;
+#[cfg(feature = "legacy")]
+use core::ops::{Deref, DerefMut};
+use core::{
+    arch::asm,
+    cell::UnsafeCell,
+    fmt::{self, Debug},
+    marker::PhantomData,
+    mem::MaybeUninit,
+    ptr::{self, Pointee},
+};
+
+/// A thread-safe fat pointer cell which can be written to only once.
+pub struct OnceFatBox<T: ?Sized> {
+    thin_ptr: UnsafeCell<*mut ()>,
+    lock: UnsafeCell<u8>,
+    meta: MaybeUninit<<T as Pointee>::Metadata>,
+    _marker: PhantomData<Option<Box<T>>>,
+>>>>>>> a577447 (gate SBI legacy extension under `legacy` feature)
 }
 
 /// If the reference stored in AmoOncePtr is static, it is naturally safe to move the cell around.
@@ -61,10 +82,51 @@ impl<'a, T: ?Sized> AmoOnceRef<'a, T> {
                     dst = in(reg) self.ptr.get(),
                 );
             }
+<<<<<<< HEAD
             true
         } else {
             // the lock was not acquired: the object has already been initialized
             false
+=======
+            // critical section end
+            asm!(
+                "amoswap.w.rl x0, x0, ({lock})", // release lock by storing 0
+                lock = in(reg) self.lock.get(),
+            );
+            ans
+        };
+        if exchange.is_err() {
+            let value = unsafe { Box::from_raw(fat_ptr) };
+            return Err(value);
+        }
+        Ok(())
+    }
+}
+
+unsafe impl<T: Sync + Send + ?Sized> Sync for OnceFatBox<T> {}
+
+/// Use only amo instructions on mutex; no lr/sc instruction is used
+#[cfg(feature = "legacy")]
+pub struct AmoMutex<T: ?Sized> {
+    lock: UnsafeCell<u8>,
+    data: UnsafeCell<T>,
+}
+
+#[cfg(feature = "legacy")]
+pub struct AmoMutexGuard<'a, T: ?Sized> {
+    lock: *mut u8,
+    data: &'a mut T,
+}
+
+#[cfg(feature = "legacy")]
+impl<T> AmoMutex<T> {
+    /// Create a new AmoMutex
+    #[inline]
+    pub const fn new(data: T) -> Self {
+        AmoMutex {
+            data: UnsafeCell::new(data),
+            lock: UnsafeCell::new(0),
+>>>>>>> a577447 (gate SBI legacy extension under `legacy` feature)
         }
     }
 
@@ -116,13 +178,25 @@ impl<'a, T: ?Sized> AmoOnceRef<'a, T> {
         }
     }
 
+<<<<<<< HEAD
     /// Build a reference from the pointer and its metadata. The caller must guarantee the pointer is non-null; if a non-null pointer was stored, the metadata is guaranteed to exist as well.
+=======
+#[cfg(feature = "legacy")]
+unsafe impl<T: ?Sized + Send> Sync for AmoMutex<T> {}
+#[cfg(feature = "legacy")]
+unsafe impl<T: ?Sized + Send> Send for AmoMutex<T> {}
+
+#[cfg(feature = "legacy")]
+impl<'a, T: ?Sized> Deref for AmoMutexGuard<'a, T> {
+    type Target = T;
+>>>>>>> a577447 (gate SBI legacy extension under `legacy` feature)
     #[inline]
     unsafe fn build_ref_unchecked(&self, ptr: *const ()) -> &T {
         &*core::ptr::from_raw_parts(ptr, (*self.meta.get()).assume_init())
     }
 }
 
+<<<<<<< HEAD
 // /// Use only amo instructions on mutex; no lr/sc instruction is used
 // pub struct AmoMutex<T: ?Sized> {
 //     lock: UnsafeCell<u32>,
@@ -197,3 +271,26 @@ impl<'a, T: ?Sized> AmoOnceRef<'a, T> {
 //         }
 //     }
 // }
+=======
+#[cfg(feature = "legacy")]
+impl<'a, T: ?Sized> DerefMut for AmoMutexGuard<'a, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        self.data
+    }
+}
+
+#[cfg(feature = "legacy")]
+impl<'a, T: ?Sized> Drop for AmoMutexGuard<'a, T> {
+    /// The dropping of the mutex guard will release the lock it was created from.
+    #[inline]
+    fn drop(&mut self) {
+        unsafe {
+            asm!(
+                "amoswap.w.rl x0, x0, ({lock})", // release lock by storing 0
+                lock = in(reg) self.lock,
+            );
+        }
+    }
+}
+>>>>>>> a577447 (gate SBI legacy extension under `legacy` feature)
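
The AMO-only locking idea used throughout this file is: acquire by atomically swapping a non-zero value into the lock word and looping until the previous value comes back zero, and release by swapping zero back in; the `amoswap.w.rl x0, x0, ({lock})` lines above are exactly that release. A compact standalone sketch of the technique for RISC-V targets; it illustrates the idea and is not this crate's exact code:

```rust
use core::{arch::asm, cell::UnsafeCell};

/// A bare spinlock that only ever uses `amoswap` (sketch).
pub struct RawAmoLock {
    lock: UnsafeCell<u32>, // 0 = unlocked, 1 = locked
}

unsafe impl Sync for RawAmoLock {}

impl RawAmoLock {
    pub const fn new() -> Self {
        RawAmoLock {
            lock: UnsafeCell::new(0),
        }
    }

    #[inline]
    pub fn lock(&self) {
        unsafe {
            asm!(
                "   li   {one}, 1",
                "2: amoswap.w.aq {old}, {one}, ({lock})", // atomically swap 1 in, old value out
                "   bnez {old}, 2b",                      // spin while it was already held
                one = out(reg) _,
                old = out(reg) _,
                lock = in(reg) self.lock.get(),
            );
        }
    }

    #[inline]
    pub fn unlock(&self) {
        unsafe {
            asm!(
                "amoswap.w.rl x0, x0, ({lock})", // store 0 with release ordering
                lock = in(reg) self.lock.get(),
            );
        }
    }
}
```

The `.aq` and `.rl` suffixes give the acquire and release ordering a lock needs without any explicit `fence` instruction.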