binary.rs 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459
  1. //! Chapter 3. Binary Encoding.
  2. use core::marker::PhantomData;
/// SBI functions return type.
///
/// > SBI functions must return a pair of values in a0 and a1,
/// > with a0 returning an error code.
/// > This is analogous to returning the C structure `SbiRet`.
///
/// Note: if this structure is used in function return on conventional
/// Rust code, it would not require pinning memory representation as
/// extern C. The `repr(C)` is set in case that some users want to use
/// this structure in FFI code.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct SbiRet {
    /// Error number; `RET_SUCCESS` on success, otherwise one of the
    /// `RET_ERR_*` codes (or an implementation-specific value).
    pub error: usize,
    /// Result value; only meaningful when `error` is `RET_SUCCESS`.
    pub value: usize,
}
// SBI standard return codes. Error codes are defined by the specification as
// negative `isize` values; they are stored here as their `usize` bit patterns
// (e.g. `-1isize` becomes `usize::MAX`) to match the raw `a0` register value.
/// SBI success state return value.
pub const RET_SUCCESS: usize = 0;
/// Error for SBI call failed for unknown reasons.
pub const RET_ERR_FAILED: usize = -1isize as _;
/// Error for target operation not supported.
pub const RET_ERR_NOT_SUPPORTED: usize = -2isize as _;
/// Error for invalid parameter.
pub const RET_ERR_INVALID_PARAM: usize = -3isize as _;
/// Error for denied.
pub const RET_ERR_DENIED: usize = -4isize as _;
/// Error for invalid address.
pub const RET_ERR_INVALID_ADDRESS: usize = -5isize as _;
/// Error for resource already available.
pub const RET_ERR_ALREADY_AVAILABLE: usize = -6isize as _;
/// Error for resource already started.
pub const RET_ERR_ALREADY_STARTED: usize = -7isize as _;
/// Error for resource already stopped.
pub const RET_ERR_ALREADY_STOPPED: usize = -8isize as _;
/// Error for shared memory not available.
pub const RET_ERR_NO_SHMEM: usize = -9isize as _;
  41. impl core::fmt::Debug for SbiRet {
  42. fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
  43. match self.error {
  44. RET_SUCCESS => self.value.fmt(f),
  45. RET_ERR_FAILED => write!(f, "<SBI call failed>"),
  46. RET_ERR_NOT_SUPPORTED => write!(f, "<SBI feature not supported>"),
  47. RET_ERR_INVALID_PARAM => write!(f, "<SBI invalid parameter>"),
  48. RET_ERR_DENIED => write!(f, "<SBI denied>"),
  49. RET_ERR_INVALID_ADDRESS => write!(f, "<SBI invalid address>"),
  50. RET_ERR_ALREADY_AVAILABLE => write!(f, "<SBI already available>"),
  51. RET_ERR_ALREADY_STARTED => write!(f, "<SBI already started>"),
  52. RET_ERR_ALREADY_STOPPED => write!(f, "<SBI already stopped>"),
  53. RET_ERR_NO_SHMEM => write!(f, "<SBI shared memory not available>"),
  54. unknown => write!(f, "[SBI Unknown error: {unknown:#x}]"),
  55. }
  56. }
  57. }
/// RISC-V SBI error in enumeration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Error {
    /// Error for SBI call failed for unknown reasons.
    Failed,
    /// Error for target operation not supported.
    NotSupported,
    /// Error for invalid parameter.
    InvalidParam,
    /// Error for denied.
    Denied,
    /// Error for invalid address.
    InvalidAddress,
    /// Error for resource already available.
    AlreadyAvailable,
    /// Error for resource already started.
    AlreadyStarted,
    /// Error for resource already stopped.
    AlreadyStopped,
    /// Error for shared memory not available.
    NoShmem,
    /// Custom error code.
    ///
    /// Carries the raw error value (reinterpreted as `isize`) when it does
    /// not match any of the standard `RET_ERR_*` codes.
    Custom(isize),
}
  82. impl SbiRet {
  83. /// Returns success SBI state with given `value`.
  84. #[inline]
  85. pub const fn success(value: usize) -> Self {
  86. Self {
  87. error: RET_SUCCESS,
  88. value,
  89. }
  90. }
  91. /// The SBI call request failed for unknown reasons.
  92. #[inline]
  93. pub const fn failed() -> Self {
  94. Self {
  95. error: RET_ERR_FAILED,
  96. value: 0,
  97. }
  98. }
  99. /// SBI call failed due to not supported by target ISA,
  100. /// operation type not supported,
  101. /// or target operation type not implemented on purpose.
  102. #[inline]
  103. pub const fn not_supported() -> Self {
  104. Self {
  105. error: RET_ERR_NOT_SUPPORTED,
  106. value: 0,
  107. }
  108. }
  109. /// SBI call failed due to invalid hart mask parameter,
  110. /// invalid target hart id,
  111. /// invalid operation type,
  112. /// or invalid resource index.
  113. #[inline]
  114. pub const fn invalid_param() -> Self {
  115. Self {
  116. error: RET_ERR_INVALID_PARAM,
  117. value: 0,
  118. }
  119. }
  120. /// SBI call denied for unsatisfied entry criteria, or insufficient access
  121. /// permission to debug console or CPPC register.
  122. #[inline]
  123. pub const fn denied() -> Self {
  124. Self {
  125. error: RET_ERR_DENIED,
  126. value: 0,
  127. }
  128. }
  129. /// SBI call failed for invalid mask start address,
  130. /// not a valid physical address parameter,
  131. /// or the target address is prohibited by PMP to run in supervisor mode.
  132. #[inline]
  133. pub const fn invalid_address() -> Self {
  134. Self {
  135. error: RET_ERR_INVALID_ADDRESS,
  136. value: 0,
  137. }
  138. }
  139. /// SBI call failed for the target resource is already available,
  140. /// e.g., the target hart is already started when caller still requests it to start.
  141. #[inline]
  142. pub const fn already_available() -> Self {
  143. Self {
  144. error: RET_ERR_ALREADY_AVAILABLE,
  145. value: 0,
  146. }
  147. }
  148. /// SBI call failed for the target resource is already started,
  149. /// e.g., target performance counter is started.
  150. #[inline]
  151. pub const fn already_started() -> Self {
  152. Self {
  153. error: RET_ERR_ALREADY_STARTED,
  154. value: 0,
  155. }
  156. }
  157. /// SBI call failed for the target resource is already stopped,
  158. /// e.g., target performance counter is stopped.
  159. #[inline]
  160. pub const fn already_stopped() -> Self {
  161. Self {
  162. error: RET_ERR_ALREADY_STOPPED,
  163. value: 0,
  164. }
  165. }
  166. /// SBI call failed for shared memory is not available,
  167. /// e.g. nested acceleration shared memory is not available.
  168. #[inline]
  169. pub const fn no_shmem() -> Self {
  170. Self {
  171. error: RET_ERR_NO_SHMEM,
  172. value: 0,
  173. }
  174. }
  175. }
impl SbiRet {
    /// Converts to a [`Result`] of value and error.
    #[inline]
    pub const fn into_result(self) -> Result<usize, Error> {
        match self.error {
            RET_SUCCESS => Ok(self.value),
            RET_ERR_FAILED => Err(Error::Failed),
            RET_ERR_NOT_SUPPORTED => Err(Error::NotSupported),
            RET_ERR_INVALID_PARAM => Err(Error::InvalidParam),
            RET_ERR_DENIED => Err(Error::Denied),
            RET_ERR_INVALID_ADDRESS => Err(Error::InvalidAddress),
            RET_ERR_ALREADY_AVAILABLE => Err(Error::AlreadyAvailable),
            RET_ERR_ALREADY_STARTED => Err(Error::AlreadyStarted),
            RET_ERR_ALREADY_STOPPED => Err(Error::AlreadyStopped),
            RET_ERR_NO_SHMEM => Err(Error::NoShmem),
            unknown => Err(Error::Custom(unknown as _)),
        }
    }
    /// Returns `true` if current SBI return succeeded.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::SbiRet;
    /// let x = SbiRet::success(0);
    /// assert_eq!(x.is_ok(), true);
    ///
    /// let x = SbiRet::failed();
    /// assert_eq!(x.is_ok(), false);
    /// ```
    #[must_use = "if you intended to assert that this is ok, consider `.unwrap()` instead"]
    #[inline]
    pub const fn is_ok(&self) -> bool {
        matches!(self.error, RET_SUCCESS)
    }
    /// Returns `true` if the SBI call succeeded and the value inside of it matches a predicate.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::SbiRet;
    /// let x = SbiRet::success(2);
    /// assert_eq!(x.is_ok_and(|x| x > 1), true);
    ///
    /// let x = SbiRet::success(0);
    /// assert_eq!(x.is_ok_and(|x| x > 1), false);
    ///
    /// let x = SbiRet::no_shmem();
    /// assert_eq!(x.is_ok_and(|x| x > 1), false);
    /// ```
    #[must_use]
    #[inline]
    pub fn is_ok_and(self, f: impl FnOnce(usize) -> bool) -> bool {
        self.into_result().is_ok_and(f)
    }
    /// Returns `true` if current SBI return is an error.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::SbiRet;
    /// let x = SbiRet::success(0);
    /// assert_eq!(x.is_err(), false);
    ///
    /// let x = SbiRet::not_supported();
    /// assert_eq!(x.is_err(), true);
    /// ```
    #[must_use = "if you intended to assert that this is err, consider `.unwrap_err()` instead"]
    #[inline]
    pub const fn is_err(&self) -> bool {
        !self.is_ok()
    }
    /// Returns `true` if the result is an error and the value inside of it matches a predicate.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sbi_spec::binary::{SbiRet, Error};
    /// let x = SbiRet::denied();
    /// assert_eq!(x.is_err_and(|x| x == Error::Denied), true);
    ///
    /// let x = SbiRet::invalid_address();
    /// assert_eq!(x.is_err_and(|x| x == Error::Denied), false);
    ///
    /// let x = SbiRet::success(0);
    /// assert_eq!(x.is_err_and(|x| x == Error::Denied), false);
    /// ```
    #[must_use]
    #[inline]
    pub fn is_err_and(self, f: impl FnOnce(Error) -> bool) -> bool {
        self.into_result().is_err_and(f)
    }
    /// Converts from `SbiRet` to [`Option<usize>`].
    ///
    /// Converts `self` into an [`Option<usize>`], consuming `self`,
    /// and discarding the error, if any.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::SbiRet;
    /// let x = SbiRet::success(2);
    /// assert_eq!(x.ok(), Some(2));
    ///
    /// let x = SbiRet::invalid_param();
    /// assert_eq!(x.ok(), None);
    /// ```
    // fixme: should be pub const fn once this function in Result is stabilized in constant
    #[inline]
    pub fn ok(self) -> Option<usize> {
        self.into_result().ok()
    }
    /// Converts from `SbiRet` to [`Option<Error>`].
    ///
    /// Converts `self` into an [`Option<Error>`], consuming `self`,
    /// and discarding the success value, if any.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::{SbiRet, Error};
    /// let x = SbiRet::success(2);
    /// assert_eq!(x.err(), None);
    ///
    /// let x = SbiRet::denied();
    /// assert_eq!(x.err(), Some(Error::Denied));
    /// ```
    // fixme: should be pub const fn once this function in Result is stabilized in constant
    #[inline]
    pub fn err(self) -> Option<Error> {
        self.into_result().err()
    }
    /// Maps a `SbiRet` to `Result<U, Error>` by applying a function to a
    /// contained success value, leaving an error value untouched.
    ///
    /// This function can be used to compose the results of two functions.
    ///
    /// # Examples
    ///
    /// Gets detail of a PMU counter and judge if it is a firmware counter.
    ///
    /// ```
    /// # use sbi_spec::binary::SbiRet;
    /// # mod sbi_rt {
    /// #     use sbi_spec::binary::SbiRet;
    /// #     const TYPE_MASK: usize = 1 << (usize::BITS as usize - 1);
    /// #     pub fn pmu_counter_get_info(_: usize) -> SbiRet { SbiRet::success(TYPE_MASK) }
    /// # }
    /// // We assume that counter index 42 is a firmware counter.
    /// let counter_idx = 42;
    /// // Masks PMU counter type by setting highest bit in `usize`.
    /// const TYPE_MASK: usize = 1 << (usize::BITS as usize - 1);
    /// // Highest bit of returned `counter_info` represents whether it's
    /// // a firmware counter or a hardware counter.
    /// let is_firmware_counter = sbi_rt::pmu_counter_get_info(counter_idx)
    ///     .map(|counter_info| counter_info & TYPE_MASK != 0);
    /// // If that bit is set, it is a firmware counter.
    /// assert_eq!(is_firmware_counter, Ok(true));
    /// ```
    #[inline]
    pub fn map<U, F: FnOnce(usize) -> U>(self, op: F) -> Result<U, Error> {
        self.into_result().map(op)
    }
    /// Returns the provided default (if error),
    /// or applies a function to the contained value (if success).
    ///
    /// Arguments passed to `map_or` are eagerly evaluated;
    /// if you are passing the result of a function call,
    /// it is recommended to use [`map_or_else`],
    /// which is lazily evaluated.
    ///
    /// [`map_or_else`]: SbiRet::map_or_else
    ///
    /// # Examples
    ///
    /// ```
    /// # use sbi_spec::binary::SbiRet;
    /// let x = SbiRet::success(3);
    /// assert_eq!(x.map_or(42, |v| v & 0b1), 1);
    ///
    /// let x = SbiRet::invalid_address();
    /// assert_eq!(x.map_or(42, |v| v & 0b1), 42);
    /// ```
    #[inline]
    pub fn map_or<U, F: FnOnce(usize) -> U>(self, default: U, f: F) -> U {
        self.into_result().map_or(default, f)
    }
    /// Maps a `SbiRet` to `usize` value by applying fallback function `default` to
    /// a contained error, or function `f` to a contained success value.
    ///
    /// This function can be used to unpack a successful result
    /// while handling an error.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::SbiRet;
    /// let k = 21;
    ///
    /// let x = SbiRet::success(3);
    /// assert_eq!(x.map_or_else(|e| k * 2, |v| v & 0b1), 1);
    ///
    /// let x = SbiRet::already_available();
    /// assert_eq!(x.map_or_else(|e| k * 2, |v| v & 0b1), 42);
    /// ```
    #[inline]
    pub fn map_or_else<U, D: FnOnce(Error) -> U, F: FnOnce(usize) -> U>(
        self,
        default: D,
        f: F,
    ) -> U {
        self.into_result().map_or_else(default, f)
    }
    /// Maps a `SbiRet` to `Result<T, F>` by applying a function to a
    /// contained error as [`Error`] struct, leaving success value untouched.
    ///
    /// This function can be used to pass through a successful result while handling
    /// an error.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::{SbiRet, Error};
    /// fn stringify(x: Error) -> String {
    ///     if x == Error::AlreadyStarted {
    ///         "error: already started!".to_string()
    ///     } else {
    ///         "error: other error!".to_string()
    ///     }
    /// }
    ///
    /// let x = SbiRet::success(2);
    /// assert_eq!(x.map_err(stringify), Ok(2));
    ///
    /// let x = SbiRet::already_started();
    /// assert_eq!(x.map_err(stringify), Err("error: already started!".to_string()));
    /// ```
    #[inline]
    pub fn map_err<F, O: FnOnce(Error) -> F>(self, op: O) -> Result<usize, F> {
        self.into_result().map_err(op)
    }
    /// Calls a function with a reference to the contained value if current SBI call succeeded.
    ///
    /// Returns the original result.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sbi_spec::binary::SbiRet;
    /// // Assume that SBI debug console have read 512 bytes into a buffer.
    /// let ret = SbiRet::success(512);
    /// // Inspect the SBI DBCN call result.
    /// let idx = ret
    ///     .inspect(|x| println!("bytes written: {x}"))
    ///     .map(|x| x - 1)
    ///     .expect("SBI DBCN call failed");
    /// assert_eq!(idx, 511);
    /// ```
    #[inline]
    pub fn inspect<F: FnOnce(&usize)>(self, f: F) -> Self {
        if let Ok(ref t) = self.into_result() {
            f(t);
        }
        self
    }
    /// Calls a function with a reference to the contained value if current SBI result is an error.
    ///
    /// Returns the original result.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sbi_spec::binary::SbiRet;
    /// // Assume that SBI debug console write operation failed for invalid parameter.
    /// let ret = SbiRet::invalid_param();
    /// // Print the error if SBI DBCN call failed.
    /// let ret = ret.inspect_err(|e| eprintln!("failed to read from SBI console: {e:?}"));
    /// ```
    #[inline]
    pub fn inspect_err<F: FnOnce(&Error)>(self, f: F) -> Self {
        if let Err(ref e) = self.into_result() {
            f(e);
        }
        self
    }
    /// Returns the contained success value, consuming the `self` value.
    ///
    /// # Panics
    ///
    /// Panics if self is an SBI error with a panic message including the
    /// passed message, and the content of the SBI state.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```should_panic
    /// # use sbi_spec::binary::SbiRet;
    /// let x = SbiRet::already_stopped();
    /// x.expect("Testing expect"); // panics with `Testing expect`
    /// ```
    #[inline]
    pub fn expect(self, msg: &str) -> usize {
        self.into_result().expect(msg)
    }
    /// Returns the contained success value, consuming the `self` value.
    ///
    /// # Panics
    ///
    /// Panics if self is an SBI error, with a panic message provided by the
    /// SBI error converted into [`Error`] struct.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::SbiRet;
    /// let x = SbiRet::success(2);
    /// assert_eq!(x.unwrap(), 2);
    /// ```
    ///
    /// ```should_panic
    /// # use sbi_spec::binary::SbiRet;
    /// let x = SbiRet::failed();
    /// x.unwrap(); // panics
    /// ```
    #[inline]
    pub fn unwrap(self) -> usize {
        self.into_result().unwrap()
    }
    /// Returns the contained error as [`Error`] struct, consuming the `self` value.
    ///
    /// # Panics
    ///
    /// Panics if the self is SBI success value, with a panic message
    /// including the passed message, and the content of the success value.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```should_panic
    /// # use sbi_spec::binary::SbiRet;
    /// let x = SbiRet::success(10);
    /// x.expect_err("Testing expect_err"); // panics with `Testing expect_err`
    /// ```
    #[inline]
    pub fn expect_err(self, msg: &str) -> Error {
        self.into_result().expect_err(msg)
    }
    /// Returns the contained error as [`Error`] struct, consuming the `self` value.
    ///
    /// # Panics
    ///
    /// Panics if the self is SBI success value, with a custom panic message provided
    /// by the success value.
    ///
    /// # Examples
    ///
    /// ```should_panic
    /// # use sbi_spec::binary::SbiRet;
    /// let x = SbiRet::success(2);
    /// x.unwrap_err(); // panics with `2`
    /// ```
    ///
    /// ```
    /// # use sbi_spec::binary::{SbiRet, Error};
    /// let x = SbiRet::not_supported();
    /// assert_eq!(x.unwrap_err(), Error::NotSupported);
    /// ```
    #[inline]
    pub fn unwrap_err(self) -> Error {
        self.into_result().unwrap_err()
    }
    /// Returns `res` if self is success value, otherwise returns the contained error
    /// of `self` as [`Error`] struct.
    ///
    /// Arguments passed to `and` are eagerly evaluated; if you are passing the
    /// result of a function call, it is recommended to use [`and_then`], which is
    /// lazily evaluated.
    ///
    /// [`and_then`]: SbiRet::and_then
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::{SbiRet, Error};
    /// let x = SbiRet::success(2);
    /// let y = SbiRet::invalid_param().into_result();
    /// assert_eq!(x.and(y), Err(Error::InvalidParam));
    ///
    /// let x = SbiRet::denied();
    /// let y = SbiRet::success(3).into_result();
    /// assert_eq!(x.and(y), Err(Error::Denied));
    ///
    /// let x = SbiRet::invalid_address();
    /// let y = SbiRet::already_available().into_result();
    /// assert_eq!(x.and(y), Err(Error::InvalidAddress));
    ///
    /// let x = SbiRet::success(4);
    /// let y = SbiRet::success(5).into_result();
    /// assert_eq!(x.and(y), Ok(5));
    /// ```
    // fixme: should be pub const fn once this function in Result is stabilized in constant
    // fixme: should parameter be `res: SbiRet`?
    #[inline]
    pub fn and<U>(self, res: Result<U, Error>) -> Result<U, Error> {
        self.into_result().and(res)
    }
    /// Calls `op` if self is success value, otherwise returns the contained error
    /// as [`Error`] struct.
    ///
    /// This function can be used for control flow based on `SbiRet` values.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sbi_spec::binary::{SbiRet, Error};
    /// fn sq_then_to_string(x: usize) -> Result<String, Error> {
    ///     x.checked_mul(x).map(|sq| sq.to_string()).ok_or(Error::Failed)
    /// }
    ///
    /// assert_eq!(SbiRet::success(2).and_then(sq_then_to_string), Ok(4.to_string()));
    /// assert_eq!(SbiRet::success(1_000_000_000_000).and_then(sq_then_to_string), Err(Error::Failed));
    /// assert_eq!(SbiRet::invalid_param().and_then(sq_then_to_string), Err(Error::InvalidParam));
    /// ```
    #[inline]
    pub fn and_then<U, F: FnOnce(usize) -> Result<U, Error>>(self, op: F) -> Result<U, Error> {
        self.into_result().and_then(op)
    }
    /// Returns `res` if self is SBI error, otherwise returns the success value of `self`.
    ///
    /// Arguments passed to `or` are eagerly evaluated; if you are passing the
    /// result of a function call, it is recommended to use [`or_else`], which is
    /// lazily evaluated.
    ///
    /// [`or_else`]: SbiRet::or_else
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::{SbiRet, Error};
    /// let x = SbiRet::success(2);
    /// let y = SbiRet::invalid_param().into_result();
    /// assert_eq!(x.or(y), Ok(2));
    ///
    /// let x = SbiRet::denied();
    /// let y = SbiRet::success(3).into_result();
    /// assert_eq!(x.or(y), Ok(3));
    ///
    /// let x = SbiRet::invalid_address();
    /// let y = SbiRet::already_available().into_result();
    /// assert_eq!(x.or(y), Err(Error::AlreadyAvailable));
    ///
    /// let x = SbiRet::success(4);
    /// let y = SbiRet::success(100).into_result();
    /// assert_eq!(x.or(y), Ok(4));
    /// ```
    // fixme: should be pub const fn once this function in Result is stabilized in constant
    // fixme: should parameter be `res: SbiRet`?
    #[inline]
    pub fn or<F>(self, res: Result<usize, F>) -> Result<usize, F> {
        self.into_result().or(res)
    }
    /// Calls `op` if self is SBI error, otherwise returns the success value of `self`.
    ///
    /// This function can be used for control flow based on result values.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::{SbiRet, Error};
    /// fn is_failed(x: Error) -> Result<usize, bool> { Err(x == Error::Failed) }
    ///
    /// assert_eq!(SbiRet::success(2).or_else(is_failed), Ok(2));
    /// assert_eq!(SbiRet::failed().or_else(is_failed), Err(true));
    /// ```
    #[inline]
    pub fn or_else<F, O: FnOnce(Error) -> Result<usize, F>>(self, op: O) -> Result<usize, F> {
        self.into_result().or_else(op)
    }
    /// Returns the contained success value or a provided default.
    ///
    /// Arguments passed to `unwrap_or` are eagerly evaluated; if you are passing
    /// the result of a function call, it is recommended to use [`unwrap_or_else`],
    /// which is lazily evaluated.
    ///
    /// [`unwrap_or_else`]: SbiRet::unwrap_or_else
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::SbiRet;
    /// let default = 2;
    /// let x = SbiRet::success(9);
    /// assert_eq!(x.unwrap_or(default), 9);
    ///
    /// let x = SbiRet::invalid_param();
    /// assert_eq!(x.unwrap_or(default), default);
    /// ```
    // fixme: should be pub const fn once this function in Result is stabilized in constant
    #[inline]
    pub fn unwrap_or(self, default: usize) -> usize {
        self.into_result().unwrap_or(default)
    }
    /// Returns the contained success value or computes it from a closure.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # use sbi_spec::binary::{SbiRet, Error};
    /// fn invalid_use_zero(x: Error) -> usize { if x == Error::InvalidParam { 0 } else { 3 } }
    ///
    /// assert_eq!(SbiRet::success(2).unwrap_or_else(invalid_use_zero), 2);
    /// assert_eq!(SbiRet::invalid_param().unwrap_or_else(invalid_use_zero), 0);
    /// ```
    #[inline]
    pub fn unwrap_or_else<F: FnOnce(Error) -> usize>(self, op: F) -> usize {
        self.into_result().unwrap_or_else(op)
    }
}
  724. /// Check if the implementation can contains the provided `bit`.
  725. #[inline]
  726. pub(crate) const fn valid_bit(base: usize, bit: usize) -> bool {
  727. if bit < base {
  728. // invalid index, under minimum range.
  729. false
  730. } else if (bit - base) >= usize::BITS as usize {
  731. // invalid index, over max range.
  732. false
  733. } else {
  734. true
  735. }
  736. }
  737. /// Check if the implementation contains the provided `bit`.
  738. ///
  739. /// ## Parameters
  740. ///
  741. /// - `mask`: bitmask defining the range of bits.
  742. /// - `base`: the starting bit index. (default: `0`)
  743. /// - `ignore`: if `base` is equal to this value, ignore the `mask` parameter, and consider all `bit`s set.
  744. /// - `bit`: the bit index to check for membership in the `mask`.
  745. #[inline]
  746. pub(crate) const fn has_bit(mask: usize, base: usize, ignore: usize, bit: usize) -> bool {
  747. if base == ignore {
  748. // ignore the `mask`, consider all `bit`s as set.
  749. true
  750. } else if !valid_bit(base, bit) {
  751. false
  752. } else {
  753. // index is in range, check if it is set in the mask.
  754. mask & (1 << (bit - base)) != 0
  755. }
  756. }
/// Hart mask structure in SBI function calls.
#[repr(C)]
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct HartMask {
    /// Bitmask of selected harts: bit `i` selects hart id `hart_mask_base + i`.
    hart_mask: usize,
    /// Hart id corresponding to the lowest bit of `hart_mask`.
    hart_mask_base: usize,
}
  764. impl HartMask {
  765. /// Special value to ignore the `mask`, and consider all `bit`s as set.
  766. pub const IGNORE_MASK: usize = usize::MAX;
  767. /// Construct a [HartMask] from mask value and base hart id.
  768. #[inline]
  769. pub const fn from_mask_base(hart_mask: usize, hart_mask_base: usize) -> Self {
  770. Self {
  771. hart_mask,
  772. hart_mask_base,
  773. }
  774. }
  775. /// Construct a [HartMask] that selects all available harts on the current environment.
  776. ///
  777. /// According to the RISC-V SBI Specification, `hart_mask_base` can be set to `-1` (i.e. `usize::MAX`)
  778. /// to indicate that `hart_mask` shall be ignored and all available harts must be considered.
  779. /// In case of this function in the `sbi-spec` crate, we fill in `usize::MAX` in `hart_mask_base`
  780. /// parameter to match the RISC-V SBI standard, while choosing 0 as the ignored `hart_mask` value.
  781. #[inline]
  782. pub const fn all() -> Self {
  783. Self {
  784. hart_mask: 0,
  785. hart_mask_base: usize::MAX,
  786. }
  787. }
  788. /// Gets the special value for ignoring the `mask` parameter.
  789. #[inline]
  790. pub const fn ignore_mask(&self) -> usize {
  791. Self::IGNORE_MASK
  792. }
  793. /// Returns `mask` and `base` parameters from the [HartMask].
  794. #[inline]
  795. pub const fn into_inner(self) -> (usize, usize) {
  796. (self.hart_mask, self.hart_mask_base)
  797. }
  798. /// Returns whether the [HartMask] contains the provided `hart_id`.
  799. #[inline]
  800. pub const fn has_bit(self, hart_id: usize) -> bool {
  801. has_bit(
  802. self.hart_mask,
  803. self.hart_mask_base,
  804. Self::IGNORE_MASK,
  805. hart_id,
  806. )
  807. }
  808. /// Insert a hart id into this [HartMask].
  809. ///
  810. /// Returns error when `hart_id` is invalid.
  811. #[inline]
  812. pub const fn insert(&mut self, hart_id: usize) -> Result<(), MaskError> {
  813. if self.hart_mask_base == Self::IGNORE_MASK {
  814. Ok(())
  815. } else if valid_bit(self.hart_mask_base, hart_id) {
  816. self.hart_mask |= 1usize << (hart_id - self.hart_mask_base);
  817. Ok(())
  818. } else {
  819. Err(MaskError::InvalidBit)
  820. }
  821. }
  822. /// Remove a hart id from this [HartMask].
  823. ///
  824. /// Returns error when `hart_id` is invalid, or it has been ignored.
  825. #[inline]
  826. pub const fn remove(&mut self, hart_id: usize) -> Result<(), MaskError> {
  827. if self.hart_mask_base == Self::IGNORE_MASK {
  828. Err(MaskError::Ignored)
  829. } else if valid_bit(self.hart_mask_base, hart_id) {
  830. self.hart_mask &= !(1usize << (hart_id - self.hart_mask_base));
  831. Ok(())
  832. } else {
  833. Err(MaskError::InvalidBit)
  834. }
  835. }
  836. /// Returns [HartIds] of self.
  837. #[inline]
  838. pub const fn iter(&self) -> HartIds {
  839. HartIds {
  840. inner: match self.hart_mask_base {
  841. Self::IGNORE_MASK => UnvisitedMask::Range(0, usize::MAX),
  842. _ => UnvisitedMask::MaskBase(self.hart_mask, self.hart_mask_base),
  843. },
  844. }
  845. }
  846. }
  847. impl IntoIterator for HartMask {
  848. type Item = usize;
  849. type IntoIter = HartIds;
  850. #[inline]
  851. fn into_iter(self) -> Self::IntoIter {
  852. self.iter()
  853. }
  854. }
/// Iterator structure for `HartMask`.
///
/// It will iterate hart id from low to high.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct HartIds {
    // The hart ids that have not been yielded yet.
    inner: UnvisitedMask,
}
// Internal iterator state: the set of hart ids not visited yet.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
enum UnvisitedMask {
    // Remaining bits of a finite mask, plus the hart id of bit 0.
    MaskBase(usize, usize),
    // Contiguous id range used when the mask is ignored (`HartMask::all`);
    // the two bounds shrink toward each other as items are yielded.
    Range(usize, usize),
}
impl Iterator for HartIds {
    type Item = usize;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        match &mut self.inner {
            // No bits left: the iterator is exhausted. This arm must come
            // before the general `MaskBase` arm.
            UnvisitedMask::MaskBase(0, _base) => None,
            UnvisitedMask::MaskBase(unvisited_mask, base) => {
                // Yield the lowest set bit, then clear it from the mask.
                let low_bit = unvisited_mask.trailing_zeros();
                let hart_id = usize::try_from(low_bit).unwrap() + *base;
                *unvisited_mask &= !(1usize << low_bit);
                Some(hart_id)
            }
            UnvisitedMask::Range(from, to) => {
                assert!(from <= to);
                if *from < *to {
                    // Yield the lower bound and advance it. Note: iterating
                    // forward, `to` itself is never produced; `next_back`
                    // behaves the mirror-image way (see DoubleEndedIterator).
                    let ans = *from;
                    *from += 1;
                    Some(ans)
                } else {
                    None
                }
            }
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        match self.inner {
            UnvisitedMask::MaskBase(unvisited_mask, _base) => {
                // One item remains per set bit, so the hint is exact.
                let exact_popcnt = usize::try_from(unvisited_mask.count_ones()).unwrap();
                (exact_popcnt, Some(exact_popcnt))
            }
            UnvisitedMask::Range(from, to) => {
                assert!(from <= to);
                let exact_num_harts = to - from;
                (exact_num_harts, Some(exact_num_harts))
            }
        }
    }
}
impl DoubleEndedIterator for HartIds {
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        match &mut self.inner {
            // No bits left: the iterator is exhausted.
            UnvisitedMask::MaskBase(0, _base) => None,
            UnvisitedMask::MaskBase(unvisited_mask, base) => {
                // Yield the highest set bit, then clear it from the mask.
                let high_bit = unvisited_mask.leading_zeros();
                let hart_id = usize::try_from(usize::BITS - high_bit - 1).unwrap() + *base;
                *unvisited_mask &= !(1usize << (usize::BITS - high_bit - 1));
                Some(hart_id)
            }
            UnvisitedMask::Range(from, to) => {
                assert!(from <= to);
                if *from < *to {
                    // Going backward, the upper bound `to` itself IS yielded
                    // (e.g. `HartMask::all().iter().next_back() == Some(usize::MAX)`),
                    // while the forward direction stops before it. Either
                    // direction yields exactly `to - from` items, matching
                    // `size_hint`; the full 0..=usize::MAX set cannot have its
                    // count represented in a `usize`, hence this compromise.
                    let ans = *to;
                    *to -= 1;
                    Some(ans)
                } else {
                    None
                }
            }
        }
    }
}
// `size_hint` is always exact for both variants, so `len()` is reliable.
impl ExactSizeIterator for HartIds {}
// Both variants keep returning `None` once exhausted.
impl core::iter::FusedIterator for HartIds {}
/// Error of mask modification.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum MaskError {
    /// This mask has been ignored.
    Ignored,
    /// Requested bit is invalid.
    InvalidBit,
}
  940. /// Counter index mask structure in SBI function calls for the `PMU` extension §11.
  941. #[repr(C)]
  942. #[derive(Debug, Copy, Clone, Eq, PartialEq)]
  943. pub struct CounterMask {
  944. counter_idx_mask: usize,
  945. counter_idx_base: usize,
  946. }
  947. impl CounterMask {
  948. /// Special value to ignore the `mask`, and consider all `bit`s as set.
  949. pub const IGNORE_MASK: usize = usize::MAX;
  950. /// Construct a [CounterMask] from mask value and base counter index.
  951. #[inline]
  952. pub const fn from_mask_base(counter_idx_mask: usize, counter_idx_base: usize) -> Self {
  953. Self {
  954. counter_idx_mask,
  955. counter_idx_base,
  956. }
  957. }
  958. /// Gets the special value for ignoring the `mask` parameter.
  959. #[inline]
  960. pub const fn ignore_mask(&self) -> usize {
  961. Self::IGNORE_MASK
  962. }
  963. /// Returns `mask` and `base` parameters from the [CounterMask].
  964. #[inline]
  965. pub const fn into_inner(self) -> (usize, usize) {
  966. (self.counter_idx_mask, self.counter_idx_base)
  967. }
  968. /// Returns whether the [CounterMask] contains the provided `counter`.
  969. #[inline]
  970. pub const fn has_bit(self, counter: usize) -> bool {
  971. has_bit(
  972. self.counter_idx_mask,
  973. self.counter_idx_base,
  974. Self::IGNORE_MASK,
  975. counter,
  976. )
  977. }
  978. }
/// Physical slice wrapper with type annotation.
///
/// This struct wraps slices in RISC-V physical memory by low and high part of the
/// physical base address as well as its length. It is usually used by SBI extensions
/// as parameter types to pass base address and length parameters on physical memory
/// other than a virtual one.
///
/// Generic parameter `P` represents a hint of how this physical slice would be used.
/// For example, `Physical<&[u8]>` represents an immutable reference to physical byte slice,
/// while `Physical<&mut [u8]>` represents a mutable one.
///
/// An SBI implementation should load or store memory using both `phys_addr_lo` and
/// `phys_addr_hi` combined as base address. A supervisor program (kernels etc.)
/// should provide continuous physical memory, wrapping its reference using this structure
/// before passing into SBI runtime.
#[derive(Clone, Copy)]
pub struct Physical<P> {
    // Length of the slice in bytes.
    num_bytes: usize,
    // Low part of the physical base address.
    phys_addr_lo: usize,
    // High part of the physical base address.
    phys_addr_hi: usize,
    // Zero-sized usage hint; no `P` value is ever stored.
    _marker: PhantomData<P>,
}
  1001. impl<P> Physical<P> {
  1002. /// Create a physical memory slice by length and physical address.
  1003. #[inline]
  1004. pub const fn new(num_bytes: usize, phys_addr_lo: usize, phys_addr_hi: usize) -> Self {
  1005. Self {
  1006. num_bytes,
  1007. phys_addr_lo,
  1008. phys_addr_hi,
  1009. _marker: core::marker::PhantomData,
  1010. }
  1011. }
  1012. /// Returns length of the physical memory slice.
  1013. #[inline]
  1014. pub const fn num_bytes(&self) -> usize {
  1015. self.num_bytes
  1016. }
  1017. /// Returns low-part base address of physical memory slice.
  1018. #[inline]
  1019. pub const fn phys_addr_lo(&self) -> usize {
  1020. self.phys_addr_lo
  1021. }
  1022. /// Returns high-part base address of physical memory slice.
  1023. #[inline]
  1024. pub const fn phys_addr_hi(&self) -> usize {
  1025. self.phys_addr_hi
  1026. }
  1027. }
/// Shared memory physical address raw pointer with type annotation.
///
/// This is a structure wrapping a raw pointer to the value of the type `T` without
/// a pointer metadata. `SharedPtr`'s are _thin_; they won't include metadata
/// as RISC-V SBI does not provide an approach to pass them via SBI calls,
/// thus the length of type `T` should be decided independently of raw
/// pointer structure.
///
/// `SharedPtr` can be used as a parameter to pass the shared memory physical pointer
/// with a given base address in RISC-V SBI calls. For example, a `SharedPtr<[u8; 64]>`
/// would represent a fixed-size 64 byte array on a RISC-V SBI function argument
/// type.
///
/// This structure cannot be dereferenced directly with physical addresses,
/// because on RISC-V systems the physical address space could be larger than the
/// virtual ones. Hence, this structure describes the physical memory range by
/// two `usize` values: the upper `phys_addr_hi` and lower `phys_addr_lo`.
///
/// RISC-V SBI extensions may declare special pointer values for shared memory
/// raw pointers. For example, SBI STA declares that steal-time information
/// should stop from reporting when the SBI call is invoked using all-ones
/// bitwise shared pointer, i.e. `phys_addr_hi` and `phys_addr_lo` both equals
/// `usize::MAX`. `SharedPtr` can be constructed using such special values
/// by providing them to the `SharedPtr::new` function.
///
/// # Requirements
///
/// If an SBI function needs to pass a shared memory physical address range to
/// the SBI implementation (or higher privilege mode), then this physical memory
/// address range MUST satisfy the following requirements:
///
/// * The SBI implementation MUST check that the supervisor-mode software is
///   allowed to access the specified physical memory range with the access
///   type requested (read and/or write).
/// * The SBI implementation MUST access the specified physical memory range
///   using the PMA attributes.
/// * The data in the shared memory MUST follow little-endian byte ordering.
///
/// *NOTE:* If the supervisor-mode software accesses the same physical memory
/// range using a memory type different from the PMA, then a loss of coherence
/// or unexpected memory ordering may occur. The invoking software should
/// follow the rules and sequences defined in the RISC-V Svpbmt specification
/// to prevent the loss of coherence and memory ordering.
///
/// It is recommended that a memory physical address passed to an SBI function
/// should use at least two `usize` parameters to support platforms
/// which have memory physical addresses wider than `XLEN` bits.
// FIXME: should constrain with `T: Thin` once ptr_metadata feature is stabled;
// RISC-V SBI does not provide an approach to pass pointer metadata by SBI calls.
pub struct SharedPtr<T> {
    // Low part of the physical address.
    phys_addr_lo: usize,
    // High part of the physical address.
    phys_addr_hi: usize,
    // Thin raw-pointer marker; no `T` value is ever stored.
    _marker: PhantomData<*mut T>,
}
  1082. // FIXME: we should consider strict provenance rules for this pointer-like structure
  1083. // once feature strict_provenance is stabled.
  1084. impl<T> SharedPtr<T> {
  1085. /// Create a shared physical memory pointer by physical address.
  1086. #[inline]
  1087. pub const fn new(phys_addr_lo: usize, phys_addr_hi: usize) -> Self {
  1088. Self {
  1089. phys_addr_lo,
  1090. phys_addr_hi,
  1091. _marker: PhantomData,
  1092. }
  1093. }
  1094. /// Returns low-part physical address of the shared physical memory pointer.
  1095. #[inline]
  1096. pub const fn phys_addr_lo(self) -> usize {
  1097. self.phys_addr_lo
  1098. }
  1099. /// Returns high-part physical address of the shared physical memory pointer.
  1100. #[inline]
  1101. pub const fn phys_addr_hi(self) -> usize {
  1102. self.phys_addr_hi
  1103. }
  1104. }
// Manual impls (instead of `#[derive]`) so `SharedPtr<T>` is `Clone`/`Copy`
// for *every* `T`: a derive would add unwanted `T: Clone`/`T: Copy` bounds,
// while the struct itself only stores two `usize` words.
impl<T> Clone for SharedPtr<T> {
    #[inline(always)]
    fn clone(&self) -> Self {
        *self
    }
}
impl<T> Copy for SharedPtr<T> {}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn rustsbi_hart_mask() {
        // Single bit set at the base hart id.
        let mask = HartMask::from_mask_base(0b1, 400);
        assert!(!mask.has_bit(0));
        assert!(mask.has_bit(400));
        assert!(!mask.has_bit(401));
        // Bits are offsets from the base.
        let mask = HartMask::from_mask_base(0b110, 500);
        assert!(!mask.has_bit(0));
        assert!(!mask.has_bit(500));
        assert!(mask.has_bit(501));
        assert!(mask.has_bit(502));
        assert!(!mask.has_bit(500 + (usize::BITS as usize)));
        // The topmost bit of the mask word is still addressable;
        // one past it is out of range.
        let max_bit = 1 << (usize::BITS - 1);
        let mask = HartMask::from_mask_base(max_bit, 600);
        assert!(mask.has_bit(600 + (usize::BITS as usize) - 1));
        assert!(!mask.has_bit(600 + (usize::BITS as usize)));
        // Base near usize::MAX: ids up to usize::MAX itself are reachable.
        let mask = HartMask::from_mask_base(0b11, usize::MAX - 1);
        assert!(!mask.has_bit(usize::MAX - 2));
        assert!(mask.has_bit(usize::MAX - 1));
        assert!(mask.has_bit(usize::MAX));
        assert!(!mask.has_bit(0));
        // hart_mask_base == usize::MAX is special, it means hart_mask should be ignored
        // and this hart mask contains all harts available
        let mask = HartMask::from_mask_base(0, usize::MAX);
        for i in 0..5 {
            assert!(mask.has_bit(i));
        }
        assert!(mask.has_bit(usize::MAX));
        // Mutation through `insert` and `remove` round-trips a single bit.
        let mut mask = HartMask::from_mask_base(0, 1);
        assert!(!mask.has_bit(1));
        assert!(mask.insert(1).is_ok());
        assert!(mask.has_bit(1));
        assert!(mask.remove(1).is_ok());
        assert!(!mask.has_bit(1));
    }

    #[test]
    fn rustsbi_hart_ids_iterator() {
        let mask = HartMask::from_mask_base(0b101011, 1);
        // Test the `next` method of `HartIds` structure.
        let mut hart_ids = mask.iter();
        assert_eq!(hart_ids.next(), Some(1));
        assert_eq!(hart_ids.next(), Some(2));
        assert_eq!(hart_ids.next(), Some(4));
        assert_eq!(hart_ids.next(), Some(6));
        assert_eq!(hart_ids.next(), None);
        // `HartIds` structures are fused, meaning they return `None` forever once iteration finished.
        assert_eq!(hart_ids.next(), None);
        // Test `for` loop on mask (`HartMask`) as `IntoIterator`.
        let mut ans = [0; 4];
        let mut idx = 0;
        for hart_id in mask {
            ans[idx] = hart_id;
            idx += 1;
        }
        assert_eq!(ans, [1, 2, 4, 6]);
        // Test `Iterator` methods on `HartIds`.
        let mut hart_ids = mask.iter();
        assert_eq!(hart_ids.size_hint(), (4, Some(4)));
        let _ = hart_ids.next();
        assert_eq!(hart_ids.size_hint(), (3, Some(3)));
        let _ = hart_ids.next();
        let _ = hart_ids.next();
        assert_eq!(hart_ids.size_hint(), (1, Some(1)));
        let _ = hart_ids.next();
        assert_eq!(hart_ids.size_hint(), (0, Some(0)));
        let _ = hart_ids.next();
        assert_eq!(hart_ids.size_hint(), (0, Some(0)));
        // `count` consumes a copy; `HartIds` is `Copy`, so `mask.iter()` is re-created.
        let mut hart_ids = mask.iter();
        assert_eq!(hart_ids.count(), 4);
        let _ = hart_ids.next();
        assert_eq!(hart_ids.count(), 3);
        let _ = hart_ids.next();
        let _ = hart_ids.next();
        let _ = hart_ids.next();
        assert_eq!(hart_ids.count(), 0);
        let _ = hart_ids.next();
        assert_eq!(hart_ids.count(), 0);
        let hart_ids = mask.iter();
        assert_eq!(hart_ids.last(), Some(6));
        let mut hart_ids = mask.iter();
        assert_eq!(hart_ids.nth(2), Some(4));
        let mut hart_ids = mask.iter();
        assert_eq!(hart_ids.nth(0), Some(1));
        // Standard adaptors compose with `HartIds` as expected.
        let mut iter = mask.iter().step_by(2);
        assert_eq!(iter.next(), Some(1));
        assert_eq!(iter.next(), Some(4));
        assert_eq!(iter.next(), None);
        let mask_2 = HartMask::from_mask_base(0b1001101, 64);
        let mut iter = mask.iter().chain(mask_2);
        assert_eq!(iter.next(), Some(1));
        assert_eq!(iter.next(), Some(2));
        assert_eq!(iter.next(), Some(4));
        assert_eq!(iter.next(), Some(6));
        assert_eq!(iter.next(), Some(64));
        assert_eq!(iter.next(), Some(66));
        assert_eq!(iter.next(), Some(67));
        assert_eq!(iter.next(), Some(70));
        assert_eq!(iter.next(), None);
        let mut iter = mask.iter().zip(mask_2);
        assert_eq!(iter.next(), Some((1, 64)));
        assert_eq!(iter.next(), Some((2, 66)));
        assert_eq!(iter.next(), Some((4, 67)));
        assert_eq!(iter.next(), Some((6, 70)));
        assert_eq!(iter.next(), None);
        fn to_plic_context_id(hart_id_machine: usize) -> usize {
            hart_id_machine * 2
        }
        let mut iter = mask.iter().map(to_plic_context_id);
        assert_eq!(iter.next(), Some(2));
        assert_eq!(iter.next(), Some(4));
        assert_eq!(iter.next(), Some(8));
        assert_eq!(iter.next(), Some(12));
        assert_eq!(iter.next(), None);
        let mut channel_received = [0; 4];
        let mut idx = 0;
        let mut channel_send = |hart_id| {
            channel_received[idx] = hart_id;
            idx += 1;
        };
        mask.iter().for_each(|value| channel_send(value));
        assert_eq!(channel_received, [1, 2, 4, 6]);
        let is_in_cluster_1 = |hart_id: &usize| *hart_id >= 4 && *hart_id < 7;
        let mut iter = mask.iter().filter(is_in_cluster_1);
        assert_eq!(iter.next(), Some(4));
        assert_eq!(iter.next(), Some(6));
        assert_eq!(iter.next(), None);
        let if_in_cluster_1_get_plic_context_id = |hart_id: usize| {
            if hart_id >= 4 && hart_id < 7 {
                Some(hart_id * 2)
            } else {
                None
            }
        };
        let mut iter = mask.iter().filter_map(if_in_cluster_1_get_plic_context_id);
        assert_eq!(iter.next(), Some(8));
        assert_eq!(iter.next(), Some(12));
        assert_eq!(iter.next(), None);
        let mut iter = mask.iter().enumerate();
        assert_eq!(iter.next(), Some((0, 1)));
        assert_eq!(iter.next(), Some((1, 2)));
        assert_eq!(iter.next(), Some((2, 4)));
        assert_eq!(iter.next(), Some((3, 6)));
        assert_eq!(iter.next(), None);
        let mut ans = [(0, 0); 4];
        let mut idx = 0;
        for (i, hart_id) in mask.iter().enumerate() {
            ans[idx] = (i, hart_id);
            idx += 1;
        }
        assert_eq!(ans, [(0, 1), (1, 2), (2, 4), (3, 6)]);
        let mut iter = mask.iter().peekable();
        assert_eq!(iter.peek(), Some(&1));
        assert_eq!(iter.next(), Some(1));
        assert_eq!(iter.peek(), Some(&2));
        assert_eq!(iter.next(), Some(2));
        assert_eq!(iter.peek(), Some(&4));
        assert_eq!(iter.next(), Some(4));
        assert_eq!(iter.peek(), Some(&6));
        assert_eq!(iter.next(), Some(6));
        assert_eq!(iter.peek(), None);
        assert_eq!(iter.next(), None);
        // TODO: other iterator tests.
        assert!(mask.iter().is_sorted());
        assert!(mask.iter().is_sorted_by(|a, b| a <= b));
        // Reverse iterator as `DoubleEndedIterator`.
        let mut iter = mask.iter().rev();
        assert_eq!(iter.next(), Some(6));
        assert_eq!(iter.next(), Some(4));
        assert_eq!(iter.next(), Some(2));
        assert_eq!(iter.next(), Some(1));
        assert_eq!(iter.next(), None);
        // Special iterator values.
        let nothing = HartMask::from_mask_base(0, 1000);
        assert!(nothing.iter().eq([]));
        let all_mask_bits_set = HartMask::from_mask_base(usize::MAX, 1000);
        let range = 1000..(1000 + usize::BITS as usize);
        assert!(all_mask_bits_set.iter().eq(range));
        let all_harts = HartMask::all();
        let mut iter = all_harts.iter();
        assert_eq!(iter.size_hint(), (usize::MAX, Some(usize::MAX)));
        // Don't use `Iterator::eq` here; it would literally run `Iterator::try_for_each` from 0 to usize::MAX
        // which will cost us forever to run the test.
        assert_eq!(iter.next(), Some(0));
        assert_eq!(iter.size_hint(), (usize::MAX - 1, Some(usize::MAX - 1)));
        assert_eq!(iter.next(), Some(1));
        assert_eq!(iter.next(), Some(2));
        let mut iter = iter.skip(500);
        assert_eq!(iter.next(), Some(503));
        assert_eq!(iter.size_hint(), (usize::MAX - 504, Some(usize::MAX - 504)));
        // Going backward, the `Range` variant yields its upper bound first.
        assert_eq!(iter.next_back(), Some(usize::MAX));
        assert_eq!(iter.next_back(), Some(usize::MAX - 1));
        assert_eq!(iter.size_hint(), (usize::MAX - 506, Some(usize::MAX - 506)));
        // A common usage of `HartMask::all`, we assume that this platform filters out hart 0..=3.
        let environment_available_hart_ids = 4..128;
        // `iter` contains 64..=usize::MAX.
        let hart_mask_iter = all_harts.iter().skip(64);
        let mut iter_peekable = hart_mask_iter.peekable();
        let filtered_iter = environment_available_hart_ids.filter(|&x| {
            while let Some(&y) = iter_peekable.peek() {
                match y.cmp(&x) {
                    core::cmp::Ordering::Equal => return true,
                    core::cmp::Ordering::Greater => break,
                    core::cmp::Ordering::Less => iter_peekable.next(),
                };
            }
            false
        });
        assert!(filtered_iter.eq(64..128));
    }

    #[test]
    fn rustsbi_counter_index_mask() {
        // Mirrors `rustsbi_hart_mask`: `CounterMask` shares the same bit semantics.
        let mask = CounterMask::from_mask_base(0b1, 400);
        assert!(!mask.has_bit(0));
        assert!(mask.has_bit(400));
        assert!(!mask.has_bit(401));
        let mask = CounterMask::from_mask_base(0b110, 500);
        assert!(!mask.has_bit(0));
        assert!(!mask.has_bit(500));
        assert!(mask.has_bit(501));
        assert!(mask.has_bit(502));
        assert!(!mask.has_bit(500 + (usize::BITS as usize)));
        let max_bit = 1 << (usize::BITS - 1);
        let mask = CounterMask::from_mask_base(max_bit, 600);
        assert!(mask.has_bit(600 + (usize::BITS as usize) - 1));
        assert!(!mask.has_bit(600 + (usize::BITS as usize)));
        let mask = CounterMask::from_mask_base(0b11, usize::MAX - 1);
        assert!(!mask.has_bit(usize::MAX - 2));
        assert!(mask.has_bit(usize::MAX - 1));
        assert!(mask.has_bit(usize::MAX));
        assert!(!mask.has_bit(0));
        // Base == usize::MAX ignores the mask (all counters selected);
        // base == 0 with an empty mask selects nothing.
        let mask = CounterMask::from_mask_base(0, usize::MAX);
        let null_mask = CounterMask::from_mask_base(0, 0);
        (0..=usize::BITS as usize).for_each(|i| {
            assert!(mask.has_bit(i));
            assert!(!null_mask.has_bit(i));
        });
        assert!(mask.has_bit(usize::MAX));
    }
}