//! Hart mask structure for SBI calls (`hart_mask.rs`).
  1. /// Hart mask structure reference
  2. #[derive(Debug, Clone)]
  3. pub struct HartMask {
  4. inner: MaskInner,
  5. }
  6. impl HartMask {
  7. /// Construct a hart mask from mask value and base hart id.
  8. #[inline]
  9. pub fn from_mask_base(hart_mask: usize, hart_mask_base: usize) -> HartMask {
  10. HartMask {
  11. inner: MaskInner::BitVector {
  12. hart_mask,
  13. hart_mask_base,
  14. },
  15. }
  16. }
  17. /// Check if the `hart_id` is included in this hart mask structure.
  18. #[inline]
  19. pub fn has_bit(&self, hart_id: usize) -> bool {
  20. match self.inner {
  21. MaskInner::BitVector {
  22. hart_mask,
  23. hart_mask_base,
  24. } => {
  25. if hart_mask_base == usize::MAX {
  26. // If `hart_mask_base` equals `usize::MAX`, that means `hart_mask` is ignored
  27. // and all available harts must be considered.
  28. return true;
  29. }
  30. let idx = if let Some(idx) = hart_id.checked_sub(hart_mask_base) {
  31. idx
  32. } else {
  33. // hart_id < hart_mask_base, not in current mask range
  34. return false;
  35. };
  36. if idx >= usize::BITS as usize {
  37. // hart_idx >= hart_mask_base + XLEN, not in current mask range
  38. return false;
  39. }
  40. hart_mask & (1 << idx) != 0
  41. }
  42. #[cfg(feature = "legacy")]
  43. MaskInner::Legacy { legacy_bit_vector } => {
  44. slow_legacy_has_bit(legacy_bit_vector, hart_id)
  45. }
  46. }
  47. }
  48. /// *This is a legacy function; it should not be used in newer designs. If `vaddr` is invalid
  49. /// from S level, it would result in machine level load access or load misaligned exception.*
  50. ///
  51. /// Construct a hart mask from legacy bit vector and number of harts in current platform.
  52. #[cfg(feature = "legacy")]
  53. #[inline]
  54. pub(crate) unsafe fn legacy_from_addr(vaddr: usize) -> HartMask {
  55. HartMask {
  56. inner: MaskInner::Legacy {
  57. legacy_bit_vector: vaddr as *const _,
  58. },
  59. }
  60. }
  61. }
  62. #[derive(Debug, Clone)]
  63. enum MaskInner {
  64. BitVector {
  65. hart_mask: usize,
  66. hart_mask_base: usize,
  67. },
  68. #[cfg(feature = "legacy")]
  69. Legacy { legacy_bit_vector: *const usize },
  70. }
  71. // not #[inline] to speed up new version bit vector
  72. #[cfg(feature = "legacy")]
  73. fn slow_legacy_has_bit(legacy_bit_vector: *const usize, hart_id: usize) -> bool {
  74. fn split_index_usize(index: usize) -> (usize, usize) {
  75. let bits_in_usize = usize::BITS as usize;
  76. (index / bits_in_usize, index % bits_in_usize)
  77. }
  78. let (i, j) = split_index_usize(hart_id);
  79. let cur_vector = unsafe { get_vaddr_usize(legacy_bit_vector.add(i)) };
  80. cur_vector & (1 << j) != 0
  81. }
/// Load a `usize` through a supervisor-level virtual address while running
/// in machine mode.
///
/// # Safety
///
/// `vaddr_ptr` must be valid for a `usize` read under the current S-level
/// address translation; otherwise the load raises a machine-level load
/// access or load misaligned exception (see `legacy_from_addr`).
#[cfg(feature = "legacy")]
#[inline]
unsafe fn get_vaddr_usize(vaddr_ptr: *const usize) -> usize {
    match () {
        #[cfg(target_arch = "riscv32")]
        () => {
            let mut ans: usize;
            // Set mstatus bit 17 (MPRV) so the load is translated and
            // permission-checked as the previous privilege mode; `csrrs`
            // returns the old mstatus in {tmp}, and the final `csrw`
            // restores it after the 32-bit load.
            core::arch::asm!("
                li {tmp}, (1 << 17)
                csrrs {tmp}, mstatus, {tmp}
                lw {ans}, 0({vmem})
                csrw mstatus, {tmp}
            ", ans = lateout(reg) ans, vmem = in(reg) vaddr_ptr, tmp = out(reg) _);
            ans
        }
        #[cfg(target_arch = "riscv64")]
        () => {
            let mut ans: usize;
            // Same MPRV save/set/restore dance as riscv32, but with a
            // 64-bit `ld` instead of `lw`.
            core::arch::asm!("
                li {tmp}, (1 << 17)
                csrrs {tmp}, mstatus, {tmp}
                ld {ans}, 0({vmem})
                csrw mstatus, {tmp}
            ", ans = lateout(reg) ans, vmem = in(reg) vaddr_ptr, tmp = out(reg) _);
            ans
        }
        #[cfg(not(any(target_arch = "riscv32", target_arch = "riscv64")))]
        () => {
            // Non-RISC-V targets: consume the pointer to avoid an unused
            // warning, then fail loudly — there is no mstatus/MPRV here.
            drop(vaddr_ptr);
            unimplemented!("not RISC-V instruction set architecture")
        }
    }
}
  115. #[cfg(test)]
  116. mod tests {
  117. use super::HartMask;
  118. #[test]
  119. fn rustsbi_hart_mask() {
  120. let mask = HartMask::from_mask_base(0b1, 400);
  121. assert!(!mask.has_bit(0));
  122. assert!(mask.has_bit(400));
  123. assert!(!mask.has_bit(401));
  124. let mask = HartMask::from_mask_base(0b110, 500);
  125. assert!(!mask.has_bit(0));
  126. assert!(!mask.has_bit(500));
  127. assert!(mask.has_bit(501));
  128. assert!(mask.has_bit(502));
  129. assert!(!mask.has_bit(500 + (usize::BITS as usize)));
  130. let max_bit = 1 << (usize::BITS - 1);
  131. let mask = HartMask::from_mask_base(max_bit, 600);
  132. assert!(mask.has_bit(600 + (usize::BITS as usize) - 1));
  133. assert!(!mask.has_bit(600 + (usize::BITS as usize)));
  134. let mask = HartMask::from_mask_base(0b11, usize::MAX - 1);
  135. assert!(!mask.has_bit(usize::MAX - 2));
  136. assert!(mask.has_bit(usize::MAX - 1));
  137. assert!(mask.has_bit(usize::MAX));
  138. assert!(!mask.has_bit(0));
  139. // hart_mask_base == usize::MAX is special, it means hart_mask should be ignored
  140. // and this hart mask contains all harts available
  141. let mask = HartMask::from_mask_base(0, usize::MAX);
  142. for i in 0..5 {
  143. assert!(mask.has_bit(i));
  144. }
  145. assert!(mask.has_bit(usize::MAX));
  146. }
  147. }