//! hart_mask.rs — hart mask structure used by SBI calls.
/// Hart mask structure reference.
///
/// Identifies a set of harts either by a `(mask, base)` bit vector
/// (the modern SBI representation) or by a legacy raw pointer to a
/// bit vector in supervisor memory — see `MaskInner`.
#[derive(Debug, Clone)]
pub struct HartMask {
    // Private representation; constructed via `from_mask_base` or the
    // legacy `legacy_from_addr`.
    inner: MaskInner,
}
  6. impl HartMask {
  7. /// Construct a hart mask from mask value and base hart id.
  8. #[inline]
  9. pub fn from_mask_base(hart_mask: usize, hart_mask_base: usize) -> HartMask {
  10. HartMask {
  11. inner: MaskInner::BitVector {
  12. hart_mask,
  13. hart_mask_base,
  14. },
  15. }
  16. }
  17. /// Check if the `hart_id` is included in this hart mask structure.
  18. #[inline]
  19. pub fn has_bit(&self, hart_id: usize) -> bool {
  20. match self.inner {
  21. MaskInner::BitVector {
  22. hart_mask,
  23. hart_mask_base,
  24. } => {
  25. if hart_mask_base == usize::MAX {
  26. // If `hart_mask_base` equals `usize::MAX`, that means `hart_mask` is ignored
  27. // and all available harts must be considered.
  28. return true;
  29. }
  30. let idx = if let Some(idx) = hart_id.checked_sub(hart_mask_base) {
  31. idx
  32. } else {
  33. // hart_id < hart_mask_base, not in current mask range
  34. return false;
  35. };
  36. if idx >= usize::BITS as usize {
  37. // hart_idx >= hart_mask_base + XLEN, not in current mask range
  38. return false;
  39. }
  40. hart_mask & (1 << idx) != 0
  41. }
  42. MaskInner::Legacy { legacy_bit_vector } => {
  43. slow_legacy_has_bit(legacy_bit_vector, hart_id)
  44. }
  45. }
  46. }
  47. /// *This is a legacy function; it should not be used in newer designs. If `vaddr` is invalid
  48. /// from S level, it would result in machine level load access or load misaligned exception.*
  49. ///
  50. /// Construct a hart mask from legacy bit vector and number of harts in current platform.
  51. #[inline]
  52. pub(crate) unsafe fn legacy_from_addr(vaddr: usize) -> HartMask {
  53. HartMask {
  54. inner: MaskInner::Legacy {
  55. legacy_bit_vector: vaddr as *const _,
  56. },
  57. }
  58. }
  59. }
// Internal representation of a `HartMask`.
#[derive(Debug, Clone)]
enum MaskInner {
    /// Modern representation: bit `i` of `hart_mask` selects hart
    /// `hart_mask_base + i`; `hart_mask_base == usize::MAX` means all harts.
    BitVector {
        hart_mask: usize,
        hart_mask_base: usize,
    },
    /// Legacy representation: a raw pointer to a bit vector in supervisor
    /// memory, dereferenced lazily by `slow_legacy_has_bit`.
    Legacy {
        legacy_bit_vector: *const usize,
    },
}
  70. // not #[inline] to speed up new version bit vector
  71. fn slow_legacy_has_bit(legacy_bit_vector: *const usize, hart_id: usize) -> bool {
  72. fn split_index_usize(index: usize) -> (usize, usize) {
  73. let bits_in_usize = usize::BITS as usize;
  74. (index / bits_in_usize, index % bits_in_usize)
  75. }
  76. let (i, j) = split_index_usize(hart_id);
  77. let cur_vector = unsafe { get_vaddr_usize(legacy_bit_vector.add(i)) };
  78. cur_vector & (1 << j) != 0
  79. }
/// Load a `usize` from a supervisor virtual address while executing in
/// machine mode.
///
/// Sets bit 17 of `mstatus` (MPRV in the RISC-V privileged spec) with `csrrs`
/// so the load is translated as an S-level access, then restores the saved
/// `mstatus` value with `csrw` immediately after the load.
///
/// # Safety
///
/// `vaddr_ptr` must be readable under the current S-level translation; an
/// invalid or misaligned address traps at machine level (see
/// `legacy_from_addr`).
#[inline]
unsafe fn get_vaddr_usize(vaddr_ptr: *const usize) -> usize {
    match () {
        #[cfg(target_arch = "riscv32")]
        () => {
            let mut ans: usize;
            // `csrrs` saves the old `mstatus` into {tmp} while setting bit 17;
            // `lw` performs the 32-bit translated load; `csrw` restores.
            core::arch::asm!("
li {tmp}, (1 << 17)
csrrs {tmp}, mstatus, {tmp}
lw {ans}, 0({vmem})
csrw mstatus, {tmp}
", ans = lateout(reg) ans, vmem = in(reg) vaddr_ptr, tmp = out(reg) _);
            ans
        }
        #[cfg(target_arch = "riscv64")]
        () => {
            let mut ans: usize;
            // Same MPRV dance as above, with a 64-bit `ld` load.
            core::arch::asm!("
li {tmp}, (1 << 17)
csrrs {tmp}, mstatus, {tmp}
ld {ans}, 0({vmem})
csrw mstatus, {tmp}
", ans = lateout(reg) ans, vmem = in(reg) vaddr_ptr, tmp = out(reg) _);
            ans
        }
        #[cfg(not(any(target_arch = "riscv32", target_arch = "riscv64")))]
        () => {
            // Consume the pointer so non-RISC-V builds compile without warnings.
            drop(vaddr_ptr);
            unimplemented!("not RISC-V instruction set architecture")
        }
    }
}
  112. #[cfg(test)]
  113. mod tests {
  114. use super::HartMask;
  115. #[test]
  116. fn rustsbi_hart_mask() {
  117. let mask = HartMask::from_mask_base(0b1, 400);
  118. assert!(!mask.has_bit(0));
  119. assert!(mask.has_bit(400));
  120. assert!(!mask.has_bit(401));
  121. let mask = HartMask::from_mask_base(0b110, 500);
  122. assert!(!mask.has_bit(0));
  123. assert!(!mask.has_bit(500));
  124. assert!(mask.has_bit(501));
  125. assert!(mask.has_bit(502));
  126. assert!(!mask.has_bit(500 + (usize::BITS as usize)));
  127. let max_bit = 1 << (usize::BITS - 1);
  128. let mask = HartMask::from_mask_base(max_bit, 600);
  129. assert!(mask.has_bit(600 + (usize::BITS as usize) - 1));
  130. assert!(!mask.has_bit(600 + (usize::BITS as usize)));
  131. let mask = HartMask::from_mask_base(0b11, usize::MAX - 1);
  132. assert!(!mask.has_bit(usize::MAX - 2));
  133. assert!(mask.has_bit(usize::MAX - 1));
  134. assert!(mask.has_bit(usize::MAX));
  135. assert!(!mask.has_bit(0));
  136. // hart_mask_base == usize::MAX is special, it means hart_mask should be ignored
  137. // and this hart mask contains all harts available
  138. let mask = HartMask::from_mask_base(0, usize::MAX);
  139. for i in 0..5 {
  140. assert!(mask.has_bit(i));
  141. }
  142. assert!(mask.has_bit(usize::MAX));
  143. }
  144. }