rfence.rs

use rustsbi::{HartMask, SbiRet};
use spin::Mutex;

use crate::board::SBI_IMPL;
use crate::riscv_spec::current_hartid;
use crate::sbi::fifo::{Fifo, FifoError};
use crate::sbi::trap;
use crate::sbi::trap_stack::ROOT_STACK;

use core::sync::atomic::{AtomicU32, Ordering};

/// Cell for managing remote fence operations between harts.
pub(crate) struct RFenceCell {
    // Queue of fence operations with source hart ID.
    queue: Mutex<Fifo<(RFenceContext, usize)>>,
    // Counter for tracking pending synchronization operations.
    wait_sync_count: AtomicU32,
}

/// Context information for a remote fence operation.
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct RFenceContext {
    /// Start address of memory region to fence.
    pub start_addr: usize,
    /// Size of memory region to fence.
    pub size: usize,
    /// Address space ID.
    pub asid: usize,
    /// Virtual machine ID.
    pub vmid: usize,
    /// Type of fence operation.
    pub op: RFenceType,
}

/// Types of remote fence operations supported.
#[allow(unused)]
#[derive(Clone, Copy, Debug)]
pub enum RFenceType {
    /// Instruction fence (`FENCE.I`).
    FenceI,
    /// Supervisor fence for virtual memory (`SFENCE.VMA`).
    SFenceVma,
    /// Supervisor fence for virtual memory, restricted to one ASID.
    SFenceVmaAsid,
    /// Hypervisor fence for guest-physical (G-stage) translations, restricted to one VMID.
    HFenceGvmaVmid,
    /// Hypervisor fence for guest-physical (G-stage) translations (`HFENCE.GVMA`).
    HFenceGvma,
    /// Hypervisor fence for guest-virtual (VS-stage) translations, restricted to one ASID.
    HFenceVvmaAsid,
    /// Hypervisor fence for guest-virtual (VS-stage) translations (`HFENCE.VVMA`).
    HFenceVvma,
}

impl RFenceCell {
    /// Creates a new `RFenceCell` with an empty queue and a zero sync count.
    pub fn new() -> Self {
        Self {
            queue: Mutex::new(Fifo::new()),
            wait_sync_count: AtomicU32::new(0),
        }
    }

    /// Gets a local view of this fence cell for the current hart.
    #[inline]
    pub fn local(&self) -> LocalRFenceCell<'_> {
        LocalRFenceCell(self)
    }

    /// Gets a remote view of this fence cell for accessing from other harts.
    #[inline]
    pub fn remote(&self) -> RemoteRFenceCell<'_> {
        RemoteRFenceCell(self)
    }
}

// Safety: the queue is guarded by a spin lock and the counter is atomic, so the
// cell can be shared and sent between harts.
unsafe impl Sync for RFenceCell {}
unsafe impl Send for RFenceCell {}

/// View of an `RFenceCell` for operations on the current hart.
pub struct LocalRFenceCell<'a>(&'a RFenceCell);

/// View of an `RFenceCell` for operations issued from other harts.
pub struct RemoteRFenceCell<'a>(&'a RFenceCell);

/// Gets the local fence context for the current hart.
pub(crate) fn local_rfence() -> Option<LocalRFenceCell<'static>> {
    unsafe {
        ROOT_STACK
            .get_mut(current_hartid())
            .map(|x| x.hart_context().rfence.local())
    }
}

/// Gets the remote fence context for a specific hart.
pub(crate) fn remote_rfence(hart_id: usize) -> Option<RemoteRFenceCell<'static>> {
    unsafe {
        ROOT_STACK
            .get_mut(hart_id)
            .map(|x| x.hart_context().rfence.remote())
    }
}

#[allow(unused)]
impl LocalRFenceCell<'_> {
    /// Checks if all synchronization operations are complete.
    pub fn is_sync(&self) -> bool {
        self.0.wait_sync_count.load(Ordering::Relaxed) == 0
    }

    /// Increments the synchronization counter.
    pub fn add(&self) {
        self.0.wait_sync_count.fetch_add(1, Ordering::Relaxed);
    }

    /// Checks if the operation queue is empty.
    pub fn is_empty(&self) -> bool {
        self.0.queue.lock().is_empty()
    }

    /// Gets the next fence operation from the queue.
    pub fn get(&self) -> Option<(RFenceContext, usize)> {
        self.0.queue.lock().pop().ok()
    }

    /// Adds a fence operation to the queue, retrying if it is full.
    pub fn set(&self, ctx: RFenceContext) {
        let hart_id = current_hartid();
        loop {
            let mut queue = self.0.queue.lock();
            match queue.push((ctx, hart_id)) {
                Ok(_) => break,
                Err(FifoError::Full) => {
                    // The queue is full: release the lock, handle one pending
                    // fence operation locally, then retry the push.
                    drop(queue);
                    trap::rfence_single_handler();
                }
                Err(_) => panic!("Unable to push fence ops to fifo"),
            }
        }
    }
}

#[allow(unused)]
impl RemoteRFenceCell<'_> {
    /// Adds a fence operation to this cell's queue on behalf of the current (requesting) hart.
    pub fn set(&self, ctx: RFenceContext) {
        let hart_id = current_hartid();
        loop {
            let mut queue = self.0.queue.lock();
            match queue.push((ctx, hart_id)) {
                Ok(_) => return,
                Err(FifoError::Full) => {
                    // The target's queue is full: handle one of this hart's own
                    // pending fence operations before retrying the push.
                    drop(queue);
                    trap::rfence_single_handler();
                }
                Err(_) => panic!("Unable to push fence ops to fifo"),
            }
        }
    }

    /// Decrements the synchronization counter.
    pub fn sub(&self) {
        self.0.wait_sync_count.fetch_sub(1, Ordering::Relaxed);
    }
}

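// The handshake implied by the two views above appears to be: the requesting hart
// pushes an `RFenceContext` into each target's queue via `RemoteRFenceCell::set` and
// bumps its own counter once per target with `LocalRFenceCell::add`; each target drains
// its queue (see `trap::rfence_single_handler`), performs the fence, and acknowledges by
// calling `RemoteRFenceCell::sub` on the requester's cell; the requester then waits until
// `LocalRFenceCell::is_sync` reports no outstanding acknowledgements. The exact driver of
// this flow lives outside this module.
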
/// Implementation of RISC-V remote fence operations.
pub(crate) struct SbiRFence;

/// Validates the address range for fence operations.
#[inline(always)]
fn validate_address_range(start_addr: usize, size: usize) -> Result<usize, SbiRet> {
    // Check page alignment using a bitwise AND instead of a modulo.
    if start_addr & 0xFFF != 0 {
        return Err(SbiRet::invalid_address());
    }
    // Avoid checked_add by checking for overflow directly.
    if size > usize::MAX - start_addr {
        return Err(SbiRet::invalid_address());
    }
    Ok(size)
}

/// Processes a remote fence operation by sending an IPI to the target harts.
fn remote_fence_process(rfence_ctx: RFenceContext, hart_mask: HartMask) -> SbiRet {
    unsafe { SBI_IMPL.assume_init_mut() }
        .ipi
        .as_ref()
        .unwrap()
        .send_ipi_by_fence(hart_mask, rfence_ctx)
}

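// All SBI rfence calls below funnel through `remote_fence_process`; the platform IPI
// implementation behind `send_ipi_by_fence` is expected to deliver the context to each
// hart selected by `hart_mask` and raise the corresponding software interrupts, but that
// behavior is defined outside this module.
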
impl rustsbi::Fence for SbiRFence {
    /// Remote instruction fence for specified harts.
    fn remote_fence_i(&self, hart_mask: HartMask) -> SbiRet {
        remote_fence_process(
            RFenceContext {
                start_addr: 0,
                size: 0,
                asid: 0,
                vmid: 0,
                op: RFenceType::FenceI,
            },
            hart_mask,
        )
    }

    /// Remote supervisor fence for virtual memory on specified harts.
    fn remote_sfence_vma(&self, hart_mask: HartMask, start_addr: usize, size: usize) -> SbiRet {
        let flush_size = match validate_address_range(start_addr, size) {
            Ok(size) => size,
            Err(e) => return e,
        };
        remote_fence_process(
            RFenceContext {
                start_addr,
                size: flush_size,
                asid: 0,
                vmid: 0,
                op: RFenceType::SFenceVma,
            },
            hart_mask,
        )
    }

    /// Remote supervisor fence for virtual memory with ASID on specified harts.
    fn remote_sfence_vma_asid(
        &self,
        hart_mask: HartMask,
        start_addr: usize,
        size: usize,
        asid: usize,
    ) -> SbiRet {
        let flush_size = match validate_address_range(start_addr, size) {
            Ok(size) => size,
            Err(e) => return e,
        };
        remote_fence_process(
            RFenceContext {
                start_addr,
                size: flush_size,
                asid,
                vmid: 0,
                op: RFenceType::SFenceVmaAsid,
            },
            hart_mask,
        )
    }
}
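
// A minimal sanity-check sketch for `validate_address_range`, assuming the crate can be
// built with a host-side test harness; the module is compiled out of normal firmware
// builds by `cfg(test)`. The addresses used below are arbitrary examples.
#[cfg(test)]
mod tests {
    use super::validate_address_range;

    #[test]
    fn page_aligned_range_is_accepted() {
        // A 4 KiB-aligned base with an in-range size passes validation and
        // returns the size unchanged.
        assert_eq!(
            validate_address_range(0x8000_0000, 0x4000).ok(),
            Some(0x4000)
        );
    }

    #[test]
    fn misaligned_or_overflowing_range_is_rejected() {
        // Base address not aligned to a 4 KiB page boundary.
        assert!(validate_address_range(0x8000_0123, 0x1000).is_err());
        // `start_addr + size` would overflow `usize`.
        assert!(validate_address_range(0x1000, usize::MAX).is_err());
    }
}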