//! NAPI (New API) style network receive polling, serviced by a dedicated
//! kernel thread (`napi_handler`) that emulates Linux's ksoftirqd behavior.
  1. use crate::driver::net::Iface;
  2. use crate::init::initcall::INITCALL_SUBSYS;
  3. use crate::libs::spinlock::{SpinLock, SpinLockGuard};
  4. use crate::libs::wait_queue::WaitQueue;
  5. use crate::process::kthread::{KernelThreadClosure, KernelThreadMechanism};
  6. use crate::process::ProcessState;
  7. use alloc::boxed::Box;
  8. use alloc::string::ToString;
  9. use alloc::sync::{Arc, Weak};
  10. use alloc::vec::Vec;
  11. use core::sync::atomic::{AtomicBool, AtomicU32, Ordering};
  12. use system_error::SystemError;
  13. use unified_init::macros::unified_init;
lazy_static! {
    // TODO: following the softirq design, this should be one list per CPU;
    // only a single-CPU version is implemented for now.
    static ref GLOBAL_NAPI_MANAGER: Arc<NapiManager> =
        NapiManager::new();
}
/// # NAPI structure
///
/// One polling context per network interface, mirroring Linux's
/// `struct napi_struct`:
/// https://elixir.bootlin.com/linux/v6.13/source/include/linux/netdevice.h#L359
#[derive(Debug)]
pub struct NapiStruct {
    /// State bits of this NAPI instance (see [`NapiState`]).
    pub state: AtomicU32,
    /// Weight (budget): the maximum number of packets handled per poll round.
    pub weight: usize,
    /// Unique id of this instance (taken from the NIC id in `new`).
    pub napi_id: usize,
    /// Weak reference to the owning network device, so the NAPI instance
    /// does not keep a removed interface alive.
    pub net_device: Weak<dyn Iface>,
}
  33. impl NapiStruct {
  34. pub fn new(net_device: Arc<dyn Iface>, weight: usize) -> Arc<Self> {
  35. Arc::new(Self {
  36. state: AtomicU32::new(NapiState::empty().bits()),
  37. weight,
  38. napi_id: net_device.nic_id(),
  39. net_device: Arc::downgrade(&net_device),
  40. })
  41. }
  42. pub fn poll(&self) -> bool {
  43. // log::info!("NAPI instance {} polling", self.napi_id);
  44. // 获取网卡的强引用
  45. if let Some(iface) = self.net_device.upgrade() {
  46. // 这里的weight原意是此次执行可以处理的包,如果超过了这个数就交给专门的内核线程(ksoftirqd)继续处理
  47. // 但目前我们就是在相当于ksoftirqd里面处理,如果在weight之内发现没数据包被处理了,在直接返回
  48. // 如果超过weight,返回true,表示还有工作没做完,会在下一次轮询继续处理
  49. // 因此语义是相同的
  50. for _ in 0..self.weight {
  51. if !iface.poll() {
  52. return false;
  53. }
  54. }
  55. } else {
  56. log::error!(
  57. "NAPI instance {}: associated net device is gone",
  58. self.napi_id
  59. );
  60. }
  61. true
  62. }
  63. }
bitflags! {
    /// # NAPI state flags
    ///
    /// https://elixir.bootlin.com/linux/v6.13/source/include/linux/netdevice.h#L398
    pub struct NapiState: u32 {
        /// Poll is scheduled. The core state: the instance sits in some
        /// CPU's poll_list waiting to be processed.
        const SCHED = 1 << 0;
        /// Missed a poll: new data arrived after the instance was scheduled
        /// but before it was actually processed.
        const MISSED = 1 << 1;
        /// Disable pending: the instance is being disabled and must not be
        /// scheduled again.
        const DISABLE = 1 << 2;
        const NPSVC = 1 << 3;
        /// NAPI added to system lists: the instance is registered with a
        /// device.
        const LISTED = 1 << 4;
        const NO_BUSY_POLL = 1 << 5;
        const IN_BUSY_POLL = 1 << 6;
        const PREFER_BUSY_POLL = 1 << 7;
        /// The poll is performed inside its own thread — an optional
        /// advanced feature where a dedicated kernel thread services this
        /// instance.
        const THREADED = 1 << 8;
        const SCHED_THREADED = 1 << 9;
    }
}
  88. #[inline(never)]
  89. #[unified_init(INITCALL_SUBSYS)]
  90. pub fn napi_init() -> Result<(), SystemError> {
  91. // 软中断做法
  92. // let napi_handler = Arc::new(NapiSoftirq::default());
  93. // softirq_vectors()
  94. // .register_softirq(SoftirqNumber::NetReceive, napi_handler)
  95. // .expect("Failed to register napi softirq");
  96. // 软中断的方式无法唤醒 :(
  97. // 使用一个专门的内核线程来处理NAPI轮询,模拟软中断的行为,相当于ksoftirq :)
  98. let closure: Box<dyn Fn() -> i32 + Send + Sync + 'static> = Box::new(move || {
  99. net_rx_action();
  100. 0
  101. });
  102. let closure = KernelThreadClosure::EmptyClosure((closure, ()));
  103. let name = "napi_handler".to_string();
  104. let _pcb = KernelThreadMechanism::create_and_run(closure, name)
  105. .ok_or("")
  106. .expect("create napi_handler thread failed");
  107. log::info!("napi initialized successfully");
  108. Ok(())
  109. }
/// Main loop of the `napi_handler` kernel thread (the analogue of
/// ksoftirqd's NET_RX action): drain the globally scheduled NAPI instances,
/// poll each one, and sleep on the manager's wait queue when idle.
fn net_rx_action() {
    use crate::sched::SchedMode;
    loop {
        // Snapshot and clear the global napi_list so the spinlock is held
        // only briefly, not across the polling work.
        let mut inner = GLOBAL_NAPI_MANAGER.inner();
        let mut poll_list = inner.napi_list.clone();
        inner.napi_list.clear();
        drop(inner);
        // Nothing scheduled: clear the pending flag so the wait below blocks
        // until napi_schedule() raises it again.
        // NOTE(review): napi_schedule() may run between the snapshot above
        // and this store (pushing an entry and setting the flag); the store
        // would then clear the flag and delay that entry until the next
        // wakeup — looks like a lost-wakeup window, verify.
        if poll_list.is_empty() {
            GLOBAL_NAPI_MANAGER
                .inner()
                .has_pending_signal
                .store(false, Ordering::SeqCst);
        }
        // Poll every snapshotted instance. An instance whose budget was
        // exhausted (poll() == true) is pushed back onto the same Vec and —
        // since pop/push both act on the tail — is retried immediately, so
        // this loop runs until every instance reports no work left.
        while let Some(napi) = poll_list.pop() {
            let has_work_left = napi.poll();
            if has_work_left {
                poll_list.push(napi);
            } else {
                // Out of packets: clear SCHED so the instance can be
                // scheduled again.
                napi_complete(napi);
            }
        }
        // Re-queue instances that still have pending work for the next
        // wakeup.
        // NOTE(review): the while-let above only exits once poll_list is
        // empty, so this branch appears unreachable as written — confirm.
        if !poll_list.is_empty() {
            GLOBAL_NAPI_MANAGER.inner().napi_list.extend(poll_list);
        }
        // Block until napi_schedule() signals new work.
        let _ = wq_wait_event_interruptible!(
            GLOBAL_NAPI_MANAGER.wait_queue(),
            GLOBAL_NAPI_MANAGER
                .inner()
                .has_pending_signal
                .load(Ordering::SeqCst),
            {}
        );
    }
}
  150. /// 标记这个napi任务已经完成
  151. pub fn napi_complete(napi: Arc<NapiStruct>) {
  152. napi.state
  153. .fetch_and(!NapiState::SCHED.bits(), Ordering::SeqCst);
  154. }
  155. /// 标记这个napi任务加入处理队列,已被调度
  156. pub fn napi_schedule(napi: Arc<NapiStruct>) {
  157. let current_state = NapiState::from_bits_truncate(
  158. napi.state
  159. .fetch_or(NapiState::SCHED.bits(), Ordering::SeqCst),
  160. );
  161. if !current_state.contains(NapiState::SCHED) {
  162. let new_state = current_state.union(NapiState::SCHED);
  163. // log::info!("NAPI instance {} scheduled", napi.napi_id);
  164. napi.state.store(new_state.bits(), Ordering::SeqCst);
  165. }
  166. let mut inner = GLOBAL_NAPI_MANAGER.inner();
  167. inner.napi_list.push(napi);
  168. inner.has_pending_signal.store(true, Ordering::SeqCst);
  169. GLOBAL_NAPI_MANAGER.wakeup();
  170. // softirq_vectors().raise_softirq(SoftirqNumber::NetReceive);
  171. }
  172. pub struct NapiManager {
  173. inner: SpinLock<NapiManagerInner>,
  174. wait_queue: WaitQueue,
  175. }
  176. impl NapiManager {
  177. pub fn new() -> Arc<Self> {
  178. let inner = SpinLock::new(NapiManagerInner {
  179. has_pending_signal: AtomicBool::new(false),
  180. napi_list: Vec::new(),
  181. });
  182. Arc::new(Self {
  183. inner,
  184. wait_queue: WaitQueue::default(),
  185. })
  186. }
  187. pub fn inner(&self) -> SpinLockGuard<'_, NapiManagerInner> {
  188. self.inner.lock()
  189. }
  190. pub fn wait_queue(&self) -> &WaitQueue {
  191. &self.wait_queue
  192. }
  193. pub fn wakeup(&self) {
  194. self.wait_queue.wakeup(Some(ProcessState::Blocked(true)));
  195. }
  196. }
/// Lock-protected state of [`NapiManager`].
pub struct NapiManagerInner {
    // Set by napi_schedule(), cleared by net_rx_action() when the list runs
    // empty; this is the condition the napi_handler thread waits on.
    has_pending_signal: AtomicBool,
    // NAPI instances currently scheduled for polling.
    napi_list: Vec<Arc<NapiStruct>>,
}
  201. // 下面的是软中断的做法,无法唤醒,做个记录
  202. // #[derive(Debug)]
  203. // pub struct NapiSoftirq {
  204. // running: AtomicBool,
  205. // }
  206. // impl Default for NapiSoftirq {
  207. // fn default() -> Self {
  208. // Self {
  209. // running: AtomicBool::new(false),
  210. // }
  211. // }
  212. // }
  213. // impl SoftirqVec for NapiSoftirq {
  214. // fn run(&self) {
  215. // log::info!("NAPI softirq running");
  216. // if self
  217. // .running
  218. // .compare_exchange(
  219. // false,
  220. // true,
  221. // core::sync::atomic::Ordering::SeqCst,
  222. // core::sync::atomic::Ordering::SeqCst,
  223. // )
  224. // .is_ok()
  225. // {
  226. // net_rx_action();
  227. // self.running
  228. // .store(false, core::sync::atomic::Ordering::SeqCst);
  229. // } else {
  230. // log::warn!("NAPI softirq is already running");
  231. // }
  232. // }
  233. // }