use core::{
    intrinsics::unlikely,
    sync::atomic::{compiler_fence, Ordering},
};

use alloc::{sync::Arc, vec::Vec};

use crate::{
    kinfo,
    mm::percpu::PerCpu,
    process::{AtomicPid, Pid, ProcessControlBlock, ProcessFlags, ProcessManager, ProcessState},
    smp::{core::smp_get_processor_id, cpu::ProcessorId},
};

use super::rt::{sched_rt_init, SchedulerRT, __get_rt_scheduler};
use super::{
    cfs::{sched_cfs_init, SchedulerCFS, __get_cfs_scheduler},
    SchedPolicy,
};

lazy_static! {
    /// Records the pid of the process currently running on each CPU.
    pub static ref CPU_EXECUTING: CpuExecuting = CpuExecuting::new();
}

#[derive(Debug)]
pub struct CpuExecuting {
    data: Vec<AtomicPid>,
}

impl CpuExecuting {
    pub fn new() -> Self {
        let mut data = Vec::new();
        for _ in 0..PerCpu::MAX_CPU_NUM {
            data.push(AtomicPid::new(Pid::new(0)));
        }
        Self { data }
    }

    /// Record `pid` as the process currently executing on `cpu_id`.
    #[inline(always)]
    pub fn set(&self, cpu_id: ProcessorId, pid: Pid) {
        self.data[cpu_id.data() as usize].store(pid, Ordering::SeqCst);
    }

    /// Return the pid of the process currently executing on `cpu_id`.
    #[inline(always)]
    pub fn get(&self, cpu_id: ProcessorId) -> Pid {
        self.data[cpu_id.data() as usize].load(Ordering::SeqCst)
    }
}
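
// Usage sketch (illustrative only, not called anywhere): publish and read back
// which pid a CPU is running. `SeqCst` keeps set/get totally ordered across CPUs.
#[allow(dead_code)]
fn cpu_executing_example() {
    let cpu = smp_get_processor_id();
    CPU_EXECUTING.set(cpu, ProcessManager::current_pcb().pid());
    let _running: Pid = CPU_EXECUTING.get(cpu);
}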

// Get the load on a given CPU and return it; `cpu_id` identifies the CPU to query.
// TODO: change the load metric to the number of processes run over a recent time window.
#[allow(dead_code)]
pub fn get_cpu_loads(cpu_id: ProcessorId) -> u32 {
    let cfs_scheduler = __get_cfs_scheduler();
    let rt_scheduler = __get_rt_scheduler();
    let len_cfs = cfs_scheduler.get_cfs_queue_len(cpu_id);
    let len_rt = rt_scheduler.rt_queue_len(cpu_id);
    // let load_rt = rt_scheduler.get_load_list_len(cpu_id);
    // kdebug!("this cpu_id {} is load rt {}", cpu_id, load_rt);

    return (len_rt + len_cfs) as u32;
}
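
// Usage sketch (illustrative only): with the current metric, comparing loads
// amounts to comparing run-queue lengths.
#[allow(dead_code)]
fn cpu_loads_example() -> bool {
    let here = get_cpu_loads(smp_get_processor_id());
    let cpu0 = get_cpu_loads(ProcessorId::new(0));
    // true if CPU 0 currently queues fewer processes than this CPU
    cpu0 < here
}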

// Load balancing
pub fn loads_balance(pcb: Arc<ProcessControlBlock>) {
    // FIXME: Load balancing currently enqueues the process directly onto the
    // target CPU's queue, so timing races can leave the process present on two
    // CPUs at once. Until the scheduling subsystem is rewritten/improved,
    // processes are pinned to CPU 0 for now.
    // See issue: https://github.com/DragonOS-Community/DragonOS/issues/571
    let min_loads_cpu_id = ProcessorId::new(0);

    // Get the total number of CPUs
    // let cpu_num = unsafe { smp_get_total_cpu() };
    // Find the id of the CPU with the smallest load
    // let mut min_loads = get_cpu_loads(smp_get_processor_id());
    // for cpu_id in 0..cpu_num {
    //     let cpu_id = ProcessorId::new(cpu_id);
    //     let tmp_cpu_loads = get_cpu_loads(cpu_id);
    //     if min_loads - tmp_cpu_loads > 0 {
    //         min_loads_cpu_id = cpu_id;
    //         min_loads = tmp_cpu_loads;
    //     }
    // }

    let pcb_cpu = pcb.sched_info().on_cpu();
    // Migrate the pcb to the least-loaded CPU. If its NEED_MIGRATE flag is
    // already set, a migration is already pending, so do nothing.
    if pcb_cpu.is_none()
        || (min_loads_cpu_id != pcb_cpu.unwrap()
            && !pcb.flags().contains(ProcessFlags::NEED_MIGRATE))
    {
        pcb.flags().insert(ProcessFlags::NEED_MIGRATE);
        pcb.sched_info().set_migrate_to(Some(min_loads_cpu_id));
        // kdebug!("set migrating, pcb:{:?}", pcb);
    }
}
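
// Sketch of the migration flag's life cycle (illustrative only; `sched_enqueue`
// below is the real consumer): `loads_balance` only marks the pcb, and the next
// enqueue performs the actual move to `migrate_to()`.
#[allow(dead_code)]
fn loads_balance_example(pcb: Arc<ProcessControlBlock>) {
    loads_balance(pcb.clone());
    if pcb.flags().contains(ProcessFlags::NEED_MIGRATE) {
        // sched_enqueue() clears NEED_MIGRATE, sets on_cpu to migrate_to(),
        // and resets the vruntime before inserting the pcb into a run queue.
        sched_enqueue(pcb, false);
    }
}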

/// @brief Trait that every concrete scheduler must implement
pub trait Scheduler {
    /// @brief Called when a scheduling decision is requested from this scheduler
    fn sched(&mut self) -> Option<Arc<ProcessControlBlock>>;

    /// @brief Add a pcb to this scheduler's run queue
    fn enqueue(&mut self, pcb: Arc<ProcessControlBlock>);
}
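
// A minimal sketch of a `Scheduler` implementor (hypothetical, for illustration
// only; the real implementations are `SchedulerCFS` in `cfs.rs` and `SchedulerRT`
// in `rt.rs`):
#[allow(dead_code)]
struct RoundRobinExample {
    queue: alloc::collections::VecDeque<Arc<ProcessControlBlock>>,
}

impl Scheduler for RoundRobinExample {
    fn sched(&mut self) -> Option<Arc<ProcessControlBlock>> {
        // Hand out the process that has waited the longest.
        self.queue.pop_front()
    }

    fn enqueue(&mut self, pcb: Arc<ProcessControlBlock>) {
        self.queue.push_back(pcb);
    }
}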

pub fn do_sched() -> Option<Arc<ProcessControlBlock>> {
    // The current process holds a lock (preemption disabled); do not switch,
    // to avoid deadlock.
    if ProcessManager::current_pcb().preempt_count() != 0 {
        let binding = ProcessManager::current_pcb();
        let guard = binding
            .sched_info()
            .inner_lock_try_upgradable_read_irqsave(5);
        if unlikely(guard.is_none()) {
            return None;
        }

        let mut guard = guard.unwrap();

        let state = guard.state();
        if state.is_blocked() {
            // try to upgrade
            for _ in 0..50 {
                match guard.try_upgrade() {
                    Ok(mut writer) => {
                        // A process that was marked sleeping while still inside
                        // a critical section is set back to Runnable.
                        writer.set_state(ProcessState::Runnable);
                        break;
                    }
                    Err(s) => {
                        guard = s;
                    }
                }
            }
        }
        return None;
    }

    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    let cfs_scheduler: &mut SchedulerCFS = __get_cfs_scheduler();
    let rt_scheduler: &mut SchedulerRT = __get_rt_scheduler();
    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    let next: Arc<ProcessControlBlock>;
    match rt_scheduler.pick_next_task_rt(smp_get_processor_id()) {
        Some(p) => {
            next = p;
            // Put the picked process back where it was.
            rt_scheduler.enqueue_front(next);
            return rt_scheduler.sched();
        }
        None => {
            return cfs_scheduler.sched();
        }
    }
}
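
// Usage sketch (hypothetical; the real call sites live in the context-switch
// path, and `switch_to` is an illustrative name, not an API of this module):
#[allow(dead_code)]
fn do_sched_example() {
    if let Some(next) = do_sched() {
        // A real caller would context-switch to `next` here:
        // switch_to(next);
        let _ = next;
    }
    // `None` means either no candidate or that switching is unsafe right now
    // (e.g. preempt_count() != 0), so the caller simply keeps running.
}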

/// @brief Add a process to a scheduler's run queue
///
/// @param pcb the pcb to enqueue
/// @param reset_time whether to reset its virtual runtime (vruntime)
pub fn sched_enqueue(pcb: Arc<ProcessControlBlock>, mut reset_time: bool) {
    compiler_fence(core::sync::atomic::Ordering::SeqCst);
    if pcb.sched_info().inner_lock_read_irqsave().state() != ProcessState::Runnable {
        return;
    }
    let cfs_scheduler = __get_cfs_scheduler();
    let rt_scheduler = __get_rt_scheduler();
    // Load-balance every process except IDLE
    if pcb.pid().into() > 0 {
        loads_balance(pcb.clone());
    }

    if pcb.flags().contains(ProcessFlags::NEED_MIGRATE) {
        // kdebug!("migrating pcb:{:?}", pcb);
        pcb.flags().remove(ProcessFlags::NEED_MIGRATE);
        pcb.sched_info().set_on_cpu(pcb.sched_info().migrate_to());
        reset_time = true;
    }
    assert!(pcb.sched_info().on_cpu().is_some());

    match pcb.sched_info().inner_lock_read_irqsave().policy() {
        SchedPolicy::CFS => {
            if reset_time {
                cfs_scheduler.enqueue_reset_vruntime(pcb.clone());
            } else {
                cfs_scheduler.enqueue(pcb.clone());
            }
        }
        SchedPolicy::FIFO | SchedPolicy::RR => rt_scheduler.enqueue(pcb.clone()),
    }
}
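
// Usage sketch (hypothetical): a typical wakeup marks the process Runnable
// first and then enqueues it without resetting its vruntime; a pending
// migration (NEED_MIGRATE) forces the reset regardless, as shown above.
#[allow(dead_code)]
fn sched_enqueue_example(pcb: Arc<ProcessControlBlock>) {
    sched_enqueue(pcb, false);
}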

/// Initialize the process scheduler module
#[inline(never)]
pub fn sched_init() {
    kinfo!("Initializing schedulers...");
    unsafe {
        sched_cfs_init();
        sched_rt_init();
    }
    kinfo!("Schedulers initialized");
}

/// @brief Update the time slice when a timer interrupt arrives
///
/// Note: this function may only be called by the timer interrupt handler.
#[inline(never)]
pub fn sched_update_jiffies() {
    let binding = ProcessManager::current_pcb();
    let guard = binding.sched_info().inner_lock_try_read_irqsave(10);
    if unlikely(guard.is_none()) {
        return;
    }
    let guard = guard.unwrap();
    let policy = guard.policy();
    drop(guard);
    match policy {
        SchedPolicy::CFS => {
            __get_cfs_scheduler().timer_update_jiffies(binding.sched_info());
        }
        SchedPolicy::FIFO | SchedPolicy::RR => {
            __get_rt_scheduler().timer_update_jiffies();
        }
    }
}
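
// Usage sketch (hypothetical; `timer_tick_example` stands in for the real timer
// interrupt handler): the tick is charged to the current process, and any actual
// reschedule happens later via do_sched().
#[allow(dead_code)]
fn timer_tick_example() {
    // Interrupt entry code has already disabled interrupts at this point.
    sched_update_jiffies();
}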