rt.rs

use core::{ptr::null_mut, sync::atomic::compiler_fence};

use alloc::{boxed::Box, vec::Vec, collections::LinkedList};

use crate::{
    arch::asm::current::current_pcb,
    include::bindings::bindings::{process_control_block, PF_NEED_SCHED, SCHED_FIFO, SCHED_RR},
    kBUG, kdebug,
    libs::spinlock::RawSpinlock,
};

use super::core::{sched_enqueue, Scheduler};
/// Declare the global RT scheduler instance
pub static mut RT_SCHEDULER_PTR: *mut SchedulerRT = null_mut();

/// @brief Get a mutable reference to the RT scheduler instance
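/// Assumes sched_rt_init() has already run; otherwise the unwrap() below hits a null pointer and panics.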
#[inline]
pub fn __get_rt_scheduler() -> &'static mut SchedulerRT {
    return unsafe { RT_SCHEDULER_PTR.as_mut().unwrap() };
}
/// @brief Initialize the RT scheduler
pub unsafe fn sched_rt_init() {
    kdebug!("test rt init");
    if RT_SCHEDULER_PTR.is_null() {
        RT_SCHEDULER_PTR = Box::leak(Box::new(SchedulerRT::new()));
    } else {
        kBUG!("Try to init RT Scheduler twice.");
        panic!("Try to init RT Scheduler twice.");
    }
}
/// @brief RT run queue (per-cpu)
#[derive(Debug)]
struct RTQueue {
    /// Lock protecting the queue
    lock: RawSpinlock,
    /// Doubly linked list storing the queued processes
    queue: LinkedList<&'static mut process_control_block>,
}

impl RTQueue {
    pub fn new() -> RTQueue {
        RTQueue {
            queue: LinkedList::new(),
            lock: RawSpinlock::INIT,
        }
    }
    /// @brief Append a pcb to the tail of the queue
    pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
        self.lock.lock();
        // The IDLE process (pid 0) is never queued
        if pcb.pid == 0 {
            self.lock.unlock();
            return;
        }
        self.queue.push_back(pcb);
        self.lock.unlock();
    }
    /// @brief Pop a pcb from the head of the queue; returns None if the queue is empty
    pub fn dequeue(&mut self) -> Option<&'static mut process_control_block> {
        let res: Option<&'static mut process_control_block>;
        self.lock.lock();
        if self.queue.len() > 0 {
            // Queue is not empty: return the next pcb to run
            res = Some(self.queue.pop_front().unwrap());
        } else {
            // Queue is empty: return None
            res = None;
        }
        self.lock.unlock();
        return res;
    }
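    /// @brief Push a pcb onto the head of the queue so it is picked first on the next dequeue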
    pub fn enqueue_front(&mut self, pcb: &'static mut process_control_block) {
        self.lock.lock();
        // The IDLE process (pid 0) is never queued
        if pcb.pid == 0 {
            self.lock.unlock();
            return;
        }
        self.queue.push_front(pcb);
        self.lock.unlock();
    }
}
/// @brief The RT scheduler
pub struct SchedulerRT {
    cpu_queue: Vec<&'static mut RTQueue>,
}
impl SchedulerRT {
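    /// Time slice handed to a SCHED_RR task once its slice runs out (see the RR branch in sched())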
    const RR_TIMESLICE: i64 = 100;
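    /// Number of real-time priority levels; pick_next_task_rt() scans this many queues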
    const MAX_RT_PRIO: i64 = 100;

    pub fn new() -> SchedulerRT {
        // The number of cores is specified by hand for now
        // todo: get the core count from the cpu module
        let mut result = SchedulerRT {
            cpu_queue: Default::default(),
        };
        // Create a queue for each cpu core
        for _ in 0..SchedulerRT::MAX_RT_PRIO {
            result.cpu_queue.push(Box::leak(Box::new(RTQueue::new())));
        }
        return result;
    }
    /// @brief Pick the next runnable RT process
    pub fn pick_next_task_rt(&mut self) -> Option<&'static mut process_control_block> {
        // Scan the queues in order until a process is found
        // This should iterate over the number of priorities rather than the number of CPUs; needs fixing
        for i in 0..SchedulerRT::MAX_RT_PRIO {
            let cpu_queue_i: &mut RTQueue = self.cpu_queue[i as usize];
            let proc: Option<&'static mut process_control_block> = cpu_queue_i.dequeue();
            if proc.is_some() {
                return proc;
            }
        }
        // Nothing runnable: return None
        None
    }
}
impl Scheduler for SchedulerRT {
    /// @brief Schedule on the current cpu.
    /// Note: interrupts must be disabled before entering this function.
    fn sched(&mut self) -> Option<&'static mut process_control_block> {
        current_pcb().flags &= !(PF_NEED_SCHED as u64);
        // On the normal path a next pcb is always picked here; if it is None, raise an error
        let proc: &'static mut process_control_block =
            self.pick_next_task_rt().expect("No RT process found");
        // Under the FIFO policy a task keeps the cpu until a strictly higher-priority task becomes
        // ready (equal priority is not enough) or it gives up the cpu voluntarily (waits for a resource)
        if proc.policy == SCHED_FIFO {
            // If the picked process's priority is no higher than the current one's, do not switch
            if proc.priority <= current_pcb().priority {
                sched_enqueue(proc, false);
            } else {
                // Put the current process back on the queue
                sched_enqueue(current_pcb(), false);
                compiler_fence(core::sync::atomic::Ordering::SeqCst);
                return Some(proc);
            }
        }
        // The RR policy also has to account for the time slice
        else if proc.policy == SCHED_RR {
            // Same (or higher) priority: consider switching
            if proc.priority >= current_pcb().priority {
                // If the picked process has used up its time slice, reset the slice and re-enqueue it
                if proc.rt_time_slice <= 0 {
                    proc.rt_time_slice = SchedulerRT::RR_TIMESLICE;
                    proc.flags |= PF_NEED_SCHED as u64;
                    sched_enqueue(proc, false);
                }
                // The picked process still has time slice left: switch to it
                else {
                    // Put the current process back on the queue
                    sched_enqueue(current_pcb(), false);
                    compiler_fence(core::sync::atomic::Ordering::SeqCst);
                    return Some(proc);
                }
            }
            // The current process has higher priority, so it must itself be a real-time process;
            // re-queue the picked process, this time at the head of the queue
            else {
                self.cpu_queue[proc.cpu_id as usize].enqueue_front(proc);
            }
        }
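        // No switch happened: the caller keeps running the current process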
        return None;
    }
    fn enqueue(&mut self, pcb: &'static mut process_control_block) {
        let cpu_queue = &mut self.cpu_queue[pcb.cpu_id as usize];
        cpu_queue.enqueue(pcb);
    }
}
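
// A minimal usage sketch (assumptions: the real call sites live outside this file,
// e.g. in sched::core and the interrupt/syscall paths; `pcb` and the context switch
// below are placeholders). Interrupts must already be disabled when sched() is called.
//
//     unsafe { sched_rt_init() };        // once, during kernel initialization
//     let rt = __get_rt_scheduler();
//     rt.enqueue(pcb);                   // make an RT pcb runnable on its cpu's queue
//     if let Some(next) = rt.sched() {
//         // the arch layer would context-switch to `next` here
//     }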