fix: correct the panic caused by the process PCB's `on_cpu` field not being set (#1057)

Signed-off-by: longjin <[email protected]>
LoGin 3 months ago
parent commit 62da73bbe5
4 changed files with 25 additions and 16 deletions:
  1. kernel/src/process/mod.rs (+6 -1)
  2. kernel/src/sched/clock.rs (+2 -2)
  3. kernel/src/sched/cputime.rs (+9 -6)
  4. kernel/src/sched/mod.rs (+8 -7)

kernel/src/process/mod.rs (+6 -1)

@@ -275,7 +275,12 @@ impl ProcessManager {
                 // avoid deadlock
                 drop(writer);
 
-                let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize);
+                let rq = cpu_rq(
+                    pcb.sched_info()
+                        .on_cpu()
+                        .unwrap_or(smp_get_processor_id())
+                        .data() as usize,
+                );
 
                 let (rq, _guard) = rq.self_lock();
                 rq.update_rq_clock();
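
Why this hunk matters: a task that has never been picked up by the scheduler can reach this exit path with `on_cpu` still unset, so the old bare `unwrap()` panicked; the fix falls back to the CPU the code is currently running on. Below is a minimal standalone sketch of that fallback pattern; the `ProcessorId` and `smp_get_processor_id` here are simplified stand-ins, not the kernel's real definitions:

// Minimal sketch of the unwrap_or fallback; types are simplified stand-ins.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct ProcessorId(u32);

impl ProcessorId {
    fn data(&self) -> u32 {
        self.0
    }
}

// Stand-in for the kernel's smp_get_processor_id(): pretend we run on CPU 0.
fn smp_get_processor_id() -> ProcessorId {
    ProcessorId(0)
}

fn main() {
    // A freshly created task whose scheduler info has no CPU recorded yet.
    let on_cpu: Option<ProcessorId> = None;

    // Before the fix this was `on_cpu.unwrap()`, which panics on None.
    // After the fix, fall back to the CPU we are currently running on.
    let cpu = on_cpu.unwrap_or(smp_get_processor_id());
    assert_eq!(cpu.data(), 0);
}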

kernel/src/sched/clock.rs (+2 -2)

@@ -1,12 +1,12 @@
 //! This file implements the clocks used during scheduling
 //!
-use crate::{arch::CurrentTimeArch, time::TimeArch};
+use crate::{arch::CurrentTimeArch, smp::cpu::ProcessorId, time::TimeArch};
 
 pub struct SchedClock;
 
 impl SchedClock {
     #[inline]
-    pub fn sched_clock_cpu(_cpu: usize) -> u64 {
+    pub fn sched_clock_cpu(_cpu: ProcessorId) -> u64 {
         #[cfg(target_arch = "x86_64")]
         {
             if crate::arch::driver::tsc::TSCManager::cpu_khz() == 0 {
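
The signature change from `usize` to `ProcessorId` here (and in the files below) swaps a raw integer for a newtype, so a CPU id can no longer be confused with an arbitrary index at compile time. A rough sketch of the idea follows; `new` and `data` match the calls visible elsewhere in this diff, but the real definition lives in kernel/src/smp/cpu.rs and may differ in detail:

// Illustrative newtype sketch, not DragonOS's actual definition.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ProcessorId(u32);

impl ProcessorId {
    pub const fn new(id: u32) -> Self {
        Self(id)
    }
    pub const fn data(&self) -> u32 {
        self.0
    }
}

// Callers must now pass something that is provably a CPU id;
// a bare usize no longer compiles.
pub fn sched_clock_cpu(_cpu: ProcessorId) -> u64 {
    0 // placeholder body for the sketch
}

fn main() {
    let _ = sched_clock_cpu(ProcessorId::new(0));
}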

kernel/src/sched/cputime.rs (+9 -6)

@@ -1,14 +1,17 @@
 use core::sync::atomic::{compiler_fence, AtomicUsize, Ordering};
 
 use crate::{
-    arch::CurrentIrqArch, exception::InterruptArch, process::ProcessControlBlock,
-    smp::core::smp_get_processor_id, time::jiffies::TICK_NESC,
+    arch::CurrentIrqArch,
+    exception::InterruptArch,
+    process::ProcessControlBlock,
+    smp::{core::smp_get_processor_id, cpu::ProcessorId},
+    time::jiffies::TICK_NESC,
 };
 use alloc::sync::Arc;
 
 use super::{clock::SchedClock, cpu_irq_time};
 
-pub fn irq_time_read(cpu: usize) -> u64 {
+pub fn irq_time_read(cpu: ProcessorId) -> u64 {
     compiler_fence(Ordering::SeqCst);
     let irqtime = cpu_irq_time(cpu);
 
@@ -49,7 +52,7 @@ impl IrqTime {
     }
 
     pub fn irqtime_start() {
-        let cpu = smp_get_processor_id().data() as usize;
+        let cpu = smp_get_processor_id();
         let irq_time = cpu_irq_time(cpu);
         compiler_fence(Ordering::SeqCst);
         irq_time.irq_start_time = SchedClock::sched_clock_cpu(cpu) as u64;
@@ -58,7 +61,7 @@ impl IrqTime {
 
     pub fn irqtime_account_irq(_pcb: Arc<ProcessControlBlock>) {
         compiler_fence(Ordering::SeqCst);
-        let cpu = smp_get_processor_id().data() as usize;
+        let cpu = smp_get_processor_id();
         let irq_time = cpu_irq_time(cpu);
         compiler_fence(Ordering::SeqCst);
         let delta = SchedClock::sched_clock_cpu(cpu) as u64 - irq_time.irq_start_time;
@@ -93,7 +96,7 @@ impl CpuTimeFunc {
         let mut accounted = Self::steal_account_process_time(max);
 
         if accounted < max {
-            let irqtime = cpu_irq_time(smp_get_processor_id().data() as usize);
+            let irqtime = cpu_irq_time(smp_get_processor_id());
             accounted += irqtime.irqtime_tick_accounted(max - accounted);
         }
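
These hunks keep the existing start/account pairing, with the CPU now carried as a `ProcessorId`: `irqtime_start` stamps the clock when an interrupt begins, and `irqtime_account_irq` later charges the elapsed delta. A simplified standalone model of that pairing; the `total` accumulator is a stand-in, and only `irq_start_time` and the method names mirror the diff:

// Simplified model of the irqtime start/account pairing.
struct IrqTime {
    irq_start_time: u64,
    total: u64, // stand-in accumulator, not the kernel's field layout
}

impl IrqTime {
    fn irqtime_start(&mut self, now: u64) {
        // Stamp the moment the IRQ begins.
        self.irq_start_time = now;
    }

    fn irqtime_account_irq(&mut self, now: u64) {
        // Charge the time elapsed since irqtime_start to this CPU.
        self.total += now - self.irq_start_time;
    }
}

fn main() {
    let mut t = IrqTime { irq_start_time: 0, total: 0 };
    t.irqtime_start(100); // e.g. sched_clock_cpu at IRQ entry
    t.irqtime_account_irq(150); // and again at accounting time
    assert_eq!(t.total, 50);
}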
 

kernel/src/sched/mod.rs (+8 -7)

@@ -63,8 +63,8 @@ pub const SCHED_CAPACITY_SHIFT: u64 = SCHED_FIXEDPOINT_SHIFT;
 pub const SCHED_CAPACITY_SCALE: u64 = 1 << SCHED_CAPACITY_SHIFT;
 
 #[inline]
-pub fn cpu_irq_time(cpu: usize) -> &'static mut IrqTime {
-    unsafe { CPU_IRQ_TIME.as_mut().unwrap()[cpu] }
+pub fn cpu_irq_time(cpu: ProcessorId) -> &'static mut IrqTime {
+    unsafe { CPU_IRQ_TIME.as_mut().unwrap()[cpu.data() as usize] }
 }
 
 #[inline]
@@ -289,7 +289,7 @@ pub struct CpuRunQueue {
     lock: SpinLock<()>,
     lock_on_who: AtomicUsize,
 
-    cpu: usize,
+    cpu: ProcessorId,
     clock_task: u64,
     clock: u64,
     prev_irq_time: u64,
@@ -329,7 +329,7 @@ pub struct CpuRunQueue {
 }
 
 impl CpuRunQueue {
-    pub fn new(cpu: usize) -> Self {
+    pub fn new(cpu: ProcessorId) -> Self {
         Self {
             lock: SpinLock::new(()),
             lock_on_who: AtomicUsize::new(usize::MAX),
@@ -460,6 +460,7 @@ impl CpuRunQueue {
         self.enqueue_task(pcb.clone(), flags);
 
         *pcb.sched_info().on_rq.lock_irqsave() = OnRq::Queued;
+        pcb.sched_info().set_on_cpu(Some(self.cpu));
     }
 
     /// Check whether the given task can preempt the currently running task
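
This one-line addition is the root-cause fix: stamping the task with the owning run queue's CPU at enqueue time means `on_cpu()` is already `Some(...)` when the exit path in kernel/src/process/mod.rs reads it. A standalone sketch of the shape of the fix, with simplified stand-in types:

// Sketch of the enqueue-time fix; all types are simplified stand-ins.
use std::sync::Mutex;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct ProcessorId(u32);

struct SchedInfo {
    on_cpu: Mutex<Option<ProcessorId>>,
}

impl SchedInfo {
    fn set_on_cpu(&self, cpu: Option<ProcessorId>) {
        *self.on_cpu.lock().unwrap() = cpu;
    }
}

struct CpuRunQueue {
    cpu: ProcessorId,
}

impl CpuRunQueue {
    fn enqueue_task(&self, info: &SchedInfo) {
        // ... place the task on this queue ...
        // The fix: record which CPU's queue now owns the task.
        info.set_on_cpu(Some(self.cpu));
    }
}

fn main() {
    let rq = CpuRunQueue { cpu: ProcessorId(0) };
    let info = SchedInfo { on_cpu: Mutex::new(None) };
    rq.enqueue_task(&info);
    assert_eq!(*info.on_cpu.lock().unwrap(), Some(ProcessorId(0)));
}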
@@ -638,7 +639,7 @@ impl CpuRunQueue {
 
         let cpu = self.cpu;
 
-        if cpu == smp_get_processor_id().data() as usize {
+        if cpu == smp_get_processor_id() {
             // assert!(
             //     Arc::ptr_eq(&current, &ProcessManager::current_pcb()),
             //     "rq current name {} process current {}",
@@ -653,7 +654,7 @@ impl CpuRunQueue {
         }
 
         // Send a reschedule IPI to the target CPU
-        send_resched_ipi(ProcessorId::new(cpu as u32));
+        send_resched_ipi(cpu);
     }
 
     /// 选择下一个task
@@ -986,7 +987,7 @@ pub fn sched_init() {
 
         let mut cpu_runqueue = Vec::with_capacity(PerCpu::MAX_CPU_NUM as usize);
         for cpu in 0..PerCpu::MAX_CPU_NUM as usize {
-            let rq = Arc::new(CpuRunQueue::new(cpu));
+            let rq = Arc::new(CpuRunQueue::new(ProcessorId::new(cpu as u32)));
             rq.cfs.force_mut().set_rq(Arc::downgrade(&rq));
             cpu_runqueue.push(rq);
         }