
Fix sched and net lockdep errors (#479)

- fix: bring the locking in the scheduler, softirq, timer, and network subsystems into compliance with the lock-dependency safety rules
- fix: fix the kernel-stack overflow when creating a pcb
- switch the exception trap gates to interrupt gates (an interrupt gate clears IF on entry, so exception handlers start with interrupts disabled, consistent with the irqsave locking rules above)

---------

Co-authored-by: GnoCiYeH <[email protected]>
LoGin committed 0d6cf65aa1 (1 year ago)

+ 3 - 2
kernel/src/arch/x86_64/driver/apic/apic.c

@@ -57,9 +57,10 @@ int apic_init()
  */
 void do_IRQ(struct pt_regs *rsp, ul number)
 {
-    if((rsp->cs & 0x3) == 3)
+    
+    if ((rsp->cs & 0x3) == 3)
     {
-        asm volatile("swapgs":::"memory");
+        asm volatile("swapgs" ::: "memory");
     }
    if (number < 0x80 && number >= 32) // 0x80 is the boundary: below it are the external interrupt controllers, 0x80 and above is the Local APIC
     {

+ 2 - 2
kernel/src/arch/x86_64/driver/apic/apic_timer.rs

@@ -112,7 +112,7 @@ pub enum LocalApicTimerMode {
 impl LocalApicTimer {
     /// Interval between timer interrupts
     pub const INTERVAL_MS: u64 = 1000 / HZ as u64;
-    pub const DIVISOR: u64 = 3;
+    pub const DIVISOR: u64 = 4;
 
     /// IoApicManager: initial values are 0 or false
     pub const fn new() -> Self {
@@ -131,7 +131,7 @@ impl LocalApicTimer {
         // Question: is kHz really the right unit here?
         // It seems like it should be Hz, but the old code measured initcnt instead of computing it,
         // and computing with Hz makes initcnt so large that the system stutters, while kHz runs fine.
-        let count = cpu_khz * Self::INTERVAL_MS / (1000 * Self::DIVISOR);
+        let count = cpu_khz * Self::INTERVAL_MS / (Self::DIVISOR);
         return count;
     }
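
A sanity check on the new formula (commentary, not part of the commit): `cpu_khz` is cycles per millisecond, so multiplying by the interval in ms and dividing by the hardware divide ratio yields the initial count directly; the old `/ (1000 * DIVISOR)` looks like a units slip. A sketch with an assumed 1 GHz clock and HZ = 100:

```rust
// Assumed example values; only the arithmetic is being illustrated.
const HZ: u64 = 100;
const INTERVAL_MS: u64 = 1000 / HZ; // 10 ms, as in the kernel
const DIVISOR: u64 = 4;

fn main() {
    let cpu_khz: u64 = 1_000_000; // 1 GHz bus clock = 1_000_000 cycles per ms
    let count = cpu_khz * INTERVAL_MS / DIVISOR; // new formula
    assert_eq!(count, 2_500_000);
    // Old formula for comparison: 1_000_000 * 10 / (1000 * 3) = 3333,
    // roughly three orders of magnitude too small for a 10 ms period.
    println!("initcnt = {count}");
}
```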
 

+ 9 - 4
kernel/src/arch/x86_64/driver/hpet.rs

@@ -10,11 +10,15 @@ use acpi::HpetInfo;
 use system_error::SystemError;
 
 use crate::{
+    arch::CurrentIrqArch,
     driver::{
         acpi::acpi_manager,
         timers::hpet::{HpetRegisters, HpetTimerRegisters},
     },
-    exception::softirq::{softirq_vectors, SoftirqNumber},
+    exception::{
+        softirq::{softirq_vectors, SoftirqNumber},
+        InterruptArch,
+    },
     kdebug, kerror, kinfo,
     libs::{
         rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard},
@@ -51,8 +55,8 @@ struct InnerHpet {
 }
 
 impl Hpet {
-    /// HPET0 interrupt interval: 500 us
-    pub const HPET0_INTERVAL_USEC: u64 = 500;
+    /// HPET0 interrupt interval: 10 ms
+    pub const HPET0_INTERVAL_USEC: u64 = 10000;
 
     fn new(mut hpet_info: HpetInfo) -> Result<Self, SystemError> {
         let paddr = PhysAddr::new(hpet_info.base_address);
@@ -222,7 +226,8 @@ impl Hpet {
     /// Handle an HPET interrupt
     pub(super) fn handle_irq(&self, timer_num: u32) {
         if timer_num == 0 {
-            update_timer_jiffies(Self::HPET0_INTERVAL_USEC);
+            assert!(CurrentIrqArch::is_irq_enabled() == false);
+            update_timer_jiffies(Self::HPET0_INTERVAL_USEC, Self::HPET0_INTERVAL_USEC as i64);
 
             if let Ok(first_expire) = timer_get_first_expire() {
                 if first_expire <= clock() {
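
Two things change at this call site: the handler now asserts that it runs with interrupts disabled (the ground rule the rest of this commit enforces), and update_timer_jiffies gains a second argument, which, judging from the new update_wall_time(delta_us) signature later in this commit, is the signed tick length in microseconds. A quick sketch of what the 10 ms period means:

```rust
// Assumed-example arithmetic for the new HPET0 period.
const HPET0_INTERVAL_USEC: u64 = 10_000;

fn main() {
    let per_second = 1_000_000 / HPET0_INTERVAL_USEC;
    // 100 timer interrupts per second instead of the old 2000 (500 us period).
    assert_eq!(per_second, 100);
    println!("{per_second} HPET0 interrupts/s");
}
```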

+ 11 - 0
kernel/src/arch/x86_64/driver/tsc.rs

@@ -239,6 +239,17 @@ impl TSCManager {
     ///
     /// 参考 https://opengrok.ringotek.cn/xref/linux-6.1.9/arch/x86/kernel/tsc.c#389
     fn pit_calibrate_tsc(latch: u64, ms: u64, loopmin: u64) -> Option<u64> {
+        // No legacy PIC driver has been written yet, so return directly here
+        let has_legacy_pic = false;
+        if !has_legacy_pic {
+            let mut cnt = 10000;
+            while cnt > 0 {
+                cnt -= 1;
+            }
+
+            return None;
+        }
+
         unsafe {
             // Set the Gate high, disable speaker
             let d = (CurrentPortIOArch::in8(0x61) & (!0x02)) | 0x01;
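
One caveat about the early-return path above (an observation, not part of the commit): an empty countdown loop is dead code to the optimizer and can be removed entirely in release builds. If a real delay is intended, an opaque use such as core::hint::black_box keeps the loop alive; a minimal sketch:

```rust
use core::hint::black_box;

// Busy-wait that the compiler cannot elide: black_box forces `cnt`
// to be treated as observed on every iteration.
fn spin_delay(mut cnt: u64) {
    while cnt > 0 {
        cnt -= 1;
        black_box(cnt);
    }
}

fn main() {
    spin_delay(10_000);
}
```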

+ 1 - 1
kernel/src/arch/x86_64/ipc/signal.rs

@@ -384,7 +384,7 @@ impl SigContext {
     /// - `false` -> execution failed
     pub fn restore_sigcontext(&mut self, frame: &mut TrapFrame) -> bool {
         let guard = ProcessManager::current_pcb();
-        let mut arch_info = guard.arch_info();
+        let mut arch_info = guard.arch_info_irqsave();
         (*frame) = self.frame.clone();
         // (*current_thread).trap_num = (*context).trap_num;
         *arch_info.cr2_mut() = self.cr2 as usize;

+ 11 - 10
kernel/src/arch/x86_64/process/mod.rs

@@ -98,6 +98,7 @@ impl ArchPCBInfo {
     /// ## Returns
     ///
     /// A new ArchPCBInfo
+    #[inline(never)]
     pub fn new(kstack: &KernelStack) -> Self {
         let mut r = Self {
             rflags: 0,
@@ -325,7 +326,7 @@ impl ProcessManager {
         child_trapframe.set_return_value(0);
 
        // Set the child's stack base (the base in effect when the interrupt-return path begins)
-        let mut new_arch_guard = new_pcb.arch_info();
+        let mut new_arch_guard = unsafe { new_pcb.arch_info() };
         let kernel_stack_guard = new_pcb.kernel_stack();
 
        // Set the rsp/rbp the child uses when it starts executing in kernel mode
@@ -385,13 +386,13 @@ impl ProcessManager {
         assert!(CurrentIrqArch::is_irq_enabled() == false);
 
        // Save the FP registers
-        prev.arch_info().save_fp_state();
+        prev.arch_info_irqsave().save_fp_state();
        // Switch the FP registers
-        next.arch_info().restore_fp_state();
+        next.arch_info_irqsave().restore_fp_state();
 
        // Switch fsbase
-        prev.arch_info().save_fsbase();
-        next.arch_info().restore_fsbase();
+        prev.arch_info_irqsave().save_fsbase();
+        next.arch_info_irqsave().restore_fsbase();
 
        // Switch gsbase
         Self::switch_gsbase(&prev, &next);
@@ -406,8 +407,8 @@ impl ProcessManager {
        // Switch the kernel stack
 
        // Take the arch-info locks and deliberately leak their guards (released in switch_finish_hook after the context switch)
-        let next_arch = SpinLockGuard::leak(next.arch_info()) as *mut ArchPCBInfo;
-        let prev_arch = SpinLockGuard::leak(prev.arch_info()) as *mut ArchPCBInfo;
+        let next_arch = SpinLockGuard::leak(next.arch_info_irqsave()) as *mut ArchPCBInfo;
+        let prev_arch = SpinLockGuard::leak(prev.arch_info_irqsave()) as *mut ArchPCBInfo;
 
         (*prev_arch).rip = switch_back as usize;
 
@@ -430,10 +431,10 @@ impl ProcessManager {
 
     unsafe fn switch_gsbase(prev: &Arc<ProcessControlBlock>, next: &Arc<ProcessControlBlock>) {
         asm!("swapgs", options(nostack, preserves_flags));
-        prev.arch_info().save_gsbase();
-        next.arch_info().restore_gsbase();
+        prev.arch_info_irqsave().save_gsbase();
+        next.arch_info_irqsave().restore_gsbase();
        // Write the next task's kstack into kernel_gsbase
-        next.arch_info().store_kernel_gsbase();
+        next.arch_info_irqsave().store_kernel_gsbase();
         asm!("swapgs", options(nostack, preserves_flags));
     }
 }
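
The leak-and-release-later pattern above is the heart of the switch_to lockdep fix: both arch-info locks are taken via the irqsave path, their guards are deliberately leaked so the locks stay held across the stack switch, and switch_finish_hook (not shown in this diff) unlocks them once the next task is running. A standalone sketch of the same shape, using the `spin` crate's SpinMutexGuard::leak and force_unlock, which roughly mirror the kernel's own SpinLock API:

```rust
// Sketch only; not the kernel's implementation.
use spin::Mutex;

struct ArchPCBInfo {
    rip: usize,
}

fn switch_sketch(prev: &Mutex<ArchPCBInfo>, next: &Mutex<ArchPCBInfo>) {
    // Leak the guards: the locks remain held across the context switch,
    // so nothing can mutate either ArchPCBInfo mid-switch.
    let prev_arch: *mut ArchPCBInfo = spin::MutexGuard::leak(prev.lock());
    let _next_arch: *mut ArchPCBInfo = spin::MutexGuard::leak(next.lock());

    unsafe { (*prev_arch).rip = 0x1234 }; // e.g. record the resume address

    // In the kernel this happens later, in switch_finish_hook; here we
    // just release both locks explicitly.
    unsafe {
        prev.force_unlock();
        next.force_unlock();
    }
}

fn main() {
    let a = Mutex::new(ArchPCBInfo { rip: 0 });
    let b = Mutex::new(ArchPCBInfo { rip: 0 });
    switch_sketch(&a, &b);
    assert_eq!(a.lock().rip, 0x1234);
}
```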

+ 5 - 1
kernel/src/arch/x86_64/syscall/mod.rs

@@ -67,7 +67,11 @@ macro_rules! syscall_return {
 pub extern "sysv64" fn syscall_handler(frame: &mut TrapFrame) -> () {
     let syscall_num = frame.rax as usize;
    // Prevent deadlocks caused by sys_sched being unable to exit on timeout
-    if syscall_num != SYS_SCHED {
+    if syscall_num == SYS_SCHED {
+        unsafe {
+            CurrentIrqArch::interrupt_disable();
+        }
+    } else {
         unsafe {
             CurrentIrqArch::interrupt_enable();
         }

+ 3 - 1
kernel/src/driver/net/e1000e/e1000e_driver.rs

@@ -343,6 +343,8 @@ pub fn e1000e_driver_init(device: E1000EDevice) {
     let driver = E1000EDriver::new(device);
     let iface = E1000EInterface::new(driver);
    // Register the NIC's interface info in the global interface table
-    NET_DRIVERS.write().insert(iface.nic_id(), iface.clone());
+    NET_DRIVERS
+        .write_irqsave()
+        .insert(iface.nic_id(), iface.clone());
     kinfo!("e1000e driver init successfully!\tMAC: [{}]", mac);
 }

+ 3 - 1
kernel/src/driver/net/virtio_net.rs

@@ -234,7 +234,9 @@ pub fn virtio_net<T: Transport + 'static>(transport: T) {
     let iface = VirtioInterface::new(driver);
     let name = iface.name.clone();
    // Register the NIC's interface info in the global interface table
-    NET_DRIVERS.write().insert(iface.nic_id(), iface.clone());
+    NET_DRIVERS
+        .write_irqsave()
+        .insert(iface.nic_id(), iface.clone());
     kinfo!(
         "Virtio-net driver init successfully!\tNetDevID: [{}], MAC: [{}]",
         name,

+ 53 - 8
kernel/src/exception/softirq.rs

@@ -3,16 +3,21 @@ use core::{
     intrinsics::unlikely,
     mem::{self, MaybeUninit},
     ptr::null_mut,
-    sync::atomic::{compiler_fence, Ordering},
+    sync::atomic::{compiler_fence, AtomicI16, Ordering},
 };
 
-use alloc::{boxed::Box, sync::Arc};
+use alloc::{boxed::Box, sync::Arc, vec::Vec};
 use num_traits::FromPrimitive;
 use system_error::SystemError;
 
 use crate::{
-    arch::CurrentIrqArch, exception::InterruptArch, kdebug, kinfo, libs::rwlock::RwLock,
-    mm::percpu::PerCpu, process::ProcessManager, smp::core::smp_get_processor_id,
+    arch::CurrentIrqArch,
+    exception::InterruptArch,
+    kdebug, kinfo,
+    libs::rwlock::RwLock,
+    mm::percpu::{PerCpu, PerCpuVar},
+    process::ProcessManager,
+    smp::core::smp_get_processor_id,
     time::timer::clock,
 };
 
@@ -94,8 +99,12 @@ pub trait SoftirqVec: Send + Sync + Debug {
 #[derive(Debug)]
 pub struct Softirq {
     table: RwLock<[Option<Arc<dyn SoftirqVec>>; MAX_SOFTIRQ_NUM as usize]>,
+    /// Softirq nesting depth (per CPU)
+    cpu_running_count: PerCpuVar<AtomicI16>,
 }
 impl Softirq {
+    /// Maximum number of nested softirqs per CPU
+    const MAX_RUNNING_PER_CPU: i16 = 3;
     fn new() -> Softirq {
         let mut data: [MaybeUninit<Option<Arc<dyn SoftirqVec>>>; MAX_SOFTIRQ_NUM as usize] =
             unsafe { MaybeUninit::uninit().assume_init() };
@@ -108,11 +117,20 @@ impl Softirq {
             mem::transmute::<_, [Option<Arc<dyn SoftirqVec>>; MAX_SOFTIRQ_NUM as usize]>(data)
         };
 
+        let mut percpu_count = Vec::with_capacity(PerCpu::MAX_CPU_NUM as usize);
+        percpu_count.resize_with(PerCpu::MAX_CPU_NUM as usize, || AtomicI16::new(0));
+        let cpu_running_count = PerCpuVar::new(percpu_count).unwrap();
+
         return Softirq {
             table: RwLock::new(data),
+            cpu_running_count,
         };
     }
 
+    fn cpu_running_count(&self) -> &PerCpuVar<AtomicI16> {
+        return &self.cpu_running_count;
+    }
+
     /// @brief Register a softirq vector
     ///
     /// @param softirq_num the vector number
@@ -127,7 +145,7 @@ impl Softirq {
 
         // let self = &mut SOFTIRQ_VECTORS.lock();
        // Check whether this softirq vector has already been registered
-        let mut table_guard = self.table.write();
+        let mut table_guard = self.table.write_irqsave();
         if table_guard[softirq_num as usize].is_some() {
             // kdebug!("register_softirq failed");
 
@@ -149,7 +167,7 @@ impl Softirq {
    /// @param irq_num the vector number
     pub fn unregister_softirq(&self, softirq_num: SoftirqNumber) {
         // kdebug!("unregister_softirq softirq_num = {:?}", softirq_num as u64);
-        let mut table_guard = self.table.write();
+        let mut table_guard = self.table.write_irqsave();
        // Clear the softirq vector
         table_guard[softirq_num as usize] = None;
         drop(table_guard);
@@ -162,8 +180,14 @@ impl Softirq {
     }
 
     pub fn do_softirq(&self) {
+        if self.cpu_running_count().get().load(Ordering::SeqCst) >= Self::MAX_RUNNING_PER_CPU {
+            // This CPU has hit the maximum softirq nesting depth; do not run
+            return;
+        }
+        // Create a RunningCountGuard; when it goes out of scope it decrements cpu_running_count automatically
+        let _count_guard = RunningCountGuard::new(self.cpu_running_count());
+
         // TODO: the pcb flags are not updated yet
-        // todo: should we bound this function's nesting depth on the current CPU? (to prevent stack overflow)
         let end = clock() + 500 * 2;
         let cpu_id = smp_get_processor_id();
         let mut max_restart = MAX_SOFTIRQ_RESTART;
@@ -180,7 +204,7 @@ impl Softirq {
                         continue;
                     }
 
-                    let table_guard = self.table.read();
+                    let table_guard = self.table.read_irqsave();
                     let softirq_func = table_guard[i as usize].clone();
                     drop(table_guard);
                     if softirq_func.is_none() {
@@ -236,6 +260,27 @@ impl Softirq {
     }
 }
 
+/// Counter guard for this CPU's softirq nesting depth
+///
+/// Entering the scope increments cpu_running_count by 1;
+/// leaving the scope decrements it by 1
+struct RunningCountGuard<'a> {
+    cpu_running_count: &'a PerCpuVar<AtomicI16>,
+}
+
+impl<'a> RunningCountGuard<'a> {
+    fn new(cpu_running_count: &'a PerCpuVar<AtomicI16>) -> RunningCountGuard {
+        cpu_running_count.get().fetch_add(1, Ordering::SeqCst);
+        return RunningCountGuard { cpu_running_count };
+    }
+}
+
+impl<'a> Drop for RunningCountGuard<'a> {
+    fn drop(&mut self) {
+        self.cpu_running_count.get().fetch_sub(1, Ordering::SeqCst);
+    }
+}
+
 // ======= Interfaces exposed to C =======
 #[no_mangle]
 pub extern "C" fn rs_raise_softirq(softirq_num: u32) {
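
The nesting cap replaces the old "todo" about bounding this function's recursion depth to prevent stack overflow. A self-contained sketch of the same RAII idea:

```rust
use std::sync::atomic::{AtomicI16, Ordering};

const MAX_RUNNING_PER_CPU: i16 = 3;

// Increments the counter on construction and decrements it on drop,
// so every exit path out of do_softirq restores the count.
struct RunningCountGuard<'a>(&'a AtomicI16);

impl<'a> RunningCountGuard<'a> {
    fn new(count: &'a AtomicI16) -> Self {
        count.fetch_add(1, Ordering::SeqCst);
        Self(count)
    }
}

impl Drop for RunningCountGuard<'_> {
    fn drop(&mut self) {
        self.0.fetch_sub(1, Ordering::SeqCst);
    }
}

fn do_softirq(count: &AtomicI16) {
    if count.load(Ordering::SeqCst) >= MAX_RUNNING_PER_CPU {
        return; // nested too deep on this CPU; refuse to recurse further
    }
    let _guard = RunningCountGuard::new(count);
    // ... dispatch pending softirq handlers here ...
}

fn main() {
    let count = AtomicI16::new(0);
    do_softirq(&count);
    assert_eq!(count.load(Ordering::SeqCst), 0); // guard restored the count
}
```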

+ 16 - 16
kernel/src/exception/trap.c

@@ -316,27 +316,27 @@ void sys_vector_init()
     for (int i = 0; i < 256; ++i)
         set_intr_gate(i, 0, ignore_int);
 
-    set_trap_gate(0, 0, divide_error);
-    set_trap_gate(1, 0, debug);
+    set_intr_gate(0, 0, divide_error);
+    set_intr_gate(1, 0, debug);
     set_intr_gate(2, 0, nmi);
     set_system_trap_gate(3, 0, int3);
     set_system_trap_gate(4, 0, overflow);
     set_system_trap_gate(5, 0, bounds);
-    set_trap_gate(6, 0, undefined_opcode);
-    set_trap_gate(7, 0, dev_not_avaliable);
-    set_trap_gate(8, 0, double_fault);
-    set_trap_gate(9, 0, coprocessor_segment_overrun);
-    set_trap_gate(10, 0, invalid_TSS);
-    set_trap_gate(11, 0, segment_not_exists);
-    set_trap_gate(12, 0, stack_segment_fault);
-    set_trap_gate(13, 0, general_protection);
-    set_trap_gate(14, 0, page_fault);
+    set_intr_gate(6, 0, undefined_opcode);
+    set_intr_gate(7, 0, dev_not_avaliable);
+    set_intr_gate(8, 0, double_fault);
+    set_intr_gate(9, 0, coprocessor_segment_overrun);
+    set_intr_gate(10, 0, invalid_TSS);
+    set_intr_gate(11, 0, segment_not_exists);
+    set_intr_gate(12, 0, stack_segment_fault);
+    set_intr_gate(13, 0, general_protection);
+    set_intr_gate(14, 0, page_fault);
     // Vector 15 is reserved by Intel and must not be used
-    set_trap_gate(16, 0, x87_FPU_error);
-    set_trap_gate(17, 0, alignment_check);
-    set_trap_gate(18, 0, machine_check);
-    set_trap_gate(19, 0, SIMD_exception);
-    set_trap_gate(20, 0, virtualization_exception);
+    set_intr_gate(16, 0, x87_FPU_error);
+    set_intr_gate(17, 0, alignment_check);
+    set_intr_gate(18, 0, machine_check);
+    set_intr_gate(19, 0, SIMD_exception);
+    set_intr_gate(20, 0, virtualization_exception);
     // Vectors 21-31 are reserved by Intel and must not be used
 
     // Vectors 32-255 are for user-defined interrupts

+ 1 - 3
kernel/src/filesystem/procfs/mod.rs

@@ -146,7 +146,7 @@ impl ProcFSInode {
         );
 
         let sched_info_guard = pcb.sched_info();
-        let state = sched_info_guard.state();
+        let state = sched_info_guard.inner_lock_read_irqsave().state();
         let cpu_id = sched_info_guard
             .on_cpu()
             .map(|cpu| cpu as i32)
@@ -155,8 +155,6 @@ impl ProcFSInode {
         let priority = sched_info_guard.priority();
         let vrtime = sched_info_guard.virtual_runtime();
 
-        drop(sched_info_guard);
-
         pdata.append(&mut format!("\nState:\t{:?}", state).as_bytes().to_owned());
         pdata.append(
             &mut format!("\nPid:\t{}", pcb.pid().into())

+ 5 - 7
kernel/src/ipc/signal.rs

@@ -201,10 +201,8 @@ impl Signal {
         if *self == Signal::SIGKILL {
             return true;
         }
-
-        if pcb.sched_info().state().is_blocked()
-            && (pcb.sched_info().state().is_blocked_interruptable() == false)
-        {
+        let state = pcb.sched_info().inner_lock_read_irqsave().state();
+        if state.is_blocked() && (state.is_blocked_interruptable() == false) {
             return false;
         }
 
@@ -287,7 +285,7 @@ fn signal_wake_up(pcb: Arc<ProcessControlBlock>, _guard: SpinLockGuard<SignalStr
     // If the signal is not fatal, only wake stopped processes to respond
     // kdebug!("signal_wake_up");
     // If the target process is already running, send an IPI to trap it into the kernel
-    let state = pcb.sched_info().state();
+    let state = pcb.sched_info().inner_lock_read_irqsave().state();
     let mut wakeup_ok = true;
     if state.is_blocked_interruptable() {
         ProcessManager::wakeup(&pcb).unwrap_or_else(|e| {
@@ -337,7 +335,7 @@ fn recalc_sigpending() {
 pub fn flush_signal_handlers(pcb: Arc<ProcessControlBlock>, force_default: bool) {
     compiler_fence(core::sync::atomic::Ordering::SeqCst);
     // kdebug!("hand=0x{:018x}", hand as *const sighand_struct as usize);
-    let actions = &mut pcb.sig_struct().handlers;
+    let actions = &mut pcb.sig_struct_irqsave().handlers;
 
     for sigaction in actions.iter_mut() {
         if force_default || !sigaction.is_ignore() {
@@ -436,7 +434,7 @@ pub fn set_current_sig_blocked(new_set: &mut SigSet) {
         return;
     }
 
-    let guard = pcb.sig_struct_irq();
+    let guard = pcb.sig_struct_irqsave();
    // todo: once a process can have multiple threads, set each thread's blocked field here and call retarget_shared_pending (though I haven't figured out what that part of Linux does yet)
 
    // Set the current process's sig blocked

+ 36 - 3
kernel/src/ipc/signal_types.rs

@@ -1,6 +1,11 @@
-use core::{ffi::c_void, mem::size_of, sync::atomic::AtomicI64};
+use core::{
+    ffi::c_void,
+    mem::size_of,
+    ops::{Deref, DerefMut},
+    sync::atomic::AtomicI64,
+};
 
-use alloc::vec::Vec;
+use alloc::{boxed::Box, vec::Vec};
 use system_error::SystemError;
 
 use crate::{
@@ -55,13 +60,41 @@ pub const SIG_KERNEL_IGNORE_MASK: SigSet = Signal::into_sigset(Signal::SIGCONT)
 /// SignalStruct is locked within the pcb
 #[derive(Debug)]
 pub struct SignalStruct {
+    inner: Box<InnerSignalStruct>,
+}
+
+#[derive(Debug)]
+pub struct InnerSignalStruct {
     pub cnt: AtomicI64,
     /// The Linux equivalent carries a reference count here, but no place was found
     /// that actually needs one, so it is dropped for now; otherwise the Arc would make other code quite ugly
     pub handlers: [Sigaction; MAX_SIG_NUM as usize],
 }
 
-impl Default for SignalStruct {
+impl SignalStruct {
+    #[inline(never)]
+    pub fn new() -> Self {
+        Self {
+            inner: Box::new(InnerSignalStruct::default()),
+        }
+    }
+}
+
+impl Deref for SignalStruct {
+    type Target = InnerSignalStruct;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+impl DerefMut for SignalStruct {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+
+impl Default for InnerSignalStruct {
     fn default() -> Self {
         Self {
             cnt: Default::default(),
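
This is the fix for the kernel-stack overflow during pcb creation named in the commit message: the Sigaction table used to live inline in SignalStruct, so every on-stack temporary carried the whole array; boxing moves it to the heap while Deref/DerefMut keep every existing field access compiling unchanged. A standalone sketch (sizes and names are assumed stand-ins):

```rust
use std::ops::{Deref, DerefMut};

#[derive(Clone, Copy)]
struct Sigaction; // stand-in for the real, much larger type
const MAX_SIG_NUM: usize = 64; // assumed value

struct InnerSignalStruct {
    handlers: [Sigaction; MAX_SIG_NUM],
}

impl Default for InnerSignalStruct {
    fn default() -> Self {
        Self { handlers: [Sigaction; MAX_SIG_NUM] }
    }
}

// The outer type is now pointer-sized; the big table lives on the heap.
struct SignalStruct {
    inner: Box<InnerSignalStruct>,
}

impl SignalStruct {
    #[inline(never)] // keep construction out of callers' stack frames
    fn new() -> Self {
        Self { inner: Box::new(InnerSignalStruct::default()) }
    }
}

impl Deref for SignalStruct {
    type Target = InnerSignalStruct;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl DerefMut for SignalStruct {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}

fn main() {
    let s = SignalStruct::new();
    // Call sites still write `s.handlers` as if the field were inline.
    assert_eq!(s.handlers.len(), MAX_SIG_NUM);
}
```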

+ 12 - 0
kernel/src/libs/rwlock.rs

@@ -164,6 +164,7 @@ impl<T> RwLock<T> {
        } // busy-wait
     }
 
+    /// Disable interrupts and acquire a read guard
     pub fn read_irqsave(&self) -> RwLockReadGuard<T> {
         loop {
             let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
@@ -177,6 +178,17 @@ impl<T> RwLock<T> {
         }
     }
 
+    /// Try to acquire a read guard with interrupts disabled
+    pub fn try_read_irqsave(&self) -> Option<RwLockReadGuard<T>> {
+        let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
+        if let Some(mut guard) = self.try_read() {
+            guard.irq_guard = Some(irq_guard);
+            return Some(guard);
+        } else {
+            return None;
+        }
+    }
+
     #[allow(dead_code)]
     #[inline]
    /// @brief Get the number of readers + UPGRADER; the value is not guaranteed to be synchronized
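
Unlike read_irqsave, the try variant makes a single attempt and hands the failure back to the caller; the bounded-retry helpers added to ProcessSchedulerInfo below are built on it. The intended call shape, as a sketch against the kernel's own RwLock API:

```rust
// Sketch: bounded retries instead of spinning while a lock is contended.
fn read_with_retries<'a, T>(lock: &'a RwLock<T>, times: u8) -> Option<RwLockReadGuard<'a, T>> {
    for _ in 0..times {
        if let Some(guard) = lock.try_read_irqsave() {
            return Some(guard);
        }
    }
    None // caller decides what a missed read means (e.g. skip this tick)
}
```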

+ 18 - 6
kernel/src/libs/wait_queue.rs

@@ -184,18 +184,27 @@ impl WaitQueue {
     /// @return true a process was woken
     /// @return false no process was woken
     pub fn wakeup(&self, state: Option<ProcessState>) -> bool {
-        let mut guard: SpinLockGuard<InnerWaitQueue> = self.0.lock();
+        let mut guard: SpinLockGuard<InnerWaitQueue> = self.0.lock_irqsave();
        // If the queue is empty, return
         if guard.wait_list.is_empty() {
             return false;
         }
        // If the state of the pcb at the head of the queue matches the given state, wake it
         if let Some(state) = state {
-            if guard.wait_list.front().unwrap().sched_info().state() != state {
+            if guard
+                .wait_list
+                .front()
+                .unwrap()
+                .sched_info()
+                .inner_lock_read_irqsave()
+                .state()
+                != state
+            {
                 return false;
             }
         }
         let to_wakeup = guard.wait_list.pop_front().unwrap();
+        drop(guard);
         let res = ProcessManager::wakeup(&to_wakeup).is_ok();
         return res;
     }
@@ -215,7 +224,7 @@ impl WaitQueue {
         while let Some(to_wakeup) = guard.wait_list.pop_front() {
             let mut wake = false;
             if let Some(state) = state {
-                if to_wakeup.sched_info().state() == state {
+                if to_wakeup.sched_info().inner_lock_read_irqsave().state() == state {
                     wake = true;
                 }
             } else {
@@ -302,7 +311,7 @@ impl EventWaitQueue {
 
     pub fn sleep_unlock_spinlock<T>(&self, events: u64, to_unlock: SpinLockGuard<T>) {
         before_sleep_check(1);
-        let mut guard = self.wait_list.lock();
+        let mut guard = self.wait_list.lock_irqsave();
         let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
         ProcessManager::mark_sleep(true).unwrap_or_else(|e| {
             panic!("sleep error: {:?}", e);
@@ -322,7 +331,9 @@ impl EventWaitQueue {
     /// Note that a process is woken as soon as any one of the events in `events` fires
     pub fn wakeup_any(&self, events: u64) -> usize {
         let mut ret = 0;
-        let _ = self.wait_list.lock().extract_if(|(es, pcb)| {
+
+        let mut wq_guard = self.wait_list.lock_irqsave();
+        wq_guard.retain(|(es, pcb)| {
             if *es & events > 0 {
                // there is an event of interest
                 if ProcessManager::wakeup(pcb).is_ok() {
@@ -346,7 +357,8 @@ impl EventWaitQueue {
     /// Note that only processes whose entire event set is satisfied are woken
     pub fn wakeup(&self, events: u64) -> usize {
         let mut ret = 0;
-        let _ = self.wait_list.lock().extract_if(|(es, pcb)| {
+        let mut wq_guard = self.wait_list.lock_irqsave();
+        wq_guard.retain(|(es, pcb)| {
             if *es == events {
                // there is an event of interest
                 if ProcessManager::wakeup(pcb).is_ok() {
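
Two lockdep-relevant details in this file: wakeup now takes the queue lock with lock_irqsave, and it drops the guard before calling ProcessManager::wakeup, so the wait-queue lock is never held while the scheduler's locks are being taken. The shape of that fix, as a sketch (the accessor is assumed; in the kernel it is self.0.lock_irqsave()):

```rust
// Sketch: pop the target under the queue lock, wake it after releasing.
fn wakeup_sketch(queue: &WaitQueue) -> bool {
    let to_wakeup = {
        let mut guard = queue.0.lock_irqsave();
        guard.wait_list.pop_front()
    }; // queue lock released here
    match to_wakeup {
        Some(pcb) => ProcessManager::wakeup(&pcb).is_ok(),
        None => false,
    }
}
```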

+ 4 - 2
kernel/src/net/mod.rs

@@ -30,11 +30,13 @@ pub mod socket;
 pub mod syscall;
 
 lazy_static! {
-    /// @brief List of all network interfaces
+    /// List of all network interfaces
+    ///
+    /// This list is also used in interrupt context, so irqsave is required
     pub static ref NET_DRIVERS: RwLock<BTreeMap<usize, Arc<dyn NetDriver>>> = RwLock::new(BTreeMap::new());
 }
 
-/// @brief Generate a network interface id (globally auto-incrementing)
+/// Generate a network interface id (globally auto-incrementing)
 pub fn generate_iface_id() -> usize {
     static IFACE_ID: AtomicUsize = AtomicUsize::new(0);
     return IFACE_ID

+ 14 - 13
kernel/src/net/net_core.rs

@@ -41,7 +41,7 @@ pub fn net_init() -> Result<(), SystemError> {
 }
 
 fn dhcp_query() -> Result<(), SystemError> {
-    let binding = NET_DRIVERS.write();
+    let binding = NET_DRIVERS.write_irqsave();
 
     let net_face = binding.get(&0).ok_or(SystemError::ENODEV)?.clone();
 
@@ -56,13 +56,13 @@ fn dhcp_query() -> Result<(), SystemError> {
     // IMPORTANT: This should be removed in production.
     dhcp_socket.set_max_lease_duration(Some(smoltcp::time::Duration::from_secs(10)));
 
-    let dhcp_handle = SOCKET_SET.lock().add(dhcp_socket);
+    let dhcp_handle = SOCKET_SET.lock_irqsave().add(dhcp_socket);
 
     const DHCP_TRY_ROUND: u8 = 10;
     for i in 0..DHCP_TRY_ROUND {
         kdebug!("DHCP try round: {}", i);
-        net_face.poll(&mut SOCKET_SET.lock()).ok();
-        let mut binding = SOCKET_SET.lock();
+        net_face.poll(&mut SOCKET_SET.lock_irqsave()).ok();
+        let mut binding = SOCKET_SET.lock_irqsave();
         let event = binding.get_mut::<dhcpv4::Socket>(dhcp_handle).poll();
 
         match event {
@@ -120,12 +120,12 @@ fn dhcp_query() -> Result<(), SystemError> {
 }
 
 pub fn poll_ifaces() {
-    let guard: RwLockReadGuard<BTreeMap<usize, Arc<dyn NetDriver>>> = NET_DRIVERS.read();
+    let guard: RwLockReadGuard<BTreeMap<usize, Arc<dyn NetDriver>>> = NET_DRIVERS.read_irqsave();
     if guard.len() == 0 {
         kwarn!("poll_ifaces: No net driver found!");
         return;
     }
-    let mut sockets = SOCKET_SET.lock();
+    let mut sockets = SOCKET_SET.lock_irqsave();
     for (_, iface) in guard.iter() {
         iface.poll(&mut sockets).ok();
     }
@@ -140,13 +140,14 @@ pub fn poll_ifaces() {
 pub fn poll_ifaces_try_lock(times: u16) -> Result<(), SystemError> {
     let mut i = 0;
     while i < times {
-        let guard: RwLockReadGuard<BTreeMap<usize, Arc<dyn NetDriver>>> = NET_DRIVERS.read();
+        let guard: RwLockReadGuard<BTreeMap<usize, Arc<dyn NetDriver>>> =
+            NET_DRIVERS.read_irqsave();
         if guard.len() == 0 {
             kwarn!("poll_ifaces: No net driver found!");
            // No NIC; return an error
             return Err(SystemError::ENODEV);
         }
-        let sockets = SOCKET_SET.try_lock();
+        let sockets = SOCKET_SET.try_lock_irqsave();
        // Locking failed; keep trying
         if sockets.is_err() {
             i += 1;
@@ -171,13 +172,13 @@ pub fn poll_ifaces_try_lock(times: u16) -> Result<(), SystemError> {
 /// @return lock timed out: returns SystemError::EAGAIN_OR_EWOULDBLOCK
 /// @return no NIC: returns SystemError::ENODEV
 pub fn poll_ifaces_try_lock_onetime() -> Result<(), SystemError> {
-    let guard: RwLockReadGuard<BTreeMap<usize, Arc<dyn NetDriver>>> = NET_DRIVERS.read();
+    let guard: RwLockReadGuard<BTreeMap<usize, Arc<dyn NetDriver>>> = NET_DRIVERS.read_irqsave();
     if guard.len() == 0 {
         kwarn!("poll_ifaces: No net driver found!");
        // No NIC; return an error
         return Err(SystemError::ENODEV);
     }
-    let mut sockets = SOCKET_SET.try_lock()?;
+    let mut sockets = SOCKET_SET.try_lock_irqsave()?;
     for (_, iface) in guard.iter() {
         iface.poll(&mut sockets).ok();
     }
@@ -241,7 +242,7 @@ fn send_event(sockets: &smoltcp::iface::SocketSet) -> Result<(), SystemError> {
 fn wakeup_epoll(handle: SocketHandle, events: u32) -> Result<(), SystemError> {
     let mut handle_guard = HANDLE_MAP.write_irqsave();
     let handle_item = handle_guard.get_mut(&handle).unwrap();
-    let mut epitems_guard = handle_item.epitems.try_lock()?;
+    let mut epitems_guard = handle_item.epitems.try_lock_irqsave()?;
 
    // Extract the epoll-related events from events
     let pollflags = EPollEventType::from_bits_truncate(events);
@@ -249,9 +250,9 @@ fn wakeup_epoll(handle: SocketHandle, events: u32) -> Result<(), SystemError> {
    // Take only one at a time, since only one process at a time can hold the file's lock
     if let Some(epitem) = epitems_guard.pop_front() {
         let epoll = epitem.epoll().upgrade().unwrap();
-        let mut epoll_guard = epoll.try_lock()?;
+        let mut epoll_guard = epoll.try_lock_irqsave()?;
         let binding = epitem.clone();
-        let event_guard = binding.event().read();
+        let event_guard = binding.event().read_irqsave();
         let ep_events = EPollEventType::from_bits_truncate(event_guard.events());
 
        // Check that the events are valid and whether any are of interest
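
Every conversion in this file and in socket.rs below follows one rule: SOCKET_SET and NET_DRIVERS are also touched from the NIC interrupt path, so the syscall side must disable interrupts before taking the lock, or an interrupt arriving while the lock is held deadlocks the CPU against itself. Schematically (a sketch of the failure the _irqsave variants close off):

```rust
// Without irqsave:
//   task:       let g = SOCKET_SET.lock();  // IRQs still enabled
//   NIC IRQ:    SOCKET_SET.lock();          // spins forever: the holder
//                                           // is this very CPU
// With irqsave, the window between "IRQs off" and "lock held" is closed:
let mut sockets = SOCKET_SET.lock_irqsave(); // save+disable IRQs, then lock
iface.poll(&mut sockets).ok();
// dropping the guard releases the lock, then restores the saved IRQ state
```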

+ 24 - 24
kernel/src/net/socket.rs

@@ -99,7 +99,7 @@ impl SocketHandleItem {
     }
 
     pub fn shutdown_type_writer(&mut self) -> RwLockWriteGuard<ShutdownType> {
-        self.shutdown_type.write()
+        self.shutdown_type.write_irqsave()
     }
 
     pub fn add_epoll(&mut self, epitem: Arc<EPollItem>) {
@@ -238,7 +238,7 @@ impl Clone for GlobalSocketHandle {
 
 impl Drop for GlobalSocketHandle {
     fn drop(&mut self) {
-        let mut socket_set_guard = SOCKET_SET.lock();
+        let mut socket_set_guard = SOCKET_SET.lock_irqsave();
        socket_set_guard.remove(self.0); // does removal send a FINISH message?
         drop(socket_set_guard);
         poll_ifaces();
@@ -353,7 +353,7 @@ impl RawSocket {
 
        // Add the socket to the socket set and obtain its handle
         let handle: Arc<GlobalSocketHandle> =
-            GlobalSocketHandle::new(SOCKET_SET.lock().add(socket));
+            GlobalSocketHandle::new(SOCKET_SET.lock_irqsave().add(socket));
 
         let metadata = SocketMetadata::new(
             SocketType::RawSocket,
@@ -376,7 +376,7 @@ impl Socket for RawSocket {
         poll_ifaces();
         loop {
            // How could this be optimized?
-            let mut socket_set_guard = SOCKET_SET.lock();
+            let mut socket_set_guard = SOCKET_SET.lock_irqsave();
             let socket = socket_set_guard.get_mut::<raw::Socket>(self.handle.0);
 
             match socket.recv_slice(buf) {
@@ -409,7 +409,7 @@ impl Socket for RawSocket {
     fn write(&self, buf: &[u8], to: Option<super::Endpoint>) -> Result<usize, SystemError> {
        // If the user's packet already includes an IP header, send it as-is
         if self.header_included {
-            let mut socket_set_guard = SOCKET_SET.lock();
+            let mut socket_set_guard = SOCKET_SET.lock_irqsave();
             let socket = socket_set_guard.get_mut::<raw::Socket>(self.handle.0);
             match socket.send_slice(buf) {
                 Ok(_len) => {
@@ -423,12 +423,12 @@ impl Socket for RawSocket {
            // If the user's packet has no IP header, we must build one ourselves
 
             if let Some(Endpoint::Ip(Some(endpoint))) = to {
-                let mut socket_set_guard = SOCKET_SET.lock();
+                let mut socket_set_guard = SOCKET_SET.lock_irqsave();
                 let socket: &mut raw::Socket =
                     socket_set_guard.get_mut::<raw::Socket>(self.handle.0);
 
                // Brute-force approach: only NIC 0 is considered. TODO: handle multiple NICs!!!
-                let iface = NET_DRIVERS.read().get(&0).unwrap().clone();
+                let iface = NET_DRIVERS.read_irqsave().get(&0).unwrap().clone();
 
                // Build the IP header
                 let ipv4_src_addr: Option<smoltcp::wire::Ipv4Address> =
@@ -535,7 +535,7 @@ impl UdpSocket {
 
        // Add the socket to the socket set and obtain its handle
         let handle: Arc<GlobalSocketHandle> =
-            GlobalSocketHandle::new(SOCKET_SET.lock().add(socket));
+            GlobalSocketHandle::new(SOCKET_SET.lock_irqsave().add(socket));
 
         let metadata = SocketMetadata::new(
             SocketType::UdpSocket,
@@ -579,7 +579,7 @@ impl Socket for UdpSocket {
         loop {
             // kdebug!("Wait22 to Read");
             poll_ifaces();
-            let mut socket_set_guard = SOCKET_SET.lock();
+            let mut socket_set_guard = SOCKET_SET.lock_irqsave();
             let socket = socket_set_guard.get_mut::<udp::Socket>(self.handle.0);
 
             // kdebug!("Wait to Read");
@@ -616,7 +616,7 @@ impl Socket for UdpSocket {
         };
         // kdebug!("udp write: remote = {:?}", remote_endpoint);
 
-        let mut socket_set_guard = SOCKET_SET.lock();
+        let mut socket_set_guard = SOCKET_SET.lock_irqsave();
         let socket = socket_set_guard.get_mut::<udp::Socket>(self.handle.0);
         // kdebug!("is open()={}", socket.is_open());
         // kdebug!("socket endpoint={:?}", socket.endpoint());
@@ -660,14 +660,14 @@ impl Socket for UdpSocket {
     }
 
     fn bind(&mut self, endpoint: Endpoint) -> Result<(), SystemError> {
-        let mut sockets = SOCKET_SET.lock();
+        let mut sockets = SOCKET_SET.lock_irqsave();
         let socket = sockets.get_mut::<udp::Socket>(self.handle.0);
         // kdebug!("UDP Bind to {:?}", endpoint);
         return self.do_bind(socket, endpoint);
     }
 
     fn poll(&self) -> EPollEventType {
-        let sockets = SOCKET_SET.lock();
+        let sockets = SOCKET_SET.lock_irqsave();
         let socket = sockets.get::<udp::Socket>(self.handle.0);
 
         return SocketPollMethod::udp_poll(
@@ -708,7 +708,7 @@ impl Socket for UdpSocket {
     }
 
     fn endpoint(&self) -> Option<Endpoint> {
-        let sockets = SOCKET_SET.lock();
+        let sockets = SOCKET_SET.lock_irqsave();
         let socket = sockets.get::<udp::Socket>(self.handle.0);
         let listen_endpoint = socket.endpoint();
 
@@ -773,7 +773,7 @@ impl TcpSocket {
 
        // Add the socket to the socket set and obtain its handle
         let handle: Arc<GlobalSocketHandle> =
-            GlobalSocketHandle::new(SOCKET_SET.lock().add(socket));
+            GlobalSocketHandle::new(SOCKET_SET.lock_irqsave().add(socket));
 
         let metadata = SocketMetadata::new(
             SocketType::TcpSocket,
@@ -833,7 +833,7 @@ impl Socket for TcpSocket {
 
         loop {
             poll_ifaces();
-            let mut socket_set_guard = SOCKET_SET.lock();
+            let mut socket_set_guard = SOCKET_SET.lock_irqsave();
             let socket = socket_set_guard.get_mut::<tcp::Socket>(self.handle.0);
 
            // If the socket is already closed, return an error
@@ -898,7 +898,7 @@ impl Socket for TcpSocket {
         {
             return Err(SystemError::ENOTCONN);
         }
-        let mut socket_set_guard = SOCKET_SET.lock();
+        let mut socket_set_guard = SOCKET_SET.lock_irqsave();
         let socket = socket_set_guard.get_mut::<tcp::Socket>(self.handle.0);
 
         if socket.is_open() {
@@ -923,7 +923,7 @@ impl Socket for TcpSocket {
     }
 
     fn poll(&self) -> EPollEventType {
-        let mut socket_set_guard = SOCKET_SET.lock();
+        let mut socket_set_guard = SOCKET_SET.lock_irqsave();
         let socket = socket_set_guard.get_mut::<tcp::Socket>(self.handle.0);
 
         return SocketPollMethod::tcp_poll(
@@ -937,7 +937,7 @@ impl Socket for TcpSocket {
     }
 
     fn connect(&mut self, endpoint: Endpoint) -> Result<(), SystemError> {
-        let mut sockets = SOCKET_SET.lock();
+        let mut sockets = SOCKET_SET.lock_irqsave();
         let socket = sockets.get_mut::<tcp::Socket>(self.handle.0);
 
         if let Endpoint::Ip(Some(ip)) = endpoint {
@@ -946,7 +946,7 @@ impl Socket for TcpSocket {
             PORT_MANAGER.bind_port(self.metadata.socket_type, temp_port, self.handle.clone())?;
 
             // kdebug!("temp_port: {}", temp_port);
-            let iface: Arc<dyn NetDriver> = NET_DRIVERS.write().get(&0).unwrap().clone();
+            let iface: Arc<dyn NetDriver> = NET_DRIVERS.write_irqsave().get(&0).unwrap().clone();
             let mut inner_iface = iface.inner_iface().lock();
             // kdebug!("to connect: {ip:?}");
 
@@ -958,7 +958,7 @@ impl Socket for TcpSocket {
                     drop(sockets);
                     loop {
                         poll_ifaces();
-                        let mut sockets = SOCKET_SET.lock();
+                        let mut sockets = SOCKET_SET.lock_irqsave();
                         let socket = sockets.get_mut::<tcp::Socket>(self.handle.0);
 
                         match socket.state() {
@@ -1001,7 +1001,7 @@ impl Socket for TcpSocket {
         }
 
         let local_endpoint = self.local_endpoint.ok_or(SystemError::EINVAL)?;
-        let mut sockets = SOCKET_SET.lock();
+        let mut sockets = SOCKET_SET.lock_irqsave();
         let socket = sockets.get_mut::<tcp::Socket>(self.handle.0);
 
         if socket.is_listening() {
@@ -1044,7 +1044,7 @@ impl Socket for TcpSocket {
             // kdebug!("tcp accept: poll_ifaces()");
             poll_ifaces();
 
-            let mut sockets = SOCKET_SET.lock();
+            let mut sockets = SOCKET_SET.lock_irqsave();
 
             let socket = sockets.get_mut::<tcp::Socket>(self.handle.0);
 
@@ -1126,7 +1126,7 @@ impl Socket for TcpSocket {
             self.local_endpoint.clone().map(|x| Endpoint::Ip(Some(x)));
 
         if result.is_none() {
-            let sockets = SOCKET_SET.lock();
+            let sockets = SOCKET_SET.lock_irqsave();
             let socket = sockets.get::<tcp::Socket>(self.handle.0);
             if let Some(ep) = socket.local_endpoint() {
                 result = Some(Endpoint::Ip(Some(ep)));
@@ -1136,7 +1136,7 @@ impl Socket for TcpSocket {
     }
 
     fn peer_endpoint(&self) -> Option<Endpoint> {
-        let sockets = SOCKET_SET.lock();
+        let sockets = SOCKET_SET.lock_irqsave();
         let socket = sockets.get::<tcp::Socket>(self.handle.0);
         return socket.remote_endpoint().map(|x| Endpoint::Ip(Some(x)));
     }

+ 4 - 3
kernel/src/process/exit.rs

@@ -154,8 +154,9 @@ fn do_wait(kwo: &mut KernelWaitOption) -> Result<usize, SystemError> {
             let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
             for pid in rd_childen.iter() {
                 let pcb = ProcessManager::find(*pid).ok_or(SystemError::ECHILD)?;
-                if pcb.sched_info().state().is_exited() {
-                    kwo.ret_status = pcb.sched_info().state().exit_code().unwrap() as i32;
+                let state = pcb.sched_info().inner_lock_read_irqsave().state();
+                if state.is_exited() {
+                    kwo.ret_status = state.exit_code().unwrap() as i32;
                     drop(pcb);
                     unsafe { ProcessManager::release(pid.clone()) };
                     return Ok(pid.clone().into());
@@ -179,7 +180,7 @@ fn do_waitpid(
     child_pcb: Arc<ProcessControlBlock>,
     kwo: &mut KernelWaitOption,
 ) -> Option<Result<usize, SystemError>> {
-    let state = child_pcb.sched_info().state();
+    let state = child_pcb.sched_info().inner_lock_read_irqsave().state();
    // Get the exit code
     match state {
         ProcessState::Runnable => {

+ 8 - 6
kernel/src/process/fork.rs

@@ -159,6 +159,7 @@ impl ProcessManager {
         let new_kstack: KernelStack = KernelStack::new()?;
 
         let name = current_pcb.basic().name().to_string();
+
         let pcb = ProcessControlBlock::new(name, new_kstack);
 
         let mut args = KernelCloneArgs::new();
@@ -166,7 +167,6 @@ impl ProcessManager {
         args.exit_signal = Signal::SIGCHLD;
 
         Self::copy_process(&current_pcb, &pcb, args, current_trapframe)?;
-
         ProcessManager::add_pcb(pcb.clone());
 
         // 向procfs注册进程
@@ -232,7 +232,7 @@ impl ProcessManager {
             unsafe { new_pcb.basic_mut().set_user_vm(Some(old_address_space)) };
             return Ok(());
         }
-        let new_address_space = old_address_space.write().try_clone().unwrap_or_else(|e| {
+        let new_address_space = old_address_space.write_irqsave().try_clone().unwrap_or_else(|e| {
             panic!(
                 "copy_mm: Failed to clone address space of current process, current pid: [{:?}], new pid: [{:?}]. Error: {:?}",
                 current_pcb.pid(), new_pcb.pid(), e
@@ -242,6 +242,7 @@ impl ProcessManager {
         return Ok(());
     }
 
+    #[inline(never)]
     fn copy_files(
         clone_flags: &CloneFlags,
         current_pcb: &Arc<ProcessControlBlock>,
@@ -274,7 +275,8 @@ impl ProcessManager {
         }
 
         if clone_flags.contains(CloneFlags::CLONE_SIGHAND) {
-            (*new_pcb.sig_struct()).handlers = current_pcb.sig_struct().handlers.clone();
+            (*new_pcb.sig_struct_irqsave()).handlers =
+                current_pcb.sig_struct_irqsave().handlers.clone();
         }
         return Ok(());
     }
@@ -288,8 +290,8 @@ impl ProcessManager {
     /// ## Parameters
     ///
     /// - clone_flags the flag bits
-    /// - des_pcb the target pcb
-    /// - src_pcb the source pcb
+    /// - current_pcb the source pcb
+    /// - pcb the target pcb
     ///
     /// ## return
     /// - returns Err(SystemError) on failure
@@ -350,7 +352,7 @@ impl ProcessManager {
 
        // Clone the arch-specific info
         let guard = current_pcb.arch_info_irqsave();
-        pcb.arch_info().clone_from(&guard);
+        unsafe { pcb.arch_info().clone_from(&guard) };
         drop(guard);
 
        // Set WorkerPrivate for kernel threads

+ 1 - 2
kernel/src/process/kthread.rs

@@ -291,7 +291,6 @@ impl KernelThreadMechanism {
                 CloneFlags::CLONE_VM | CloneFlags::CLONE_FS | CloneFlags::CLONE_SIGNAL,
             )
             .expect("Failed to create kthread daemon");
-
             let pcb = ProcessManager::find(kthreadd_pid).unwrap();
             ProcessManager::wakeup(&pcb).expect("Failed to wakeup kthread daemon");
             unsafe {
@@ -379,7 +378,7 @@ impl KernelThreadMechanism {
        // Busy-wait for the target kernel thread to exit
        // todo: optimize this with a completion mechanism
         loop {
-            if let ProcessState::Exited(code) = pcb.sched_info().state() {
+            if let ProcessState::Exited(code) = pcb.sched_info().inner_lock_read_irqsave().state() {
                 return Ok(code);
             }
             spin_loop();

+ 97 - 81
kernel/src/process/mod.rs

@@ -112,7 +112,7 @@ impl ProcessManager {
             compiler_fence(Ordering::SeqCst);
         };
 
-        ALL_PROCESS.lock().replace(HashMap::new());
+        ALL_PROCESS.lock_irqsave().replace(HashMap::new());
         Self::arch_init();
         kdebug!("process arch init done.");
         Self::init_idle();
@@ -164,7 +164,7 @@ impl ProcessManager {
     ///
     /// If the matching process is found, returns its pcb; otherwise returns None
     pub fn find(pid: Pid) -> Option<Arc<ProcessControlBlock>> {
-        return ALL_PROCESS.lock().as_ref()?.get(&pid).cloned();
+        return ALL_PROCESS.lock_irqsave().as_ref()?.get(&pid).cloned();
     }
 
     /// Add a process's pcb to the system
@@ -178,7 +178,7 @@ impl ProcessManager {
     /// None
     pub fn add_pcb(pcb: Arc<ProcessControlBlock>) {
         ALL_PROCESS
-            .lock()
+            .lock_irqsave()
             .as_mut()
             .unwrap()
             .insert(pcb.pid(), pcb.clone());
@@ -187,9 +187,9 @@ impl ProcessManager {
     /// Wake up a process
     pub fn wakeup(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
         let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
-        let state = pcb.sched_info().state();
+        let state = pcb.sched_info().inner_lock_read_irqsave().state();
         if state.is_blocked() {
-            let mut writer: RwLockWriteGuard<'_, ProcessSchedulerInfo> = pcb.sched_info_mut();
+            let mut writer = pcb.sched_info().inner_lock_write_irqsave();
             let state = writer.state();
             if state.is_blocked() {
                 writer.set_state(ProcessState::Runnable);
@@ -213,9 +213,9 @@ impl ProcessManager {
     /// Wake up a stopped process
     pub fn wakeup_stop(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
         let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
-        let state = pcb.sched_info().state();
+        let state = pcb.sched_info().inner_lock_read_irqsave().state();
         if let ProcessState::Stopped = state {
-            let mut writer = pcb.sched_info_mut();
+            let mut writer = pcb.sched_info().inner_lock_write_irqsave();
             let state = writer.state();
             if let ProcessState::Stopped = state {
                 writer.set_state(ProcessState::Runnable);
@@ -251,7 +251,7 @@ impl ProcessManager {
         );
 
         let pcb = ProcessManager::current_pcb();
-        let mut writer = pcb.sched_info_mut_irqsave();
+        let mut writer = pcb.sched_info().inner_lock_write_irqsave();
         if !matches!(writer.state(), ProcessState::Exited(_)) {
             writer.set_state(ProcessState::Blocked(interruptable));
             pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
@@ -276,7 +276,7 @@ impl ProcessManager {
         );
 
         let pcb = ProcessManager::current_pcb();
-        let mut writer = pcb.sched_info_mut_irqsave();
+        let mut writer = pcb.sched_info().inner_lock_write_irqsave();
         if !matches!(writer.state(), ProcessState::Exited(_)) {
             writer.set_state(ProcessState::Stopped);
             pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
@@ -321,10 +321,10 @@ impl ProcessManager {
     /// - `exit_code` : the process's exit code
     pub fn exit(exit_code: usize) -> ! {
        // Disable interrupts
-        let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
+        unsafe { CurrentIrqArch::interrupt_disable() };
         let pcb = ProcessManager::current_pcb();
         pcb.sched_info
-            .write()
+            .inner_lock_write_irqsave()
             .set_state(ProcessState::Exited(exit_code));
         pcb.wait_queue.wakeup(Some(ProcessState::Blocked(true)));
 
@@ -350,7 +350,7 @@ impl ProcessManager {
         unsafe { pcb.basic_mut().set_user_vm(None) };
         drop(pcb);
         ProcessManager::exit_notify();
-        drop(irq_guard);
+        unsafe { CurrentIrqArch::interrupt_enable() };
 
         sched();
         loop {}
@@ -373,7 +373,7 @@ impl ProcessManager {
             //     panic!()
             // }
 
-            ALL_PROCESS.lock().as_mut().unwrap().remove(&pid);
+            ALL_PROCESS.lock_irqsave().as_mut().unwrap().remove(&pid);
         }
     }
 
@@ -544,7 +544,7 @@ pub struct ProcessControlBlock {
     syscall_stack: RwLock<KernelStack>,
 
     /// Scheduling-related info
-    sched_info: RwLock<ProcessSchedulerInfo>,
+    sched_info: ProcessSchedulerInfo,
     /// Architecture-specific info
     arch_info: SpinLock<ArchPCBInfo>,
     /// Signal-handling info (could perhaps be lock-free)
@@ -592,6 +592,7 @@ impl ProcessControlBlock {
         return Self::do_create_pcb(name, kstack, true);
     }
 
+    #[inline(never)]
     fn do_create_pcb(name: String, kstack: KernelStack, is_idle: bool) -> Arc<Self> {
         let (pid, ppid, cwd) = if is_idle {
             (Pid(0), Pid(0), "/".to_string())
@@ -624,7 +625,7 @@ impl ProcessControlBlock {
             sched_info,
             arch_info,
             sig_info: RwLock::new(ProcessSignalInfo::default()),
-            sig_struct: SpinLock::new(SignalStruct::default()),
+            sig_struct: SpinLock::new(SignalStruct::new()),
             exit_signal: AtomicSignal::new(Signal::SIGCHLD),
             parent_pcb: RwLock::new(ppcb.clone()),
             real_parent_pcb: RwLock::new(ppcb),
@@ -657,7 +658,7 @@ impl ProcessControlBlock {
         // Add the current pcb to the parent's children table
         if pcb.pid() > Pid(1) {
             if let Some(ppcb_arc) = pcb.parent_pcb.read().upgrade() {
-                let mut children = ppcb_arc.children.write();
+                let mut children = ppcb_arc.children.write_irqsave();
                 children.push(pcb.pid());
             } else {
                 panic!("parent pcb is None");
@@ -702,6 +703,8 @@ impl ProcessControlBlock {
         return self.flags.get_mut();
     }
 
+    /// Note: this value may be read from interrupt context, but must not be
+    /// modified there, or a deadlock will result
     #[inline(always)]
     pub fn basic(&self) -> RwLockReadGuard<ProcessBasicInfo> {
         return self.basic.read();
@@ -714,17 +717,26 @@ impl ProcessControlBlock {
 
     #[inline(always)]
     pub fn basic_mut(&self) -> RwLockWriteGuard<ProcessBasicInfo> {
-        return self.basic.write();
+        return self.basic.write_irqsave();
     }
 
+    /// # Take the arch-info lock with interrupts disabled
     #[inline(always)]
-    pub fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> {
-        return self.arch_info.lock();
+    pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> {
+        return self.arch_info.lock_irqsave();
     }
 
+    /// # Take the arch-info lock without disabling interrupts
+    ///
+    /// Since arch info is used during context switches, taking this lock
+    /// without irqsave outside of interrupt context is unsafe.
+    ///
+    /// This function may only be used when:
+    /// - in interrupt context (interrupts already disabled), or
+    /// - the pcb has just been created
     #[inline(always)]
-    pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> {
-        return self.arch_info.lock_irqsave();
+    pub unsafe fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> {
+        return self.arch_info.lock();
     }
 
     #[inline(always)]
@@ -739,48 +751,8 @@ impl ProcessControlBlock {
     }
 
     #[inline(always)]
-    pub fn sched_info(&self) -> RwLockReadGuard<ProcessSchedulerInfo> {
-        return self.sched_info.read();
-    }
-
-    #[inline(always)]
-    pub fn try_sched_info(&self, times: u8) -> Option<RwLockReadGuard<ProcessSchedulerInfo>> {
-        for _ in 0..times {
-            if let Some(r) = self.sched_info.try_read() {
-                return Some(r);
-            }
-        }
-
-        return None;
-    }
-
-    #[allow(dead_code)]
-    #[inline(always)]
-    pub fn sched_info_irqsave(&self) -> RwLockReadGuard<ProcessSchedulerInfo> {
-        return self.sched_info.read_irqsave();
-    }
-
-    #[inline(always)]
-    pub fn sched_info_try_upgradeable_irqsave(
-        &self,
-        times: u8,
-    ) -> Option<RwLockUpgradableGuard<ProcessSchedulerInfo>> {
-        for _ in 0..times {
-            if let Some(r) = self.sched_info.try_upgradeable_read_irqsave() {
-                return Some(r);
-            }
-        }
-        return None;
-    }
-
-    #[inline(always)]
-    pub fn sched_info_mut(&self) -> RwLockWriteGuard<ProcessSchedulerInfo> {
-        return self.sched_info.write();
-    }
-
-    #[inline(always)]
-    pub fn sched_info_mut_irqsave(&self) -> RwLockWriteGuard<ProcessSchedulerInfo> {
-        return self.sched_info.write_irqsave();
+    pub fn sched_info(&self) -> &ProcessSchedulerInfo {
+        return &self.sched_info;
     }
 
     #[inline(always)]
@@ -877,7 +849,7 @@ impl ProcessControlBlock {
     }
 
     pub fn sig_info_mut(&self) -> RwLockWriteGuard<ProcessSignalInfo> {
-        self.sig_info.write()
+        self.sig_info.write_irqsave()
     }
 
     pub fn try_siginfo_mut(&self, times: u8) -> Option<RwLockWriteGuard<ProcessSignalInfo>> {
@@ -904,7 +876,7 @@ impl ProcessControlBlock {
         return None;
     }
 
-    pub fn sig_struct_irq(&self) -> SpinLockGuard<SignalStruct> {
+    pub fn sig_struct_irqsave(&self) -> SpinLockGuard<SignalStruct> {
         self.sig_struct.lock_irqsave()
     }
 }
@@ -971,6 +943,7 @@ pub struct ProcessBasicInfo {
 }
 
 impl ProcessBasicInfo {
+    #[inline(never)]
     pub fn new(
         pgid: Pid,
         ppid: Pid,
@@ -1036,11 +1009,7 @@ pub struct ProcessSchedulerInfo {
     /// If the process is waiting to be migrated to another CPU core (i.e.
     /// PF_NEED_MIGRATE is set in flags), this field stores the target core number
     migrate_to: AtomicI32,
-
-    /// Current state of the process
-    state: ProcessState,
-    /// Scheduling policy of the process
-    sched_policy: SchedPolicy,
+    inner_locked: RwLock<InnerSchedInfo>,
     /// Scheduling priority of the process
     priority: SchedPriority,
     /// Virtual runtime of the process
@@ -1049,21 +1018,46 @@ pub struct ProcessSchedulerInfo {
     rt_time_slice: AtomicIsize,
 }
 
+#[derive(Debug)]
+pub struct InnerSchedInfo {
+    /// Current state of the process
+    state: ProcessState,
+    /// Scheduling policy of the process
+    sched_policy: SchedPolicy,
+}
+
+impl InnerSchedInfo {
+    pub fn state(&self) -> ProcessState {
+        return self.state;
+    }
+
+    pub fn set_state(&mut self, state: ProcessState) {
+        self.state = state;
+    }
+
+    pub fn policy(&self) -> SchedPolicy {
+        return self.sched_policy;
+    }
+}
+
 impl ProcessSchedulerInfo {
-    pub fn new(on_cpu: Option<u32>) -> RwLock<Self> {
+    #[inline(never)]
+    pub fn new(on_cpu: Option<u32>) -> Self {
         let cpu_id = match on_cpu {
             Some(cpu_id) => cpu_id as i32,
             None => -1,
         };
-        return RwLock::new(Self {
+        return Self {
             on_cpu: AtomicI32::new(cpu_id),
             migrate_to: AtomicI32::new(-1),
-            state: ProcessState::Blocked(false),
-            sched_policy: SchedPolicy::CFS,
+            inner_locked: RwLock::new(InnerSchedInfo {
+                state: ProcessState::Blocked(false),
+                sched_policy: SchedPolicy::CFS,
+            }),
             virtual_runtime: AtomicIsize::new(0),
             rt_time_slice: AtomicIsize::new(0),
             priority: SchedPriority::new(100).unwrap(),
-        });
+        };
     }
 
     pub fn on_cpu(&self) -> Option<u32> {
@@ -1100,16 +1094,38 @@ impl ProcessSchedulerInfo {
         }
     }
 
-    pub fn state(&self) -> ProcessState {
-        return self.state;
+    pub fn inner_lock_write_irqsave(&self) -> RwLockWriteGuard<InnerSchedInfo> {
+        return self.inner_locked.write_irqsave();
     }
 
-    pub fn set_state(&mut self, state: ProcessState) {
-        self.state = state;
+    pub fn inner_lock_read_irqsave(&self) -> RwLockReadGuard<InnerSchedInfo> {
+        return self.inner_locked.read_irqsave();
     }
 
-    pub fn policy(&self) -> SchedPolicy {
-        return self.sched_policy;
+    pub fn inner_lock_try_read_irqsave(
+        &self,
+        times: u8,
+    ) -> Option<RwLockReadGuard<InnerSchedInfo>> {
+        for _ in 0..times {
+            if let Some(r) = self.inner_locked.try_read_irqsave() {
+                return Some(r);
+            }
+        }
+
+        return None;
+    }
+
+    pub fn inner_lock_try_upgradable_read_irqsave(
+        &self,
+        times: u8,
+    ) -> Option<RwLockUpgradableGuard<InnerSchedInfo>> {
+        for _ in 0..times {
+            if let Some(r) = self.inner_locked.try_upgradeable_read_irqsave() {
+                return Some(r);
+            }
+        }
+
+        return None;
     }
 
     pub fn virtual_runtime(&self) -> isize {
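
This restructuring is the core of the sched lockdep fix: ProcessSchedulerInfo is no longer one big RwLock around everything; the atomic fields (on_cpu, migrate_to, vruntime, rt_time_slice) stay lock-free, and only state and sched_policy move into the inner RwLock, which is always taken through the *_irqsave accessors. After the change a typical call site looks like:

```rust
// Read: snapshot the state once, dropping the guard immediately.
let state = pcb.sched_info().inner_lock_read_irqsave().state();
if state.is_blocked() {
    // Write: re-take as a writer and re-check under the lock,
    // since the state may have changed between the two acquisitions.
    let mut writer = pcb.sched_info().inner_lock_write_irqsave();
    if writer.state().is_blocked() {
        writer.set_state(ProcessState::Runnable);
    }
}
```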

+ 11 - 11
kernel/src/sched/cfs.rs

@@ -9,7 +9,6 @@ use crate::{
     kBUG,
     libs::{
         rbtree::RBTree,
-        rwlock::RwLockReadGuard,
         spinlock::{SpinLock, SpinLockGuard},
     },
     process::{
@@ -150,16 +149,13 @@ impl SchedulerCFS {
     }
 
     /// @brief Called by sched's core module on each timer interrupt to update a CFS task's runnable time
-    pub fn timer_update_jiffies(
-        &mut self,
-        sched_info_guard: &RwLockReadGuard<'_, ProcessSchedulerInfo>,
-    ) {
+    pub fn timer_update_jiffies(&mut self, sched_info: &ProcessSchedulerInfo) {
         let current_cpu_queue: &mut CFSQueue = self.cpu_queue[smp_get_processor_id() as usize];
        // todo: factor in the scheduling period and all tasks' priorities, then set the task's runnable time
 
         let mut queue = None;
         for _ in 0..10 {
-            if let Ok(q) = current_cpu_queue.locked_queue.try_lock() {
+            if let Ok(q) = current_cpu_queue.locked_queue.try_lock_irqsave() {
                 queue = Some(q);
                 break;
             }
@@ -179,13 +175,13 @@ impl SchedulerCFS {
         drop(queue);
 
        // Update the current task's virtual runtime
-        sched_info_guard.increase_virtual_runtime(1);
+        sched_info.increase_virtual_runtime(1);
     }
 
     /// @brief Enqueue the task on the CPU's CFS run queue and reset its vruntime to the current queue minimum
     pub fn enqueue_reset_vruntime(&mut self, pcb: Arc<ProcessControlBlock>) {
         let cpu_queue = &mut self.cpu_queue[pcb.sched_info().on_cpu().unwrap() as usize];
-        let queue = cpu_queue.locked_queue.lock();
+        let queue = cpu_queue.locked_queue.lock_irqsave();
         if queue.len() > 0 {
             pcb.sched_info()
                 .set_virtual_runtime(CFSQueue::min_vruntime(&queue).unwrap_or(0) as isize)
@@ -202,7 +198,7 @@ impl SchedulerCFS {
     }
     /// Get the number of tasks in a given CPU's run queue
     pub fn get_cfs_queue_len(&mut self, cpu_id: u32) -> usize {
-        let queue = self.cpu_queue[cpu_id as usize].locked_queue.lock();
+        let queue = self.cpu_queue[cpu_id as usize].locked_queue.lock_irqsave();
         return CFSQueue::get_cfs_queue_size(&queue);
     }
 }
@@ -225,13 +221,17 @@ impl Scheduler for SchedulerCFS {
 
         compiler_fence(core::sync::atomic::Ordering::SeqCst);
        // Switch if the current task is not Runnable, or if its virtual runtime is >= the next task's.
-        if (ProcessManager::current_pcb().sched_info().state() != ProcessState::Runnable)
+        let state = ProcessManager::current_pcb()
+            .sched_info()
+            .inner_lock_read_irqsave()
+            .state();
+        if (state != ProcessState::Runnable)
             || (ProcessManager::current_pcb().sched_info().virtual_runtime()
                 >= proc.sched_info().virtual_runtime())
         {
             compiler_fence(core::sync::atomic::Ordering::SeqCst);
            // If this switch was triggered by time-slice expiry, re-enqueue the task; otherwise other modules take over its management
-            if ProcessManager::current_pcb().sched_info().state() == ProcessState::Runnable {
+            if state == ProcessState::Runnable {
                 sched_enqueue(ProcessManager::current_pcb(), false);
                 compiler_fence(core::sync::atomic::Ordering::SeqCst);
             }

+ 10 - 6
kernel/src/sched/core.rs

@@ -102,7 +102,9 @@ pub fn do_sched() -> Option<Arc<ProcessControlBlock>> {
     // The current task holds a lock; do not switch, to avoid deadlock
     if ProcessManager::current_pcb().preempt_count() != 0 {
         let binding = ProcessManager::current_pcb();
-        let guard = binding.sched_info_try_upgradeable_irqsave(5);
+        let guard = binding
+            .sched_info()
+            .inner_lock_try_upgradable_read_irqsave(5);
         if unlikely(guard.is_none()) {
             return None;
         }
@@ -154,7 +156,7 @@ pub fn do_sched() -> Option<Arc<ProcessControlBlock>> {
 /// @param reset_time whether to reset the virtual runtime
 pub fn sched_enqueue(pcb: Arc<ProcessControlBlock>, mut reset_time: bool) {
     compiler_fence(core::sync::atomic::Ordering::SeqCst);
-    if pcb.sched_info().state() != ProcessState::Runnable {
+    if pcb.sched_info().inner_lock_read_irqsave().state() != ProcessState::Runnable {
         return;
     }
     let cfs_scheduler = __get_cfs_scheduler();
@@ -173,7 +175,7 @@ pub fn sched_enqueue(pcb: Arc<ProcessControlBlock>, mut reset_time: bool) {
 
     assert!(pcb.sched_info().on_cpu().is_some());
 
-    match pcb.sched_info().policy() {
+    match pcb.sched_info().inner_lock_read_irqsave().policy() {
         SchedPolicy::CFS => {
             if reset_time {
                 cfs_scheduler.enqueue_reset_vruntime(pcb.clone());
@@ -199,17 +201,19 @@ pub extern "C" fn sched_init() {
 
 /// @brief Update time slices when a timer interrupt arrives
 /// Note: this function may only be called by the timer interrupt handler
-pub extern "C" fn sched_update_jiffies() {
+#[inline(never)]
+pub fn sched_update_jiffies() {
     let binding = ProcessManager::current_pcb();
-    let guard = binding.try_sched_info(10);
+    let guard = binding.sched_info().inner_lock_try_read_irqsave(10);
     if unlikely(guard.is_none()) {
         return;
     }
     let guard = guard.unwrap();
     let policy = guard.policy();
+    drop(guard);
     match policy {
         SchedPolicy::CFS => {
-            __get_cfs_scheduler().timer_update_jiffies(&guard);
+            __get_cfs_scheduler().timer_update_jiffies(binding.sched_info());
         }
         SchedPolicy::FIFO | SchedPolicy::RR => {
             __get_rt_scheduler().timer_update_jiffies();

+ 2 - 2
kernel/src/sched/rt.rs

@@ -81,7 +81,7 @@ impl RTQueue {
         queue.push_front(pcb);
     }
     pub fn get_rt_queue_size(&mut self) -> usize {
-        let queue = self.locked_queue.lock();
+        let queue = self.locked_queue.lock_irqsave();
         return queue.len();
     }
 }
@@ -176,7 +176,7 @@ impl Scheduler for SchedulerRT {
         let proc: Arc<ProcessControlBlock> =
             self.pick_next_task_rt(cpu_id).expect("No RT process found");
         let priority = proc.sched_info().priority();
-        let policy = proc.sched_info().policy();
+        let policy = proc.sched_info().inner_lock_read_irqsave().policy();
         match policy {
             // Under the FIFO policy, a task keeps the CPU until a higher-priority task becomes ready (equal priority is not enough) or it yields voluntarily (e.g. to wait for a resource).
             SchedPolicy::FIFO => {

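For reference, the time-slice semantics the FIFO/RR comment above describes, as a standalone sketch (simplified; names are hypothetical, not the SchedulerRT fields):

enum RtPolicy {
    Fifo,
    Rr,
}

// Returns true when the task should be rescheduled on this tick.
fn on_tick(policy: &RtPolicy, time_slice: &mut u32) -> bool {
    match policy {
        // FIFO: no time-slice accounting; only a higher-priority task
        // becoming ready, or blocking, takes the CPU away.
        RtPolicy::Fifo => false,
        // RR: consume the slice and reschedule when it reaches zero.
        RtPolicy::Rr => {
            *time_slice = time_slice.saturating_sub(1);
            *time_slice == 0
        }
    }
}

fn main() {
    let mut slice = 2;
    assert!(!on_tick(&RtPolicy::Fifo, &mut slice)); // FIFO never expires
    assert!(!on_tick(&RtPolicy::Rr, &mut slice)); // slice: 2 -> 1
    assert!(on_tick(&RtPolicy::Rr, &mut slice)); // slice: 1 -> 0, resched
}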
+ 7 - 7
kernel/src/time/timekeeping.rs

@@ -100,7 +100,7 @@ impl Timekeeper {
     ///
     /// * 'clock' - the concrete clock type to use; initially ClocksourceJiffies
     pub fn timekeeper_setup_internals(&self, clock: Arc<dyn Clocksource>) {
-        let mut timekeeper = self.0.write();
+        let mut timekeeper = self.0.write_irqsave();
         // Update the clock
         let mut clock_data = clock.clocksource_data();
         clock_data.watchdog_last = clock.read();
@@ -132,8 +132,9 @@ impl Timekeeper {
     /// # Get the number of nanoseconds the current clock source has advanced since the last check
     #[allow(dead_code)]
     pub fn tk_get_ns(&self) -> u64 {
-        let timekeeper = self.0.read();
+        let timekeeper = self.0.read_irqsave();
         let clock = timekeeper.clock.clone().unwrap();
+        drop(timekeeper);
         let clock_now = clock.read();
         let clock_data = clock.clocksource_data();
         let clock_delta = clock_now.div(clock_data.watchdog_last).data() & clock_data.mask.bits();
@@ -164,7 +165,7 @@ pub fn getnstimeofday() -> TimeSpec {
         tv_sec: 0,
     };
     loop {
-        match timekeeper().0.try_read() {
+        match timekeeper().0.try_read_irqsave() {
             None => continue,
             Some(tk) => {
                 _xtime = tk.xtime;
@@ -215,7 +216,7 @@ pub fn timekeeping_init() {
         .expect("clocksource_default_clock enable failed");
     timekeeper().timekeeper_setup_internals(clock);
     // Setting the time on other architectures is not supported yet, so initialize with the x86 values.
-    let mut timekeeper = timekeeper().0.write();
+    let mut timekeeper = timekeeper().0.write_irqsave();
     timekeeper.xtime.tv_nsec = ktime_get_real_ns();
 
     // Initialize the wall time to monotonic time offset
@@ -236,7 +237,7 @@ pub fn timekeeping_init() {
 }
 
 /// # Advance wall time using the current clock source
-pub fn update_wall_time() {
+pub fn update_wall_time(delta_us: i64) {
     // kdebug!("enter update_wall_time, stack_use = {:}",stack_use);
     compiler_fence(Ordering::SeqCst);
     let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
@@ -264,8 +265,7 @@ pub fn update_wall_time() {
     // ================
     compiler_fence(Ordering::SeqCst);
 
-    // !!! todo: the HPET 500us interrupt interval is hard-coded here; this needs fixing
-    __ADDED_USEC.fetch_add(500, Ordering::SeqCst);
+    __ADDED_USEC.fetch_add(delta_us, Ordering::SeqCst);
     compiler_fence(Ordering::SeqCst);
     let mut retry = 10;
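
update_wall_time() now takes the elapsed microseconds from its caller instead of baking in the old 500us HPET period, so the wall clock stays correct when the timer interval changes. The new contract in isolation (illustrative sketch):

use std::sync::atomic::{AtomicI64, Ordering};

static ADDED_USEC: AtomicI64 = AtomicI64::new(0);

// The interrupt path reports how much time actually elapsed.
fn update_wall_time(delta_us: i64) {
    ADDED_USEC.fetch_add(delta_us, Ordering::SeqCst);
}

fn main() {
    let interval_us: i64 = 500; // example value; callers pass their real timer period
    update_wall_time(interval_us);
    assert_eq!(ADDED_USEC.load(Ordering::SeqCst), 500);
}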
 

+ 34 - 23
kernel/src/time/timer.rs

@@ -18,7 +18,7 @@ use crate::{
         InterruptArch,
     },
     kerror, kinfo,
-    libs::spinlock::SpinLock,
+    libs::spinlock::{SpinLock, SpinLockGuard},
     process::{ProcessControlBlock, ProcessManager},
 };
 
@@ -57,7 +57,9 @@ impl TimerFunction for WakeUpHelper {
 }
 
 #[derive(Debug)]
-pub struct Timer(SpinLock<InnerTimer>);
+pub struct Timer {
+    inner: SpinLock<InnerTimer>,
+}
 
 impl Timer {
     /// @brief Create a timer (unit: ms)
@@ -68,22 +70,28 @@ impl Timer {
     ///
     /// @return the timer struct
     pub fn new(timer_func: Box<dyn TimerFunction>, expire_jiffies: u64) -> Arc<Self> {
-        let result: Arc<Timer> = Arc::new(Timer(SpinLock::new(InnerTimer {
-            expire_jiffies,
-            timer_func,
-            self_ref: Weak::default(),
-            triggered: false,
-        })));
+        let result: Arc<Timer> = Arc::new(Timer {
+            inner: SpinLock::new(InnerTimer {
+                expire_jiffies,
+                timer_func: Some(timer_func),
+                self_ref: Weak::default(),
+                triggered: false,
+            }),
+        });
 
-        result.0.lock().self_ref = Arc::downgrade(&result);
+        result.inner.lock().self_ref = Arc::downgrade(&result);
 
         return result;
     }
 
+    pub fn inner(&self) -> SpinLockGuard<InnerTimer> {
+        return self.inner.lock_irqsave();
+    }
+
     /// @brief Insert the timer into the timer list
     pub fn activate(&self) {
-        let inner_guard = self.0.lock();
-        let mut timer_list = TIMER_LIST.lock();
+        let mut timer_list = TIMER_LIST.lock_irqsave();
+        let inner_guard = self.inner();
 
         // If the list is empty, insert directly.
         if timer_list.is_empty() {
@@ -99,7 +107,7 @@ impl Timer {
         }
         let mut split_pos: usize = 0;
         for (pos, elt) in timer_list.iter().enumerate() {
-            if elt.0.lock().expire_jiffies > inner_guard.expire_jiffies {
+            if elt.inner().expire_jiffies > inner_guard.expire_jiffies {
                 split_pos = pos;
                 break;
             }
@@ -113,9 +121,11 @@ impl Timer {
 
     #[inline]
     fn run(&self) {
-        let mut timer = self.0.lock();
+        let mut timer = self.inner();
         timer.triggered = true;
-        let r = timer.timer_func.run();
+        let func = timer.timer_func.take();
+        drop(timer);
+        let r = func.map(|mut f| f.run()).unwrap_or(Ok(()));
         if unlikely(r.is_err()) {
             kerror!(
                 "Failed to run timer function: {self:?} {:?}",
@@ -126,14 +136,15 @@ impl Timer {
 
     /// ## Check whether the timer has already fired
     pub fn timeout(&self) -> bool {
-        self.0.lock().triggered
+        self.inner().triggered
     }
 
     /// ## Cancel the timer task
     pub fn cancel(&self) -> bool {
+        let this_arc = self.inner().self_ref.upgrade().unwrap();
         TIMER_LIST
-            .lock()
-            .extract_if(|x| Arc::<Timer>::as_ptr(&x) == self as *const Timer)
+            .lock_irqsave()
+            .extract_if(|x| Arc::ptr_eq(&this_arc, x))
             .for_each(|p| drop(p));
         true
     }
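
cancel() previously compared Arc::as_ptr() against &self cast to a raw pointer; the rewrite upgrades the timer's weak self-reference and compares with Arc::ptr_eq, which tests allocation identity directly. The same idea distilled (illustrative sketch, std types in place of the kernel's):

use std::sync::{Arc, Mutex, Weak};

struct Timer {
    self_ref: Mutex<Weak<Timer>>,
}

fn cancel(list: &Mutex<Vec<Arc<Timer>>>, timer: &Timer) {
    // Recover the Arc that owns `timer`, then remove list entries that
    // point at the same allocation.
    let this = timer.self_ref.lock().unwrap().upgrade().unwrap();
    list.lock().unwrap().retain(|t| !Arc::ptr_eq(t, &this));
}

fn main() {
    let t = Arc::new(Timer {
        self_ref: Mutex::new(Weak::new()),
    });
    *t.self_ref.lock().unwrap() = Arc::downgrade(&t);
    let list = Mutex::new(vec![t.clone()]);
    cancel(&list, &t);
    assert!(list.lock().unwrap().is_empty());
}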
@@ -145,7 +156,7 @@ pub struct InnerTimer {
     /// Expiration time of the timer, in jiffies
     pub expire_jiffies: u64,
     /// The function object the timer should run
-    pub timer_func: Box<dyn TimerFunction>,
+    pub timer_func: Option<Box<dyn TimerFunction>>,
     /// self_ref
     self_ref: Weak<Timer>,
     /// Whether the timer has fired
@@ -187,7 +198,7 @@ impl SoftirqVec for DoTimerSoftirq {
         // Handle at most TIMER_RUN_CYCLE_THRESHOLD timers per run.
         for _ in 0..TIMER_RUN_CYCLE_THRESHOLD {
             // kdebug!("DoTimerSoftirq run");
-            let timer_list = TIMER_LIST.try_lock();
+            let timer_list = TIMER_LIST.try_lock_irqsave();
             if timer_list.is_err() {
                 continue;
             }
@@ -201,7 +212,7 @@ impl SoftirqVec for DoTimerSoftirq {
             // kdebug!("to lock timer_list_front");
             let mut timer_list_front_guard = None;
             for _ in 0..10 {
-                let x = timer_list_front.0.try_lock();
+                let x = timer_list_front.inner.try_lock_irqsave();
                 if x.is_err() {
                     continue;
                 }
@@ -297,7 +308,7 @@ pub fn timer_get_first_expire() -> Result<u64, SystemError> {
                     return Ok(0);
                 } else {
                     // kdebug!("timer_list not empty");
-                    return Ok(timer_list.front().unwrap().0.lock().expire_jiffies);
+                    return Ok(timer_list.front().unwrap().inner().expire_jiffies);
                 }
             }
             // What should we return if locking fails??
@@ -310,10 +321,10 @@ pub fn timer_get_first_expire() -> Result<u64, SystemError> {
 /// Update the system time slice
 ///
 /// todo: this implementation is problematic; it seems to treat the HPET's 500us as 500 jiffies, and update_wall_time() used to hard-code the same 500us.
-pub fn update_timer_jiffies(add_jiffies: u64) -> u64 {
+pub fn update_timer_jiffies(add_jiffies: u64, time_us: i64) -> u64 {
     let prev = TIMER_JIFFIES.fetch_add(add_jiffies, Ordering::SeqCst);
     compiler_fence(Ordering::SeqCst);
-    update_wall_time();
+    update_wall_time(time_us);
 
     compiler_fence(Ordering::SeqCst);
     return prev + add_jiffies;
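
Finally, the Option<Box<dyn TimerFunction>> change lets Timer::run() move the callback out under the lock, drop the guard, and only then invoke it: the callback may take other locks without nesting under the timer's spinlock, and take() guarantees it runs at most once. The pattern distilled (illustrative types, not the kernel's):

use std::sync::Mutex;

struct Inner {
    triggered: bool,
    func: Option<Box<dyn FnMut() -> Result<(), ()>>>,
}

fn run(inner: &Mutex<Inner>) {
    let mut guard = inner.lock().unwrap();
    guard.triggered = true;
    let func = guard.func.take(); // the callback can only ever run once
    drop(guard); // release the lock before executing user code
    let r = func.map(|mut f| f()).unwrap_or(Ok(()));
    if r.is_err() {
        eprintln!("timer function failed");
    }
}

fn main() {
    let inner = Mutex::new(Inner {
        triggered: false,
        func: Some(Box::new(|| Ok(()))),
    });
    run(&inner);
    assert!(inner.lock().unwrap().triggered);
}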