`riscv`: initialize irq (#560)

Complete the riscv irqchip initialization code.

This is the first PR for this feature. Since a timer driver still needs to be implemented before this can be tested, the feature will be completed over 2~3 PRs.
LoGin 1 year ago
parent commit 338f690326

+ 20 - 20
kernel/Cargo.toml

@@ -26,44 +26,44 @@ kvm = []
 # 运行时依赖项
 [dependencies]
 acpi = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/acpi-rs.git", rev = "fb69243dcf" }
-atomic_enum = "0.2.0"
-bit_field = "0.10"
-bitfield-struct = "0.5.3"
-bitflags = "1.3.2"
+atomic_enum = "=0.2.0"
+bit_field = "=0.10"
+bitfield-struct = "=0.5.3"
+bitflags = "=1.3.2"
 bitmap = { path = "crates/bitmap" }
 # 一个no_std的hashmap、hashset
-elf = { version = "0.7.2", default-features = false }
-hashbrown = "0.13.2"
+elf = { version = "=0.7.2", default-features = false }
+hashbrown = "=0.13.2"
 ida = { path = "src/libs/ida" }
 intertrait = { path = "src/libs/intertrait" }
 kdepends = { path = "crates/kdepends" }
 klog_types = { path = "crates/klog_types" }
-linkme = "0.2"
-num = { version = "0.4.0", default-features = false }
-num-derive = "0.3"
+linkme = "=0.2"
+num = { version = "=0.4.0", default-features = false }
+num-derive = "=0.3"
 num-traits = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/num-traits.git", rev="1597c1c", default-features = false }
-raw-cpuid = "11.0.1"
+
 smoltcp = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/smoltcp.git", rev = "9027825", default-features = false, features = ["log", "alloc",  "socket-raw", "socket-udp", "socket-tcp", "socket-icmp", "socket-dhcpv4", "socket-dns", "proto-ipv4", "proto-ipv6"]}
 system_error = { path = "crates/system_error" }
 unified-init = { path = "crates/unified-init" }
 virtio-drivers = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/virtio-drivers.git", rev = "f1d1cbb" }
-fdt = "0.1.5"
-uefi = { version = "0.26.0", features = ["alloc"] }
-uefi-raw = "0.5.0"
-paste = "1.0.14"
+fdt = "=0.1.5"
+uefi = { version = "=0.26.0", features = ["alloc"] }
+uefi-raw = "=0.5.0"
+paste = "=1.0.14"
 
 
 # target为x86_64时,使用下面的依赖
 [target.'cfg(target_arch = "x86_64")'.dependencies]
 mini-backtrace = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/mini-backtrace.git", rev = "e0b1d90940" }
-x86 = "0.52.0"
-x86_64 = "0.14.10"
-
+raw-cpuid = "11.0.1"
+x86 = "=0.52.0"
+x86_64 = "=0.14.10"
 
 # target为riscv64时,使用下面的依赖
 [target.'cfg(target_arch = "riscv64")'.dependencies]
-riscv = { version = "0.11.0", features = [ "s-mode" ] }
-sbi-rt = { version = "0.0.3", features = ["legacy"] }
+riscv = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/riscv.git", rev = "5c01a8320e", features = [ "s-mode" ] }
+sbi-rt = { version = "=0.0.3", features = ["legacy"] }
 
 
 # 构建时依赖项
@@ -71,7 +71,7 @@ sbi-rt = { version = "0.0.3", features = ["legacy"] }
 kernel_build = { path = "../build-scripts/kernel_build" }
 
 [dependencies.lazy_static]
-version = "1.4.0"
+version = "=1.4.0"
 # 由于在no_std环境,而lazy_static依赖了spin库,因此需要指定其使用no_std
 features = ["spin_no_std"]
 

+ 76 - 2
kernel/src/arch/riscv64/cpu.rs

@@ -1,9 +1,22 @@
-use crate::smp::cpu::ProcessorId;
+use alloc::vec::Vec;
+
+use crate::{
+    init::boot_params,
+    kdebug,
+    mm::percpu::{PerCpu, PerCpuVar},
+    smp::cpu::{ProcessorId, SmpCpuManager},
+};
 
 /// 获取当前cpu的id
 #[inline]
 pub fn current_cpu_id() -> ProcessorId {
-    unimplemented!("RiscV64 current_cpu_id")
+    let ptr: *const LocalContext = riscv::register::sscratch::read() as *const LocalContext;
+
+    if core::intrinsics::unlikely(ptr.is_null()) {
+        return boot_params().read_irqsave().arch.boot_hartid;
+    }
+
+    unsafe { (*ptr).current_cpu() }
 }
 
 /// 重置cpu
@@ -11,3 +24,64 @@ pub unsafe fn cpu_reset() -> ! {
     sbi_rt::system_reset(sbi_rt::WarmReboot, sbi_rt::NoReason);
     unimplemented!("RiscV64 reset failed, manual override expected ...")
 }
+
+static mut LOCAL_CONTEXT: Option<PerCpuVar<LocalContext>> = None;
+
+#[inline(always)]
+pub(super) fn local_context() -> &'static PerCpuVar<LocalContext> {
+    unsafe { LOCAL_CONTEXT.as_ref().unwrap() }
+}
+
+/// Per cpu的上下文数据
+///
+/// 每个CPU的sscratch寄存器指向这个结构体
+#[derive(Debug)]
+pub(super) struct LocalContext {
+    /// 当前cpu的id
+    current_cpu: ProcessorId,
+}
+
+impl LocalContext {
+    fn new(cpu: ProcessorId) -> Self {
+        Self { current_cpu: cpu }
+    }
+    pub fn current_cpu(&self) -> ProcessorId {
+        self.current_cpu
+    }
+
+    pub fn set_current_cpu(&mut self, cpu: ProcessorId) {
+        self.current_cpu = cpu;
+    }
+
+    fn sync_to_cpu(&self) {
+        let ptr = self as *const Self as usize;
+        riscv::register::sscratch::write(ptr);
+    }
+}
+
+/// 初始化本地上下文
+#[inline(never)]
+pub(super) fn init_local_context() {
+    kdebug!("init_local_context");
+    let mut data = Vec::new();
+
+    for i in 0..PerCpu::MAX_CPU_NUM {
+        data.push(LocalContext::new(ProcessorId::new(i)));
+    }
+    let ctx = PerCpuVar::new(data).unwrap();
+
+    unsafe {
+        LOCAL_CONTEXT = Some(ctx);
+    }
+
+    let hartid = boot_params().read().arch.boot_hartid;
+
+    let ctx = unsafe { local_context().force_get(hartid) };
+    ctx.sync_to_cpu();
+}
+
+impl SmpCpuManager {
+    pub fn arch_init(boot_cpu: ProcessorId) {
+        // todo: 读取所有可用的CPU
+    }
+}

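For reference, the LocalContext added above keeps the current hart id reachable through the sscratch CSR: init_local_context() writes the address of the boot hart's per-CPU context into sscratch, and current_cpu_id() reads it back, falling back to boot_hartid while the pointer is still null. Below is a minimal stand-alone sketch of that lookup; the atomic only stands in for the real sscratch register, and ProcessorId/LocalContext are simplified stand-ins rather than the kernel's actual types.

```rust
use core::sync::atomic::{AtomicUsize, Ordering};

// Stand-in for the sscratch CSR (the kernel uses riscv::register::sscratch).
static SSCRATCH: AtomicUsize = AtomicUsize::new(0);

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct ProcessorId(u32);

struct LocalContext {
    current_cpu: ProcessorId,
}

impl LocalContext {
    /// Publish this context by storing its address in the (simulated) CSR.
    fn sync_to_cpu(&'static self) {
        SSCRATCH.store(self as *const Self as usize, Ordering::Release);
    }
}

fn current_cpu_id(boot_hartid: ProcessorId) -> ProcessorId {
    let ptr = SSCRATCH.load(Ordering::Acquire) as *const LocalContext;
    if ptr.is_null() {
        // Before init_local_context() has run, fall back to the boot hart id.
        return boot_hartid;
    }
    unsafe { (*ptr).current_cpu }
}

fn main() {
    static CTX: LocalContext = LocalContext { current_cpu: ProcessorId(1) };
    assert_eq!(current_cpu_id(ProcessorId(0)), ProcessorId(0)); // before setup
    CTX.sync_to_cpu();
    assert_eq!(current_cpu_id(ProcessorId(0)), ProcessorId(1)); // after setup
}
```
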
+ 19 - 7
kernel/src/arch/riscv64/init/mod.rs

@@ -3,26 +3,33 @@ use system_error::SystemError;
 
 use crate::{
     arch::{driver::sbi::SbiDriver, mm::init::mm_early_init},
-    driver::{firmware::efi::init::efi_init, open_firmware::fdt::open_firmware_fdt_driver},
+    driver::{
+        firmware::efi::init::efi_init, irqchip::riscv_intc::riscv_intc_init,
+        open_firmware::fdt::open_firmware_fdt_driver,
+    },
     init::{boot_params, init::start_kernel},
     kdebug, kinfo,
     mm::{memblock::mem_block_manager, PhysAddr, VirtAddr},
     print, println,
+    smp::cpu::ProcessorId,
 };
 
-use super::driver::sbi::console_putstr;
+use super::{cpu::init_local_context, driver::sbi::console_putstr};
 
 #[derive(Debug)]
 pub struct ArchBootParams {
     /// 启动时的fdt物理地址
     pub fdt_paddr: PhysAddr,
     pub fdt_vaddr: Option<VirtAddr>,
+
+    pub boot_hartid: ProcessorId,
 }
 
 impl ArchBootParams {
     pub const DEFAULT: Self = ArchBootParams {
         fdt_paddr: PhysAddr::new(0),
         fdt_vaddr: None,
+        boot_hartid: ProcessorId::new(0),
     };
 
     pub fn arch_fdt(&self) -> VirtAddr {
@@ -34,7 +41,7 @@ impl ArchBootParams {
     }
 }
 
-static mut BOOT_HARTID: usize = 0;
+static mut BOOT_HARTID: u32 = 0;
 static mut BOOT_FDT_PADDR: PhysAddr = PhysAddr::new(0);
 
 #[no_mangle]
@@ -42,7 +49,7 @@ unsafe extern "C" fn kernel_main(hartid: usize, fdt_paddr: usize) -> ! {
     let fdt_paddr = PhysAddr::new(fdt_paddr);
 
     unsafe {
-        BOOT_HARTID = hartid;
+        BOOT_HARTID = hartid as u32;
         BOOT_FDT_PADDR = fdt_paddr;
     }
 
@@ -79,9 +86,14 @@ unsafe fn parse_dtb() {
 #[inline(never)]
 pub fn early_setup_arch() -> Result<(), SystemError> {
     SbiDriver::early_init();
-    let hartid: usize = unsafe { BOOT_HARTID };
+    let hartid = unsafe { BOOT_HARTID };
     let fdt_paddr = unsafe { BOOT_FDT_PADDR };
-    boot_params().write().arch.fdt_paddr = fdt_paddr;
+
+    let mut arch_boot_params_guard = boot_params().write();
+    arch_boot_params_guard.arch.fdt_paddr = fdt_paddr;
+    arch_boot_params_guard.arch.boot_hartid = ProcessorId::new(hartid);
+
+    drop(arch_boot_params_guard);
 
     kinfo!(
         "DragonOS kernel is running on hart {}, fdt address:{:?}",
@@ -109,7 +121,7 @@ pub fn early_setup_arch() -> Result<(), SystemError> {
 
 #[inline(never)]
 pub fn setup_arch() -> Result<(), SystemError> {
-    // todo
+    init_local_context();
     return Ok(());
 }
 

+ 7 - 2
kernel/src/arch/riscv64/interrupt/mod.rs

@@ -1,6 +1,9 @@
 use system_error::SystemError;
 
-use crate::exception::{InterruptArch, IrqFlags, IrqFlagsGuard, IrqNumber};
+use crate::{
+    driver::irqchip::riscv_intc::riscv_intc_init,
+    exception::{InterruptArch, IrqFlags, IrqFlagsGuard, IrqNumber},
+};
 
 pub mod ipi;
 
@@ -8,7 +11,9 @@ pub struct RiscV64InterruptArch;
 
 impl InterruptArch for RiscV64InterruptArch {
     unsafe fn arch_irq_init() -> Result<(), SystemError> {
-        todo!("RiscV64InterruptArch::arch_irq_init")
+        riscv_intc_init()?;
+
+        Ok(())
     }
     unsafe fn interrupt_enable() {
         riscv::interrupt::enable();

+ 5 - 1
kernel/src/arch/x86_64/cpu.rs

@@ -1,6 +1,6 @@
 use x86::cpuid::{cpuid, CpuIdResult};
 
-use crate::smp::cpu::ProcessorId;
+use crate::smp::cpu::{ProcessorId, SmpCpuManager};
 
 /// 获取当前cpu的apic id
 #[inline]
@@ -16,3 +16,7 @@ pub unsafe fn cpu_reset() -> ! {
     unsafe { x86::io::outb(0x64, 0xfe) };
     loop {}
 }
+
+impl SmpCpuManager {
+    pub fn arch_init(_boot_cpu: ProcessorId) {}
+}

+ 0 - 7
kernel/src/driver/base/init.rs

@@ -23,12 +23,5 @@ pub fn driver_init() -> Result<(), SystemError> {
     cpu_device_manager().init()?;
 
     // 至此,已完成设备驱动模型的初始化
-    // 接下来,初始化设备
-    actual_device_init()?;
-    return Ok(());
-}
-
-fn actual_device_init() -> Result<(), SystemError> {
-    // 应当使用unified_init来初始化
     return Ok(());
 }

+ 2 - 0
kernel/src/driver/irqchip/mod.rs

@@ -0,0 +1,2 @@
+#[cfg(target_arch = "riscv64")]
+pub mod riscv_intc;

+ 134 - 0
kernel/src/driver/irqchip/riscv_intc.rs

@@ -0,0 +1,134 @@
+use alloc::{string::ToString, sync::Arc};
+use system_error::SystemError;
+
+use crate::exception::{
+    handle::PerCpuDevIdIrqHandler,
+    irqchip::{IrqChip, IrqChipFlags},
+    irqdata::IrqData,
+    irqdesc::irq_desc_manager,
+    irqdomain::{irq_domain_manager, IrqDomain, IrqDomainOps},
+    HardwareIrqNumber, IrqNumber,
+};
+
+static mut RISCV_INTC_DOMAIN: Option<Arc<IrqDomain>> = None;
+static mut RISCV_INTC_CHIP: Option<Arc<RiscvIntcChip>> = None;
+
+#[inline(always)]
+pub fn riscv_intc_domain() -> &'static Option<Arc<IrqDomain>> {
+    unsafe { &RISCV_INTC_DOMAIN }
+}
+
+#[inline(always)]
+fn riscv_intc_chip() -> Option<&'static Arc<RiscvIntcChip>> {
+    unsafe { RISCV_INTC_CHIP.as_ref() }
+}
+
+#[derive(Debug)]
+struct RiscvIntcChip;
+
+impl IrqChip for RiscvIntcChip {
+    fn name(&self) -> &'static str {
+        "RISC-V INTC"
+    }
+
+    fn irq_disable(&self, _irq: &Arc<IrqData>) {}
+
+    fn irq_mask(&self, irq: &Arc<IrqData>) -> Result<(), SystemError> {
+        unsafe { riscv::register::sie::clear_bits(1 << irq.hardware_irq().data()) };
+        Ok(())
+    }
+
+    fn irq_unmask(&self, irq: &Arc<IrqData>) -> Result<(), SystemError> {
+        unsafe { riscv::register::sie::set_bits(1 << irq.hardware_irq().data()) };
+        Ok(())
+    }
+
+    fn irq_ack(&self, irq: &Arc<IrqData>) {
+        todo!()
+    }
+
+    fn can_mask_ack(&self) -> bool {
+        false
+    }
+
+    fn irq_eoi(&self, _irq: &Arc<IrqData>) {
+        /*
+         * The RISC-V INTC driver uses handle_percpu_devid_irq() flow
+         * for the per-HART local interrupts and child irqchip drivers
+         * (such as PLIC, SBI IPI, CLINT, APLIC, IMSIC, etc) implement
+         * chained handlers for the per-HART local interrupts.
+         *
+         * In the absence of irq_eoi(), the chained_irq_enter() and
+         * chained_irq_exit() functions (used by child irqchip drivers)
+         * will do unnecessary mask/unmask of per-HART local interrupts
+         * at the time of handling interrupts. To avoid this, we provide
+         * an empty irq_eoi() callback for RISC-V INTC irqchip.
+         */
+    }
+
+    fn can_set_affinity(&self) -> bool {
+        false
+    }
+
+    fn can_set_flow_type(&self) -> bool {
+        false
+    }
+
+    fn flags(&self) -> IrqChipFlags {
+        todo!()
+    }
+}
+
+#[derive(Debug)]
+struct RiscvIntcDomainOps;
+
+impl IrqDomainOps for RiscvIntcDomainOps {
+    fn map(
+        &self,
+        irq_domain: &Arc<IrqDomain>,
+        hwirq: HardwareIrqNumber,
+        virq: IrqNumber,
+    ) -> Result<(), SystemError> {
+        irq_desc_manager().set_percpu_devid_all(virq)?;
+        irq_domain_manager().domain_set_info(
+            irq_domain,
+            virq,
+            hwirq,
+            riscv_intc_chip().unwrap().clone() as Arc<dyn IrqChip>,
+            irq_domain.host_data(),
+            &PerCpuDevIdIrqHandler,
+            None,
+            None,
+        );
+
+        return Ok(());
+    }
+
+    fn unmap(&self, irq_domain: &Arc<IrqDomain>, virq: IrqNumber) {
+        todo!("riscv_intc_domain_ops::unmap");
+    }
+}
+
+#[inline(never)]
+pub unsafe fn riscv_intc_init() -> Result<(), SystemError> {
+    let intc_chip = Arc::new(RiscvIntcChip);
+
+    unsafe {
+        RISCV_INTC_CHIP = Some(intc_chip);
+    }
+
+    let intc_domain = irq_domain_manager()
+        .create_and_add_linear("riscv-intc".to_string(), &RiscvIntcDomainOps, 64)
+        .ok_or_else(|| {
+            kerror!("Failed to create riscv-intc domain");
+            SystemError::ENXIO
+        })?;
+
+    irq_domain_manager().set_default_domain(intc_domain.clone());
+
+    unsafe {
+        RISCV_INTC_DOMAIN = Some(intc_domain);
+    }
+
+    return Ok(());
+}

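The irq_mask/irq_unmask callbacks above treat the hardware irq number as a bit index into the sie CSR, so masking a per-hart local interrupt is a single clear_bits/set_bits on that register. A rough stand-alone model of that mapping follows; the u64 stands in for the CSR itself, and bit 5 (the supervisor timer interrupt enable) is only used as an example value.

```rust
// Model of the sie-based mask/unmask: hwirq N <-> bit N of the sie CSR.
struct SieModel(u64);

impl SieModel {
    fn irq_mask(&mut self, hwirq: u32) {
        // The real chip does riscv::register::sie::clear_bits(1 << hwirq).
        self.0 &= !(1u64 << hwirq);
    }
    fn irq_unmask(&mut self, hwirq: u32) {
        // The real chip does riscv::register::sie::set_bits(1 << hwirq).
        self.0 |= 1u64 << hwirq;
    }
}

fn main() {
    const STIE: u32 = 5; // supervisor timer interrupt enable bit
    let mut sie = SieModel(0);
    sie.irq_unmask(STIE);
    assert_eq!(sie.0 & (1 << STIE), 1 << STIE);
    sie.irq_mask(STIE);
    assert_eq!(sie.0 & (1 << STIE), 0);
}
```
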
+ 1 - 0
kernel/src/driver/mod.rs

@@ -3,6 +3,7 @@ pub mod base;
 pub mod disk;
 pub mod firmware;
 pub mod input;
+pub mod irqchip;
 pub mod keyboard;
 pub mod net;
 pub mod open_firmware;

+ 71 - 2
kernel/src/exception/handle.rs

@@ -6,8 +6,9 @@ use system_error::SystemError;
 use crate::{
     arch::{interrupt::TrapFrame, CurrentIrqArch},
     exception::irqdesc::InnerIrqDesc,
-    libs::spinlock::SpinLockGuard,
+    libs::{once::Once, spinlock::SpinLockGuard},
     process::{ProcessFlags, ProcessManager},
+    smp::core::smp_get_processor_id,
 };
 
 use super::{
@@ -148,7 +149,7 @@ fn irq_may_run(desc_inner_guard: &SpinLockGuard<'_, InnerIrqDesc>) -> bool {
     return false;
 }
 
-fn mask_ack_irq(irq_data: &Arc<IrqData>) {
+pub(super) fn mask_ack_irq(irq_data: &Arc<IrqData>) {
     let chip = irq_data.chip_info_read_irqsave().chip();
     if chip.can_mask_ack() {
         chip.irq_mask_ack(&irq_data);
@@ -242,3 +243,71 @@ fn warn_no_thread(irq: IrqNumber, action_inner: &mut SpinLockGuard<'_, InnerIrqA
         action_inner.name()
     );
 }
+
+/// `handle_percpu_devid_irq` - 带有per-CPU设备id的perCPU本地中断处理程序
+///
+///
+/// * `desc`: 此中断的中断描述结构
+///
+/// 在没有锁定要求的SMP机器上的每个CPU中断。与linux的`handle_percpu_irq()`相同,但有以下额外内容:
+///
+/// `action->percpu_dev_id`是一个指向per-cpu变量的指针,这些变量
+/// 包含调用此处理程序的cpu的真实设备id
+#[derive(Debug)]
+pub struct PerCpuDevIdIrqHandler;
+
+impl IrqFlowHandler for PerCpuDevIdIrqHandler {
+    fn handle(&self, irq_desc: &Arc<IrqDesc>, _trap_frame: &mut TrapFrame) {
+        let desc_inner_guard = irq_desc.inner();
+        let irq_data = desc_inner_guard.irq_data().clone();
+        let chip = irq_data.chip_info_read().chip();
+
+        chip.irq_ack(&irq_data);
+
+        let irq = irq_data.irq();
+
+        let action = desc_inner_guard.actions().first().cloned();
+
+        drop(desc_inner_guard);
+
+        if let Some(action) = action {
+            let action_inner = action.inner();
+            let per_cpu_devid = action_inner.per_cpu_dev_id().cloned();
+
+            let handler = action_inner.handler().unwrap();
+            drop(action_inner);
+
+            let _r = handler.handle(
+                irq,
+                None,
+                per_cpu_devid.map(|d| d as Arc<dyn IrqHandlerData>),
+            );
+        } else {
+            let cpu = smp_get_processor_id();
+
+            let enabled = irq_desc
+                .inner()
+                .percpu_enabled()
+                .as_ref()
+                .unwrap()
+                .get(cpu)
+                .unwrap_or(false);
+
+            if enabled {
+                irq_manager().irq_percpu_disable(irq_desc, &irq_data, &chip, cpu);
+            }
+            static ONCE: Once = Once::new();
+
+            ONCE.call_once(|| {
+                kerror!(
+                    "Spurious percpu irq {} on cpu {:?}, enabled: {}",
+                    irq.data(),
+                    cpu,
+                    enabled
+                );
+            });
+        }
+
+        chip.irq_eoi(&irq_data);
+    }
+}

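The point of PerCpuDevIdIrqHandler is that a single IrqAction is shared by every hart while its device id lives in a per-CPU slot, so the handler invoked on hart n receives hart n's cookie. A simplified illustration of that indexing is sketched below, with plain Vec-backed types standing in for the kernel's PerCpuVar/IrqAction; the device-id strings are made up for the example.

```rust
// Toy per-CPU device-id table: one slot per CPU, shared action.
struct PerCpuDevId<T> {
    slots: Vec<T>,
}

impl<T> PerCpuDevId<T> {
    fn get(&self, cpu: usize) -> &T {
        &self.slots[cpu]
    }
}

fn handle_percpu_devid_irq(cpu: usize, dev_ids: &PerCpuDevId<&'static str>) {
    // The real flow handler also acks the irq first and issues an eoi at the end.
    let dev_id = dev_ids.get(cpu);
    println!("irq handled on cpu {cpu} with dev_id {dev_id}");
}

fn main() {
    let dev_ids = PerCpuDevId { slots: vec!["timer@hart0", "timer@hart1"] };
    handle_percpu_devid_irq(0, &dev_ids);
    handle_percpu_devid_irq(1, &dev_ids);
}
```
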
+ 218 - 4
kernel/src/exception/irqchip.rs

@@ -1,21 +1,36 @@
-use core::{any::Any, fmt::Debug};
+use core::{any::Any, fmt::Debug, intrinsics::unlikely};
 
 use alloc::{
+    string::{String, ToString},
     sync::{Arc, Weak},
     vec::Vec,
 };
 use system_error::SystemError;
 
 use crate::{
-    libs::{cpumask::CpuMask, spinlock::SpinLock},
+    exception::{
+        dummychip::no_irq_chip,
+        handle::{bad_irq_handler, mask_ack_irq},
+        irqdata::IrqStatus,
+        irqdesc::irq_desc_manager,
+        manage::irq_manager,
+    },
+    libs::{
+        cpumask::CpuMask,
+        once::Once,
+        spinlock::{SpinLock, SpinLockGuard},
+    },
     mm::VirtAddr,
+    smp::cpu::ProcessorId,
 };
 
 use super::{
-    irqdata::{IrqData, IrqLineStatus},
+    irqdata::{IrqData, IrqHandlerData, IrqLineStatus},
+    irqdesc::{InnerIrqDesc, IrqAction, IrqDesc, IrqFlowHandler, IrqHandler, IrqReturn},
     irqdomain::IrqDomain,
     manage::IrqManager,
     msi::MsiMsg,
+    IrqNumber,
 };
 
 /// 参考 https://code.dragonos.org.cn/xref/linux-6.1.9/include/linux/irq.h#506
@@ -48,7 +63,7 @@ pub trait IrqChip: Sync + Send + Any + Debug {
     ///
     /// 用于屏蔽中断
     ///
-    /// 如果返回ENOSYS,则表明irq_mask()不支持.
+    /// 如果返回ENOSYS,则表明irq_mask()不支持. 那么中断机制代码将调用irq_disable()。
     ///
     /// 如果返回错误,那么中断的屏蔽状态将不会改变。
     fn irq_mask(&self, _irq: &Arc<IrqData>) -> Result<(), SystemError> {
@@ -367,4 +382,203 @@ impl IrqManager {
 
         return Ok(());
     }
+
+    pub(super) fn __irq_set_handler(
+        &self,
+        irq: IrqNumber,
+        handler: &'static dyn IrqFlowHandler,
+        is_chained: bool,
+        name: Option<String>,
+    ) {
+        let r = irq_desc_manager().lookup_and_lock_bus(irq, false, false);
+        if r.is_none() {
+            return;
+        }
+
+        let irq_desc = r.unwrap();
+
+        let mut desc_inner = irq_desc.inner();
+        self.__irq_do_set_handler(&irq_desc, &mut desc_inner, Some(handler), is_chained, name);
+
+        drop(desc_inner);
+        irq_desc.chip_bus_sync_unlock();
+    }
+
+    fn __irq_do_set_handler(
+        &self,
+        desc: &Arc<IrqDesc>,
+        desc_inner: &mut SpinLockGuard<'_, InnerIrqDesc>,
+        mut handler: Option<&'static dyn IrqFlowHandler>,
+        is_chained: bool,
+        name: Option<String>,
+    ) {
+        if handler.is_none() {
+            handler = Some(bad_irq_handler());
+        } else {
+            let mut irq_data = Some(desc_inner.irq_data().clone());
+
+            /*
+             * 在具有中断域继承的domain中,我们可能会遇到这样的情况,
+             * 最外层的芯片还没有设置好,但是内部的芯片已经存在了。
+             * 我们选择安装处理程序,而不是退出,
+             * 但显然我们此时无法启用/启动中断。
+             */
+            while irq_data.is_some() {
+                let dt = irq_data.as_ref().unwrap().clone();
+
+                let chip_info = dt.chip_info_read_irqsave();
+
+                if !Arc::ptr_eq(&chip_info.chip(), &no_irq_chip()) {
+                    break;
+                }
+
+                /*
+                 * 如果最外层的芯片没有设置好,并且预期立即开始中断,
+                 * 则放弃。
+                 */
+                if unlikely(is_chained) {
+                    kwarn!(
+                        "Chained handler for irq {} is not supported",
+                        dt.irq().data()
+                    );
+                    return;
+                }
+
+                //  try the parent
+                let parent_data = dt.parent_data().map(|p| p.upgrade()).flatten();
+
+                irq_data = parent_data;
+            }
+
+            if unlikely(
+                irq_data.is_none()
+                    || Arc::ptr_eq(
+                        &irq_data.as_ref().unwrap().chip_info_read_irqsave().chip(),
+                        &no_irq_chip(),
+                    ),
+            ) {
+                kwarn!("No irq chip for irq {}", desc_inner.irq_data().irq().data());
+                return;
+            }
+        }
+        let handler = handler.unwrap();
+        if core::ptr::eq(handler, bad_irq_handler()) {
+            if Arc::ptr_eq(
+                &desc_inner.irq_data().chip_info_read_irqsave().chip(),
+                &no_irq_chip(),
+            ) {
+                let irq_data = desc_inner.irq_data();
+                mask_ack_irq(irq_data);
+
+                irq_data.irqd_set(IrqStatus::IRQD_IRQ_DISABLED);
+
+                if is_chained {
+                    desc_inner.clear_actions();
+                }
+                desc_inner.set_depth(1);
+            }
+        }
+        let chip = desc_inner.irq_data().chip_info_read_irqsave().chip();
+        desc.set_handler_no_lock_inner(handler, desc_inner.irq_data(), &chip);
+        desc_inner.set_name(name);
+
+        if !core::ptr::eq(handler, bad_irq_handler()) && is_chained {
+            let trigger_type = desc_inner.common_data().trigger_type();
+
+            /*
+             * 我们即将立即启动这个中断,
+             * 因此需要设置触发配置。
+             * 但是 .irq_set_type 回调可能已经覆盖了
+             * irqflowhandler,忽略了我们正在处理的
+             * 是一个链式中断。立即重置它,因为我们
+             * 确实知道更好的处理方式。
+             */
+
+            if trigger_type != IrqLineStatus::IRQ_TYPE_NONE {
+                irq_manager()
+                    .do_set_irq_trigger(desc.clone(), desc_inner, trigger_type)
+                    .ok();
+                desc.set_handler(handler);
+            }
+
+            desc_inner.set_noprobe();
+            desc_inner.set_norequest();
+            desc_inner.set_nothread();
+
+            desc_inner.clear_actions();
+            desc_inner.add_action(chained_action());
+
+            irq_manager()
+                .irq_activate_and_startup(desc, desc_inner, IrqManager::IRQ_RESEND)
+                .ok();
+        }
+
+        return;
+    }
+
+    pub fn irq_set_handler_data(
+        &self,
+        irq: IrqNumber,
+        data: Option<Arc<dyn IrqHandlerData>>,
+    ) -> Result<(), SystemError> {
+        let desc = irq_desc_manager().lookup(irq).ok_or(SystemError::EINVAL)?;
+        desc.inner().common_data().inner().set_handler_data(data);
+
+        return Ok(());
+    }
+
+    pub fn irq_percpu_disable(
+        &self,
+        desc: &Arc<IrqDesc>,
+        irq_data: &Arc<IrqData>,
+        irq_chip: &Arc<dyn IrqChip>,
+        cpu: ProcessorId,
+    ) {
+        if let Err(e) = irq_chip.irq_mask(irq_data) {
+            if e == SystemError::ENOSYS {
+                irq_chip.irq_disable(irq_data);
+            }
+        }
+
+        desc.inner()
+            .percpu_enabled_mut()
+            .as_mut()
+            .unwrap()
+            .set(cpu, false);
+    }
+}
+
+lazy_static! {
+    pub(super) static ref CHAINED_ACTION: Arc<IrqAction> = IrqAction::new(
+        IrqNumber::new(0),
+        "".to_string(),
+        Some(&ChainedActionHandler),
+        None,
+    );
+}
+
+#[allow(dead_code)]
+pub(super) fn chained_action() -> Arc<IrqAction> {
+    CHAINED_ACTION.clone()
+}
+
+/// Chained handlers 永远不应该在它们的IRQ上调用irqaction。如果发生这种情况,
+/// 这个默认irqaction将发出警告。
+#[derive(Debug)]
+struct ChainedActionHandler;
+
+impl IrqHandler for ChainedActionHandler {
+    fn handle(
+        &self,
+        irq: IrqNumber,
+        _static_data: Option<&dyn IrqHandlerData>,
+        _dynamic_data: Option<Arc<dyn IrqHandlerData>>,
+    ) -> Result<IrqReturn, SystemError> {
+        static ONCE: Once = Once::new();
+        ONCE.call_once(|| {
+            kwarn!("Chained irq {} should not call an action.", irq.data());
+        });
+
+        Ok(IrqReturn::NotHandled)
+    }
 }

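__irq_do_set_handler has to cope with hierarchical domains where the outermost irq_data still points at no_irq_chip: it walks parent_data until it finds a level with a real chip and only gives up when none exists (or when the handler is chained). The sketch below models that walk with a toy Node type instead of the kernel's IrqData; it is an illustration of the loop, not the actual implementation.

```rust
// Toy model of walking irq_data -> parent_data looking for a real chip.
struct Node {
    chip: Option<&'static str>,   // None models no_irq_chip
    parent: Option<Box<Node>>,    // models parent_data in a hierarchical domain
}

fn find_real_chip(mut node: Option<&Node>) -> Option<&'static str> {
    while let Some(n) = node {
        if let Some(chip) = n.chip {
            return Some(chip); // first level with a real chip wins
        }
        node = n.parent.as_deref(); // otherwise try the parent level
    }
    None
}

fn main() {
    let leaf = Node {
        chip: None,
        parent: Some(Box::new(Node { chip: Some("RISC-V INTC"), parent: None })),
    };
    assert_eq!(find_real_chip(Some(&leaf)), Some("RISC-V INTC"));
}
```
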
+ 20 - 1
kernel/src/exception/irqdata.rs

@@ -288,11 +288,15 @@ impl IrqCommonData {
     pub fn set_affinity(&self, affinity: CpuMask) {
         self.inner.lock_irqsave().affinity = affinity;
     }
+
+    pub fn inner(&self) -> SpinLockGuard<InnerIrqCommonData> {
+        self.inner.lock_irqsave()
+    }
 }
 
 #[allow(dead_code)]
 #[derive(Debug)]
-struct InnerIrqCommonData {
+pub struct InnerIrqCommonData {
     /// status information for irq chip functions.
     state: IrqStatus,
     /// per-IRQ data for the irq_chip methods
@@ -309,6 +313,16 @@ impl InnerIrqCommonData {
     pub fn irqd_clear(&mut self, status: IrqStatus) {
         self.state.remove(status);
     }
+
+    #[allow(dead_code)]
+    pub fn set_handler_data(&mut self, handler_data: Option<Arc<dyn IrqHandlerData>>) {
+        self.handler_data = handler_data;
+    }
+
+    #[allow(dead_code)]
+    pub fn handler_data(&self) -> Option<Arc<dyn IrqHandlerData>> {
+        self.handler_data.clone()
+    }
 }
 
 /// 中断处理函数传入的数据
@@ -400,6 +414,11 @@ impl IrqLineStatus {
         }
         return Some(self.contains(Self::IRQ_TYPE_LEVEL_HIGH));
     }
+
+    #[allow(dead_code)]
+    pub fn is_per_cpu_devid(&self) -> bool {
+        self.contains(Self::IRQ_PER_CPU_DEVID)
+    }
 }
 bitflags! {
     /// 中断状态(存储在IrqCommonData)

+ 171 - 0
kernel/src/exception/irqdesc.rs

@@ -21,17 +21,21 @@ use crate::{
     },
     filesystem::kernfs::KernFSInode,
     libs::{
+        cpumask::CpuMask,
         mutex::{Mutex, MutexGuard},
         rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard},
         spinlock::{SpinLock, SpinLockGuard},
     },
+    mm::percpu::PerCpuVar,
     process::ProcessControlBlock,
     sched::completion::Completion,
+    smp::cpu::smp_cpu_manager,
 };
 
 use super::{
     dummychip::no_irq_chip,
     handle::bad_irq_handler,
+    irqchip::IrqChip,
     irqdata::{IrqCommonData, IrqData, IrqHandlerData, IrqLineStatus, IrqStatus},
     sysfs::{irq_sysfs_del, IrqKObjType},
     HardwareIrqNumber, InterruptArch, IrqNumber,
@@ -95,6 +99,8 @@ impl IrqDesc {
 
         let irq_desc = IrqDesc {
             inner: SpinLock::new(InnerIrqDesc {
+                percpu_affinity: None,
+                percpu_enabled: None,
                 common_data,
                 irq_data,
                 desc_internal_state: IrqDescState::empty(),
@@ -144,6 +150,24 @@ impl IrqDesc {
         self.chip_bus_sync_unlock();
     }
 
+    /// 设置中断处理程序(不对desc->inner加锁)
+    ///
+    ///
+    /// ## Safety
+    ///
+    /// 需要保证irq_data和chip是当前irqdesc的
+    pub fn set_handler_no_lock_inner(
+        &self,
+        handler: &'static dyn IrqFlowHandler,
+        irq_data: &Arc<IrqData>,
+        chip: &Arc<dyn IrqChip>,
+    ) {
+        chip.irq_bus_lock(irq_data).ok();
+        let mut guard = self.handler.write_irqsave();
+        *guard = Some(handler);
+        chip.irq_bus_sync_unlock(irq_data).ok();
+    }
+
     pub fn handler(&self) -> Option<&'static dyn IrqFlowHandler> {
         let guard = self.handler.read_irqsave();
         *guard
@@ -248,6 +272,17 @@ impl IrqDesc {
             .ok();
     }
 
+    pub fn set_percpu_devid_flags(&self) {
+        self.modify_status(
+            IrqLineStatus::empty(),
+            IrqLineStatus::IRQ_NOAUTOEN
+                | IrqLineStatus::IRQ_PER_CPU
+                | IrqLineStatus::IRQ_NOTHREAD
+                | IrqLineStatus::IRQ_NOPROBE
+                | IrqLineStatus::IRQ_PER_CPU_DEVID,
+        );
+    }
+
     pub fn modify_status(&self, clear: IrqLineStatus, set: IrqLineStatus) {
         let mut desc_guard = self.inner();
         desc_guard.line_status.remove(clear);
@@ -323,6 +358,10 @@ pub struct InnerIrqDesc {
     kern_inode: Option<Arc<KernFSInode>>,
     kset: Option<Arc<KSet>>,
     parent_kobj: Option<Weak<dyn KObject>>,
+    /// per-cpu enabled mask
+    percpu_enabled: Option<CpuMask>,
+    /// per-cpu affinity
+    percpu_affinity: Option<CpuMask>,
     // wait_for_threads: EventWaitQueue
 }
 
@@ -331,6 +370,11 @@ impl InnerIrqDesc {
         self.name.as_ref()
     }
 
+    #[allow(dead_code)]
+    pub fn set_name(&mut self, name: Option<String>) {
+        self.name = name;
+    }
+
     pub fn can_request(&self) -> bool {
         !self.line_status.contains(IrqLineStatus::IRQ_NOREQUEST)
     }
@@ -345,6 +389,24 @@ impl InnerIrqDesc {
         self.line_status.remove(IrqLineStatus::IRQ_NOREQUEST);
     }
 
+    #[allow(dead_code)]
+    pub fn set_noprobe(&mut self) {
+        self.line_status.insert(IrqLineStatus::IRQ_NOPROBE);
+    }
+
+    #[allow(dead_code)]
+    pub fn clear_noprobe(&mut self) {
+        self.line_status.remove(IrqLineStatus::IRQ_NOPROBE);
+    }
+
+    pub fn set_nothread(&mut self) {
+        self.line_status.insert(IrqLineStatus::IRQ_NOTHREAD);
+    }
+
+    pub fn clear_nothread(&mut self) {
+        self.line_status.remove(IrqLineStatus::IRQ_NOTHREAD);
+    }
+
     pub fn nested_thread(&self) -> bool {
         self.line_status.contains(IrqLineStatus::IRQ_NESTED_THREAD)
     }
@@ -404,6 +466,14 @@ impl InnerIrqDesc {
         self.actions.push(action);
     }
 
+    pub fn clear_actions(&mut self) {
+        self.actions.clear();
+    }
+
+    pub fn remove_action(&mut self, action: &Arc<IrqAction>) {
+        self.actions.retain(|a| !Arc::ptr_eq(a, action));
+    }
+
     pub fn internal_state(&self) -> &IrqDescState {
         &self.desc_internal_state
     }
@@ -445,6 +515,22 @@ impl InnerIrqDesc {
     pub fn set_level(&mut self) {
         self.line_status.insert(IrqLineStatus::IRQ_LEVEL);
     }
+
+    pub fn percpu_enabled(&self) -> &Option<CpuMask> {
+        &self.percpu_enabled
+    }
+
+    pub fn percpu_enabled_mut(&mut self) -> &mut Option<CpuMask> {
+        &mut self.percpu_enabled
+    }
+
+    pub fn percpu_affinity(&self) -> &Option<CpuMask> {
+        &self.percpu_affinity
+    }
+
+    pub fn percpu_affinity_mut(&mut self) -> &mut Option<CpuMask> {
+        &mut self.percpu_affinity
+    }
 }
 
 impl KObject for IrqDesc {
@@ -548,6 +634,7 @@ impl IrqAction {
         let action: IrqAction = IrqAction {
             inner: SpinLock::new(InnerIrqAction {
                 dev_id: None,
+                per_cpu_dev_id: None,
                 handler,
                 thread_fn,
                 thread: None,
@@ -577,6 +664,8 @@ impl IrqAction {
 pub struct InnerIrqAction {
     /// cookie to identify the device
     dev_id: Option<Arc<DeviceId>>,
+    /// cookie to identify the device (per cpu)
+    per_cpu_dev_id: Option<PerCpuVar<Arc<DeviceId>>>,
     /// 中断处理程序
     handler: Option<&'static dyn IrqHandler>,
     /// interrupt handler function for threaded interrupts
@@ -603,6 +692,15 @@ impl InnerIrqAction {
         &mut self.dev_id
     }
 
+    pub fn per_cpu_dev_id(&self) -> Option<&Arc<DeviceId>> {
+        self.per_cpu_dev_id.as_ref().map(|v| v.get())
+    }
+
+    #[allow(dead_code)]
+    pub fn per_cpu_dev_id_mut(&mut self) -> Option<&mut Arc<DeviceId>> {
+        self.per_cpu_dev_id.as_mut().map(|v| v.get_mut())
+    }
+
     pub fn handler(&self) -> Option<&'static dyn IrqHandler> {
         self.handler
     }
@@ -798,6 +896,42 @@ impl IrqDescManager {
         self.irq_descs.get(&irq).map(|desc| desc.clone())
     }
 
+    /// 查找中断描述符并锁定总线(没有对irqdesc进行加锁)
+    #[allow(dead_code)]
+    pub fn lookup_and_lock_bus(
+        &self,
+        irq: IrqNumber,
+        check_global: bool,
+        check_percpu: bool,
+    ) -> Option<Arc<IrqDesc>> {
+        self.do_lookup_and_lock(irq, true, check_global, check_percpu)
+    }
+
+    fn do_lookup_and_lock(
+        &self,
+        irq: IrqNumber,
+        lock_bus: bool,
+        check_global: bool,
+        check_percpu: bool,
+    ) -> Option<Arc<IrqDesc>> {
+        let desc = self.lookup(irq)?;
+        if check_global || check_percpu {
+            if check_percpu && !desc.inner().line_status().is_per_cpu_devid() {
+                return None;
+            }
+
+            if check_global && desc.inner().line_status().is_per_cpu_devid() {
+                return None;
+            }
+        }
+
+        if lock_bus {
+            desc.chip_bus_lock();
+        }
+
+        return Some(desc);
+    }
+
     fn insert(&mut self, irq: IrqNumber, desc: Arc<IrqDesc>) {
         self.irq_descs.insert(irq, desc);
     }
@@ -815,4 +949,41 @@ impl IrqDescManager {
     pub fn iter_descs(&self) -> btree_map::Iter<'_, IrqNumber, Arc<IrqDesc>> {
         self.irq_descs.iter()
     }
+
+    /// 设置指定irq的可用cpu为所有cpu
+    pub fn set_percpu_devid_all(&self, irq: IrqNumber) -> Result<(), SystemError> {
+        self.set_percpu_devid(irq, None)
+    }
+
+    /// 设置指定irq的可用cpu
+    ///
+    /// 如果affinity为None,则表示设置为所有cpu
+    pub fn set_percpu_devid(
+        &self,
+        irq: IrqNumber,
+        affinity: Option<&CpuMask>,
+    ) -> Result<(), SystemError> {
+        let desc = self.lookup(irq).ok_or(SystemError::EINVAL)?;
+        let mut desc_inner = desc.inner();
+
+        if desc_inner.percpu_enabled().is_some() {
+            return Err(SystemError::EINVAL);
+        }
+
+        *desc_inner.percpu_enabled_mut() = Some(CpuMask::new());
+
+        if let Some(affinity) = affinity {
+            desc_inner.percpu_affinity_mut().replace(affinity.clone());
+        } else {
+            desc_inner
+                .percpu_affinity_mut()
+                .replace(smp_cpu_manager().possible_cpus().clone());
+        }
+
+        drop(desc_inner);
+
+        desc.set_percpu_devid_flags();
+
+        return Ok(());
+    }
 }

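set_percpu_devid records two things on the descriptor: an empty per-CPU "enabled" mask and an affinity mask that defaults to all possible CPUs when the caller passes None, and it refuses to run twice on the same irq. A compact model of that bookkeeping is sketched below, with a plain bitmask standing in for the kernel's CpuMask.

```rust
// Simplified descriptor bookkeeping behind set_percpu_devid().
#[derive(Clone, Debug, PartialEq)]
struct CpuMask(u64);

struct Desc {
    percpu_enabled: Option<CpuMask>,
    percpu_affinity: Option<CpuMask>,
}

fn set_percpu_devid(desc: &mut Desc, affinity: Option<CpuMask>, possible: CpuMask) -> Result<(), ()> {
    if desc.percpu_enabled.is_some() {
        return Err(()); // already configured for per-CPU dev ids
    }
    desc.percpu_enabled = Some(CpuMask(0));                  // nothing enabled yet
    desc.percpu_affinity = Some(affinity.unwrap_or(possible)); // default: all possible CPUs
    Ok(())
}

fn main() {
    let mut desc = Desc { percpu_enabled: None, percpu_affinity: None };
    set_percpu_devid(&mut desc, None, CpuMask(0b11)).unwrap();
    assert_eq!(desc.percpu_affinity, Some(CpuMask(0b11)));
    assert!(set_percpu_devid(&mut desc, None, CpuMask(0b11)).is_err());
}
```
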
+ 137 - 6
kernel/src/exception/irqdomain.rs

@@ -15,8 +15,10 @@ use crate::{
 };
 
 use super::{
-    irqchip::{IrqChipGeneric, IrqGcFlags},
-    irqdata::IrqData,
+    dummychip::no_irq_chip,
+    irqchip::{IrqChip, IrqChipData, IrqChipGeneric, IrqGcFlags},
+    irqdata::{IrqData, IrqHandlerData},
+    irqdesc::IrqFlowHandler,
     HardwareIrqNumber, IrqNumber,
 };
 
@@ -49,6 +51,31 @@ impl IrqDomainManager {
         }
     }
 
+    /// 创建一个新的线性映射的irqdomain, 并将其添加到irqdomain管理器中
+    ///
+    /// 创建的irqdomain,中断号是线性的,即从0开始,依次递增
+    ///
+    /// ## 参数
+    ///
+    /// - `name` - 中断域的名字
+    /// - `ops` - 中断域的操作
+    /// - `irq_size` - 中断号的数量
+    #[allow(dead_code)]
+    pub fn create_and_add_linear(
+        &self,
+        name: String,
+        ops: &'static dyn IrqDomainOps,
+        irq_size: u32,
+    ) -> Option<Arc<IrqDomain>> {
+        self.create_and_add(
+            name,
+            ops,
+            IrqNumber::new(0),
+            HardwareIrqNumber::new(0),
+            irq_size,
+        )
+    }
+
     /// 创建一个新的irqdomain, 并将其添加到irqdomain管理器中
     ///
     /// ## 参数
@@ -272,6 +299,97 @@ impl IrqDomainManager {
             }
         }
     }
+
+    /// `irq_domain_set_info` - 在 @domain 中为 @virq 设置完整的数据
+    ///
+    /// ## 参数
+    ///
+    /// - `domain`: 要匹配的中断域
+    /// - `virq`: IRQ号
+    /// - `hwirq`: 硬件中断号
+    /// - `chip`: 相关的中断芯片
+    /// - `chip_data`: 相关的中断芯片数据
+    /// - `handler`: 中断流处理器
+    /// - `handler_data`: 中断流处理程序数据
+    /// - `handler_name`: 中断处理程序名称
+    pub fn domain_set_info(
+        &self,
+        domain: &Arc<IrqDomain>,
+        virq: IrqNumber,
+        hwirq: HardwareIrqNumber,
+        chip: Arc<dyn IrqChip>,
+        chip_data: Option<Arc<dyn IrqChipData>>,
+        flow_handler: &'static dyn IrqFlowHandler,
+        handler_data: Option<Arc<dyn IrqHandlerData>>,
+        handler_name: Option<String>,
+    ) {
+        let r = self.domain_set_hwirq_and_chip(domain, virq, hwirq, Some(chip), chip_data);
+        if r.is_err() {
+            return;
+        }
+        irq_manager().__irq_set_handler(virq, flow_handler, false, handler_name);
+        irq_manager().irq_set_handler_data(virq, handler_data).ok();
+    }
+
+    /// `domain_set_hwirq_and_chip` - 在 @domain 中为 @virq 设置 hwirq 和 irqchip
+    ///
+    /// ## 参数
+    ///
+    /// - `domain`: 要匹配的中断域
+    /// - `virq`: IRQ号
+    /// - `hwirq`: hwirq号
+    /// - `chip`: 相关的中断芯片
+    /// - `chip_data`: 相关的芯片数据
+    pub fn domain_set_hwirq_and_chip(
+        &self,
+        domain: &Arc<IrqDomain>,
+        virq: IrqNumber,
+        hwirq: HardwareIrqNumber,
+        chip: Option<Arc<dyn IrqChip>>,
+        chip_data: Option<Arc<dyn IrqChipData>>,
+    ) -> Result<(), SystemError> {
+        let irq_data: Arc<IrqData> = self
+            .domain_get_irq_data(domain, virq)
+            .ok_or(SystemError::ENOENT)?;
+        let mut inner = irq_data.inner();
+        let mut chip_info = irq_data.chip_info_write_irqsave();
+
+        inner.set_hwirq(hwirq);
+        if let Some(chip) = chip {
+            chip_info.set_chip(Some(chip));
+        } else {
+            chip_info.set_chip(Some(no_irq_chip()));
+        };
+
+        chip_info.set_chip_data(chip_data);
+
+        return Ok(());
+    }
+
+    /// `irq_domain_get_irq_data` - 获取与 @virq 和 @domain 关联的 irq_data
+    ///
+    /// ## 参数
+    ///
+    /// - `domain`: 要匹配的域
+    /// - `virq`: 要获取 irq_data 的IRQ号
+    pub fn domain_get_irq_data(
+        &self,
+        domain: &Arc<IrqDomain>,
+        virq: IrqNumber,
+    ) -> Option<Arc<IrqData>> {
+        let desc = irq_desc_manager().lookup(virq)?;
+        let mut irq_data = Some(desc.irq_data());
+
+        while irq_data.is_some() {
+            let dt = irq_data.unwrap();
+            if dt.domain().is_some() && Arc::ptr_eq(dt.domain().as_ref().unwrap(), domain) {
+                return Some(dt);
+            }
+            irq_data = dt.parent_data().map(|x| x.upgrade()).flatten();
+        }
+
+        return None;
+    }
 }
 
 struct InnerIrqDomainManager {
@@ -299,6 +417,8 @@ pub struct IrqDomain {
 #[allow(dead_code)]
 #[derive(Debug)]
 struct InnerIrqDomain {
+    /// this field not touched by the core code
+    host_data: Option<Arc<dyn IrqChipData>>,
     /// host per irq_domain flags
     flags: IrqDomainFlags,
     /// The number of mapped interrupts
@@ -335,6 +455,7 @@ impl IrqDomain {
             allocated_name: SpinLock::new(allocated_name),
             ops,
             inner: SpinLock::new(InnerIrqDomain {
+                host_data: None,
                 flags,
                 mapcount: 0,
                 bus_token,
@@ -380,6 +501,14 @@ impl IrqDomain {
     pub fn map_count(&self) -> u32 {
         self.revmap.read().map.len() as u32
     }
+
+    pub fn host_data(&self) -> Option<Arc<dyn IrqChipData>> {
+        self.inner.lock_irqsave().host_data.clone()
+    }
+
+    pub fn set_host_data(&self, host_data: Option<Arc<dyn IrqChipData>>) {
+        self.inner.lock_irqsave().host_data = host_data;
+    }
 }
 
 /// 参考 https://code.dragonos.org.cn/xref/linux-6.1.9/include/linux/irqdomain.h#190
@@ -458,10 +587,12 @@ pub trait IrqDomainOps: Debug + Send + Sync {
     /// 匹配一个中断控制器设备节点到一个主机。
     fn match_node(
         &self,
-        irq_domain: &Arc<IrqDomain>,
-        device_node: &Arc<DeviceNode>,
-        bus_token: IrqDomainBusToken,
-    ) -> bool;
+        _irq_domain: &Arc<IrqDomain>,
+        _device_node: &Arc<DeviceNode>,
+        _bus_token: IrqDomainBusToken,
+    ) -> bool {
+        false
+    }
 
     /// 创建或更新一个虚拟中断号与一个硬件中断号之间的映射。
     /// 对于给定的映射,这只会被调用一次。

+ 16 - 6
kernel/src/exception/manage.rs

@@ -451,7 +451,7 @@ impl IrqManager {
             }
 
             // 激活中断。这种激活必须独立于IRQ_NOAUTOEN进行。
-            if let Err(e) = self.irq_activate(desc.clone(), &mut desc_inner_guard) {
+            if let Err(e) = self.irq_activate(&desc, &mut desc_inner_guard) {
                 return Err(err_out_unlock(
                     e,
                     desc_inner_guard,
@@ -499,7 +499,7 @@ impl IrqManager {
             {
                 // 如果没有设置IRQF_NOAUTOEN,则自动使能中断
                 self.irq_startup(
-                    desc.clone(),
+                    &desc,
                     &mut desc_inner_guard,
                     Self::IRQ_RESEND,
                     Self::IRQ_START_COND,
@@ -598,9 +598,19 @@ impl IrqManager {
             .ok();
     }
 
+    pub(super) fn irq_activate_and_startup(
+        &self,
+        desc: &Arc<IrqDesc>,
+        desc_inner_guard: &mut SpinLockGuard<'_, InnerIrqDesc>,
+        resend: bool,
+    ) -> Result<(), SystemError> {
+        self.irq_activate(desc, desc_inner_guard)?;
+        self.irq_startup(desc, desc_inner_guard, resend, Self::IRQ_START_FORCE)
+    }
+
     pub(super) fn irq_activate(
         &self,
-        _desc: Arc<IrqDesc>,
+        _desc: &Arc<IrqDesc>,
         desc_inner_guard: &mut SpinLockGuard<'_, InnerIrqDesc>,
     ) -> Result<(), SystemError> {
         let irq_data = desc_inner_guard.irq_data();
@@ -615,7 +625,7 @@ impl IrqManager {
     /// 设置CPU亲和性并开启中断
     pub(super) fn irq_startup(
         &self,
-        desc: Arc<IrqDesc>,
+        desc: &Arc<IrqDesc>,
         desc_inner_guard: &mut SpinLockGuard<'_, InnerIrqDesc>,
         resend: bool,
         force: bool,
@@ -636,7 +646,7 @@ impl IrqManager {
                         .flags()
                         .contains(IrqChipFlags::IRQCHIP_AFFINITY_PRE_STARTUP)
                     {
-                        self.irq_setup_affinity(&desc, desc_inner_guard).ok();
+                        self.irq_setup_affinity(desc, desc_inner_guard).ok();
                     }
 
                     ret = self.__irq_startup(desc_inner_guard);
@@ -647,7 +657,7 @@ impl IrqManager {
                         .flags()
                         .contains(IrqChipFlags::IRQCHIP_AFFINITY_PRE_STARTUP)
                     {
-                        self.irq_setup_affinity(&desc, desc_inner_guard).ok();
+                        self.irq_setup_affinity(desc, desc_inner_guard).ok();
                     }
                 }
                 IrqStartupResult::Managed => {

+ 10 - 2
kernel/src/init/init.rs

@@ -18,7 +18,7 @@ use crate::{
     mm::init::mm_init,
     process::{kthread::kthread_init, process_init, ProcessManager},
     sched::{core::sched_init, SchedArch},
-    smp::SMPArch,
+    smp::{early_smp_init, SMPArch},
     syscall::Syscall,
     time::{
         clocksource::clocksource_boot_finish, timekeeping::timekeeping_init, timer::timer_init,
@@ -47,12 +47,20 @@ fn do_start_kernel() {
 
     early_setup_arch().expect("setup_arch failed");
     unsafe { mm_init() };
+
     scm_reinit().unwrap();
     textui_init().unwrap();
     init_intertrait();
+
     vfs_init().expect("vfs init failed");
     driver_init().expect("driver init failed");
-    unsafe { acpi_init() };
+
+    #[cfg(target_arch = "x86_64")]
+    unsafe {
+        acpi_init()
+    };
+
+    early_smp_init().expect("early smp init failed");
     irq_init().expect("irq init failed");
     CurrentSMPArch::prepare_cpus().expect("prepare_cpus failed");
 

+ 14 - 4
kernel/src/libs/lib_ui/screen_manager.rs

@@ -409,12 +409,22 @@ pub fn scm_disable_put_to_window() {
 /// 当内存管理单元被初始化之后,重新处理帧缓冲区问题
 #[inline(never)]
 pub fn scm_reinit() -> Result<(), SystemError> {
-    let r = true_scm_reinit();
-    if r.is_err() {
-        send_to_default_serial8250_port("scm reinit failed.\n\0".as_bytes());
+    #[cfg(target_arch = "x86_64")]
+    {
+        let r = true_scm_reinit();
+        if r.is_err() {
+            send_to_default_serial8250_port("scm reinit failed.\n\0".as_bytes());
+        }
+        return r;
+    }
+
+    #[cfg(not(target_arch = "x86_64"))]
+    {
+        return Ok(());
     }
-    return r;
 }
+
+#[allow(dead_code)]
 fn true_scm_reinit() -> Result<(), SystemError> {
     video_refresh_manager()
         .video_reinitialize(false)

+ 1 - 0
kernel/src/libs/lib_ui/textui.rs

@@ -1085,6 +1085,7 @@ pub fn textui_putstr(
 /// 初始化text ui框架
 #[inline(never)]
 pub fn textui_init() -> Result<i32, SystemError> {
+    #[cfg(target_arch = "x86_64")]
     textui_framwork_init();
 
     return Ok(0);

+ 11 - 2
kernel/src/mm/percpu.rs

@@ -3,8 +3,9 @@ use core::sync::atomic::AtomicU32;
 use alloc::vec::Vec;
 
 use crate::{
-    include::bindings::bindings::smp_get_total_cpu, libs::lazy_init::Lazy,
-    smp::core::smp_get_processor_id,
+    include::bindings::bindings::smp_get_total_cpu,
+    libs::lazy_init::Lazy,
+    smp::{core::smp_get_processor_id, cpu::ProcessorId},
 };
 
 /// 系统中的CPU数量
@@ -83,6 +84,14 @@ impl<T> PerCpuVar<T> {
         let cpu_id = smp_get_processor_id();
         &mut self.inner[cpu_id.data() as usize]
     }
+
+    pub unsafe fn force_get(&self, cpu_id: ProcessorId) -> &T {
+        &self.inner[cpu_id.data() as usize]
+    }
+
+    pub unsafe fn force_get_mut(&mut self, cpu_id: ProcessorId) -> &mut T {
+        &mut self.inner[cpu_id.data() as usize]
+    }
 }
 
 /// PerCpu变量是线程安全的,因为每个CPU都有自己的变量。

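force_get/force_get_mut exist because early riscv64 boot code needs to pick an explicit slot (the boot hart) before smp_get_processor_id() is functional; the per-CPU storage itself is just a Vec indexed by cpu id. A minimal sketch of that accessor under those assumptions, with a bare usize in place of ProcessorId:

```rust
// Simplified PerCpuVar: one slot per CPU, indexed by cpu id.
struct PerCpuVar<T> {
    inner: Vec<T>,
}

impl<T> PerCpuVar<T> {
    /// Caller promises `cpu_id` is a valid index; used before SMP is up.
    unsafe fn force_get(&self, cpu_id: usize) -> &T {
        &self.inner[cpu_id]
    }
}

fn main() {
    let v = PerCpuVar { inner: vec![10u32, 20, 30] };
    let boot_hart = 1;
    assert_eq!(unsafe { *v.force_get(boot_hart) }, 20);
}
```
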
+ 50 - 0
kernel/src/smp/cpu/mod.rs

@@ -1,5 +1,7 @@
 use core::sync::atomic::AtomicU32;
 
+use crate::libs::cpumask::CpuMask;
+
 mod c_adapter;
 
 int_like!(ProcessorId, AtomicProcessorId, u32, AtomicU32);
@@ -7,3 +9,51 @@ int_like!(ProcessorId, AtomicProcessorId, u32, AtomicU32);
 impl ProcessorId {
     pub const INVALID: ProcessorId = ProcessorId::new(u32::MAX);
 }
+
+static mut SMP_CPU_MANAGER: Option<SmpCpuManager> = None;
+
+#[inline]
+pub fn smp_cpu_manager() -> &'static SmpCpuManager {
+    unsafe { SMP_CPU_MANAGER.as_ref().unwrap() }
+}
+
+pub struct SmpCpuManager {
+    possible_cpus: CpuMask,
+}
+
+impl SmpCpuManager {
+    fn new() -> Self {
+        let possible_cpus = CpuMask::new();
+        Self { possible_cpus }
+    }
+
+    /// 设置可用的CPU
+    ///
+    /// # Safety
+    ///
+    /// - 该函数不会检查CPU的有效性,调用者需要保证CPU的有效性。
+    /// - 由于possible_cpus是一个全局变量,且为了性能考虑,并不会加锁
+    ///     访问,因此该函数只能在初始化阶段调用。
+    pub unsafe fn set_possible_cpu(&self, cpu: ProcessorId, value: bool) {
+        // 强制获取mut引用,因为该函数只能在初始化阶段调用
+        let p = (self as *const Self as *mut Self).as_mut().unwrap();
+
+        p.possible_cpus.set(cpu, value);
+    }
+
+    /// 获取可用的CPU
+    #[allow(dead_code)]
+    pub fn possible_cpus(&self) -> &CpuMask {
+        &self.possible_cpus
+    }
+}
+
+pub fn smp_cpu_manager_init(boot_cpu: ProcessorId) {
+    unsafe {
+        SMP_CPU_MANAGER = Some(SmpCpuManager::new());
+    }
+
+    unsafe { smp_cpu_manager().set_possible_cpu(boot_cpu, true) };
+
+    SmpCpuManager::arch_init(boot_cpu);
+}

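smp_cpu_manager_init creates the manager, marks the boot CPU as possible, and then defers to SmpCpuManager::arch_init (still a todo on riscv64 in this commit) to discover the rest. A rough model of that sequence is shown below; the u64 bitmask is a stand-in for the kernel's CpuMask, and the locking/"init phase only" caveat from the real code is elided.

```rust
// Toy possible-CPU bookkeeping mirroring smp_cpu_manager_init().
struct SmpCpuManager {
    possible_cpus: u64, // simplified CpuMask
}

impl SmpCpuManager {
    fn new() -> Self {
        Self { possible_cpus: 0 }
    }
    fn set_possible_cpu(&mut self, cpu: u32, value: bool) {
        if value {
            self.possible_cpus |= 1 << cpu;
        } else {
            self.possible_cpus &= !(1 << cpu);
        }
    }
}

fn main() {
    let mut manager = SmpCpuManager::new();
    let boot_cpu = 0;
    manager.set_possible_cpu(boot_cpu, true); // the boot CPU is always possible
    // SmpCpuManager::arch_init(boot_cpu) would add the remaining CPUs here.
    assert_eq!(manager.possible_cpus, 0b1);
}
```
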
+ 12 - 1
kernel/src/smp/mod.rs

@@ -5,7 +5,10 @@ use crate::{
     exception::ipi::{IpiKind, IpiTarget},
 };
 
-use self::cpu::ProcessorId;
+use self::{
+    core::smp_get_processor_id,
+    cpu::{smp_cpu_manager_init, ProcessorId},
+};
 
 pub mod c_adapter;
 pub mod core;
@@ -29,3 +32,11 @@ pub trait SMPArch {
     /// 该函数需要标记为 `#[inline(never)]`
     fn init() -> Result<(), SystemError>;
 }
+
+/// 早期SMP初始化
+#[inline(never)]
+pub fn early_smp_init() -> Result<(), SystemError> {
+    smp_cpu_manager_init(smp_get_processor_id());
+
+    return Ok(());
+}