
feat(prototyper): implement basic functions of the SBI PMU extension (#122)

* feat(prototyper): add PMU extension and implement `sbi_pmu_num_counters`

Signed-off-by: guttatus <[email protected]>

* feat(prototyper): implement `sbi_pmu_counter_get_info` and related functions

Signed-off-by: guttatus <[email protected]>

* feat(prototyper): Implement PMU `counter_start` and more functions

Signed-off-by: Zongyao Chen <[email protected]>

* feat(prototyper): add the basic functions of the `PMU` extension

Signed-off-by: guttatus <[email protected]>

* feat(prototyper): enhance PMU extension with improved documentation and code organization

- Refactored comments and documentation for clarity in `pmu.rs`, detailing the purpose of various functions and constants.
- Improved the structure of the `Cargo.toml` file for better readability and organization of dependencies.
- Added new constants and inline documentation to clarify the functionality of performance counters.

Signed-off-by: Zongyao Chen <[email protected]>

* feat(prototyper): add pmu extension to SBI structure

Signed-off-by: guttatus <[email protected]>

* feat(prototyper): record firmware event statistics at the corresponding call sites

Signed-off-by: guttatus <[email protected]>

* fix(prototyper): fix an event comparison bug and add a PMU test to the test kernel

Signed-off-by: guttatus <[email protected]>

* fix(prototyper): fix a bug where the pmu node could only be parsed at the root of the device tree

Signed-off-by: guttatus <[email protected]>

* fix(prototyper): fix a typo in `misc`

Signed-off-by: guttatus <[email protected]>

* refactor(prototyper): use `seq!` to rewrite repetitive constants and `match` arms

Signed-off-by: Woshiluo Luo <[email protected]>

---------

Signed-off-by: guttatus <[email protected]>
Signed-off-by: Zongyao Chen <[email protected]>
Signed-off-by: Woshiluo Luo <[email protected]>
Co-authored-by: chenzongyao200127 <[email protected]>
Co-authored-by: Woshiluo Luo <[email protected]>
Luo Jia / Zhouqi Jiang committed 5 days ago
Commit 11d7b6c2e2

+ 15 - 7
prototyper/prototyper/Cargo.toml

@@ -16,15 +16,23 @@ uart16550 = "0.0.1"
 riscv-decode = "0.2.1"
 cfg-if = "1.0.0"
 buddy_system_allocator = "0.11.0"
-rustsbi = { version = "0.4.0", features = ["machine"], path = "../../library/rustsbi" }
-sbi-spec = { version = "0.0.8", features = ["legacy"], path = "../../library/sbi-spec" }
+rustsbi = { version = "0.4.0", features = [
+    "machine",
+], path = "../../library/rustsbi" }
+sbi-spec = { version = "0.0.8", features = [
+    "legacy",
+], path = "../../library/sbi-spec" }
 serde = { version = "1.0.202", default-features = false, features = ["derive"] }
-fast-trap = { version = "0.1.0",  features = ["riscv-m"] }
-serde-device-tree = { git = "https://github.com/rustsbi/serde-device-tree", rev = "e7f9404f",  default-features = false }
-uart_xilinx = { git = "https://github.com/duskmoon314/uart-rs/" }
-xuantie-riscv = { git= "https://github.com/rustsbi/xuantie" }
-bouffalo-hal = { git = "https://github.com/rustsbi/bouffalo-hal", rev = "968b949", features = ["bl808"] }
+fast-trap = { version = "0.1.0", features = ["riscv-m"] }
+serde-device-tree = { git = "https://github.com/rustsbi/serde-device-tree", rev = "2a5d6ab7", default-features = false }
+uart_xilinx = { git = "https://github.com/duskmoon314/uart-rs/", rev = "12be9142" }
+xuantie-riscv = { git = "https://github.com/rustsbi/xuantie", rev = "7a521c04" }
+bouffalo-hal = { git = "https://github.com/rustsbi/bouffalo-hal", rev = "968b949", features = [
+    "bl808",
+] }
 static-toml = "1"
+seq-macro = "0.3.5"
+pastey = "0.1.0"
 
 [[bin]]
 name = "rustsbi-prototyper"

+ 22 - 0
prototyper/prototyper/src/devicetree.rs

@@ -2,6 +2,7 @@ use serde::Deserialize;
 use serde_device_tree::{
     Dtb, DtbPtr,
     buildin::{Node, NodeSeq, Reg, StrSeq},
+    value::riscv_pmu::{EventToMhpmcounters, EventToMhpmevent, RawEventToMhpcounters},
 };
 
 use core::ops::Range;
@@ -52,6 +53,16 @@ pub struct Memory<'a> {
     pub reg: Reg<'a>,
 }
 
+#[derive(Deserialize)]
+pub struct Pmu<'a> {
+    #[serde(rename = "riscv,event-to-mhpmevent")]
+    pub event_to_mhpmevent: Option<EventToMhpmevent<'a>>,
+    #[serde(rename = "riscv,event-to-mhpmcounters")]
+    pub event_to_mhpmcounters: Option<EventToMhpmcounters<'a>>,
+    #[serde(rename = "riscv,raw-event-to-mhpmcounters")]
+    pub raw_event_to_mhpmcounters: Option<RawEventToMhpcounters<'a>>,
+}
+
 /// Errors that can occur during device tree parsing.
 pub enum ParseDeviceTreeError {
     /// Invalid device tree format.
@@ -90,3 +101,14 @@ pub fn get_compatible_and_range<'de>(node: &Node) -> Option<(StrSeq<'de>, Range<
         None
     }
 }
+
+pub fn get_compatible<'de>(node: &Node) -> Option<StrSeq<'de>> {
+    node.get_prop("compatible")
+        .map(|prop_item| prop_item.deserialize::<StrSeq<'de>>())
+}

+ 2 - 2
prototyper/prototyper/src/macros.rs

@@ -28,13 +28,13 @@ macro_rules! has_csr {
     ($($x: expr)*) => {{
             use core::arch::asm;
             use riscv::register::mtvec;
-            use crate::sbi::early_trap::expected_trap;
+            use crate::sbi::early_trap::light_expected_trap;
             let res: usize;
             unsafe {
                 // Backup old mtvec
                 let mtvec = mtvec::read().bits();
                 // Write expected_trap
-                mtvec::write(expected_trap as _, mtvec::TrapMode::Direct);
+                mtvec::write(light_expected_trap as _, mtvec::TrapMode::Direct);
                 asm!("addi a0, zero, 0",
                     "addi a1, zero, 0",
                     "csrr a2, {}",

+ 23 - 10
prototyper/prototyper/src/main.rs

@@ -24,9 +24,10 @@ use core::arch::{asm, naked_asm};
 use crate::platform::PLATFORM;
 use crate::riscv::csr::menvcfg;
 use crate::riscv::current_hartid;
-use crate::sbi::extensions::{
-    Extension, PrivilegedVersion, hart_extension_probe, hart_privileged_version,
-    privileged_version_detection,
+use crate::sbi::features::hart_mhpm_mask;
+use crate::sbi::features::{
+    Extension, PrivilegedVersion, hart_extension_probe, hart_features_detection,
+    hart_privileged_version,
 };
 use crate::sbi::hart_context::NextStage;
 use crate::sbi::heap::sbi_heap_init;
@@ -66,10 +67,17 @@ extern "C" fn rust_main(_hart_id: usize, opaque: usize, nonstandard_a2: usize) {
         let hart_id = current_hartid();
         info!("{:<30}: {}", "Boot HART ID", hart_id);
 
-        // Detection Priv Version
-        privileged_version_detection();
+        // Detect hart features.
+        hart_features_detection();
+        // Prepare the trap stack for this hart.
+        trap_stack::prepare_for_trap();
         let priv_version = hart_privileged_version(hart_id);
-        info!("{:<30}: {:?}", "Boot HART Privileged Version", priv_version);
+        let mhpm_mask = hart_mhpm_mask(hart_id);
+        info!(
+            "{:<30}: {:?}",
+            "Boot HART Privileged Version:", priv_version
+        );
+        info!("{:<30}: {:#08x}", "Boot HART MHPM Mask:", mhpm_mask);
 
         // Start kernel.
         local_remote_hsm().start(NextStage {
@@ -79,12 +87,14 @@ extern "C" fn rust_main(_hart_id: usize, opaque: usize, nonstandard_a2: usize) {
         });
 
         info!(
-            "Redirecting hart {} to 0x{:0>16x} in {:?} mode.",
+            "Redirecting hart {} to {:#016x} in {:?} mode.",
             current_hartid(),
             next_addr,
             mpp
         );
     } else {
+        // Detect hart features.
+        hart_features_detection();
         // Other harts task entry.
         trap_stack::prepare_for_trap();
 
@@ -94,8 +104,6 @@ extern "C" fn rust_main(_hart_id: usize, opaque: usize, nonstandard_a2: usize) {
         }
 
         firmware::set_pmp(unsafe { PLATFORM.info.memory_range.as_ref().unwrap() });
-        // Detection Priv Version
-        privileged_version_detection();
     }
     // Clear all pending IPIs.
     ipi::clear_all();
@@ -113,7 +121,12 @@ extern "C" fn rust_main(_hart_id: usize, opaque: usize, nonstandard_a2: usize) {
         medeleg::clear_load_misaligned();
         medeleg::clear_store_misaligned();
         medeleg::clear_illegal_instruction();
-        if hart_privileged_version(current_hartid()) >= PrivilegedVersion::Version1_12 {
+
+        let hart_priv_version = hart_privileged_version(current_hartid());
+        if hart_priv_version >= PrivilegedVersion::Version1_11 {
+            asm!("csrw mcountinhibit, {}", in(reg) !0b10);
+        }
+        if hart_priv_version >= PrivilegedVersion::Version1_12 {
             // Configure environment features based on available extensions.
             if hart_extension_probe(current_hartid(), Extension::Sstc) {
                 menvcfg::set_bits(

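Note on the `csrw mcountinhibit, !0b10` added above: bit k of `mcountinhibit` freezes the counter at CSR offset k (bit 0 = `mcycle`, bit 2 = `minstret`, bits 3..=31 = `mhpmcounter3..31`; bit 1 is hard-wired to zero), so writing `!0b10` stops every counter at boot until the PMU extension explicitly starts it. A minimal sketch of checking that state with the `riscv` crate; `counter_is_inhibited` is an illustrative helper, not part of this patch:

```rust
// Sketch only: mirrors the inhibit check used later in sbi/pmu.rs.
fn counter_is_inhibited(mhpm_offset: u32) -> bool {
    let inhibit = riscv::register::mcountinhibit::read();
    (inhibit.bits() & (1 << mhpm_offset)) != 0
}
```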
+ 147 - 67
prototyper/prototyper/src/platform/mod.rs

@@ -22,13 +22,13 @@ use crate::platform::console::{
 use crate::platform::reset::SIFIVETEST_COMPATIBLE;
 use crate::sbi::SBI;
 use crate::sbi::console::SbiConsole;
-use crate::sbi::extensions;
+use crate::sbi::features::extension_detection;
 use crate::sbi::hsm::SbiHsm;
 use crate::sbi::ipi::SbiIpi;
 use crate::sbi::logger;
+use crate::sbi::pmu::{EventToCounterMap, RawEventToCounterMap};
 use crate::sbi::reset::SbiReset;
 use crate::sbi::rfence::SbiRFence;
-use crate::sbi::trap_stack;
 
 mod clint;
 mod console;
@@ -78,13 +78,6 @@ impl Platform {
     }
 
     pub fn init(&mut self, fdt_address: usize) {
-        self.info_init(fdt_address);
-        self.sbi_init();
-        trap_stack::prepare_for_trap();
-        self.ready.swap(true, Ordering::Release);
-    }
-
-    fn info_init(&mut self, fdt_address: usize) {
         let dtb = parse_device_tree(fdt_address).unwrap_or_else(fail::device_tree_format);
         let dtb = dtb.share();
 
@@ -92,9 +85,50 @@ impl Platform {
             .unwrap_or_else(fail::device_tree_deserialize_root);
         let tree: Tree = root.deserialize();
 
-        // Get console device, init sbi console and logger
+        // Get console device, init sbi console and logger.
         self.sbi_find_and_init_console(&root);
+        // Get clint and reset device, init sbi ipi, reset, hsm and rfence.
+        self.sbi_init_ipi_reset_hsm_rfence(&root);
+        // Initialize pmu extension
+        self.sbi_init_pmu(&root);
+        // Get other info
+        self.sbi_misc_init(&tree);
 
+        self.ready.swap(true, Ordering::Release);
+    }
+
+    fn sbi_find_and_init_console(&mut self, root: &serde_device_tree::buildin::Node) {
+        //  Get console device info
+        if let Some(stdout_path) = root.chosen_stdout_path() {
+            if let Some(node) = root.find(stdout_path) {
+                let info = get_compatible_and_range(&node);
+                if let Some((compatible, regs)) = info {
+                    for device_id in compatible.iter() {
+                        if UART16650U8_COMPATIBLE.contains(&device_id) {
+                            self.info.console = Some((regs.start, MachineConsoleType::Uart16550U8));
+                        }
+                        if UART16650U32_COMPATIBLE.contains(&device_id) {
+                            self.info.console =
+                                Some((regs.start, MachineConsoleType::Uart16550U32));
+                        }
+                        if UARTAXILITE_COMPATIBLE.contains(&device_id) {
+                            self.info.console = Some((regs.start, MachineConsoleType::UartAxiLite));
+                        }
+                        if UARTBFLB_COMPATIBLE.contains(&device_id) {
+                            self.info.console = Some((regs.start, MachineConsoleType::UartBflb));
+                        }
+                    }
+                }
+            }
+        }
+
+        // init console and logger
+        self.sbi_console_init();
+        logger::Logger::init().unwrap();
+        info!("Hello RustSBI!");
+    }
+
+    fn sbi_init_ipi_reset_hsm_rfence(&mut self, root: &serde_device_tree::buildin::Node) {
         // Get ipi and reset device info
         let mut find_device = |node: &serde_device_tree::buildin::Node| {
             let info = get_compatible_and_range(node);
@@ -120,7 +154,73 @@ impl Platform {
             }
         };
         root.search(&mut find_device);
+        self.sbi_ipi_init();
+        self.sbi_hsm_init();
+        self.sbi_reset_init();
+        self.sbi_rfence_init();
+    }
+
+    fn sbi_init_pmu(&mut self, root: &serde_device_tree::buildin::Node) {
+        let mut pmu_node: Option<Pmu> = None;
+        let mut find_pmu = |node: &serde_device_tree::buildin::Node| {
+            let info = get_compatible(node);
+            if let Some(compatible_strseq) = info {
+                let compatible_iter = compatible_strseq.iter();
+                for compatible in compatible_iter {
+                    if compatible == "riscv,pmu" {
+                        pmu_node = Some(node.deserialize::<Pmu>());
+                    }
+                }
+            }
+        };
+        root.search(&mut find_pmu);
+
+        if let Some(ref pmu) = pmu_node {
+            let sbi_pmu = self.sbi.pmu.get_or_insert_default();
+            if let Some(ref event_to_mhpmevent) = pmu.event_to_mhpmevent {
+                let len = event_to_mhpmevent.len();
+                for idx in 0..len {
+                    let event = event_to_mhpmevent.get_event_id(idx);
+                    let mhpmevent = event_to_mhpmevent.get_selector_value(idx);
+                    sbi_pmu.insert_event_to_mhpmevent(event, mhpmevent);
+                    debug!(
+                        "pmu: insert event: 0x{:08x}, mhpmevent: {:#016x}",
+                        event, mhpmevent
+                    );
+                }
+            }
 
+            if let Some(ref event_to_mhpmcounters) = pmu.event_to_mhpmcounters {
+                let len = event_to_mhpmcounters.len();
+                for idx in 0..len {
+                    let events = event_to_mhpmcounters.get_event_idx_range(idx);
+                    let mhpmcounters = event_to_mhpmcounters.get_counter_bitmap(idx);
+                    let event_to_counter =
+                        EventToCounterMap::new(mhpmcounters, *events.start(), *events.end());
+                    debug!("pmu: insert event_to_mhpmcounter: {:x?}", event_to_counter);
+                    sbi_pmu.insert_event_to_mhpmcounter(event_to_counter);
+                }
+            }
+
+            if let Some(ref raw_event_to_mhpmcounters) = pmu.raw_event_to_mhpmcounters {
+                let len = raw_event_to_mhpmcounters.len();
+                for idx in 0..len {
+                    let raw_event_select = raw_event_to_mhpmcounters.get_event_idx_base(idx);
+                    let select_mask = raw_event_to_mhpmcounters.get_event_idx_mask(idx);
+                    let counters_mask = raw_event_to_mhpmcounters.get_counter_bitmap(idx);
+                    let raw_event_to_counter =
+                        RawEventToCounterMap::new(counters_mask, raw_event_select, select_mask);
+                    debug!(
+                        "pmu: insert raw_event_to_mhpmcounter: {:x?}",
+                        raw_event_to_counter
+                    );
+                    sbi_pmu.insert_raw_event_to_mhpmcounter(raw_event_to_counter);
+                }
+            }
+        }
+    }
+
+    fn sbi_misc_init(&mut self, tree: &Tree) {
         // Get memory info
         // TODO: More than one memory node or range?
         let memory_reg = tree
@@ -137,7 +237,7 @@ impl Platform {
         self.info.cpu_num = Some(tree.cpus.cpu.len());
 
         // Get model info
-        if let Some(model) = tree.model {
+        if let Some(ref model) = tree.model {
             let model = model.iter().next().unwrap_or("<unspecified>");
             self.info.model = model.to_string();
         } else {
@@ -146,7 +246,7 @@ impl Platform {
         }
 
         // TODO: Need a better extension initialization method
-        extensions::init(&tree.cpus.cpu);
+        extension_detection(&tree.cpus.cpu);
 
         // Find which hart is enabled by fdt
         let mut cpu_list: CpuEnableList = [false; NUM_HART_MAX];
@@ -160,44 +260,6 @@ impl Platform {
         self.info.cpu_enabled = Some(cpu_list);
     }
 
-    fn sbi_init(&mut self) {
-        self.sbi_ipi_init();
-        self.sbi_hsm_init();
-        self.sbi_reset_init();
-        self.sbi_rfence_init();
-    }
-
-    fn sbi_find_and_init_console(&mut self, root: &serde_device_tree::buildin::Node) {
-        //  Get console device info
-        if let Some(stdout_path) = root.chosen_stdout_path() {
-            if let Some(node) = root.find(stdout_path) {
-                let info = get_compatible_and_range(&node);
-                if let Some((compatible, regs)) = info {
-                    for device_id in compatible.iter() {
-                        if UART16650U8_COMPATIBLE.contains(&device_id) {
-                            self.info.console = Some((regs.start, MachineConsoleType::Uart16550U8));
-                        }
-                        if UART16650U32_COMPATIBLE.contains(&device_id) {
-                            self.info.console =
-                                Some((regs.start, MachineConsoleType::Uart16550U32));
-                        }
-                        if UARTAXILITE_COMPATIBLE.contains(&device_id) {
-                            self.info.console = Some((regs.start, MachineConsoleType::UartAxiLite));
-                        }
-                        if UARTBFLB_COMPATIBLE.contains(&device_id) {
-                            self.info.console = Some((regs.start, MachineConsoleType::UartBflb));
-                        }
-                    }
-                }
-            }
-        }
-
-        // init console and logger
-        self.sbi_console_init();
-        logger::Logger::init().unwrap();
-        info!("Hello RustSBI!");
-    }
-
     fn sbi_console_init(&mut self) {
         if let Some((base, console_type)) = self.info.console {
             self.sbi.console = match console_type {
@@ -310,6 +372,7 @@ impl Platform {
         self.print_reset_info();
         self.print_hsm_info();
         self.print_rfence_info();
+        self.print_pmu_info();
     }
 
     #[inline]
@@ -318,7 +381,7 @@ impl Platform {
             Some((base, device)) => {
                 info!(
                     "{:<30}: {:?} (Base Address: 0x{:x})",
-                    "Platform IPI Device", device, base
+                    "Platform IPI Extension", device, base
                 );
             }
             None => warn!("{:<30}: Not Available", "Platform IPI Device"),
@@ -331,7 +394,7 @@ impl Platform {
             Some((base, device)) => {
                 info!(
                     "{:<30}: {:?} (Base Address: 0x{:x})",
-                    "Platform Console Device", device, base
+                    "Platform Console Extension", device, base
                 );
             }
             None => warn!("{:<30}: Not Available", "Platform Console Device"),
@@ -343,30 +406,18 @@ impl Platform {
         if let Some(base) = self.info.reset {
             info!(
                 "{:<30}: Available (Base Address: 0x{:x})",
-                "Platform Reset Device", base
+                "Platform Reset Extension", base
             );
         } else {
             warn!("{:<30}: Not Available", "Platform Reset Device");
         }
     }
 
-    #[inline]
-    fn print_memory_info(&self) {
-        if let Some(memory_range) = &self.info.memory_range {
-            info!(
-                "{:<30}: 0x{:x} - 0x{:x}",
-                "Memory range", memory_range.start, memory_range.end
-            );
-        } else {
-            warn!("{:<30}: Not Available", "Memory range");
-        }
-    }
-
     #[inline]
     fn print_hsm_info(&self) {
         info!(
             "{:<30}: {}",
-            "Platform HSM Device",
+            "Platform HSM Extension",
             if self.have_hsm() {
                 "Available"
             } else {
@@ -379,7 +430,7 @@ impl Platform {
     fn print_rfence_info(&self) {
         info!(
             "{:<30}: {}",
-            "Platform RFence Device",
+            "Platform RFence Extension",
             if self.have_rfence() {
                 "Available"
             } else {
@@ -388,6 +439,31 @@ impl Platform {
         );
     }
 
+    #[inline]
+    fn print_pmu_info(&self) {
+        info!(
+            "{:<30}: {}",
+            "Platform PMU Extension",
+            if self.have_pmu() {
+                "Available"
+            } else {
+                "Not Available"
+            }
+        );
+    }
+
+    #[inline]
+    fn print_memory_info(&self) {
+        if let Some(memory_range) = &self.info.memory_range {
+            info!(
+                "{:<30}: 0x{:x} - 0x{:x}",
+                "Memory range", memory_range.start, memory_range.end
+            );
+        } else {
+            warn!("{:<30}: Not Available", "Memory range");
+        }
+    }
+
     #[inline]
     fn print_additional_info(&self) {
         if !self.ready.load(Ordering::Acquire) {
@@ -426,6 +502,10 @@ impl Platform {
         self.sbi.rfence.is_some()
     }
 
+    pub fn have_pmu(&self) -> bool {
+        self.sbi.pmu.is_some()
+    }
+
     pub fn ready(&self) -> bool {
         self.ready.load(Ordering::Acquire)
     }

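To illustrate how `sbi_init_pmu` consumes a `riscv,event-to-mhpmcounters` entry, here is a minimal sketch. The constructor and argument order follow the `EventToCounterMap::new(mhpmcounters, *events.start(), *events.end())` call above; the literal values are invented for illustration only, and the range is assumed to be inclusive.

```rust
// Sketch only: illustrative values, not taken from a real device tree.
use crate::sbi::pmu::EventToCounterMap;

// One entry maps an inclusive range of SBI event indices to a bitmap of
// mhpmcounters that are allowed to count them.
fn example_entry() -> EventToCounterMap {
    EventToCounterMap::new(
        0x0000_0008, // counter bitmap: bit 3 set, i.e. only mhpmcounter3
        0x1,         // first event index in the range (SBI_PMU_HW_CPU_CYCLES)
        0x9,         // last event index in the range
    )
}
```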
+ 59 - 8
prototyper/prototyper/src/riscv/csr.rs

@@ -1,13 +1,46 @@
 #![allow(unused)]
 
-/// CSR addresses for timer registers.
-///
-/// Time value (lower 32 bits).
-pub const CSR_TIME: u32 = 0xc01;
-/// Time value (upper 32 bits).
-pub const CSR_TIMEH: u32 = 0xc81;
-/// Supervisor timer compare value.
-pub const CSR_STIMECMP: u32 = 0x14D;
+use pastey::paste;
+use seq_macro::seq;
+
+/// CSR addresses
+pub const CSR_STIMECMP: u16 = 0x14D;
+pub const CSR_MCOUNTEREN: u16 = 0x306;
+pub const CSR_MENVCFG: u16 = 0x30a;
+pub const CSR_MCYCLE: u16 = 0xb00;
+pub const CSR_MINSTRET: u16 = 0xb02;
+seq!(N in 3..32 {
+    pub const CSR_MHPMCOUNTER~N: u16 = 0xb00 + N;
+});
+pub const CSR_MCYCLEH: u16 = 0xb80;
+pub const CSR_MINSTRETH: u16 = 0xb82;
+seq!(N in 3..32 {
+    paste! {
+        pub const [<CSR_MHPMCOUNTER ~N H>]: u16 = 0xb80 + N;
+    }
+});
+/* User Counters/Timers */
+pub const CSR_CYCLE: u16 = 0xc00;
+pub const CSR_TIME: u16 = 0xc01;
+pub const CSR_INSTRET: u16 = 0xc02;
+seq!(N in 3..32 {
+    pub const CSR_HPMCOUNTER~N: u16 = 0xc00 + N;
+});
+// Machine counter setup CSRs (mcountinhibit, mcyclecfg, minstretcfg, mhpmevent3..=mhpmevent31)
+pub const CSR_MCOUNTINHIBIT: u16 = 0x320;
+pub const CSR_MCYCLECFG: u16 = 0x321;
+pub const CSR_MINSTRETCFG: u16 = 0x322;
+seq!(N in 3..32 {
+    pub const CSR_MHPMEVENT~N: u16 = 0x320 + N;
+});
+
+// For RV32
+pub const CSR_CYCLEH: u16 = 0xc80;
+pub const CSR_TIMEH: u16 = 0xc81;
+pub const CSR_INSTRETH: u16 = 0xc82;
+seq!(N in 3..32 {
+    paste!{ pub const [<CSR_HPMCOUNTER ~N H>]: u16 = 0xc80 + N; }
+});
 
 /// Machine environment configuration register (menvcfg) bit fields.
 pub mod menvcfg {
@@ -61,3 +94,21 @@ pub mod stimecmp {
         }
     }
 }
+
+pub mod mcycle {
+    use core::arch::asm;
+    pub fn write(value: u64) {
+        unsafe {
+            asm!("csrrw zero, mcycle, {}", in(reg) value, options(nomem));
+        }
+    }
+}
+
+pub mod minstret {
+    use core::arch::asm;
+    pub fn write(value: u64) {
+        unsafe {
+            asm!("csrrw zero, minstret, {}", in(reg) value, options(nomem));
+        }
+    }
+}

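For readers unfamiliar with the two new dependencies (`seq-macro = "0.3.5"`, `pastey = "0.1.0"` in Cargo.toml above), the `seq!`/`paste!` invocations simply stamp out numbered constants. A standalone sketch of the same pattern:

```rust
use pastey::paste;
use seq_macro::seq;

// Expands to CSR_MHPMCOUNTER3 = 0xb03 ... CSR_MHPMCOUNTER31 = 0xb1f.
seq!(N in 3..32 {
    pub const CSR_MHPMCOUNTER~N: u16 = 0xb00 + N;
});

// `paste!` concatenates the trailing `H`, giving CSR_MHPMCOUNTER3H = 0xb83, etc.
seq!(N in 3..32 {
    paste! { pub const [<CSR_MHPMCOUNTER ~N H>]: u16 = 0xb80 + N; }
});

fn main() {
    assert_eq!(CSR_MHPMCOUNTER3, 0xb03);
    assert_eq!(CSR_MHPMCOUNTER31H, 0xb9f);
}
```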
+ 100 - 1
prototyper/prototyper/src/sbi/early_trap.rs

@@ -1,4 +1,6 @@
+use core::arch::asm;
 use core::arch::naked_asm;
+use riscv::register::mtvec;
 
 /// When you expected some insts will cause trap, use this.
 /// If trap happened, a0 will set to 1, otherwise will be 0.
@@ -7,7 +9,7 @@ use core::arch::naked_asm;
 // TODO: Support save trap info.
 #[naked]
 #[repr(align(16))]
-pub(crate) unsafe extern "C" fn expected_trap() {
+pub(crate) unsafe extern "C" fn light_expected_trap() {
     unsafe {
         naked_asm!(
             "add a0, zero, zero",
@@ -20,3 +22,100 @@ pub(crate) unsafe extern "C" fn expected_trap() {
         )
     }
 }
+
+#[repr(C)]
+pub struct TrapInfo {
+    pub mepc: usize,
+    pub mcause: usize,
+    pub mtval: usize,
+}
+
+impl Default for TrapInfo {
+    fn default() -> Self {
+        Self {
+            mepc: 0,
+            mcause: 0,
+            mtval: 0,
+        }
+    }
+}
+
+#[naked]
+#[repr(align(16))]
+pub(crate) unsafe extern "C" fn expected_trap() {
+    unsafe {
+        naked_asm!(
+            "csrr a4, mepc",
+            "sd a4, 0*8(a3)",
+            "csrr a4, mcause",
+            "sd a4, 1*8(a3)",
+            "csrr a4, mtval",
+            "sd a4, 2*8(a3)",
+            "csrr a4, mepc",
+            "addi a4, a4, 4",
+            "csrw mepc, a4",
+            "mret",
+        )
+    }
+}
+
+pub(crate) unsafe fn csr_read_allow<const CSR_NUM: u16>(trap_info: *mut TrapInfo) -> usize {
+    let tinfo = trap_info as usize;
+    let mut ret: usize;
+    // Backup old mtvec
+    let mtvec = mtvec::read().bits();
+    unsafe {
+        core::ptr::write_volatile(&mut (*trap_info).mcause, usize::MAX);
+        // Write expected_trap
+        mtvec::write(expected_trap as _, mtvec::TrapMode::Direct);
+
+        asm!(
+            "add a3, {tinfo}, zero",
+            "add a4, {tinfo}, zero",
+            "csrr {ret}, {csr}",
+            tinfo = in(reg) tinfo,
+            ret = out(reg) ret,
+            csr = const CSR_NUM,
+            options(nostack, preserves_flags)
+        );
+        asm!("csrw mtvec, {}", in(reg) mtvec);
+    }
+    ret
+}
+
+pub(crate) unsafe fn csr_write_allow<const CSR_NUM: u16>(trap_info: *mut TrapInfo, value: usize) {
+    let tinfo = trap_info as usize;
+    // Backup old mtvec
+    let mtvec = mtvec::read().bits();
+    unsafe {
+        core::ptr::write_volatile(&mut (*trap_info).mcause, usize::MAX);
+        // Write expected_trap
+        mtvec::write(expected_trap as _, mtvec::TrapMode::Direct);
+
+        asm!(
+            "add a3, {tinfo}, zero",
+            "add a4, {tinfo}, zero",
+            "csrw {csr}, {value}",
+            tinfo = in(reg) tinfo,
+            csr = const CSR_NUM,
+            value = in(reg) value,
+            options(nostack, preserves_flags)
+        );
+        asm!("csrw mtvec, {}", in(reg) mtvec);
+    }
+}
+
+pub(crate) unsafe fn csr_swap<const CSR_NUM: u16>(val: usize) -> usize {
+    let ret: usize;
+
+    unsafe {
+        asm!(
+            "csrrw {ret}, {csr}, {val}",
+            csr = const CSR_NUM,
+            val = in(reg) val,
+            ret = out(reg) ret,
+            options(nostack, preserves_flags)
+        );
+    }
+    ret
+}

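The helpers above are what make trap-tolerant CSR probing possible. A minimal sketch of how they combine, mirroring `check_mhpm_csr` in `features.rs` below (the function name here is illustrative, not part of the patch):

```rust
// Sketch only: probe whether mhpmcounter3 (CSR 0xb03) exists without dying on
// an illegal-instruction trap.
fn csr_exists_mhpmcounter3() -> bool {
    let mut info = TrapInfo::default();
    // csr_read_allow presets info.mcause to usize::MAX and points mtvec at
    // expected_trap; if the read traps, expected_trap overwrites mcause.
    let _old = unsafe { csr_read_allow::<0xb03>(&mut info) };
    info.mcause == usize::MAX
}
```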
+ 63 - 41
prototyper/prototyper/src/sbi/extensions.rs → prototyper/prototyper/src/sbi/features.rs

@@ -1,11 +1,18 @@
+use seq_macro::seq;
 use serde_device_tree::buildin::NodeSeq;
 
+use crate::riscv::csr::*;
 use crate::riscv::current_hartid;
-use crate::sbi::trap_stack::ROOT_STACK;
+use crate::sbi::early_trap::{TrapInfo, csr_read_allow, csr_write_allow};
+use crate::sbi::trap_stack::{hart_context, hart_context_mut};
+
+use super::early_trap::csr_swap;
 
 pub struct HartFeatures {
     extension: [bool; Extension::COUNT],
     privileged_version: PrivilegedVersion,
+    mhpm_mask: u32,
+    mhpm_bits: u32,
 }
 
 #[derive(Copy, Clone)]
@@ -37,26 +44,22 @@ impl Extension {
     }
 }
 
+/// Returns whether the given hart supports the given extension.
 pub fn hart_extension_probe(hart_id: usize, ext: Extension) -> bool {
-    unsafe {
-        ROOT_STACK
-            .get_mut(hart_id)
-            .map(|x| x.hart_context().features.extension[ext.index()])
-            .unwrap()
-    }
+    hart_context(hart_id).features.extension[ext.index()]
 }
 
 pub fn hart_privileged_version(hart_id: usize) -> PrivilegedVersion {
-    unsafe {
-        ROOT_STACK
-            .get_mut(hart_id)
-            .map(|x| x.hart_context().features.privileged_version)
-            .unwrap()
-    }
+    hart_context(hart_id).features.privileged_version
 }
 
+pub fn hart_mhpm_mask(hart_id: usize) -> u32 {
+    hart_context(hart_id).features.mhpm_mask
+}
+
+/// Detects the ISA extensions of each hart from the device tree.
 #[cfg(not(feature = "nemu"))]
-pub fn init(cpus: &NodeSeq) {
+pub fn extension_detection(cpus: &NodeSeq) {
     use crate::devicetree::Cpu;
     for cpu_iter in cpus.iter() {
         let cpu = cpu_iter.deserialize::<Cpu>();
@@ -74,23 +77,13 @@ pub fn init(cpus: &NodeSeq) {
                 hart_exts[ext.index()] = isa.contains(ext.as_str());
             })
         }
-
-        unsafe {
-            ROOT_STACK
-                .get_mut(hart_id)
-                .map(|stack| stack.hart_context().features.extension = hart_exts)
-                .unwrap()
-        }
+        hart_context_mut(hart_id).features.extension = hart_exts;
     }
 }
 
-pub fn privileged_version_detection() {
+fn privileged_version_detection() {
     let mut current_priv_ver = PrivilegedVersion::Unknown;
     {
-        const CSR_MCOUNTEREN: u64 = 0x306;
-        const CSR_MCOUNTINHIBIT: u64 = 0x320;
-        const CSR_MENVCFG: u64 = 0x30a;
-
         if has_csr!(CSR_MCOUNTEREN) {
             current_priv_ver = PrivilegedVersion::Version1_10;
             if has_csr!(CSR_MCOUNTINHIBIT) {
@@ -101,12 +94,48 @@ pub fn privileged_version_detection() {
             }
         }
     }
-    unsafe {
-        ROOT_STACK
-            .get_mut(current_hartid())
-            .map(|stack| stack.hart_context().features.privileged_version = current_priv_ver)
-            .unwrap()
+    hart_context_mut(current_hartid())
+        .features
+        .privileged_version = current_priv_ver;
+}
+
+fn mhpm_detection() {
+    // The privileged spec requires mcycle, time and minstret to be implemented,
+    // so the low three bits are always set.
+    let mut current_mhpm_mask: u32 = 0b111;
+    let mut trap_info: TrapInfo = TrapInfo::default();
+
+    fn check_mhpm_csr<const CSR_NUM: u16>(trap_info: *mut TrapInfo, mhpm_mask: &mut u32) {
+        unsafe {
+            let old_value = csr_read_allow::<CSR_NUM>(trap_info);
+            if (*trap_info).mcause == usize::MAX {
+                csr_write_allow::<CSR_NUM>(trap_info, 1);
+                if (*trap_info).mcause == usize::MAX && csr_swap::<CSR_NUM>(old_value) == 1 {
+                    (*mhpm_mask) |= 1 << (CSR_NUM - CSR_MCYCLE);
+                }
+            }
+        }
+    }
+
+    macro_rules! m_check_mhpm_csr {
+        ($csr_num:expr, $trap_info:expr, $value:expr) => {
+            check_mhpm_csr::<$csr_num>($trap_info, $value)
+        };
     }
+
+    // CSR_MHPMCOUNTER3:   0xb03
+    // CSR_MHPMCOUNTER31:  0xb1f
+    seq!(csr_num in 0xb03..=0xb1f{
+        m_check_mhpm_csr!(csr_num, &mut trap_info, &mut current_mhpm_mask);
+    });
+
+    hart_context_mut(current_hartid()).features.mhpm_mask = current_mhpm_mask;
+    // TODO: at present, RustSBI Prototyper only supports 64-bit.
+    hart_context_mut(current_hartid()).features.mhpm_bits = 64;
+}
+
+pub fn hart_features_detection() {
+    privileged_version_detection();
+    mhpm_detection();
 }
 
 #[cfg(feature = "nemu")]
@@ -114,16 +143,9 @@ pub fn init(cpus: &NodeSeq) {
     for hart_id in 0..cpus.len() {
         let mut hart_exts = [false; Extension::COUNT];
         hart_exts[Extension::Sstc.index()] = true;
-        unsafe {
-            ROOT_STACK
-                .get_mut(hart_id)
-                .map(|stack| {
-                    stack.hart_context().features = HartFeatures {
-                        extension: hart_exts,
-                        privileged_version: PrivilegedVersion::Version1_12,
-                    }
-                })
-                .unwrap()
+        hart_context_mut(hart_id).features = HartFeatures {
+            extension: hart_exts,
+            privileged_version: PrivilegedVersion::Version1_12,
+            // NOTE: assumed values for the NEMU build; only the base counters are advertised.
+            mhpm_mask: 0b111,
+            mhpm_bits: 64,
         }
     }
 }

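The resulting `mhpm_mask` is a per-hart bitmap keyed by CSR offset from `CSR_MCYCLE` (bit 0 = mcycle, bit 1 = time, bit 2 = minstret, bit k = mhpmcounterk). A small sketch of reading it back; the helper name is illustrative:

```rust
// Sketch only: how many programmable mhpmcounters the hart actually detected.
fn programmable_counter_count(hart_id: usize) -> u32 {
    // The low three bits (mcycle/time/minstret) are always reported as present.
    (hart_mhpm_mask(hart_id) & !0b111).count_ones()
}
```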
+ 6 - 1
prototyper/prototyper/src/sbi/hart_context.rs

@@ -1,4 +1,4 @@
-use crate::sbi::extensions::HartFeatures;
+use crate::sbi::features::HartFeatures;
 use crate::sbi::hsm::HsmCell;
 use crate::sbi::rfence::RFenceCell;
 use core::ptr::NonNull;
@@ -6,6 +6,8 @@ use core::sync::atomic::AtomicU8;
 use fast_trap::FlowContext;
 use riscv::register::mstatus;
 
+use super::pmu::PmuState;
+
 /// Context for managing hart (hardware thread) state and operations.
 pub(crate) struct HartContext {
     /// Trap context for handling exceptions and interrupts.
@@ -18,6 +20,8 @@ pub(crate) struct HartContext {
     pub ipi_type: AtomicU8,
     /// Supported hart features.
     pub features: HartFeatures,
+    /// PMU State
+    pub pmu_state: PmuState,
 }
 
 impl HartContext {
@@ -26,6 +30,7 @@ impl HartContext {
     pub fn init(&mut self) {
         self.hsm = HsmCell::new();
         self.rfence = RFenceCell::new();
+        self.pmu_state = PmuState::new();
     }
 
     /// Get a non-null pointer to the trap context.

+ 4 - 14
prototyper/prototyper/src/sbi/hsm.rs

@@ -11,6 +11,8 @@ use crate::riscv::current_hartid;
 use crate::sbi::hart_context::NextStage;
 use crate::sbi::trap_stack::ROOT_STACK;
 
+use super::trap_stack::hart_context;
+
 /// Special state indicating a hart is in the process of starting.
 const HART_STATE_START_PENDING_EXT: usize = usize::MAX;
 
@@ -152,24 +154,12 @@ impl<T: core::fmt::Debug> RemoteHsmCell<'_, T> {
 
 /// Gets the local HSM cell for the current hart.
 pub(crate) fn local_hsm() -> LocalHsmCell<'static, NextStage> {
-    unsafe {
-        ROOT_STACK
-            .get_unchecked_mut(current_hartid())
-            .hart_context()
-            .hsm
-            .local()
-    }
+    unsafe { hart_context(current_hartid()).hsm.local() }
 }
 
 /// Gets a remote view of the current hart's HSM cell.
 pub(crate) fn local_remote_hsm() -> RemoteHsmCell<'static, NextStage> {
-    unsafe {
-        ROOT_STACK
-            .get_unchecked_mut(current_hartid())
-            .hart_context()
-            .hsm
-            .remote()
-    }
+    hart_context(current_hartid()).hsm.remote()
 }
 
 /// Gets a remote view of any hart's HSM cell.

+ 8 - 16
prototyper/prototyper/src/sbi/ipi.rs

@@ -1,13 +1,15 @@
+use super::pmu::pmu_firmware_counter_increment;
 use crate::platform::PLATFORM;
 use crate::riscv::csr::stimecmp;
 use crate::riscv::current_hartid;
-use crate::sbi::extensions::{Extension, hart_extension_probe};
+use crate::sbi::features::{Extension, hart_extension_probe};
 use crate::sbi::hsm::remote_hsm;
 use crate::sbi::rfence;
-use crate::sbi::trap_stack::ROOT_STACK;
+use crate::sbi::trap_stack::hart_context;
 use alloc::boxed::Box;
 use core::sync::atomic::Ordering::Relaxed;
 use rustsbi::{HartMask, SbiRet};
+use sbi_spec::pmu::firmware_event;
 use spin::Mutex;
 
 /// IPI type for supervisor software interrupt.
@@ -46,6 +48,7 @@ impl rustsbi::Timer for SbiIpi {
     /// Set timer value for current hart.
     #[inline]
     fn set_timer(&self, stime_value: u64) {
+        pmu_firmware_counter_increment(firmware_event::SET_TIMER);
         let hart_id = current_hartid();
         let uses_sstc = hart_extension_probe(hart_id, Extension::Sstc);
 
@@ -69,6 +72,7 @@ impl rustsbi::Ipi for SbiIpi {
     /// Send IPI to specified harts.
     #[inline]
     fn send_ipi(&self, hart_mask: rustsbi::HartMask) -> SbiRet {
+        pmu_firmware_counter_increment(firmware_event::IPI_SENT);
         let mut hart_mask = hart_mask;
 
         for hart_id in 0..=self.max_hart_id {
@@ -230,24 +234,12 @@ impl SbiIpi {
 
 /// Set IPI type for specified hart.
 pub fn set_ipi_type(hart_id: usize, event_id: u8) -> u8 {
-    unsafe {
-        ROOT_STACK
-            .get_unchecked_mut(hart_id)
-            .hart_context()
-            .ipi_type
-            .fetch_or(event_id, Relaxed)
-    }
+    hart_context(hart_id).ipi_type.fetch_or(event_id, Relaxed)
 }
 
 /// Get and reset IPI type for current hart.
 pub fn get_and_reset_ipi_type() -> u8 {
-    unsafe {
-        ROOT_STACK
-            .get_unchecked_mut(current_hartid())
-            .hart_context()
-            .ipi_type
-            .swap(0, Relaxed)
-    }
+    hart_context(current_hartid()).ipi_type.swap(0, Relaxed)
 }
 
 /// Clear machine software interrupt pending for current hart.

+ 6 - 1
prototyper/prototyper/src/sbi/mod.rs

@@ -3,11 +3,12 @@ use rustsbi::RustSBI;
 pub mod console;
 pub mod hsm;
 pub mod ipi;
+pub mod pmu;
 pub mod reset;
 pub mod rfence;
 
 pub mod early_trap;
-pub mod extensions;
+pub mod features;
 pub mod fifo;
 pub mod hart_context;
 pub mod heap;
@@ -18,6 +19,7 @@ pub mod trap_stack;
 use console::SbiConsole;
 use hsm::SbiHsm;
 use ipi::SbiIpi;
+use pmu::SbiPmu;
 use reset::SbiReset;
 use rfence::SbiRFence;
 
@@ -35,6 +37,8 @@ pub struct SBI {
     pub reset: Option<SbiReset>,
     #[rustsbi(fence)]
     pub rfence: Option<SbiRFence>,
+    #[rustsbi(pmu)]
+    pub pmu: Option<SbiPmu>,
 }
 
 impl SBI {
@@ -45,6 +49,7 @@ impl SBI {
             hsm: None,
             reset: None,
             rfence: None,
+            pmu: None,
         }
     }
 }

+ 1112 - 0
prototyper/prototyper/src/sbi/pmu.rs

@@ -0,0 +1,1112 @@
+use alloc::collections::BTreeMap;
+use alloc::vec::Vec;
+use riscv::register::*;
+use rustsbi::{Pmu, SbiRet};
+use sbi_spec::binary::SharedPtr;
+use sbi_spec::pmu::shmem_size::SIZE;
+use sbi_spec::pmu::*;
+
+use crate::riscv::csr::*;
+use crate::{riscv::current_hartid, sbi::features::hart_mhpm_mask};
+
+use super::features::{PrivilegedVersion, hart_privileged_version};
+use super::trap_stack::{hart_context, hart_context_mut};
+
+/// Maximum number of hardware performance counters supported.
+const PMU_HARDWARE_COUNTER_MAX: usize = 32;
+/// Maximum number of firmware-managed counters supported.
+const PMU_FIRMWARE_COUNTER_MAX: usize = 16;
+/// Marker value for inactive/invalid event indices.
+const PMU_EVENT_IDX_INVALID: usize = usize::MAX;
+
+/// PMU state tracking hardware and firmware performance counters
+#[repr(C)]
+pub struct PmuState {
+    active_event: [usize; PMU_HARDWARE_COUNTER_MAX + PMU_FIRMWARE_COUNTER_MAX],
+    /// Bitmap of active firmware counters (1 bit per counter)
+    fw_counter_state: usize,
+    /// Values for firmware-managed counters
+    fw_counter: [u64; PMU_FIRMWARE_COUNTER_MAX],
+    hw_counters_num: usize,
+    total_counters_num: usize,
+}
+
+impl PmuState {
+    /// Creates a new PMU state with default configuration.
+    pub fn new() -> Self {
+        let mhpm_mask = hart_mhpm_mask(current_hartid());
+        let hw_counters_num = mhpm_mask.count_ones() as usize;
+        let total_counters_num = hw_counters_num + PMU_FIRMWARE_COUNTER_MAX;
+
+        let mut active_event =
+            [PMU_EVENT_IDX_INVALID; PMU_HARDWARE_COUNTER_MAX + PMU_FIRMWARE_COUNTER_MAX];
+        // Standard mappings for fixed counters
+        active_event[1] = 0x0; // time (memory-mapped)
+
+        Self {
+            active_event,
+            fw_counter_state: 0,
+            fw_counter: [0; PMU_FIRMWARE_COUNTER_MAX],
+            hw_counters_num,
+            total_counters_num,
+        }
+    }
+
+    /// Returns the number of hardware counters available.
+    #[inline(always)]
+    pub fn get_hw_counter_num(&self) -> usize {
+        self.hw_counters_num
+    }
+
+    /// Returns the total number of counters (hardware + firmware).
+    #[inline(always)]
+    pub fn get_total_counters_num(&self) -> usize {
+        self.total_counters_num
+    }
+
+    /// Gets the event index associated with a counter.
+    #[inline]
+    pub fn get_event_idx(&self, counter_idx: usize, firmware_event: bool) -> Option<EventIdx> {
+        if counter_idx >= self.total_counters_num {
+            return None;
+        }
+        if firmware_event && counter_idx < self.hw_counters_num {
+            return None;
+        }
+
+        Some(EventIdx::new(self.active_event[counter_idx]))
+    }
+
+    /// Gets the value of a firmware counter.
+    #[inline]
+    pub fn get_fw_counter(&self, counter_idx: usize) -> Option<u64> {
+        if counter_idx < self.hw_counters_num || counter_idx >= self.total_counters_num {
+            return None;
+        }
+        let fw_idx = counter_idx - self.hw_counters_num;
+        // Safety: fw_idx is guaranteed to be within bounds (0..PMU_FIRMWARE_COUNTER_MAX)
+        unsafe { Some(*self.fw_counter.get_unchecked(fw_idx)) }
+    }
+
+    /// Start a firmware counter, optionally updating its initial value.
+    #[inline]
+    fn start_fw_counter(
+        &mut self,
+        counter_idx: usize,
+        initial_value: u64,
+        is_update_value: bool,
+    ) -> Result<(), StartCounterErr> {
+        if counter_idx < self.hw_counters_num || counter_idx >= self.total_counters_num {
+            return Err(StartCounterErr::OffsetInvalid);
+        }
+        let fw_idx = counter_idx - self.hw_counters_num;
+
+        if self.fw_counter_state & (1 << fw_idx) != 0 {
+            return Err(StartCounterErr::AlreadyStart);
+        }
+
+        if is_update_value {
+            self.fw_counter[fw_idx] = initial_value;
+        }
+        self.fw_counter_state |= 1 << fw_idx; // Mark as active
+        Ok(())
+    }
+
+    /// Stop a firmware counter.
+    #[inline]
+    fn stop_fw_counter(
+        &mut self,
+        counter_idx: usize,
+        is_reset: bool,
+    ) -> Result<(), StopCounterErr> {
+        if counter_idx < self.hw_counters_num || counter_idx >= self.total_counters_num {
+            return Err(StopCounterErr::OffsetInvalid);
+        }
+        let fw_idx = counter_idx - self.hw_counters_num;
+
+        if self.fw_counter_state & (1 << fw_idx) == 0 {
+            return Err(StopCounterErr::AlreadyStop);
+        }
+
+        if is_reset {
+            self.active_event[counter_idx] = PMU_EVENT_IDX_INVALID;
+        }
+        self.fw_counter_state &= !(1 << fw_idx); // Mark as stopped
+        Ok(())
+    }
+
+    #[inline]
+    pub fn is_firmware_event_start(&self, counter_idx: usize) -> bool {
+        if counter_idx < self.hw_counters_num || counter_idx >= self.total_counters_num {
+            return false;
+        }
+        let fw_idx = counter_idx - self.hw_counters_num;
+        self.fw_counter_state & (1 << fw_idx) != 0
+    }
+}
+
+pub struct SbiPmu {
+    event_to_mhpmevent: Option<BTreeMap<u32, u64>>,
+    event_to_mhpmcounter: Option<Vec<EventToCounterMap>>,
+    raw_event_to_mhpmcounter: Option<Vec<RawEventToCounterMap>>,
+}
+
+impl Pmu for SbiPmu {
+    /// Returns the total number of available performance counters
+    ///
+    /// Implements SBI PMU extension function (FID #0)
+    #[inline]
+    fn num_counters(&self) -> usize {
+        hart_context(current_hartid())
+            .pmu_state
+            .get_total_counters_num()
+    }
+
+    /// Get details of a counter (FID #1).
+    #[inline]
+    fn counter_get_info(&self, counter_idx: usize) -> SbiRet {
+        if counter_idx >= self.num_counters() {
+            return SbiRet::invalid_param();
+        }
+
+        let pmu_state = &hart_context(current_hartid()).pmu_state;
+        if counter_idx < pmu_state.get_hw_counter_num() {
+            let mask = hart_mhpm_mask(current_hartid());
+
+            // Find the corresponding hardware counter using bit manipulation
+            // This is more efficient than iterating through all possible offsets
+            let mut remaining_mask = mask;
+            let mut count = 0;
+
+            while remaining_mask != 0 {
+                if count == counter_idx {
+                    // Found the counter - get its CSR offset
+                    let offset = remaining_mask.trailing_zeros() as u16;
+                    return SbiRet::success(
+                        CounterInfo::with_hardware_info(CSR_CYCLE + offset, 63).inner(),
+                    );
+                }
+                remaining_mask &= remaining_mask - 1;
+                count += 1;
+            }
+            return SbiRet::invalid_param();
+        }
+
+        SbiRet::success(CounterInfo::with_firmware_info().inner())
+    }
+
+    /// Find and configure a matching counter (FID #2)
+    #[inline]
+    fn counter_config_matching(
+        &self,
+        counter_idx_base: usize,
+        counter_idx_mask: usize,
+        config_flags: usize,
+        event_idx: usize,
+        event_data: u64,
+    ) -> SbiRet {
+        let flags = match flags::CounterCfgFlags::from_bits(config_flags) {
+            Some(flags) => flags,
+            None => return SbiRet::invalid_param(), // Reserved bits are set
+        };
+
+        let event = EventIdx::new(event_idx);
+        let pmu_state = &mut hart_context_mut(current_hartid()).pmu_state;
+        let is_firmware_event = event.is_firmware_event();
+
+        if counter_idx_base >= pmu_state.total_counters_num
+            || (counter_idx_mask & ((1 << pmu_state.total_counters_num) - 1)) == 0
+            || !event.check_event_type()
+            || (is_firmware_event && !event.firmware_event_valid())
+        {
+            return SbiRet::invalid_param();
+        }
+
+        let skip_match = flags.contains(flags::CounterCfgFlags::SKIP_MATCH);
+
+        let counter_idx;
+
+        if skip_match {
+            // If SKIP_MATCH is set, use the first counter in the mask without searching
+            if let Some(ctr_idx) = CounterMask::new(counter_idx_base, counter_idx_mask).next() {
+                if pmu_state.active_event[ctr_idx] == PMU_EVENT_IDX_INVALID {
+                    return SbiRet::invalid_param();
+                }
+                counter_idx = ctr_idx;
+            } else {
+                return SbiRet::invalid_param();
+            }
+        } else {
+            let match_result: Result<usize, SbiRet>;
+            if event.is_firmware_event() {
+                match_result = self.find_firmware_counter(
+                    counter_idx_base,
+                    counter_idx_mask,
+                    event_idx,
+                    pmu_state,
+                );
+            } else {
+                match_result = self.find_hardware_counter(
+                    counter_idx_base,
+                    counter_idx_mask,
+                    event_idx,
+                    event_data,
+                    pmu_state,
+                );
+            }
+            match match_result {
+                Ok(ctr_idx) => {
+                    counter_idx = ctr_idx;
+                }
+                Err(err) => {
+                    return err;
+                }
+            }
+            pmu_state.active_event[counter_idx] = event_idx;
+        }
+
+        if configure_counter(pmu_state, counter_idx, event, flags) {
+            return SbiRet::success(counter_idx);
+        }
+
+        return SbiRet::not_supported();
+    }
+
+    /// Start one or more counters (FID #3)
+    /// Note: The next two functions contain redundant logic and should be refactored.
+    #[inline]
+    fn counter_start(
+        &self,
+        counter_idx_base: usize,
+        counter_idx_mask: usize,
+        start_flags: usize,
+        initial_value: u64,
+    ) -> SbiRet {
+        let flags = match flags::CounterStartFlags::from_bits(start_flags) {
+            Some(flags) => flags,
+            None => return SbiRet::invalid_param(),
+        };
+
+        let pmu_state = &mut hart_context_mut(current_hartid()).pmu_state;
+        let is_update_value = flags.contains(flags::CounterStartFlags::INIT_VALUE);
+
+        if counter_idx_base >= pmu_state.total_counters_num
+            || (counter_idx_mask & ((1 << pmu_state.total_counters_num) - 1)) == 0
+        {
+            return SbiRet::invalid_param();
+        }
+
+        if flags.contains(flags::CounterStartFlags::INIT_SNAPSHOT) {
+            return SbiRet::no_shmem();
+        }
+
+        for counter_idx in CounterMask::new(counter_idx_base, counter_idx_mask) {
+            if counter_idx >= pmu_state.total_counters_num {
+                return SbiRet::invalid_param();
+            }
+
+            let start_result = if counter_idx >= pmu_state.get_hw_counter_num() {
+                pmu_state.start_fw_counter(counter_idx, initial_value, is_update_value)
+            } else {
+                let mhpm_offset = get_mhpm_csr_offset(counter_idx).unwrap();
+                start_hardware_counter(mhpm_offset, initial_value, is_update_value)
+            };
+            match start_result {
+                Ok(_) => {}
+                Err(StartCounterErr::AlreadyStart) => {
+                    return SbiRet::already_started();
+                }
+                Err(StartCounterErr::OffsetInvalid) => {
+                    return SbiRet::invalid_param();
+                }
+            }
+        }
+        SbiRet::success(0)
+    }
+
+    /// Stop one or more counters (FID #4)
+    #[inline]
+    fn counter_stop(
+        &self,
+        counter_idx_base: usize,
+        counter_idx_mask: usize,
+        stop_flags: usize,
+    ) -> SbiRet {
+        let flags = match flags::CounterStopFlags::from_bits(stop_flags) {
+            Some(flags) => flags,
+            None => return SbiRet::invalid_param(),
+        };
+
+        let pmu_state = &mut hart_context_mut(current_hartid()).pmu_state;
+        let is_reset = flags.contains(flags::CounterStopFlags::RESET);
+
+        if counter_idx_base >= pmu_state.total_counters_num
+            || (counter_idx_mask & ((1 << pmu_state.total_counters_num) - 1)) == 0
+        {
+            return SbiRet::invalid_param();
+        }
+
+        if flags.contains(flags::CounterStopFlags::TAKE_SNAPSHOT) {
+            return SbiRet::no_shmem();
+        }
+
+        for counter_idx in CounterMask::new(counter_idx_base, counter_idx_mask) {
+            if counter_idx >= pmu_state.total_counters_num {
+                return SbiRet::invalid_param();
+            }
+
+            let stop_result = if counter_idx >= pmu_state.get_hw_counter_num() {
+                pmu_state.stop_fw_counter(counter_idx, is_reset)
+            } else {
+                // If RESET flag is set, mark the counter as inactive
+                if is_reset {
+                    pmu_state.active_event[counter_idx] = PMU_EVENT_IDX_INVALID;
+                }
+                let mhpm_offset = get_mhpm_csr_offset(counter_idx).unwrap();
+                stop_hardware_counter(mhpm_offset, is_reset)
+            };
+            match stop_result {
+                Ok(_) => {}
+                Err(StopCounterErr::OffsetInvalid) => return SbiRet::invalid_param(),
+                Err(StopCounterErr::AlreadyStop) => return SbiRet::already_stopped(),
+            }
+        }
+        SbiRet::success(0)
+    }
+
+    /// Reads a firmware counter value
+    /// Function: Read a firmware counter (FID #5).
+    #[inline]
+    fn counter_fw_read(&self, counter_idx: usize) -> SbiRet {
+        let pmu_state = &hart_context(current_hartid()).pmu_state;
+        match pmu_state.get_event_idx(counter_idx, true) {
+            Some(event_id) if event_id.firmware_event_valid() => {
+                if event_id.event_code() == firmware_event::PLATFORM {
+                    // TODO: Handle platform-specific PMU events
+                    return SbiRet::invalid_param();
+                }
+                match pmu_state.get_fw_counter(counter_idx) {
+                    Some(value) => SbiRet::success(value as usize),
+                    None => SbiRet::invalid_param(),
+                }
+            }
+            _ => SbiRet::invalid_param(),
+        }
+    }
+
+    /// Function: Read a firmware counter high bits (FID #6).
+    #[inline]
+    fn counter_fw_read_hi(&self, _counter_idx: usize) -> SbiRet {
+        // The specification states that this function always returns zero in SbiRet.value on RV64 (or higher) systems.
+        // Currently RustSBI Prototyper only supports RV64 systems.
+        SbiRet::success(0)
+    }
+
+    /// Function: Set PMU snapshot shared memory (FID #7).
+    #[inline]
+    fn snapshot_set_shmem(&self, shmem: SharedPtr<[u8; SIZE]>, flags: usize) -> SbiRet {
+        // Optional function, `not_supported` is returned if not implemented.
+        let _ = (shmem, flags);
+        SbiRet::not_supported()
+    }
+}
+
+impl Default for SbiPmu {
+    fn default() -> Self {
+        Self {
+            event_to_mhpmevent: None,
+            event_to_mhpmcounter: None,
+            raw_event_to_mhpmcounter: None,
+        }
+    }
+}
+
+impl SbiPmu {
+    fn find_firmware_counter(
+        &self,
+        counter_idx_base: usize,
+        counter_idx_mask: usize,
+        event_idx: usize,
+        pmu_state: &PmuState,
+    ) -> Result<usize, SbiRet> {
+        // TODO: support `PLATFORM` event
+        let event = EventIdx::new(event_idx);
+        if !event.firmware_event_valid() {
+            return Err(SbiRet::not_supported());
+        }
+
+        // TODO: remove this check once all firmware events are implemented.
+        if event.event_code() <= 21 && !PMU_FIRMWARE_EVENT_SUPPORTED[event.event_code()] {
+            return Err(SbiRet::not_supported());
+        }
+
+        for counter_idx in CounterMask::new(counter_idx_base, counter_idx_mask) {
+            // If counter idx is not a firmware counter index, skip this index
+            if counter_idx < pmu_state.get_hw_counter_num()
+                || counter_idx >= pmu_state.get_total_counters_num()
+            {
+                continue;
+            }
+            // If the firmware counter at this index is already occupied, skip this index
+            if pmu_state.active_event[counter_idx] != PMU_EVENT_IDX_INVALID {
+                continue;
+            }
+            return Ok(counter_idx);
+        }
+        return Err(SbiRet::not_supported());
+    }
+
+    fn find_hardware_counter(
+        &self,
+        counter_idx_base: usize,
+        counter_idx_mask: usize,
+        event_idx: usize,
+        event_data: u64,
+        pmu_state: &PmuState,
+    ) -> Result<usize, SbiRet> {
+        let event = EventIdx::new(event_idx);
+        let mut hw_counters_mask = 0;
+        // Find the counters available for the event.
+        if event.is_raw_event() {
+            if let Some(ref raw_event_map_vec) = self.raw_event_to_mhpmcounter {
+                for raw_event_map in raw_event_map_vec {
+                    if raw_event_map.have_event(event_data) {
+                        hw_counters_mask = raw_event_map.get_counter_mask();
+                        break;
+                    }
+                }
+            } else {
+                return Err(SbiRet::not_supported());
+            }
+        } else {
+            // event is general event or cache event
+            if let Some(ref sbi_hw_event_map_vec) = self.event_to_mhpmcounter {
+                for sbi_hw_event_map in sbi_hw_event_map_vec {
+                    if sbi_hw_event_map.have_event(event_idx as u32) {
+                        hw_counters_mask = sbi_hw_event_map.get_counter_mask();
+                        break;
+                    }
+                }
+            } else {
+                return Err(SbiRet::not_supported());
+            }
+        }
+        // mcycle, time, minstret cannot be used for other events.
+        let mhpm_mask = hart_mhpm_mask(current_hartid());
+        let can_use_counter_mask = hw_counters_mask & mhpm_mask;
+
+        // Find a counter that meets the conditions from a set of counters
+        for counter_idx in CounterMask::new(counter_idx_base, counter_idx_mask) {
+            if counter_idx >= pmu_state.hw_counters_num {
+                continue;
+            }
+
+            // If the counter idx corresponding to the hardware counter index cannot be used by the event,
+            // or has already been used, skip this counter idx
+            let mhpm_offset = get_mhpm_csr_offset(counter_idx).unwrap();
+            // Find a unused counter
+            if (can_use_counter_mask >> mhpm_offset) & 0x1 == 0
+                || pmu_state.active_event[counter_idx] != PMU_EVENT_IDX_INVALID
+            {
+                continue;
+            }
+            // If mcycle is selected but the event is not SBI_PMU_HW_CPU_CYCLES,
+            // or minstret is selected but the event is not SBI_PMU_HW_INSTRUCTIONS, skip
+            if (mhpm_offset == 0 && event_idx != 1) || (mhpm_offset == 2 && event_idx != 2) {
+                continue;
+            }
+            // If the counter idx corresponding to the hardware counter index has already started counting, skip the counter
+            if hart_privileged_version(current_hartid()) >= PrivilegedVersion::Version1_11 {
+                let inhibit = riscv::register::mcountinhibit::read();
+                if (inhibit.bits() & (1 << mhpm_offset)) == 0 {
+                    continue;
+                }
+            }
+
+            // Found a counter that meets the conditions - write the event value to the corresponding mhpmevent
+            self.pmu_update_hardware_mhpmevent(mhpm_offset, event_idx, event_data)?;
+            return Ok(counter_idx);
+        }
+        Err(SbiRet::not_supported())
+    }
+
+    fn pmu_update_hardware_mhpmevent(
+        &self,
+        mhpm_offset: u16,
+        event_idx: usize,
+        event_data: u64,
+    ) -> Result<(), SbiRet> {
+        // If the event is SBI_PMU_HW_CPU_CYCLES and mcycle is selected,
+        // or the event is SBI_PMU_HW_INSTRUCTIONS and minstret is selected, return directly
+        if (mhpm_offset == 0 && event_idx == 1) || (mhpm_offset == 2 && event_idx == 2) {
+            return Ok(());
+        }
+        // Validate counter offset range (only mhpmcounter3-31 are configurable)
+        if mhpm_offset == 1 || mhpm_offset > 31 {
+            return Err(SbiRet::not_supported());
+        }
+
+        let event = EventIdx::new(event_idx);
+
+        // Determine the value to write to mhpmevent CSR
+        let mhpmevent_val = if event.is_raw_event() {
+            // For raw events, use the provided event_data directly
+            event_data
+        } else if let Some(ref event_to_mhpmevent) = self.event_to_mhpmevent {
+            // For standard events, look up the corresponding mhpmevent value
+            *event_to_mhpmevent
+                .get(&(event_idx as u32))
+                .ok_or(SbiRet::not_supported())?
+        } else if self.event_to_mhpmcounter.is_some() {
+            // Handle QEMU compatibility case:
+            // When only event_to_mhpmcounter is available (like in QEMU),
+            // use the event index directly as the raw event value
+            event_idx as u64
+        } else {
+            // No mapping available for this event
+            return Err(SbiRet::not_supported());
+        };
+
+        write_mhpmevent(mhpm_offset, mhpmevent_val);
+        Ok(())
+    }
+
+    pub fn insert_event_to_mhpmevent(&mut self, event: u32, mhpmevent: u64) {
+        let event_to_mhpmevent_map = self.event_to_mhpmevent.get_or_insert_default();
+
+        //TODO: When https://github.com/rust-lang/rust/issues/82766 is stable, change this to `try_insert`
+        if let Some(mhpmevent_mapped) = event_to_mhpmevent_map.get(&event) {
+            error!(
+                "Try to map event:0x{:08x} to mhpmevent:0x{:016x}, but the event has been mapped to mhpmevent:{}, please check the device tree file",
+                event, mhpmevent, mhpmevent_mapped
+            );
+        } else {
+            event_to_mhpmevent_map.insert(event, mhpmevent);
+        }
+    }
+
+    pub fn insert_event_to_mhpmcounter(&mut self, event_to_counter: EventToCounterMap) {
+        let event_to_mhpmcounter_map = self.event_to_mhpmcounter.get_or_insert_default();
+        for event_to_mhpmcounter in event_to_mhpmcounter_map.iter() {
+            if event_to_mhpmcounter.is_overlap(&event_to_counter) {
+                error!(
+                    "The event_to_mhpmcounter mappings {:?} and {:?} overlap; please check the device tree",
+                    event_to_mhpmcounter, event_to_counter
+                );
+                return;
+            }
+        }
+        event_to_mhpmcounter_map.push(event_to_counter);
+    }
+
+    pub fn insert_raw_event_to_mhpmcounter(&mut self, raw_event_to_counter: RawEventToCounterMap) {
+        let raw_event_to_mhpmcounter_map = self.raw_event_to_mhpmcounter.get_or_insert_default();
+        for raw_event_to_mhpmcounter in raw_event_to_mhpmcounter_map.iter() {
+            if raw_event_to_mhpmcounter.is_overlap(&raw_event_to_counter) {
+                error!(
+                    "The raw_event_to_mhpmcounter mappings {:?} and {:?} overlap; please check the device tree",
+                    raw_event_to_mhpmcounter, raw_event_to_counter
+                );
+                return;
+            }
+        }
+        raw_event_to_mhpmcounter_map.push(raw_event_to_counter);
+    }
+}
+
+/// Configures a counter to monitor an event based on the given flags.
+///
+/// Returns `true` if configuration succeeds, `false` otherwise.
+#[inline]
+fn configure_counter(
+    pmu_state: &mut PmuState,
+    counter_idx: usize,
+    event: EventIdx,
+    flags: flags::CounterCfgFlags,
+) -> bool {
+    let auto_start = flags.contains(flags::CounterCfgFlags::AUTO_START);
+    let clear_value = flags.contains(flags::CounterCfgFlags::CLEAR_VALUE);
+    if event.is_firmware_event() {
+        let firmware_event_idx = counter_idx - pmu_state.hw_counters_num;
+        if clear_value {
+            pmu_state.fw_counter[firmware_event_idx] = 0;
+        }
+        if auto_start {
+            pmu_state.fw_counter_state |= 1 << firmware_event_idx;
+        }
+    } else {
+        let mhpm_offset = get_mhpm_csr_offset(counter_idx).unwrap();
+        if clear_value {
+            write_mhpmcounter(mhpm_offset, 0);
+        }
+        if auto_start {
+            return start_hardware_counter(mhpm_offset, 0, false).is_ok();
+        }
+    }
+    true
+}
+
+/// Gets the offset, relative to mcycle, of the mhpm counter CSR corresponding to `counter_idx`.
+fn get_mhpm_csr_offset(counter_idx: usize) -> Option<u16> {
+    let mhpm_mask = hart_mhpm_mask(current_hartid());
+    let mut count = 0;
+    for offset in 0..32 {
+        if (mhpm_mask >> offset) & 1 == 1 {
+            if count == counter_idx {
+                return Some(offset as u16);
+            }
+            count += 1;
+        }
+    }
+    None
+}
+
+/// Errors that can occur when starting a hardware counter.
+enum StartCounterErr {
+    OffsetInvalid,
+    AlreadyStart,
+}
+
+/// Starts a hardware performance counter specified by the offset.
+fn start_hardware_counter(
+    mhpm_offset: u16,
+    new_value: u64,
+    is_update_value: bool,
+) -> Result<(), StartCounterErr> {
+    if mhpm_offset == 1 || mhpm_offset > 31 {
+        return Err(StartCounterErr::OffsetInvalid);
+    }
+
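+    // mcountinhibit is only available from privileged spec 1.11 on; on older harts
+    // the counter is always running, so only its value can be updated here.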
+    if hart_privileged_version(current_hartid()) < PrivilegedVersion::Version1_11 {
+        if is_update_value {
+            write_mhpmcounter(mhpm_offset, new_value);
+        }
+        return Ok(());
+    }
+
+    // Check if counter is already running by testing the inhibit bit
+    // A zero bit in mcountinhibit means the counter is running
+    if mcountinhibit::read().bits() & (1 << mhpm_offset) == 0 {
+        return Err(StartCounterErr::AlreadyStart);
+    }
+
+    if is_update_value {
+        write_mhpmcounter(mhpm_offset, new_value);
+    }
+
+    unsafe {
+        match mhpm_offset {
+            0 => mcountinhibit::clear_cy(),
+            2 => mcountinhibit::clear_ir(),
+            _ => mcountinhibit::clear_hpm(mhpm_offset as usize),
+        }
+    }
+    Ok(())
+}
+
+/// Errors that can occur when stopping a hardware counter.
+enum StopCounterErr {
+    OffsetInvalid,
+    AlreadyStop,
+}
+
+/// Stops a hardware performance counter specified by the offset.
+fn stop_hardware_counter(mhpm_offset: u16, is_reset: bool) -> Result<(), StopCounterErr> {
+    if mhpm_offset == 1 || mhpm_offset > 31 {
+        return Err(StopCounterErr::OffsetInvalid);
+    }
+
+    if is_reset && mhpm_offset >= 3 && mhpm_offset <= 31 {
+        write_mhpmevent(mhpm_offset, 0);
+    }
+
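+    // mcountinhibit is only available from privileged spec 1.11 on, so older harts
+    // cannot actually inhibit the counter here.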
+    if hart_privileged_version(current_hartid()) < PrivilegedVersion::Version1_11 {
+        return Ok(());
+    }
+
+    if mcountinhibit::read().bits() & (1 << mhpm_offset) != 0 {
+        return Err(StopCounterErr::AlreadyStop);
+    }
+
+    unsafe {
+        match mhpm_offset {
+            0 => mcountinhibit::set_cy(),
+            2 => mcountinhibit::set_ir(),
+            _ => mcountinhibit::set_hpm(mhpm_offset as usize),
+        }
+    }
+    Ok(())
+}
+
+/// Writes the mhpmevent CSR selected by `mhpm_offset` (3-31, relative to mcycle).
+fn write_mhpmevent(mhpm_offset: u16, mhpmevent_val: u64) {
+    let csr = CSR_MHPMEVENT3 + mhpm_offset - 3;
+
+    // Handle MHPMEVENT3-31
+    if csr >= CSR_MHPMEVENT3 && csr <= CSR_MHPMEVENT31 {
+        // Convert CSR value to register index (3-31)
+        let idx = csr - CSR_MHPMEVENT3 + 3;
+
+        // Use seq_macro to generate all valid indices from 3 to 31
+        seq_macro::seq!(N in 3..=31 {
+            match idx {
+                #(
+                    N => pastey::paste!{ [<mhpmevent ~N>]::write(mhpmevent_val as usize) },
+                )*
+                _ =>{}
+            }
+        });
+    }
+}
+
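+/// Writes the counter CSR selected by `mhpm_offset`: mcycle, minstret, or mhpmcounter3-31.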
+fn write_mhpmcounter(mhpm_offset: u16, mhpmcounter_val: u64) {
+    let counter_idx = mhpm_offset;
+
+    let csr = CSR_MHPMCOUNTER3 + mhpm_offset - 3;
+    // Special cases for cycle and instret
+    if csr == CSR_MCYCLE {
+        crate::riscv::csr::mcycle::write(mhpmcounter_val);
+        return;
+    } else if csr == CSR_MINSTRET {
+        crate::riscv::csr::minstret::write(mhpmcounter_val);
+        return;
+    }
+
+    // Only handle valid counter indices (3-31)
+    if counter_idx >= 3 && counter_idx <= 31 {
+        // Call the macro with all valid indices
+        seq_macro::seq!(N in 3..=31 {
+            match counter_idx {
+                #(
+                    N => pastey::paste!{ [<mhpmcounter ~N>]::write(mhpmcounter_val as usize) },
+                )*
+                _ =>{}
+            }
+        });
+    }
+}
+
+/// Wrapper for packed counter information.
+struct CounterInfo {
+    /// Packed representation of counter information:
+    /// - Bits [11:0]: CSR number for hardware counters
+    /// - Bits [17:12]: Counter width (typically 63 for RV64)
+    /// - MSB: Set for firmware counters, clear for hardware counters
+    inner: usize,
+}
+
+#[allow(unused)]
+impl CounterInfo {
+    const CSR_MASK: usize = 0xFFF; // Bits [11:0]
+    const WIDTH_MASK: usize = 0x3F << 12; // Bits [17:12]
+    const FIRMWARE_FLAG: usize = 1 << (size_of::<usize>() * 8 - 1); // MSB
+
+    #[inline]
+    pub const fn new() -> Self {
+        Self { inner: 0 }
+    }
+
+    #[inline]
+    pub fn set_csr(&mut self, csr_num: u16) {
+        self.inner = (self.inner & !Self::CSR_MASK) | ((csr_num as usize) & Self::CSR_MASK);
+    }
+
+    #[inline]
+    pub fn set_width(&mut self, width: u8) {
+        self.inner = (self.inner & !Self::WIDTH_MASK) | (((width as usize) & 0x3F) << 12);
+    }
+
+    #[inline]
+    pub const fn with_hardware_info(csr_num: u16, width: u8) -> Self {
+        Self {
+            inner: ((csr_num as usize) & Self::CSR_MASK) | (((width as usize) & 0x3F) << 12),
+        }
+    }
+
+    #[inline]
+    pub const fn with_firmware_info() -> Self {
+        Self {
+            inner: Self::FIRMWARE_FLAG,
+        }
+    }
+
+    #[inline]
+    pub const fn inner(self) -> usize {
+        self.inner
+    }
+}
+
+impl Default for CounterInfo {
+    #[inline]
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+#[derive(Clone, Copy)]
+pub struct EventIdx {
+    /// Packed representation of event information:
+    /// - Bits [15:0]: Event code
+    /// - Bits [19:16]: Event type
+    inner: usize,
+}
+
+#[allow(unused)]
+impl EventIdx {
+    #[inline]
+    const fn new(event_idx: usize) -> Self {
+        Self { inner: event_idx }
+    }
+
+    #[inline]
+    fn from_firmware_event(firmware_event: usize) -> Self {
+        Self {
+            inner: 0xf << 16 | firmware_event,
+        }
+    }
+
+    #[inline]
+    fn raw(&self) -> usize {
+        self.inner
+    }
+
+    #[inline]
+    const fn event_type(&self) -> usize {
+        (self.inner >> 16) & 0xF
+    }
+
+    #[inline]
+    const fn event_code(&self) -> usize {
+        self.inner & 0xFFFF
+    }
+
+    /// Extracts the cache ID for HARDWARE_CACHE events (13 bits, [15:3])
+    #[inline]
+    const fn cache_id(&self) -> usize {
+        (self.inner >> 3) & 0x1FFF
+    }
+
+    /// Extracts the cache operation ID (2 bits, [2:1])
+    #[inline]
+    const fn cache_op_id(&self) -> usize {
+        (self.inner >> 1) & 0x3
+    }
+
+    /// Extracts the cache result ID (1 bit, [0])
+    #[inline]
+    const fn cache_result_id(&self) -> usize {
+        self.inner & 0x1
+    }
+
+    #[inline]
+    const fn is_general_event(&self) -> bool {
+        self.event_type() == event_type::HARDWARE_GENERAL
+    }
+
+    #[inline]
+    const fn is_cache_event(&self) -> bool {
+        self.event_type() == event_type::HARDWARE_CACHE
+    }
+
+    #[inline]
+    const fn is_raw_event_v1(&self) -> bool {
+        self.event_type() == event_type::HARDWARE_RAW
+    }
+
+    #[inline]
+    const fn is_raw_event_v2(&self) -> bool {
+        self.event_type() == event_type::HARDWARE_RAW_V2
+    }
+
+    #[inline]
+    const fn is_raw_event(&self) -> bool {
+        self.is_raw_event_v1() || self.is_raw_event_v2()
+    }
+
+    #[inline]
+    const fn is_firmware_event(&self) -> bool {
+        self.event_type() == event_type::FIRMWARE
+    }
+
+    #[inline]
+    fn check_event_type(self) -> bool {
+        let event_type = self.event_type();
+        let event_code = self.event_code();
+
+        match event_type {
+            event_type::HARDWARE_GENERAL => event_code <= hardware_event::REF_CPU_CYCLES,
+            event_type::HARDWARE_CACHE => {
+                self.cache_id() <= cache_event::NODE
+                    && self.cache_op_id() <= cache_operation::PREFETCH
+                    && self.cache_result_id() <= cache_result::MISS
+            }
+            event_type::HARDWARE_RAW | event_type::HARDWARE_RAW_V2 => event_code == 0,
+            event_type::FIRMWARE => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    fn firmware_event_valid(self) -> bool {
+        let event_type = self.event_type();
+        let event_code = self.event_code();
+        if event_type != event_type::FIRMWARE {
+            return false;
+        }
+        if (event_code > firmware_event::HFENCE_VVMA_ASID_RECEIVED
+            && event_code < firmware_event::PLATFORM)
+            || event_code >= firmware_event::PLATFORM
+        {
+            // TODO: platform-specific (PLATFORM) firmware events are not supported by RustSBI Prototyper yet.
+            return false;
+        }
+        true
+    }
+}
+
+/// Maps a range of SBI event indices to the hardware counters that can count them.
+#[derive(Debug)]
+pub struct EventToCounterMap {
+    counters_mask: u32,   // Bitmask of supported counters
+    event_start_idx: u32, // Start of event code range
+    event_end_idx: u32,   // End of event code range
+}
+
+impl EventToCounterMap {
+    pub fn new(counters_mask: u32, event_start_idx: u32, event_end_idx: u32) -> Self {
+        Self {
+            counters_mask,
+            event_start_idx,
+            event_end_idx,
+        }
+    }
+
+    #[inline]
+    pub const fn have_event(&self, event_idx: u32) -> bool {
+        event_idx >= self.event_start_idx && event_idx <= self.event_end_idx
+    }
+
+    #[inline]
+    pub fn get_counter_mask(&self) -> u32 {
+        self.counters_mask
+    }
+
+    #[inline]
+    pub fn is_overlap(&self, other_map: &EventToCounterMap) -> bool {
+        if (self.event_end_idx < other_map.event_start_idx
+            && self.event_end_idx < other_map.event_end_idx)
+            || (self.event_start_idx > other_map.event_start_idx
+                && self.event_start_idx > other_map.event_end_idx)
+        {
+            return false;
+        }
+        true
+    }
+}
+
+#[derive(Debug)]
+pub struct RawEventToCounterMap {
+    counters_mask: u32,    // Bitmask of supported counters
+    raw_event_select: u64, // Value to program into mhpmeventX
+    select_mask: u64,      // Mask applied to the raw event value before comparison
+}
+
+impl RawEventToCounterMap {
+    pub fn new(counters_mask: u32, raw_event_select: u64, select_mask: u64) -> Self {
+        Self {
+            counters_mask,
+            raw_event_select,
+            select_mask,
+        }
+    }
+
+    #[inline]
+    pub const fn have_event(&self, event_idx: u64) -> bool {
+        self.raw_event_select == (event_idx & self.select_mask)
+    }
+
+    #[inline]
+    pub const fn get_counter_mask(&self) -> u32 {
+        self.counters_mask
+    }
+
+    #[inline]
+    pub const fn is_overlap(&self, other_map: &RawEventToCounterMap) -> bool {
+        self.select_mask == other_map.select_mask
+            && self.raw_event_select == other_map.raw_event_select
+    }
+}
+
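+/// Iterator over the counter indices selected by a base/mask pair; each set bit
+/// in `counter_idx_mask` yields `counter_idx_base + bit_position`.
+/// For example, base = 3 and mask = 0b101 yield the indices 3 and 5.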
+struct CounterMask {
+    counter_idx_base: usize,
+    counter_idx_mask: usize,
+}
+
+impl CounterMask {
+    pub fn new(counter_idx_base: usize, counter_idx_mask: usize) -> Self {
+        Self {
+            counter_idx_base,
+            counter_idx_mask,
+        }
+    }
+}
+
+impl Iterator for CounterMask {
+    type Item = usize;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.counter_idx_mask == 0 {
+            return None;
+        }
+        let low_bit = self.counter_idx_mask.trailing_zeros();
+        let counter_idx = usize::try_from(low_bit).unwrap() + self.counter_idx_base;
+        self.counter_idx_mask &= !(1usize << low_bit);
+        Some(counter_idx)
+    }
+}
+
+// TODO: If all firmware events are implemented,
+// `PMU_FIRMWARE_EVENT_SUPPORTED` should be removed.
+const PMU_FIRMWARE_EVENT_SUPPORTED: [bool; 22] = [
+    true,  // SBI_PMU_FW_MISALIGNED_LOAD
+    true,  // SBI_PMU_FW_MISALIGNED_STORE
+    false, // SBI_PMU_FW_ACCESS_LOAD
+    false, // SBI_PMU_FW_ACCESS_STORE
+    true,  // SBI_PMU_FW_ILLEGAL_INSN
+    true,  // SBI_PMU_FW_SET_TIMER
+    true,  // SBI_PMU_FW_IPI_SENT
+    true,  // SBI_PMU_FW_IPI_RECEIVED
+    true,  // SBI_PMU_FW_FENCE_I_SENT
+    true,  // SBI_PMU_FW_FENCE_I_RECEIVED
+    true,  // SBI_PMU_FW_SFENCE_VMA_SENT
+    true,  // SBI_PMU_FW_SFENCE_VMA_RECEIVED
+    true,  // SBI_PMU_FW_SFENCE_VMA_ASID_SENT
+    true,  // SBI_PMU_FW_SFENCE_VMA_ASID_RECEIVED
+    false, // SBI_PMU_FW_HFENCE_GVMA_SENT
+    false, // SBI_PMU_FW_HFENCE_GVMA_RECEIVED
+    false, // SBI_PMU_FW_HFENCE_GVMA_VMID_SENT
+    false, // SBI_PMU_FW_HFENCE_GVMA_VMID_RECEIVED
+    false, // SBI_PMU_FW_HFENCE_VVMA_SENT
+    false, // SBI_PMU_FW_HFENCE_VVMA_RECEIVED
+    false, // SBI_PMU_FW_HFENCE_VVMA_ASID_SENT
+    false, // SBI_PMU_FW_HFENCE_VVMA_ASID_RECEIVED
+];
+
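+/// Increments every active firmware counter on the current hart that is bound to
+/// `firmware_event` and has been started.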
+pub fn pmu_firmware_counter_increment(firmware_event: usize) {
+    let pmu_state = &mut hart_context_mut(current_hartid()).pmu_state;
+    let counter_idx_start = pmu_state.hw_counters_num;
+    for counter_idx in counter_idx_start..counter_idx_start + PMU_FIRMWARE_COUNTER_MAX {
+        let fw_idx = counter_idx - counter_idx_start;
+        if pmu_state.active_event[counter_idx]
+            == EventIdx::from_firmware_event(firmware_event).raw()
+            && pmu_state.is_firmware_event_start(counter_idx)
+        {
+            pmu_state.fw_counter[fw_idx] += 1;
+        }
+    }
+}

+ 9 - 0
prototyper/prototyper/src/sbi/rfence.rs

@@ -1,4 +1,5 @@
 use rustsbi::{HartMask, SbiRet};
+use sbi_spec::pmu::firmware_event;
 use spin::Mutex;
 
 use crate::cfg::{PAGE_SIZE, TLB_FLUSH_LIMIT};
@@ -10,6 +11,8 @@ use core::arch::asm;
 
 use core::sync::atomic::{AtomicU32, Ordering};
 
+use super::pmu::pmu_firmware_counter_increment;
+
 /// Cell for managing remote fence operations between harts.
 pub(crate) struct RFenceCell {
     // Queue of fence operations with source hart ID
@@ -198,6 +201,7 @@ fn remote_fence_process(rfence_ctx: RFenceContext, hart_mask: HartMask) -> SbiRe
 impl rustsbi::Fence for SbiRFence {
     /// Remote instruction fence for specified harts.
     fn remote_fence_i(&self, hart_mask: HartMask) -> SbiRet {
+        pmu_firmware_counter_increment(firmware_event::FENCE_I_SENT);
         remote_fence_process(
             RFenceContext {
                 start_addr: 0,
@@ -212,6 +216,7 @@ impl rustsbi::Fence for SbiRFence {
 
     /// Remote supervisor fence for virtual memory on specified harts.
     fn remote_sfence_vma(&self, hart_mask: HartMask, start_addr: usize, size: usize) -> SbiRet {
+        pmu_firmware_counter_increment(firmware_event::SFENCE_VMA_SENT);
         let flush_size = match validate_address_range(start_addr, size) {
             Ok(size) => size,
             Err(e) => return e,
@@ -237,6 +242,7 @@ impl rustsbi::Fence for SbiRFence {
         size: usize,
         asid: usize,
     ) -> SbiRet {
+        pmu_firmware_counter_increment(firmware_event::SFENCE_VMA_ASID_SENT);
         let flush_size = match validate_address_range(start_addr, size) {
             Ok(size) => size,
             Err(e) => return e,
@@ -263,11 +269,13 @@ pub fn rfence_single_handler() {
         match ctx.op {
             // Handle instruction fence
             RFenceType::FenceI => unsafe {
+                pmu_firmware_counter_increment(firmware_event::FENCE_I_RECEIVED);
                 asm!("fence.i");
                 remote_rfence(id).unwrap().sub();
             },
             // Handle virtual memory address fence
             RFenceType::SFenceVma => {
+                pmu_firmware_counter_increment(firmware_event::SFENCE_VMA_RECEIVED);
                 // If the flush size is greater than the maximum limit then simply flush all
                 if (ctx.start_addr == 0 && ctx.size == 0)
                     || (ctx.size == usize::MAX)
@@ -288,6 +296,7 @@ pub fn rfence_single_handler() {
             }
             // Handle virtual memory address fence with ASID
             RFenceType::SFenceVmaAsid => {
+                pmu_firmware_counter_increment(firmware_event::SFENCE_VMA_ASID_RECEIVED);
                 let asid = ctx.asid;
                 // If the flush size is greater than the maximum limit then simply flush all
                 if (ctx.start_addr == 0 && ctx.size == 0)

+ 5 - 2
prototyper/prototyper/src/sbi/trap/handler.rs

@@ -2,6 +2,7 @@ use fast_trap::{EntireContext, EntireContextSeparated, EntireResult, FastContext
 use riscv::register::{mepc, mie, mstatus, mtval, satp, sstatus};
 use riscv_decode::{Instruction, decode};
 use rustsbi::RustSBI;
+use sbi_spec::pmu::firmware_event;
 
 use crate::platform::PLATFORM;
 use crate::riscv::csr::{CSR_TIME, CSR_TIMEH};
@@ -9,6 +10,7 @@ use crate::riscv::current_hartid;
 use crate::sbi::console;
 use crate::sbi::hsm::local_hsm;
 use crate::sbi::ipi;
+use crate::sbi::pmu::pmu_firmware_counter_increment;
 use crate::sbi::rfence;
 
 use super::helper::*;
@@ -34,6 +36,7 @@ pub fn msoft_ipi_handler() {
     let ipi_type = get_and_reset_ipi_type();
     // Handle supervisor software interrupt
     if (ipi_type & ipi::IPI_TYPE_SSOFT) != 0 {
+        pmu_firmware_counter_increment(firmware_event::IPI_RECEIVED);
         unsafe {
             riscv::register::mip::set_ssoft();
         }
@@ -67,7 +70,7 @@ pub fn msoft_handler(ctx: FastContext) -> FastResult {
             riscv::asm::wfi();
             ctx.restore()
         }
-        // Handle RFence
+        // Handle IPI and RFence
         _ => {
             msoft_ipi_handler();
             ctx.restore()
@@ -157,7 +160,7 @@ pub extern "C" fn illegal_instruction_handler(raw_ctx: EntireContext) -> EntireR
 
     let inst = decode(mtval::read() as u32);
     match inst {
-        Ok(Instruction::Csrrs(csr)) => match csr.csr() {
+        Ok(Instruction::Csrrs(csr)) => match csr.csr() as u16 {
             CSR_TIME => {
                 save_reg_x(
                     &mut ctx,

+ 5 - 1
prototyper/prototyper/src/sbi/trap/mod.rs

@@ -2,7 +2,7 @@ pub mod boot;
 pub mod handler;
 
 mod helper;
-
+use super::pmu::pmu_firmware_counter_increment;
 use crate::fail::unsupported_trap;
 
 use fast_trap::{FastContext, FastResult};
@@ -11,6 +11,7 @@ use riscv::register::{
     mcause::{self, Trap},
     mepc, mip, mstatus,
 };
+use sbi_spec::pmu::firmware_event;
 
 /// Fast trap handler for all trap.
 pub extern "C" fn fast_handler(
@@ -55,6 +56,7 @@ pub extern "C" fn fast_handler(
                 }
                 // Handle illegal instructions
                 Trap::Exception(Exception::IllegalInstruction) => {
+                    pmu_firmware_counter_increment(firmware_event::ILLEGAL_INSN);
                     if mstatus::read().mpp() == mstatus::MPP::Machine {
                         panic!("Cannot handle illegal instruction exception from M-MODE");
                     }
@@ -62,10 +64,12 @@ pub extern "C" fn fast_handler(
                     ctx.continue_with(handler::illegal_instruction_handler, ())
                 }
                 Trap::Exception(Exception::LoadMisaligned) => {
+                    pmu_firmware_counter_increment(firmware_event::MISALIGNED_LOAD);
                     save_regs(&mut ctx);
                     ctx.continue_with(handler::load_misaligned_handler, ())
                 }
                 Trap::Exception(Exception::StoreMisaligned) => {
+                    pmu_firmware_counter_increment(firmware_event::MISALIGNED_STORE);
                     save_regs(&mut ctx);
                     ctx.continue_with(handler::store_misaligned_handler, ())
                 }

+ 16 - 2
prototyper/prototyper/src/sbi/trap_stack.rs

@@ -42,6 +42,14 @@ pub(crate) fn prepare_for_trap() {
     };
 }
 
+pub fn hart_context_mut(hart_id: usize) -> &'static mut HartContext {
+    unsafe { ROOT_STACK.get_mut(hart_id).unwrap().hart_context_mut() }
+}
+
+pub fn hart_context(hart_id: usize) -> &'static HartContext {
+    unsafe { ROOT_STACK.get(hart_id).unwrap().hart_context() }
+}
+
 /// Stack type for each hart.
 ///
 /// Memory layout:
@@ -58,15 +66,21 @@ impl Stack {
 
     /// Gets mutable reference to hart context at bottom of stack.
     #[inline]
-    pub fn hart_context(&mut self) -> &mut HartContext {
+    pub fn hart_context_mut(&mut self) -> &mut HartContext {
         unsafe { &mut *self.0.as_mut_ptr().cast() }
     }
 
+    /// Gets immutable reference to hart context at bottom of stack.
+    #[inline]
+    pub fn hart_context(&self) -> &HartContext {
+        unsafe { &*self.0.as_ptr().cast() }
+    }
+
     /// Initializes stack for trap handling.
     /// - Sets up hart context.
     /// - Creates and loads FreeTrapStack with the stack range.
     fn load_as_stack(&'static mut self) {
-        let hart = self.hart_context();
+        let hart = self.hart_context_mut();
         let context_ptr = hart.context_ptr();
         hart.init();
 

+ 3 - 0
prototyper/test-kernel/Cargo.toml

@@ -10,6 +10,9 @@ publish = false
 
 [dependencies]
 sbi-testing = { features = ["log"], path = "../../library/sbi-testing" }
+sbi-spec = { version = "0.0.8", features = [
+    "legacy",
+], path = "../../library/sbi-spec" }
 log = "0.4"
 riscv = "0.12.1"
 spin = "0.9"

+ 283 - 2
prototyper/test-kernel/src/main.rs

@@ -10,7 +10,13 @@ use core::{
     arch::{asm, naked_asm},
     ptr::null,
 };
-use sbi_testing::sbi;
+use riscv::register::cycle;
+use sbi_spec::{
+    binary::{CounterMask, HartMask, SbiRet},
+    pmu::firmware_event,
+};
+use sbi_testing::sbi::{self, ConfigFlags, StartFlags, StopFlags};
 use uart16550::Uart16550;
 
 const RISCV_HEAD_FLAGS: u64 = 0;
@@ -108,7 +114,172 @@ extern "C" fn rust_main(hartid: usize, dtb_pa: usize) -> ! {
         hart_mask_base: 0,
         delay: frequency,
     };
-    if testing.test() {
+    let test_result = testing.test();
+
+    // PMU test; it assumes a single-core qemu-system-riscv64 target.
+    let counters_num = sbi::pmu_num_counters();
+    println!("[pmu] counters number: {}", counters_num);
+    for idx in 0..counters_num {
+        let counter_info = sbi::pmu_counter_get_info(idx);
+        let counter_info = CounterInfo::new(counter_info.value);
+        if counter_info.is_firmware_counter() {
+            println!("[pmu] counter index:{:>2}, is a firmware counter", idx);
+        } else {
+            println!(
+                "[pmu] counter index:{:>2}, csr num: {:#03x}, width: {}",
+                idx,
+                counter_info.get_csr(),
+                counter_info.get_width()
+            );
+        }
+    }
+
+    /* PMU test for hardware events */
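+    // Event indices below follow the SBI PMU spec: 0x1 = SBI_PMU_HW_CPU_CYCLES,
+    // 0x2 = SBI_PMU_HW_INSTRUCTIONS, 0x3 = SBI_PMU_HW_CACHE_REFERENCES;
+    // 0x10019/0x1001b/0x10021 are hardware cache events. Config flag 0b010 sets
+    // CLEAR_VALUE, 0b110 sets CLEAR_VALUE | AUTO_START.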
+    let counter_mask = CounterMask::from_mask_base(0x7ffff, 0);
+    let result = sbi::pmu_counter_config_matching(counter_mask, Flag::new(0b110), 0x2, 0);
+    assert!(result.is_ok());
+    let result = sbi::pmu_counter_config_matching(counter_mask, Flag::new(0b110), 0x10019, 0);
+    assert!(result.is_ok());
+    let result = sbi::pmu_counter_config_matching(counter_mask, Flag::new(0b110), 0x1001b, 0);
+    assert!(result.is_ok());
+    let result = sbi::pmu_counter_config_matching(counter_mask, Flag::new(0b110), 0x10021, 0);
+    assert!(result.is_ok());
+    let result = sbi::pmu_counter_config_matching(counter_mask, Flag::new(0b110), 0x3, 0);
+    assert_eq!(result, SbiRet::not_supported());
+
+    // `SBI_PMU_HW_CPU_CYCLES` event test
+    let result = sbi::pmu_counter_config_matching(counter_mask, Flag::new(0b010), 0x1, 0);
+    assert!(result.is_ok());
+    // The counter index should be 0 (mcycle).
+    assert_eq!(result.value, 0);
+    let cycle_counter_idx = result.value;
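+    // The counter was configured with CLEAR_VALUE and without AUTO_START,
+    // so mcycle should read 0 and not be counting yet.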
+    let cycle_num = cycle::read64();
+    assert_eq!(cycle_num, 0);
+    // Start counting `SBI_PMU_HW_CPU_CYCLES` events
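+    // Start flag 0x1 = SET_INIT_VALUE, so mcycle is preloaded with 0xffff before it starts.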
+    let start_result = sbi::pmu_counter_start(
+        CounterMask::from_mask_base(0x1, cycle_counter_idx),
+        Flag::new(0x1),
+        0xffff,
+    );
+    assert!(start_result.is_ok());
+    let cycle_num = cycle::read64();
+    assert!(cycle_num >= 0xffff);
+    // Stop counting `SBI_PMU_HW_CPU_CYCLES` events
+    let stop_result = sbi::pmu_counter_stop(
+        CounterMask::from_mask_base(0x1, cycle_counter_idx),
+        Flag::new(0x0),
+    );
+    assert!(stop_result.is_ok());
+    let old_cycle_num = cycle::read64();
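+    // Burn some cycles; the stopped counter must not advance.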
+    let mut _j = 0;
+    for i in 0..1000 {
+        _j += i
+    }
+    let new_cycle_num = cycle::read64();
+    assert_eq!(old_cycle_num, new_cycle_num);
+    // Restart counting `SBI_PMU_HW_CPU_CYCLES` events
+    let start_result = sbi::pmu_counter_start(
+        CounterMask::from_mask_base(0x1, cycle_counter_idx),
+        Flag::new(0x0),
+        0,
+    );
+    assert!(start_result.is_ok());
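+    // Burn some cycles again; the restarted counter must advance.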
+    let mut _j = 0;
+    for i in 0..1000 {
+        _j += i
+    }
+    let restart_cycle_num = cycle::read64();
+    assert!(restart_cycle_num > new_cycle_num);
+
+    /* PMU test for firmware events */
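+    // The counter mask below covers both the hardware counters and the firmware
+    // counters that follow them.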
+    let counter_mask = CounterMask::from_mask_base(0x7ffffffff, 0);
+
+    // Mapping a counter to the `SBI_PMU_FW_ACCESS_LOAD` event should fail with
+    // not_supported, since that firmware event is not implemented.
+    let result = sbi::pmu_counter_config_matching(
+        counter_mask,
+        Flag::new(0b010),
+        EventIdx::new_firmware_event(firmware_event::ACCESS_LOAD).raw(),
+        0,
+    );
+    assert_eq!(result, SbiRet::not_supported());
+
+    // Map a counter to the `SBI_PMU_FW_IPI_SENT` event.
+    // This counter should be a firmware counter and its value should be initialized to 0.
+    let result = sbi::pmu_counter_config_matching(
+        counter_mask,
+        Flag::new(0b010),
+        EventIdx::new_firmware_event(firmware_event::IPI_SENT).raw(),
+        0,
+    );
+    assert!(result.is_ok());
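+    // Firmware counter indices start after the hardware counters (19 of them on QEMU's default config).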
+    assert!(result.value >= 19);
+    let ipi_counter_idx = result.value;
+    let ipi_num = sbi::pmu_counter_fw_read(ipi_counter_idx);
+    assert!(ipi_num.is_ok());
+    assert_eq!(ipi_num.value, 0);
+
+    // Start counting `SBI_PMU_FW_IPI_SENT` events and assign an initial value of 25 to the event counter
+    let start_result = sbi::pmu_counter_start(
+        CounterMask::from_mask_base(0x1, ipi_counter_idx),
+        Flag::new(0x1),
+        25,
+    );
+    assert!(start_result.is_ok());
+    // Read the value of the `SBI_PMU_FW_IPI_SENT` event counter, which should be 25
+    let ipi_num = sbi::pmu_counter_fw_read(ipi_counter_idx);
+    assert!(ipi_num.is_ok());
+    assert_eq!(ipi_num.value, 25);
+
+    // Send an IPI to a non-existent hart; the call returns invalid_param but still
+    // counts one `SBI_PMU_FW_IPI_SENT` event.
+    let send_ipi_result = sbi::send_ipi(HartMask::from_mask_base(0b10, 0));
+    assert_eq!(send_ipi_result, SbiRet::invalid_param());
+
+    // Read the value of the `SBI_PMU_FW_IPI_SENT` event counter, which should be 26
+    let ipi_num = sbi::pmu_counter_fw_read(ipi_counter_idx);
+    assert!(ipi_num.is_ok());
+    assert_eq!(ipi_num.value, 26);
+
+    // Stop counting `SBI_PMU_FW_IPI_SENT` events
+    let stop_result = sbi::pmu_counter_stop(
+        CounterMask::from_mask_base(0x1, ipi_counter_idx),
+        Flag::new(0x0),
+    );
+    assert!(stop_result.is_ok());
+
+    // Stopping `SBI_PMU_FW_IPI_SENT` counting again should return already_stopped.
+    let stop_result = sbi::pmu_counter_stop(
+        CounterMask::from_mask_base(0x1, ipi_counter_idx),
+        Flag::new(0x0),
+    );
+    assert_eq!(stop_result, SbiRet::already_stopped());
+
+    // Send the IPI again while the counter is stopped; the `SBI_PMU_FW_IPI_SENT` count must not change.
+    let send_ipi_result = sbi::send_ipi(HartMask::from_mask_base(0b10, 0));
+    assert_eq!(send_ipi_result, SbiRet::invalid_param());
+
+    // Read the value of the `SBI_PMU_FW_IPI_SENT` event counter, which should be 26
+    let ipi_num = sbi::pmu_counter_fw_read(ipi_counter_idx);
+    assert!(ipi_num.is_ok());
+    assert_eq!(ipi_num.value, 26);
+
+    // Restart counting `SBI_PMU_FW_IPI_SENT` events
+    let start_result = sbi::pmu_counter_start(
+        CounterMask::from_mask_base(0x1, ipi_counter_idx),
+        Flag::new(0x0),
+        0,
+    );
+    assert!(start_result.is_ok());
+
+    // Send the IPI again with the counter restarted; `SBI_PMU_FW_IPI_SENT` increases by one.
+    let send_ipi_result = sbi::send_ipi(HartMask::from_mask_base(0b10, 0));
+    assert_eq!(send_ipi_result, SbiRet::invalid_param());
+
+    // Read the value of the `SBI_PMU_FW_IPI_SENT` event counter, which should be 27
+    let ipi_num = sbi::pmu_counter_fw_read(ipi_counter_idx);
+    assert!(ipi_num.is_ok());
+    assert_eq!(ipi_num.value, 27);
+
+    if test_result {
         sbi::system_reset(sbi::Shutdown, sbi::NoReason);
     } else {
         sbi::system_reset(sbi::Shutdown, sbi::SystemFailure);
@@ -211,3 +382,113 @@ impl rcore_console::Console for Console {
         unsafe { UART.get().write(s.as_bytes()) };
     }
 }
+
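+/// Minimal wrapper used by the test to pass raw PMU flag bits through the
+/// `ConfigFlags`/`StartFlags`/`StopFlags` traits.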
+struct Flag {
+    inner: usize,
+}
+
+impl ConfigFlags for Flag {
+    fn raw(&self) -> usize {
+        self.inner
+    }
+}
+
+impl StartFlags for Flag {
+    fn raw(&self) -> usize {
+        self.inner
+    }
+}
+
+impl StopFlags for Flag {
+    fn raw(&self) -> usize {
+        self.inner
+    }
+}
+
+impl Flag {
+    pub fn new(flag: usize) -> Self {
+        Self { inner: flag }
+    }
+}
+
+/// Wrapper for the packed counter information returned by `pmu_counter_get_info`.
+struct CounterInfo {
+    /// Packed representation of counter information:
+    /// - Bits [11:0]: CSR number for hardware counters
+    /// - Bits [17:12]: Counter width (typically 63 for RV64)
+    /// - MSB: Set for firmware counters, clear for hardware counters
+    inner: usize,
+}
+
+#[allow(unused)]
+impl CounterInfo {
+    const CSR_MASK: usize = 0xFFF; // Bits [11:0]
+    const WIDTH_MASK: usize = 0x3F << 12; // Bits [17:12]
+    const FIRMWARE_FLAG: usize = 1 << (size_of::<usize>() * 8 - 1); // MSB
+
+    #[inline]
+    pub const fn new(counter_info: usize) -> Self {
+        Self {
+            inner: counter_info,
+        }
+    }
+
+    #[inline]
+    pub fn set_csr(&mut self, csr_num: u16) {
+        self.inner = (self.inner & !Self::CSR_MASK) | ((csr_num as usize) & Self::CSR_MASK);
+    }
+
+    #[inline]
+    pub fn get_csr(&self) -> usize {
+        self.inner & Self::CSR_MASK
+    }
+
+    #[inline]
+    pub fn set_width(&mut self, width: u8) {
+        self.inner = (self.inner & !Self::WIDTH_MASK) | (((width as usize) & 0x3F) << 12);
+    }
+
+    #[inline]
+    pub fn get_width(&self) -> usize {
+        (self.inner & Self::WIDTH_MASK) >> 12
+    }
+
+    #[inline]
+    pub fn is_firmware_counter(&self) -> bool {
+        self.inner & Self::FIRMWARE_FLAG != 0
+    }
+
+    #[inline]
+    pub const fn with_hardware_info(csr_num: u16, width: u8) -> Self {
+        Self {
+            inner: ((csr_num as usize) & Self::CSR_MASK) | (((width as usize) & 0x3F) << 12),
+        }
+    }
+
+    #[inline]
+    pub const fn with_firmware_info() -> Self {
+        Self {
+            inner: Self::FIRMWARE_FLAG,
+        }
+    }
+
+    #[inline]
+    pub const fn inner(self) -> usize {
+        self.inner
+    }
+}
+
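+/// Minimal event index helper for the test; firmware events carry event type 0xf
+/// in bits [19:16] and the event code in bits [15:0].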
+struct EventIdx {
+    inner: usize,
+}
+
+impl EventIdx {
+    fn raw(&self) -> usize {
+        self.inner
+    }
+
+    fn new_firmware_event(event_code: usize) -> Self {
+        let inner = 0xf << 16 | event_code;
+        Self { inner }
+    }
+}