Godones 7 months ago
parent
commit
0840f2072a

+ 21 - 0
.gitignore

@@ -0,0 +1,21 @@
+### Rust template
+# Generated by Cargo
+# will have compiled files and executables
+debug/
+target/
+
+# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
+# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
+Cargo.lock
+
+# These are backup files generated by rustfmt
+**/*.rs.bk
+
+# MSVC Windows builds of rustc generate these, which store debugging information
+*.pdb
+
+### rust-analyzer template
+# Can be generated by build systems other than cargo (ex: bazelbuild/rust_rules)
+rust-project.json
+
+.idea

+ 24 - 0
Cargo.toml

@@ -0,0 +1,24 @@
+[workspace]
+members = [
+    "aya-log",
+    "aya"
+]
+
+resolver = "2"
+
+
+[workspace.dependencies]
+thiserror = { version = "1", default-features = false }
+lazy_static = { version = "1", default-features = false }
+bitflags = { version = "2.2.1", default-features = false }
+object = { version = "0.36", default-features = false }
+bytes = { version = "1", default-features = false }
+assert_matches = { version = "1.5.0", default-features = false }
+log = { version = "0.4", default-features = false }
+libc = { version = "0.2.105", default-features = false }
+async-io = { version = "2.0", default-features = false }
+tokio = { version = "1.24.0", default-features = false }
+env_logger = { version = "0.11", default-features = false }
+testing_logger = { version = "0.1.1", default-features = false }
+aya-log-common = { git = "https://github.com/aya-rs/aya", rev = "3d57d35", default-features = false }
+aya-obj = { git = "https://github.com/aya-rs/aya", rev = "3d57d35", features = ["std"] }

+ 201 - 0
LICENSE-Apache

@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 21 - 0
LICENSE-MIT

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 os-module
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

File diff not shown because the file is too large
+ 140 - 0
aya-log/CHANGELOG.md


+ 26 - 0
aya-log/Cargo.toml

@@ -0,0 +1,26 @@
+[package]
+name = "aya-log"
+version = "0.2.0"
+description = "A logging library for eBPF programs."
+keywords = ["bpf", "ebpf", "log", "logging"]
+readme = "README.md"
+documentation = "https://docs.rs/aya-log"
+edition = "2021"
+
+
+[dependencies]
+aya = { path = "../aya", features = ["async_tokio"] }
+aya-log-common = { workspace = true }
+bytes = { workspace = true }
+log = { workspace = true }
+thiserror = { workspace = true }
+tokio = { workspace = true, features = ["rt"] }
+
+
+[dev-dependencies]
+env_logger = { workspace = true }
+testing_logger = { workspace = true }
+
+
+[lib]
+path = "src/lib.rs"

+ 63 - 0
aya-log/README.md

@@ -0,0 +1,63 @@
+# aya-log - a logging library for eBPF programs
+
+## Overview
+
+`aya-log` is a logging library for eBPF programs written using [aya]. Think of
+it as the [log] crate for eBPF.
+
+## Installation
+
+### User space
+
+Add `aya-log` to `Cargo.toml`:
+
+```toml
+[dependencies]
+aya-log = { git = "https://github.com/aya-rs/aya", branch = "main" }
+```
+
+### eBPF side
+
+Add `aya-log-ebpf` to `Cargo.toml`:
+
+```toml
+[dependencies]
+aya-log-ebpf = { git = "https://github.com/aya-rs/aya", branch = "main" }
+```
+
+## Example
+
+Here's an example that uses `aya-log` in conjunction with the [env_logger] crate
+to log eBPF messages to the terminal.
+
+### User space code
+
+```rust
+use aya_log::EbpfLogger;
+
+env_logger::init();
+
+// Will log using the default logger, which is env_logger in this case
+EbpfLogger::init(&mut bpf).unwrap();
+```
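+
+The snippet above assumes `bpf` is an already-loaded `aya::Ebpf` handle and that
+the program runs inside a Tokio runtime. A minimal end-to-end sketch (the object
+file path is hypothetical, and `tokio` with the `macros` and `rt-multi-thread`
+features is assumed):
+
+```rust
+use aya::EbpfLoader;
+use aya_log::EbpfLogger;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Initialize env_logger as the process-wide logger.
+    env_logger::init();
+
+    // Load the compiled eBPF object (hypothetical path).
+    let mut bpf = EbpfLoader::new().load_file("firewall.o")?;
+
+    // Start reading aya-log records and forward them to env_logger.
+    EbpfLogger::init(&mut bpf)?;
+
+    Ok(())
+}
+```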
+
+### eBPF code
+
+```rust
+use aya_log_ebpf::info;
+
+fn try_xdp_firewall(ctx: XdpContext) -> Result<u32, ()> {
+    if let Some(port) = tcp_dest_port(&ctx)? {
+        if block_port(port) {
+            info!(&ctx, "❌ blocked incoming connection on port: {}", port);
+            return Ok(XDP_DROP);
+        }
+    }
+
+    Ok(XDP_PASS)
+}
+```
+
+[aya]: https://github.com/aya-rs/aya
+[log]: https://docs.rs/log
+[env_logger]: https://docs.rs/env_logger

+ 1 - 0
aya-log/release.toml

@@ -0,0 +1 @@
+shared-version = true

+ 1063 - 0
aya-log/src/lib.rs

@@ -0,0 +1,1063 @@
+//! A logging framework for eBPF programs.
+//!
+//! This is the user space side of the [Aya] logging framework. For the eBPF
+//! side, see the `aya-log-ebpf` crate.
+//!
+//! `aya-log` provides the [EbpfLogger] type, which reads log records created by
+//! `aya-log-ebpf` and logs them using the [log] crate. Any logger that
+//! implements the [Log] trait can be used with this crate.
+//!
+//! # Example:
+//!
+//! This example uses the [env_logger] crate to log messages to the terminal.
+//!
+//! ```no_run
+//! # let mut bpf = aya::Ebpf::load(&[]).unwrap();
+//! use aya_log::EbpfLogger;
+//!
+//! // initialize env_logger as the default logger
+//! env_logger::init();
+//!
+//! // start reading aya-log records and log them using the default logger
+//! EbpfLogger::init(&mut bpf).unwrap();
+//! ```
+//!
+//! With the following eBPF code:
+//!
+//! ```ignore
+//! # let ctx = ();
+//! use aya_log_ebpf::{debug, error, info, trace, warn};
+//!
+//! error!(&ctx, "this is an error message 🚨");
+//! warn!(&ctx, "this is a warning message ⚠️");
+//! info!(&ctx, "this is an info message ℹ️");
+//! debug!(&ctx, "this is a debug message ️🐝");
+//! trace!(&ctx, "this is a trace message 🔍");
+//! ```
+//! Outputs:
+//!
+//! ```text
+//! 21:58:55 [ERROR] xxx: [src/main.rs:35] this is an error message 🚨
+//! 21:58:55 [WARN] xxx: [src/main.rs:36] this is a warning message ⚠️
+//! 21:58:55 [INFO] xxx: [src/main.rs:37] this is an info message ℹ️
+//! 21:58:55 [DEBUG] (7) xxx: [src/main.rs:38] this is a debug message ️🐝
+//! 21:58:55 [TRACE] (7) xxx: [src/main.rs:39] this is a trace message 🔍
+//! ```
+//!
+//! [Aya]: https://docs.rs/aya
+//! [env_logger]: https://docs.rs/env_logger
+//! [Log]: https://docs.rs/log/0.4.14/log/trait.Log.html
+//! [log]: https://docs.rs/log
+//!
+use std::{
+    fmt::{LowerHex, UpperHex},
+    io, mem,
+    net::{Ipv4Addr, Ipv6Addr},
+    ptr, str,
+    sync::Arc,
+};
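+/// Name of the perf event array map that `aya-log-ebpf` writes log records into.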
+const MAP_NAME: &str = "AYA_LOGS";
+
+use aya::{
+    loaded_programs,
+    maps::{
+        perf::{AsyncPerfEventArray, Events, PerfBufferError},
+        Map, MapData, MapError, MapInfo,
+    },
+    programs::ProgramError,
+    util::online_cpus,
+    Ebpf, Pod,
+};
+use aya_log_common::{
+    Argument, DisplayHint, Level, LogValueLength, RecordField, LOG_BUF_CAPACITY, LOG_FIELDS,
+};
+use bytes::BytesMut;
+use log::{error, Log, Record};
+use thiserror::Error;
+
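+// Newtype wrappers around the `aya-log-common` record types. They exist so that
+// this crate can implement aya's `Pod` trait for them; the orphan rule prevents
+// implementing the foreign trait directly on the foreign types.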
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+struct RecordFieldWrapper(RecordField);
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+struct ArgumentWrapper(Argument);
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+struct DisplayHintWrapper(DisplayHint);
+
+unsafe impl Pod for RecordFieldWrapper {}
+unsafe impl Pod for ArgumentWrapper {}
+unsafe impl Pod for DisplayHintWrapper {}
+
+/// Log messages generated by `aya_log_ebpf` using the [log] crate.
+///
+/// For more details see the [module level documentation](crate).
+pub struct EbpfLogger;
+
+/// Log messages generated by `aya_log_ebpf` using the [log] crate.
+#[deprecated(since = "0.2.1", note = "Use `aya_log::EbpfLogger` instead")]
+pub type BpfLogger = EbpfLogger;
+
+impl EbpfLogger {
+    /// Starts reading log records created with `aya-log-ebpf` and logs them
+    /// with the default logger. See [log::logger].
+    pub fn init(bpf: &mut Ebpf) -> Result<EbpfLogger, Error> {
+        EbpfLogger::init_with_logger(bpf, log::logger())
+    }
+
+    /// Starts reading log records created with `aya-log-ebpf` and logs them
+    /// with the given logger.
+    pub fn init_with_logger<T: Log + 'static>(
+        bpf: &mut Ebpf,
+        logger: T,
+    ) -> Result<EbpfLogger, Error> {
+        let map = bpf.take_map(MAP_NAME).ok_or(Error::MapNotFound)?;
+        Self::read_logs_async(map, logger)?;
+        Ok(EbpfLogger {})
+    }
+
+    /// Attaches to an existing `aya-log-ebpf` instance.
+    ///
+    /// Attaches to the logs produced by `program_id`. Can be used to read logs generated by a
+    /// pinned program. The log records will be written to the default logger. See [log::logger].
+    pub fn init_from_id(program_id: u32) -> Result<EbpfLogger, Error> {
+        Self::init_from_id_with_logger(program_id, log::logger())
+    }
+
+    /// Attaches to an existing `aya-log-ebpf` instance and logs with the given logger.
+    ///
+    /// Attaches to the logs produced by `program_id`. Can be used to read logs generated by a
+    /// pinned program. The log records will be written to the given logger.
+    pub fn init_from_id_with_logger<T: Log + 'static>(
+        program_id: u32,
+        logger: T,
+    ) -> Result<EbpfLogger, Error> {
+        let program_info = loaded_programs()
+            .filter_map(|info| info.ok())
+            .find(|info| info.id() == program_id)
+            .ok_or(Error::ProgramNotFound)?;
+        let map = program_info
+            .map_ids()
+            .map_err(Error::ProgramError)?
+            .iter()
+            .filter_map(|id| MapInfo::from_id(*id).ok())
+            .find(|map_info| match map_info.name_as_str() {
+                Some(name) => name == MAP_NAME,
+                None => false,
+            })
+            .ok_or(Error::MapNotFound)?;
+        let map = MapData::from_id(map.id()).map_err(Error::MapError)?;
+
+        Self::read_logs_async(Map::PerfEventArray(map), logger)?;
+
+        Ok(EbpfLogger {})
+    }
+
+    fn read_logs_async<T: Log + 'static>(map: Map, logger: T) -> Result<(), Error> {
+        let mut logs: AsyncPerfEventArray<_> = map.try_into()?;
+
+        let logger = Arc::new(logger);
+        for cpu_id in online_cpus().map_err(Error::InvalidOnlineCpu)? {
+            let mut buf = logs.open(cpu_id, None)?;
+
+            let log = logger.clone();
+            tokio::spawn(async move {
+                let mut buffers = vec![BytesMut::with_capacity(LOG_BUF_CAPACITY); 10];
+
+                loop {
+                    let Events { read, lost: _ } = buf.read_events(&mut buffers).await.unwrap();
+
+                    for buf in buffers.iter().take(read) {
+                        log_buf(buf.as_ref(), &*log).unwrap();
+                    }
+                }
+            });
+        }
+        Ok(())
+    }
+}
+
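+/// Converts a single log argument of type `T` into the string that is spliced
+/// into the final log message. Implemented by the default, hex, IP and MAC
+/// formatters below.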
+pub trait Formatter<T> {
+    fn format(v: T) -> String;
+}
+
+pub struct DefaultFormatter;
+impl<T> Formatter<T> for DefaultFormatter
+where
+    T: ToString,
+{
+    fn format(v: T) -> String {
+        v.to_string()
+    }
+}
+
+pub struct LowerHexFormatter;
+impl<T> Formatter<T> for LowerHexFormatter
+where
+    T: LowerHex,
+{
+    fn format(v: T) -> String {
+        format!("{v:x}")
+    }
+}
+
+pub struct LowerHexDebugFormatter;
+impl<T> Formatter<&[T]> for LowerHexDebugFormatter
+where
+    T: LowerHex,
+{
+    fn format(v: &[T]) -> String {
+        let mut s = String::new();
+        for v in v {
+            let () = core::fmt::write(&mut s, format_args!("{v:x}")).unwrap();
+        }
+        s
+    }
+}
+
+pub struct UpperHexFormatter;
+impl<T> Formatter<T> for UpperHexFormatter
+where
+    T: UpperHex,
+{
+    fn format(v: T) -> String {
+        format!("{v:X}")
+    }
+}
+
+pub struct UpperHexDebugFormatter;
+impl<T> Formatter<&[T]> for UpperHexDebugFormatter
+where
+    T: UpperHex,
+{
+    fn format(v: &[T]) -> String {
+        let mut s = String::new();
+        for v in v {
+            let () = core::fmt::write(&mut s, format_args!("{v:X}")).unwrap();
+        }
+        s
+    }
+}
+
+pub struct Ipv4Formatter;
+impl<T> Formatter<T> for Ipv4Formatter
+where
+    T: Into<Ipv4Addr>,
+{
+    fn format(v: T) -> String {
+        v.into().to_string()
+    }
+}
+
+pub struct Ipv6Formatter;
+impl<T> Formatter<T> for Ipv6Formatter
+where
+    T: Into<Ipv6Addr>,
+{
+    fn format(v: T) -> String {
+        v.into().to_string()
+    }
+}
+
+pub struct LowerMacFormatter;
+impl Formatter<[u8; 6]> for LowerMacFormatter {
+    fn format(v: [u8; 6]) -> String {
+        format!(
+            "{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}",
+            v[0], v[1], v[2], v[3], v[4], v[5]
+        )
+    }
+}
+
+pub struct UpperMacFormatter;
+impl Formatter<[u8; 6]> for UpperMacFormatter {
+    fn format(v: [u8; 6]) -> String {
+        format!(
+            "{:02X}:{:02X}:{:02X}:{:02X}:{:02X}:{:02X}",
+            v[0], v[1], v[2], v[3], v[4], v[5]
+        )
+    }
+}
+
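+// Internal trait used by `log_buf`: renders a decoded argument according to the
+// `DisplayHint` that (optionally) preceded it in the record.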
+trait Format {
+    fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()>;
+}
+
+impl Format for &[u8] {
+    fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
+        match last_hint.map(|DisplayHintWrapper(dh)| dh) {
+            Some(DisplayHint::LowerHex) => Ok(LowerHexDebugFormatter::format(self)),
+            Some(DisplayHint::UpperHex) => Ok(UpperHexDebugFormatter::format(self)),
+            _ => Err(()),
+        }
+    }
+}
+
+impl Format for u32 {
+    fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
+        match last_hint.map(|DisplayHintWrapper(dh)| dh) {
+            Some(DisplayHint::Default) => Ok(DefaultFormatter::format(self)),
+            Some(DisplayHint::LowerHex) => Ok(LowerHexFormatter::format(self)),
+            Some(DisplayHint::UpperHex) => Ok(UpperHexFormatter::format(self)),
+            Some(DisplayHint::Ip) => Ok(Ipv4Formatter::format(*self)),
+            Some(DisplayHint::LowerMac) => Err(()),
+            Some(DisplayHint::UpperMac) => Err(()),
+            _ => Ok(DefaultFormatter::format(self)),
+        }
+    }
+}
+
+impl Format for Ipv4Addr {
+    fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
+        match last_hint.map(|DisplayHintWrapper(dh)| dh) {
+            Some(DisplayHint::Default) => Ok(Ipv4Formatter::format(*self)),
+            Some(DisplayHint::LowerHex) => Err(()),
+            Some(DisplayHint::UpperHex) => Err(()),
+            Some(DisplayHint::Ip) => Ok(Ipv4Formatter::format(*self)),
+            Some(DisplayHint::LowerMac) => Err(()),
+            Some(DisplayHint::UpperMac) => Err(()),
+            None => Ok(Ipv4Formatter::format(*self)),
+        }
+    }
+}
+
+impl Format for Ipv6Addr {
+    fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
+        match last_hint.map(|DisplayHintWrapper(dh)| dh) {
+            Some(DisplayHint::Default) => Ok(Ipv6Formatter::format(*self)),
+            Some(DisplayHint::LowerHex) => Err(()),
+            Some(DisplayHint::UpperHex) => Err(()),
+            Some(DisplayHint::Ip) => Ok(Ipv6Formatter::format(*self)),
+            Some(DisplayHint::LowerMac) => Err(()),
+            Some(DisplayHint::UpperMac) => Err(()),
+            None => Ok(Ipv6Formatter::format(*self)),
+        }
+    }
+}
+
+impl Format for [u8; 4] {
+    fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
+        match last_hint.map(|DisplayHintWrapper(dh)| dh) {
+            Some(DisplayHint::Default) => Ok(Ipv4Formatter::format(*self)),
+            Some(DisplayHint::LowerHex) => Err(()),
+            Some(DisplayHint::UpperHex) => Err(()),
+            Some(DisplayHint::Ip) => Ok(Ipv4Formatter::format(*self)),
+            Some(DisplayHint::LowerMac) => Err(()),
+            Some(DisplayHint::UpperMac) => Err(()),
+            None => Ok(Ipv4Formatter::format(*self)),
+        }
+    }
+}
+
+impl Format for [u8; 6] {
+    fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
+        match last_hint.map(|DisplayHintWrapper(dh)| dh) {
+            Some(DisplayHint::Default) => Err(()),
+            Some(DisplayHint::LowerHex) => Err(()),
+            Some(DisplayHint::UpperHex) => Err(()),
+            Some(DisplayHint::Ip) => Err(()),
+            Some(DisplayHint::LowerMac) => Ok(LowerMacFormatter::format(*self)),
+            Some(DisplayHint::UpperMac) => Ok(UpperMacFormatter::format(*self)),
+            _ => Err(()),
+        }
+    }
+}
+
+impl Format for [u8; 16] {
+    fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
+        match last_hint.map(|DisplayHintWrapper(dh)| dh) {
+            Some(DisplayHint::Default) => Err(()),
+            Some(DisplayHint::LowerHex) => Err(()),
+            Some(DisplayHint::UpperHex) => Err(()),
+            Some(DisplayHint::Ip) => Ok(Ipv6Formatter::format(*self)),
+            Some(DisplayHint::LowerMac) => Err(()),
+            Some(DisplayHint::UpperMac) => Err(()),
+            _ => Err(()),
+        }
+    }
+}
+
+impl Format for [u16; 8] {
+    fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
+        match last_hint.map(|DisplayHintWrapper(dh)| dh) {
+            Some(DisplayHint::Default) => Err(()),
+            Some(DisplayHint::LowerHex) => Err(()),
+            Some(DisplayHint::UpperHex) => Err(()),
+            Some(DisplayHint::Ip) => Ok(Ipv6Formatter::format(*self)),
+            Some(DisplayHint::LowerMac) => Err(()),
+            Some(DisplayHint::UpperMac) => Err(()),
+            _ => Err(()),
+        }
+    }
+}
+
+macro_rules! impl_format {
+    ($type:ident) => {
+        impl Format for $type {
+            fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
+                match last_hint.map(|DisplayHintWrapper(dh)| dh) {
+                    Some(DisplayHint::Default) => Ok(DefaultFormatter::format(self)),
+                    Some(DisplayHint::LowerHex) => Ok(LowerHexFormatter::format(self)),
+                    Some(DisplayHint::UpperHex) => Ok(UpperHexFormatter::format(self)),
+                    Some(DisplayHint::Ip) => Err(()),
+                    Some(DisplayHint::LowerMac) => Err(()),
+                    Some(DisplayHint::UpperMac) => Err(()),
+                    _ => Ok(DefaultFormatter::format(self)),
+                }
+            }
+        }
+    };
+}
+
+impl_format!(i8);
+impl_format!(i16);
+impl_format!(i32);
+impl_format!(i64);
+impl_format!(isize);
+
+impl_format!(u8);
+impl_format!(u16);
+impl_format!(u64);
+impl_format!(usize);
+
+macro_rules! impl_format_float {
+    ($type:ident) => {
+        impl Format for $type {
+            fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
+                match last_hint.map(|DisplayHintWrapper(dh)| dh) {
+                    Some(DisplayHint::Default) => Ok(DefaultFormatter::format(self)),
+                    Some(DisplayHint::LowerHex) => Err(()),
+                    Some(DisplayHint::UpperHex) => Err(()),
+                    Some(DisplayHint::Ip) => Err(()),
+                    Some(DisplayHint::LowerMac) => Err(()),
+                    Some(DisplayHint::UpperMac) => Err(()),
+                    _ => Ok(DefaultFormatter::format(self)),
+                }
+            }
+        }
+    };
+}
+
+impl_format_float!(f32);
+impl_format_float!(f64);
+
+#[derive(Error, Debug)]
+pub enum Error {
+    #[error("log event array {} doesn't exist", MAP_NAME)]
+    MapNotFound,
+
+    #[error("error opening log event array")]
+    MapError(#[from] MapError),
+
+    #[error("error opening log buffer")]
+    PerfBufferError(#[from] PerfBufferError),
+
+    #[error("invalid /sys/devices/system/cpu/online format")]
+    InvalidOnlineCpu(#[source] io::Error),
+
+    #[error("program not found")]
+    ProgramNotFound,
+
+    #[error(transparent)]
+    ProgramError(#[from] ProgramError),
+}
+
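+/// Parses a single record written by `aya-log-ebpf` out of `buf` and forwards it
+/// to `logger`: first the fixed header fields (target, level, module, file, line,
+/// number of arguments), then each argument, formatted according to the most
+/// recent `DisplayHint`.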
+#[allow(clippy::result_unit_err)]
+pub fn log_buf(mut buf: &[u8], logger: &dyn Log) -> Result<(), ()> {
+    let mut target = None;
+    let mut level = None;
+    let mut module = None;
+    let mut file = None;
+    let mut line = None;
+    let mut num_args = None;
+
+    for _ in 0..LOG_FIELDS {
+        let (RecordFieldWrapper(tag), value, rest) = try_read(buf)?;
+
+        match tag {
+            RecordField::Target => {
+                target = Some(str::from_utf8(value).map_err(|_| ())?);
+            }
+            RecordField::Level => {
+                level = Some({
+                    let level = unsafe { ptr::read_unaligned(value.as_ptr() as *const _) };
+                    match level {
+                        Level::Error => log::Level::Error,
+                        Level::Warn => log::Level::Warn,
+                        Level::Info => log::Level::Info,
+                        Level::Debug => log::Level::Debug,
+                        Level::Trace => log::Level::Trace,
+                    }
+                })
+            }
+            RecordField::Module => {
+                module = Some(str::from_utf8(value).map_err(|_| ())?);
+            }
+            RecordField::File => {
+                file = Some(str::from_utf8(value).map_err(|_| ())?);
+            }
+            RecordField::Line => {
+                line = Some(u32::from_ne_bytes(value.try_into().map_err(|_| ())?));
+            }
+            RecordField::NumArgs => {
+                num_args = Some(usize::from_ne_bytes(value.try_into().map_err(|_| ())?));
+            }
+        }
+
+        buf = rest;
+    }
+
+    let mut full_log_msg = String::new();
+    let mut last_hint: Option<DisplayHintWrapper> = None;
+    for _ in 0..num_args.ok_or(())? {
+        let (ArgumentWrapper(tag), value, rest) = try_read(buf)?;
+
+        match tag {
+            Argument::DisplayHint => {
+                last_hint = Some(unsafe { ptr::read_unaligned(value.as_ptr() as *const _) });
+            }
+            Argument::I8 => {
+                full_log_msg.push_str(
+                    &i8::from_ne_bytes(value.try_into().map_err(|_| ())?)
+                        .format(last_hint.take())?,
+                );
+            }
+            Argument::I16 => {
+                full_log_msg.push_str(
+                    &i16::from_ne_bytes(value.try_into().map_err(|_| ())?)
+                        .format(last_hint.take())?,
+                );
+            }
+            Argument::I32 => {
+                full_log_msg.push_str(
+                    &i32::from_ne_bytes(value.try_into().map_err(|_| ())?)
+                        .format(last_hint.take())?,
+                );
+            }
+            Argument::I64 => {
+                full_log_msg.push_str(
+                    &i64::from_ne_bytes(value.try_into().map_err(|_| ())?)
+                        .format(last_hint.take())?,
+                );
+            }
+            Argument::Isize => {
+                full_log_msg.push_str(
+                    &isize::from_ne_bytes(value.try_into().map_err(|_| ())?)
+                        .format(last_hint.take())?,
+                );
+            }
+            Argument::U8 => {
+                full_log_msg.push_str(
+                    &u8::from_ne_bytes(value.try_into().map_err(|_| ())?)
+                        .format(last_hint.take())?,
+                );
+            }
+            Argument::U16 => {
+                full_log_msg.push_str(
+                    &u16::from_ne_bytes(value.try_into().map_err(|_| ())?)
+                        .format(last_hint.take())?,
+                );
+            }
+            Argument::U32 => {
+                full_log_msg.push_str(
+                    &u32::from_ne_bytes(value.try_into().map_err(|_| ())?)
+                        .format(last_hint.take())?,
+                );
+            }
+            Argument::U64 => {
+                full_log_msg.push_str(
+                    &u64::from_ne_bytes(value.try_into().map_err(|_| ())?)
+                        .format(last_hint.take())?,
+                );
+            }
+            Argument::Usize => {
+                full_log_msg.push_str(
+                    &usize::from_ne_bytes(value.try_into().map_err(|_| ())?)
+                        .format(last_hint.take())?,
+                );
+            }
+            Argument::F32 => {
+                full_log_msg.push_str(
+                    &f32::from_ne_bytes(value.try_into().map_err(|_| ())?)
+                        .format(last_hint.take())?,
+                );
+            }
+            Argument::F64 => {
+                full_log_msg.push_str(
+                    &f64::from_ne_bytes(value.try_into().map_err(|_| ())?)
+                        .format(last_hint.take())?,
+                );
+            }
+            Argument::Ipv4Addr => {
+                let value: [u8; 4] = value.try_into().map_err(|_| ())?;
+                let value = Ipv4Addr::from(value);
+                full_log_msg.push_str(&value.format(last_hint.take())?)
+            }
+            Argument::Ipv6Addr => {
+                let value: [u8; 16] = value.try_into().map_err(|_| ())?;
+                let value = Ipv6Addr::from(value);
+                full_log_msg.push_str(&value.format(last_hint.take())?)
+            }
+            Argument::ArrU8Len4 => {
+                let value: [u8; 4] = value.try_into().map_err(|_| ())?;
+                full_log_msg.push_str(&value.format(last_hint.take())?);
+            }
+            Argument::ArrU8Len6 => {
+                let value: [u8; 6] = value.try_into().map_err(|_| ())?;
+                full_log_msg.push_str(&value.format(last_hint.take())?);
+            }
+            Argument::ArrU8Len16 => {
+                let value: [u8; 16] = value.try_into().map_err(|_| ())?;
+                full_log_msg.push_str(&value.format(last_hint.take())?);
+            }
+            Argument::ArrU16Len8 => {
+                let data: [u8; 16] = value.try_into().map_err(|_| ())?;
+                let mut value: [u16; 8] = Default::default();
+                for (i, s) in data.chunks_exact(2).enumerate() {
+                    value[i] = ((s[1] as u16) << 8) | s[0] as u16;
+                }
+                full_log_msg.push_str(&value.format(last_hint.take())?);
+            }
+            Argument::Bytes => {
+                full_log_msg.push_str(&value.format(last_hint.take())?);
+            }
+            Argument::Str => match str::from_utf8(value) {
+                Ok(v) => {
+                    full_log_msg.push_str(v);
+                }
+                Err(e) => error!("received invalid utf8 string: {}", e),
+            },
+        }
+
+        buf = rest;
+    }
+
+    logger.log(
+        &Record::builder()
+            .args(format_args!("{full_log_msg}"))
+            .target(target.ok_or(())?)
+            .level(level.ok_or(())?)
+            .module_path(module)
+            .file(file)
+            .line(line)
+            .build(),
+    );
+    logger.flush();
+    Ok(())
+}
+
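+/// Reads one tag-length-value entry from `buf`: a POD tag `T`, a native-endian
+/// `LogValueLength`, and that many value bytes. Returns the tag, the value slice
+/// and the remaining bytes.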
+fn try_read<T: Pod>(mut buf: &[u8]) -> Result<(T, &[u8], &[u8]), ()> {
+    if buf.len() < mem::size_of::<T>() + mem::size_of::<LogValueLength>() {
+        return Err(());
+    }
+
+    let tag = unsafe { ptr::read_unaligned(buf.as_ptr() as *const T) };
+    buf = &buf[mem::size_of::<T>()..];
+
+    let len =
+        LogValueLength::from_ne_bytes(buf[..mem::size_of::<LogValueLength>()].try_into().unwrap());
+    buf = &buf[mem::size_of::<LogValueLength>()..];
+
+    let len: usize = len.into();
+    if buf.len() < len {
+        return Err(());
+    }
+
+    let (value, rest) = buf.split_at(len);
+    Ok((tag, value, rest))
+}
+
+#[cfg(test)]
+mod test {
+    use std::net::IpAddr;
+
+    use aya_log_common::{write_record_header, WriteToBuf};
+    use log::{logger, Level};
+
+    use super::*;
+
+    fn new_log(args: usize) -> Option<(usize, Vec<u8>)> {
+        let mut buf = vec![0; 8192];
+        let len = write_record_header(
+            &mut buf,
+            "test",
+            aya_log_common::Level::Info,
+            "test",
+            "test.rs",
+            123,
+            args,
+        )?;
+        Some((len.get(), buf))
+    }
+
+    #[test]
+    fn test_str() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(1).unwrap();
+
+        len += "test".write(&mut input[len..]).unwrap().get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "test");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_str_with_args() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(2).unwrap();
+
+        len += "hello ".write(&mut input[len..]).unwrap().get();
+        len += "test".write(&mut input[len..]).unwrap().get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "hello test");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_bytes() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(2).unwrap();
+
+        len += DisplayHint::LowerHex
+            .write(&mut input[len..])
+            .unwrap()
+            .get();
+        len += [0xde, 0xad].write(&mut input[len..]).unwrap().get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "dead");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_bytes_with_args() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(5).unwrap();
+
+        len += DisplayHint::LowerHex
+            .write(&mut input[len..])
+            .unwrap()
+            .get();
+        len += [0xde, 0xad].write(&mut input[len..]).unwrap().get();
+
+        len += " ".write(&mut input[len..]).unwrap().get();
+
+        len += DisplayHint::UpperHex
+            .write(&mut input[len..])
+            .unwrap()
+            .get();
+        len += [0xbe, 0xef].write(&mut input[len..]).unwrap().get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "dead BEEF");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_display_hint_default() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(3).unwrap();
+
+        len += "default hint: ".write(&mut input[len..]).unwrap().get();
+        len += DisplayHint::Default.write(&mut input[len..]).unwrap().get();
+        len += 14.write(&mut input[len..]).unwrap().get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "default hint: 14");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_display_hint_lower_hex() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(3).unwrap();
+
+        len += "lower hex: ".write(&mut input[len..]).unwrap().get();
+        len += DisplayHint::LowerHex
+            .write(&mut input[len..])
+            .unwrap()
+            .get();
+        len += 200.write(&mut input[len..]).unwrap().get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "lower hex: c8");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_display_hint_upper_hex() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(3).unwrap();
+
+        len += "upper hex: ".write(&mut input[len..]).unwrap().get();
+        len += DisplayHint::UpperHex
+            .write(&mut input[len..])
+            .unwrap()
+            .get();
+        len += 200.write(&mut input[len..]).unwrap().get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "upper hex: C8");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_display_hint_ipv4() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(3).unwrap();
+
+        len += "ipv4: ".write(&mut input[len..]).unwrap().get();
+        len += DisplayHint::Ip.write(&mut input[len..]).unwrap().get();
+        len += Ipv4Addr::new(10, 0, 0, 1)
+            .write(&mut input[len..])
+            .unwrap()
+            .get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "ipv4: 10.0.0.1");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_display_hint_ip_ipv4() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(3).unwrap();
+
+        len += "ipv4: ".write(&mut input[len..]).unwrap().get();
+        len += DisplayHint::Ip.write(&mut input[len..]).unwrap().get();
+        len += IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))
+            .write(&mut input[len..])
+            .unwrap()
+            .get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "ipv4: 10.0.0.1");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_display_hint_ipv4_u32() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(3).unwrap();
+
+        len += "ipv4: ".write(&mut input[len..]).unwrap().get();
+        len += DisplayHint::Ip.write(&mut input[len..]).unwrap().get();
+        // 10.0.0.1 as u32
+        len += 167772161u32.write(&mut input[len..]).unwrap().get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "ipv4: 10.0.0.1");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_display_hint_ipv6() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(3).unwrap();
+
+        len += "ipv6: ".write(&mut input[len..]).unwrap().get();
+        len += DisplayHint::Ip.write(&mut input[len..]).unwrap().get();
+        len += Ipv6Addr::new(
+            0x2001, 0x0db8, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0001,
+        )
+        .write(&mut input[len..])
+        .unwrap()
+        .get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "ipv6: 2001:db8::1:1");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_display_hint_ip_ipv6() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(3).unwrap();
+
+        len += "ipv6: ".write(&mut input[len..]).unwrap().get();
+        len += DisplayHint::Ip.write(&mut input[len..]).unwrap().get();
+        len += IpAddr::V6(Ipv6Addr::new(
+            0x2001, 0x0db8, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0001,
+        ))
+        .write(&mut input[len..])
+        .unwrap()
+        .get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "ipv6: 2001:db8::1:1");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_display_hint_ipv6_arr_u8_len_16() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(3).unwrap();
+
+        len += "ipv6: ".write(&mut input[len..]).unwrap().get();
+        len += DisplayHint::Ip.write(&mut input[len..]).unwrap().get();
+        // 2001:db8::1:1 as byte array
+        let ipv6_arr: [u8; 16] = [
+            0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+            0x00, 0x01,
+        ];
+        len += ipv6_arr.write(&mut input[len..]).unwrap().get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "ipv6: 2001:db8::1:1");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_display_hint_ipv6_arr_u16_len_8() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(3).unwrap();
+
+        len += "ipv6: ".write(&mut input[len..]).unwrap().get();
+        len += DisplayHint::Ip.write(&mut input[len..]).unwrap().get();
+        // 2001:db8::1:1 as u16 array
+        let ipv6_arr: [u16; 8] = [
+            0x2001, 0x0db8, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0001,
+        ];
+        len += ipv6_arr.write(&mut input[len..]).unwrap().get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "ipv6: 2001:db8::1:1");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_display_hint_lower_mac() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(3).unwrap();
+
+        len += "mac: ".write(&mut input[len..]).unwrap().get();
+        len += DisplayHint::LowerMac
+            .write(&mut input[len..])
+            .unwrap()
+            .get();
+        // 00:00:5e:00:53:af as byte array
+        let mac_arr: [u8; 6] = [0x00, 0x00, 0x5e, 0x00, 0x53, 0xaf];
+        len += mac_arr.write(&mut input[len..]).unwrap().get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "mac: 00:00:5e:00:53:af");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+
+    #[test]
+    fn test_display_hint_upper_mac() {
+        testing_logger::setup();
+        let (mut len, mut input) = new_log(3).unwrap();
+
+        len += "mac: ".write(&mut input[len..]).unwrap().get();
+        len += DisplayHint::UpperMac
+            .write(&mut input[len..])
+            .unwrap()
+            .get();
+        // 00:00:5E:00:53:AF as byte array
+        let mac_arr: [u8; 6] = [0x00, 0x00, 0x5e, 0x00, 0x53, 0xaf];
+        len += mac_arr.write(&mut input[len..]).unwrap().get();
+
+        _ = len;
+
+        let logger = logger();
+        let () = log_buf(&input, logger).unwrap();
+        testing_logger::validate(|captured_logs| {
+            assert_eq!(captured_logs.len(), 1);
+            assert_eq!(captured_logs[0].body, "mac: 00:00:5E:00:53:AF");
+            assert_eq!(captured_logs[0].level, Level::Info);
+        });
+    }
+}
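
Any type implementing the `log` crate's `Log` trait can be plugged into
`EbpfLogger::init_with_logger`. A minimal sketch with a hypothetical logger that
writes records to stderr (loading of the eBPF object is elided):

```rust
use aya_log::EbpfLogger;
use log::{Log, Metadata, Record};

// Hypothetical logger used only for illustration.
struct StderrLogger;

impl Log for StderrLogger {
    fn enabled(&self, _metadata: &Metadata) -> bool {
        true
    }

    fn log(&self, record: &Record) {
        eprintln!("[{}] {}", record.level(), record.args());
    }

    fn flush(&self) {}
}

// With `bpf` being a loaded `aya::Ebpf`:
// EbpfLogger::init_with_logger(&mut bpf, StderrLogger)?;
```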

+ 21 - 0
aya/Cargo.toml

@@ -0,0 +1,21 @@
+[package]
+name = "aya"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+thiserror = { workspace = true }
+aya-obj = { workspace = true }
+bitflags = { workspace = true }
+log = { workspace = true }
+lazy_static = { version = "1", default-features = false }
+object = { workspace = true, features = ["elf", "read_core", "std", "write"] }
+bytes = { workspace = true }
+assert_matches = { workspace = true }
+tokio = { workspace = true, features = ["rt"], optional = true }
+async-io = { workspace = true, optional = true }
+libc = { workspace = true }
+[features]
+default = ["async_tokio"]
+async_tokio = ["tokio/net"]
+async_std = ["dep:async-io"]

+ 968 - 0
aya/src/bpf.rs

@@ -0,0 +1,968 @@
+use core::{ffi::c_int, mem, slice};
+use std::{
+    borrow::{Cow, ToOwned},
+    collections::{HashMap, HashSet},
+    fs, io,
+    os::fd::{AsFd, AsRawFd, OwnedFd},
+    path::{Path, PathBuf},
+    string::String,
+    sync::Arc,
+    vec::Vec,
+};
+
+use aya_obj::{
+    btf::{Btf, BtfError, BtfFeatures, BtfRelocationError},
+    generated::{bpf_map_type::*, *},
+    maps::PinningType,
+    relocation::EbpfRelocationError,
+    EbpfSectionKind, Features, Object, ParseError, ProgramSection,
+};
+use log::{debug, info, warn};
+use thiserror::Error;
+
+use crate::{
+    maps::{Map, MapData, MapError},
+    programs::{
+        extension::Extension, kprobe::KProbe, probe::ProbeKind, Program, ProgramData, ProgramError,
+    },
+    sys::*,
+    util::{possible_cpus, POSSIBLE_CPUS},
+};
+
+pub(crate) const BPF_OBJ_NAME_LEN: usize = 16;
+
+pub(crate) const PERF_EVENT_IOC_ENABLE: c_int = AYA_PERF_EVENT_IOC_ENABLE;
+pub(crate) const PERF_EVENT_IOC_DISABLE: c_int = AYA_PERF_EVENT_IOC_DISABLE;
+pub(crate) const PERF_EVENT_IOC_SET_BPF: c_int = AYA_PERF_EVENT_IOC_SET_BPF;
+
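+// Detected once on first access and cached for the lifetime of the process
+// (via lazy_static).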
+lazy_static::lazy_static! {
+    pub(crate) static ref FEATURES: Features = detect_features();
+}
+
+fn detect_features() -> Features {
+    let btf = if is_btf_supported() {
+        Some(BtfFeatures::new(
+            is_btf_func_supported(),
+            is_btf_func_global_supported(),
+            is_btf_datasec_supported(),
+            is_btf_float_supported(),
+            is_btf_decl_tag_supported(),
+            is_btf_type_tag_supported(),
+            is_btf_enum64_supported(),
+        ))
+    } else {
+        None
+    };
+    let f = Features::new(
+        is_prog_name_supported(),
+        is_probe_read_kernel_supported(), // TODO: requires the bpf_probe_read_kernel helper in the kernel
+        false,
+        is_bpf_global_data_supported(),
+        is_bpf_cookie_supported(), // TODO: requires the bpf_get_attach_cookie helper in the kernel
+        is_prog_id_supported(BPF_MAP_TYPE_CPUMAP),
+        is_prog_id_supported(BPF_MAP_TYPE_DEVMAP),
+        btf,
+    );
+    info!("BPF Feature Detection: {:#?}", f);
+    f
+}
+
+/// Returns a reference to the detected BPF features.
+pub fn features() -> &'static Features {
+    &FEATURES
+}
+
+/// Builder style API for advanced loading of eBPF programs.
+///
+/// Loading eBPF code involves a few steps, including loading maps and applying
+/// relocations. You can use `EbpfLoader` to customize some of the loading
+/// options.
+///
+/// # Examples
+///
+/// ```no_run
+/// use aya::{EbpfLoader, Btf};
+/// use std::fs;
+///
+/// let bpf = EbpfLoader::new()
+///     // load the BTF data from /sys/kernel/btf/vmlinux
+///     .btf(Btf::from_sys_fs().ok().as_ref())
+///     // load pinned maps from /sys/fs/bpf/my-program
+///     .map_pin_path("/sys/fs/bpf/my-program")
+///     // finally load the code
+///     .load_file("file.o")?;
+/// # Ok::<(), aya::EbpfError>(())
+/// ```
+#[derive(Debug)]
+pub struct EbpfLoader<'a> {
+    btf: Option<Cow<'a, Btf>>,
+    map_pin_path: Option<PathBuf>,
+    globals: HashMap<&'a str, (&'a [u8], bool)>,
+    max_entries: HashMap<&'a str, u32>,
+    extensions: HashSet<&'a str>,
+    verifier_log_level: VerifierLogLevel,
+    allow_unsupported_maps: bool,
+}
+
+bitflags::bitflags! {
+    /// Used to set the verifier log level flags in [EbpfLoader](EbpfLoader::verifier_log_level()).
+    #[derive(Clone, Copy, Debug)]
+    pub struct VerifierLogLevel: u32 {
+        /// Sets no verifier logging.
+        const DISABLE = 0;
+        /// Enables debug verifier logging.
+        const DEBUG = 1;
+        /// Enables verbose verifier logging.
+        const VERBOSE = 2 | Self::DEBUG.bits();
+        /// Enables verifier stats.
+        const STATS = 4;
+    }
+}
+
+impl Default for VerifierLogLevel {
+    fn default() -> Self {
+        Self::DEBUG | Self::STATS
+    }
+}
+
+impl<'a> Default for EbpfLoader<'a> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<'a> EbpfLoader<'a> {
+    /// Creates a new loader instance.
+    pub fn new() -> Self {
+        Self {
+            btf: None,
+            map_pin_path: None,
+            globals: std::collections::HashMap::new(),
+            max_entries: HashMap::new(),
+            extensions: HashSet::new(),
+            verifier_log_level: VerifierLogLevel::default(),
+            allow_unsupported_maps: false,
+        }
+    }
+    /// Sets the target [BTF](Btf) info.
+    ///
+    /// The loader defaults to loading `BTF` info using [Btf::from_sys_fs].
+    /// Use this method if you want to load `BTF` from a custom location or
+    /// pass `None` to disable `BTF` relocations entirely.
+    /// # Example
+    ///
+    /// ```no_run
+    /// use aya::{EbpfLoader, Btf, Endianness};
+    ///
+    /// let bpf = EbpfLoader::new()
+    ///     // load the BTF data from a custom location
+    ///     .btf(Btf::parse_file("/custom_btf_file", Endianness::default()).ok().as_ref())
+    ///     .load_file("file.o")?;
+    ///
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    pub fn btf(&mut self, btf: Option<&'a Btf>) -> &mut Self {
+        self.btf = btf.map(Cow::Borrowed);
+        self
+    }
+
+    /// Allows programs containing unsupported maps to be loaded.
+    ///
+    /// By default programs containing unsupported maps will fail to load. This
+    /// method can be used to configure the loader so that unsupported maps will
+    /// be loaded, but won't be accessible from userspace. This can be useful
+    /// when unsupported maps are only accessed from eBPF code and don't
+    /// require any userspace interaction.
+    ///
+    /// # Example
+    ///
+    /// ```no_run
+    /// use aya::EbpfLoader;
+    ///
+    /// let bpf = EbpfLoader::new()
+    ///     .allow_unsupported_maps()
+    ///     .load_file("file.o")?;
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    ///
+    pub fn allow_unsupported_maps(&mut self) -> &mut Self {
+        self.allow_unsupported_maps = true;
+        self
+    }
+    /// Sets the base directory path for pinned maps.
+    ///
+    /// Pinned maps will be loaded from `path/MAP_NAME`.
+    /// The caller is responsible for ensuring the directory exists.
+    ///
+    /// # Example
+    ///
+    /// ```no_run
+    /// use aya::EbpfLoader;
+    ///
+    /// let bpf = EbpfLoader::new()
+    ///     .map_pin_path("/sys/fs/bpf/my-program")
+    ///     .load_file("file.o")?;
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    ///
+    pub fn map_pin_path<P: AsRef<Path>>(&mut self, path: P) -> &mut Self {
+        self.map_pin_path = Some(path.as_ref().to_owned());
+        self
+    }
+    /// Sets the value of a global variable.
+    ///
+    /// If the `must_exist` argument is `true`, [`EbpfLoader::load`] will fail with [`ParseError::SymbolNotFound`] if the loaded object code does not contain the variable.
+    ///
+    /// From Rust eBPF, a global variable can be defined as follows:
+    ///
+    /// ```no_run
+    /// #[no_mangle]
+    /// static VERSION: i32 = 0;
+    /// ```
+    ///
+    /// Then it can be accessed using `core::ptr::read_volatile`:
+    ///
+    /// ```no_run
+    /// # #[no_mangle]
+    /// # static VERSION: i32 = 0;
+    /// # unsafe fn try_test() {
+    /// let version = core::ptr::read_volatile(&VERSION);
+    /// # }
+    /// ```
+    ///
+    /// The type of a global variable must be `Pod` (plain old data), for instance `u8`, `u32` and
+    /// all other primitive types. You may use custom types as well, but you must ensure that those
+    /// types are `#[repr(C)]` and only contain other `Pod` types.
+    ///
+    /// From C eBPF, you would annotate a global variable as `volatile const`.
+    ///
+    /// # Example
+    ///
+    /// ```no_run
+    /// use aya::EbpfLoader;
+    ///
+    /// let bpf = EbpfLoader::new()
+    ///     .set_global("VERSION", &2, true)
+    ///     .set_global("PIDS", &[1234u16, 5678], true)
+    ///     .load_file("file.o")?;
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    ///
+    pub fn set_global<T: Into<GlobalData<'a>>>(
+        &mut self,
+        name: &'a str,
+        value: T,
+        must_exist: bool,
+    ) -> &mut Self {
+        self.globals.insert(name, (value.into().bytes, must_exist));
+        self
+    }
+
+    /// Sets the `max_entries` for the specified map.
+    ///
+    /// Overwrites the `max_entries` value of the map matching
+    /// the provided name before the map is created.
+    ///
+    /// # Example
+    ///
+    /// ```no_run
+    /// use aya::EbpfLoader;
+    ///
+    /// let bpf = EbpfLoader::new()
+    ///     .set_max_entries("map", 64)
+    ///     .load_file("file.o")?;
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    ///
+    pub fn set_max_entries(&mut self, name: &'a str, size: u32) -> &mut Self {
+        self.max_entries.insert(name, size);
+        self
+    }
+
+    /// Treats the provided program as an [`Extension`].
+    ///
+    /// When attempting to load the program with the provided `name`,
+    /// the program type is forced to be [`Extension`] and is not
+    /// inferred from the ELF section name.
+    ///
+    /// # Example
+    ///
+    /// ```no_run
+    /// use aya::EbpfLoader;
+    ///
+    /// let bpf = EbpfLoader::new()
+    ///     .extension("myfunc")
+    ///     .load_file("file.o")?;
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    ///
+    pub fn extension(&mut self, name: &'a str) -> &mut Self {
+        self.extensions.insert(name);
+        self
+    }
+
+    /// Sets BPF verifier log level.
+    ///
+    /// # Example
+    ///
+    /// ```no_run
+    /// use aya::{EbpfLoader, VerifierLogLevel};
+    ///
+    /// let bpf = EbpfLoader::new()
+    ///     .verifier_log_level(VerifierLogLevel::VERBOSE | VerifierLogLevel::STATS)
+    ///     .load_file("file.o")?;
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    ///
+    pub fn verifier_log_level(&mut self, level: VerifierLogLevel) -> &mut Self {
+        self.verifier_log_level = level;
+        self
+    }
+
+    /// Loads eBPF bytecode from a file.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use aya::EbpfLoader;
+    ///
+    /// let bpf = EbpfLoader::new().load_file("file.o")?;
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    pub fn load_file<P: AsRef<Path>>(&mut self, path: P) -> Result<Ebpf, EbpfError> {
+        let path = path.as_ref();
+        self.load(&fs::read(path).map_err(|error| EbpfError::FileError {
+            path: path.to_owned(),
+            error,
+        })?)
+    }
+
+    /// Loads eBPF bytecode from a buffer.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use aya::EbpfLoader;
+    /// use std::fs;
+    ///
+    /// let data = fs::read("file.o").unwrap();
+    /// let bpf = EbpfLoader::new().load(&data)?;
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    pub fn load(&mut self, data: &[u8]) -> Result<Ebpf, EbpfError> {
+        let Self {
+            btf,
+            map_pin_path: _,
+            globals,
+            max_entries,
+            extensions,
+            verifier_log_level,
+            allow_unsupported_maps,
+        } = self;
+        let mut obj = Object::parse(data)?;
+        obj.patch_map_data(globals.clone())?;
+        let btf_fd = if let Some(features) = &FEATURES.btf() {
+            if let Some(btf) = obj.fixup_and_sanitize_btf(features)? {
+                match load_btf(btf.to_bytes(), *verifier_log_level) {
+                    Ok(btf_fd) => Some(Arc::new(btf_fd)),
+                    // Only report an error here if the BTF is truly needed, otherwise proceed without.
+                    Err(err) => {
+                        for program in obj.programs.values() {
+                            match program.section {
+                                ProgramSection::Extension
+                                | ProgramSection::FEntry { sleepable: _ }
+                                | ProgramSection::FExit { sleepable: _ }
+                                | ProgramSection::Lsm { sleepable: _ }
+                                | ProgramSection::BtfTracePoint => {
+                                    return Err(EbpfError::BtfError(err))
+                                }
+                                ProgramSection::KRetProbe
+                                | ProgramSection::KProbe
+                                | ProgramSection::UProbe { sleepable: _ }
+                                | ProgramSection::URetProbe { sleepable: _ }
+                                | ProgramSection::TracePoint
+                                | ProgramSection::SocketFilter
+                                | ProgramSection::Xdp {
+                                    frags: _,
+                                    attach_type: _,
+                                }
+                                | ProgramSection::SkMsg
+                                | ProgramSection::SkSkbStreamParser
+                                | ProgramSection::SkSkbStreamVerdict
+                                | ProgramSection::SockOps
+                                | ProgramSection::SchedClassifier
+                                | ProgramSection::CgroupSkb
+                                | ProgramSection::CgroupSkbIngress
+                                | ProgramSection::CgroupSkbEgress
+                                | ProgramSection::CgroupSockAddr { attach_type: _ }
+                                | ProgramSection::CgroupSysctl
+                                | ProgramSection::CgroupSockopt { attach_type: _ }
+                                | ProgramSection::LircMode2
+                                | ProgramSection::PerfEvent
+                                | ProgramSection::RawTracePoint
+                                | ProgramSection::SkLookup
+                                | ProgramSection::CgroupSock { attach_type: _ }
+                                | ProgramSection::CgroupDevice => {}
+                            }
+                        }
+                        warn!("Object BTF couldn't be loaded in the kernel: {err}");
+                        None
+                    }
+                }
+            } else {
+                None
+            }
+        } else {
+            warn!("BTF is not supported in the kernel");
+            None
+        };
+
+        if let Some(btf) = &btf {
+            obj.relocate_btf(btf)?;
+        }
+        let mut maps = HashMap::new();
+
+        for (name, mut obj) in obj.maps.drain() {
+            if let (false, EbpfSectionKind::Bss | EbpfSectionKind::Data | EbpfSectionKind::Rodata) =
+                (FEATURES.bpf_global_data(), obj.section_kind())
+            {
+                continue;
+            }
+            let num_cpus = || -> Result<u32, EbpfError> {
+                Ok(possible_cpus()
+                    .map_err(|error| EbpfError::FileError {
+                        path: PathBuf::from(POSSIBLE_CPUS),
+                        error,
+                    })?
+                    .len() as u32)
+            };
+            let map_type: bpf_map_type = obj.map_type().try_into().map_err(MapError::from)?;
+
+            // if user provided a max_entries override, use that, otherwise use the value from the object
+            if let Some(max_entries) = max_entries_override(
+                map_type,
+                max_entries.get(name.as_str()).copied(),
+                || obj.max_entries(),
+                num_cpus,
+                || page_size() as u32,
+            )? {
+                debug!("Overriding max_entries for map {name} to {max_entries}");
+                obj.set_max_entries(max_entries)
+            }
+            match obj.map_type().try_into() {
+                Ok(BPF_MAP_TYPE_CPUMAP) => {
+                    obj.set_value_size(if FEATURES.cpumap_prog_id() { 8 } else { 4 })
+                }
+                Ok(BPF_MAP_TYPE_DEVMAP | BPF_MAP_TYPE_DEVMAP_HASH) => {
+                    obj.set_value_size(if FEATURES.devmap_prog_id() { 8 } else { 4 })
+                }
+                _ => (),
+            }
+            let btf_fd = btf_fd.as_deref().map(|fd| fd.as_fd());
+            let mut map = match obj.pinning() {
+                PinningType::None => MapData::create(obj, &name, btf_fd)?,
+                PinningType::ByName => {
+                    // pin maps in /sys/fs/bpf by default to align with libbpf
+                    // behavior https://github.com/libbpf/libbpf/blob/v1.2.2/src/libbpf.c#L2161.
+                    // let path = map_pin_path
+                    //     .as_deref()
+                    //     .unwrap_or_else(|| Path::new("/sys/fs/bpf"));
+                    //
+                    // MapData::create_pinned_by_name(path, obj, &name, btf_fd)?
+                    unimplemented!(
+                        "pin maps in /sys/fs/bpf by default to align with libbpf behavior"
+                    );
+                }
+            };
+            map.finalize()?;
+            maps.insert(name, map);
+        }
+        let text_sections = obj
+            .functions
+            .keys()
+            .map(|(section_index, _)| *section_index)
+            .collect();
+
+        maps.iter()
+            .map(|(s, data)| (s.as_str(), data.fd().as_fd().as_raw_fd(), data.obj()))
+            .for_each(|(s, fd, obj)| {
+                let x = obj.section_index();
+                info!("section {s} fd {fd} section_index {x}");
+            });
+
+        obj.relocate_maps(
+            maps.iter()
+                .map(|(s, data)| (s.as_str(), data.fd().as_fd().as_raw_fd() as _, data.obj())),
+            &text_sections,
+        )?;
+
+        obj.relocate_calls(&text_sections)?;
+        obj.sanitize_functions(&FEATURES);
+
+        let programs = obj
+            .programs
+            .drain()
+            .map(|(name, prog_obj)| {
+                let function_obj = obj.functions.get(&prog_obj.function_key()).unwrap().clone();
+
+                let prog_name = if FEATURES.bpf_name() {
+                    Some(name.clone())
+                } else {
+                    None
+                };
+                let section = prog_obj.section.clone();
+                let obj = (prog_obj, function_obj);
+
+                let btf_fd = btf_fd.clone();
+                let program = if extensions.contains(name.as_str()) {
+                    Program::Extension(Extension {
+                        data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
+                    })
+                } else {
+                    match &section {
+                        ProgramSection::KProbe => Program::KProbe(KProbe {
+                            data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
+                            kind: ProbeKind::KProbe,
+                        }),
+                        _ => {
+                            unimplemented!()
+                        }
+                    }
+                };
+                (name, program)
+            })
+            .collect();
+
+        let maps = maps
+            .drain()
+            .map(parse_map)
+            .collect::<Result<HashMap<String, Map>, EbpfError>>()?;
+        if !*allow_unsupported_maps {
+            maps.iter().try_for_each(|(_, x)| match x {
+                Map::Unsupported(map) => Err(EbpfError::MapError(MapError::Unsupported {
+                    map_type: map.obj().map_type(),
+                })),
+                _ => Ok(()),
+            })?;
+        };
+        Ok(Ebpf { maps, programs })
+    }
+}
+
+fn parse_map(data: (String, MapData)) -> Result<(String, Map), EbpfError> {
+    let (name, map) = data;
+    let map_type = bpf_map_type::try_from(map.obj().map_type()).map_err(MapError::from)?;
+    let map = match map_type {
+        BPF_MAP_TYPE_ARRAY => Map::Array(map),
+        BPF_MAP_TYPE_PERCPU_ARRAY => Map::PerCpuArray(map),
+        BPF_MAP_TYPE_PROG_ARRAY => Map::ProgramArray(map),
+        BPF_MAP_TYPE_HASH => Map::HashMap(map),
+        BPF_MAP_TYPE_LRU_HASH => Map::LruHashMap(map),
+        BPF_MAP_TYPE_PERCPU_HASH => Map::PerCpuHashMap(map),
+        BPF_MAP_TYPE_LRU_PERCPU_HASH => Map::PerCpuLruHashMap(map),
+        BPF_MAP_TYPE_PERF_EVENT_ARRAY => Map::PerfEventArray(map),
+        BPF_MAP_TYPE_RINGBUF => Map::RingBuf(map),
+        BPF_MAP_TYPE_SOCKHASH => Map::SockHash(map),
+        BPF_MAP_TYPE_SOCKMAP => Map::SockMap(map),
+        BPF_MAP_TYPE_BLOOM_FILTER => Map::BloomFilter(map),
+        BPF_MAP_TYPE_LPM_TRIE => Map::LpmTrie(map),
+        BPF_MAP_TYPE_STACK => Map::Stack(map),
+        BPF_MAP_TYPE_STACK_TRACE => Map::StackTraceMap(map),
+        BPF_MAP_TYPE_QUEUE => Map::Queue(map),
+        BPF_MAP_TYPE_CPUMAP => Map::CpuMap(map),
+        BPF_MAP_TYPE_DEVMAP => Map::DevMap(map),
+        BPF_MAP_TYPE_DEVMAP_HASH => Map::DevMapHash(map),
+        BPF_MAP_TYPE_XSKMAP => Map::XskMap(map),
+        m => {
+            warn!("The map {name} is of type {:#?} which is currently unsupported in Aya, use `allow_unsupported_maps()` to load it anyways", m);
+            Map::Unsupported(map)
+        }
+    };
+
+    Ok((name, map))
+}
+
+/// Computes the value which should be used to override the max_entries value of the map
+/// based on the user-provided override and the rules for that map type.
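+///
+/// For example (illustrative): a `BPF_MAP_TYPE_PERF_EVENT_ARRAY` whose object declares
+/// `max_entries = 0` and has no user override ends up sized to the number of possible
+/// CPUs, while for most other map types the user override (if any) is returned unchanged.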
+fn max_entries_override(
+    map_type: bpf_map_type,
+    user_override: Option<u32>,
+    current_value: impl Fn() -> u32,
+    num_cpus: impl Fn() -> Result<u32, EbpfError>,
+    page_size: impl Fn() -> u32,
+) -> Result<Option<u32>, EbpfError> {
+    let max_entries = || user_override.unwrap_or_else(&current_value);
+    Ok(match map_type {
+        BPF_MAP_TYPE_PERF_EVENT_ARRAY if max_entries() == 0 => Some(num_cpus()?),
+        BPF_MAP_TYPE_RINGBUF => Some(adjust_to_page_size(max_entries(), page_size()))
+            .filter(|adjusted| *adjusted != max_entries())
+            .or(user_override),
+        _ => user_override,
+    })
+}
+
+// Adjusts the byte size of a RingBuf map to match a power-of-two multiple of the page size.
+//
+// This mirrors the logic used by libbpf.
+// See https://github.com/libbpf/libbpf/blob/ec6f716eda43/src/libbpf.c#L2461-L2463
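+//
+// As an illustrative example (assuming a 4096-byte page size): a requested size of
+// 5000 bytes needs ceil(5000 / 4096) = 2 pages, 2 is already a power of two, and so
+// the adjusted size is 2 * 4096 = 8192 bytes.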
+fn adjust_to_page_size(byte_size: u32, page_size: u32) -> u32 {
+    // If the byte_size is zero, return zero and let the verifier reject the map
+    // when it is loaded. This is the behavior of libbpf.
+    if byte_size == 0 {
+        return 0;
+    }
+    // TODO: Replace with primitive method when int_roundings (https://github.com/rust-lang/rust/issues/88581)
+    // is stabilized.
+    fn div_ceil(n: u32, rhs: u32) -> u32 {
+        let d = n / rhs;
+        let r = n % rhs;
+        if r > 0 && rhs > 0 {
+            d + 1
+        } else {
+            d
+        }
+    }
+    let pages_needed = div_ceil(byte_size, page_size);
+    page_size * pages_needed.next_power_of_two()
+}
+
+/// Try loading the BTF data into the kernel.
+///
+/// The kernel writes error messages to the provided logger; the caller should provide
+/// enough capacity to store them.
+fn load_btf(raw_btf: Vec<u8>, verifier_log_level: VerifierLogLevel) -> Result<OwnedFd, BtfError> {
+    let (ret, verifier_log) = retry_with_verifier_logs(10, |logger| {
+        bpf_load_btf(raw_btf.as_slice(), logger, verifier_log_level)
+    });
+    ret.map_err(|(_, io_error)| BtfError::LoadError {
+        io_error,
+        verifier_log,
+    })
+}
+/// The main entry point into the library, used to work with eBPF programs and maps.
+#[derive(Debug)]
+pub struct Ebpf {
+    maps: HashMap<String, Map>,
+    programs: HashMap<String, Program>,
+}
+
+impl Ebpf {
+    /// Loads eBPF bytecode from a file.
+    ///
+    /// Parses the given object code file and initializes the [maps](crate::maps) defined in it. If
+    /// the kernel supports [BTF](Btf) debug info, it is automatically loaded from
+    /// `/sys/kernel/btf/vmlinux`.
+    ///
+    /// For more loading options, see [EbpfLoader].
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use aya::Ebpf;
+    ///
+    /// let bpf = Ebpf::load_file("file.o")?;
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    pub fn load_file<P: AsRef<Path>>(path: P) -> Result<Self, EbpfError> {
+        // EbpfLoader::new()
+        // .btf(Btf::from_sys_fs().ok().as_ref())
+        // .load_file(path)
+        unimplemented!()
+    }
+
+    /// Loads eBPF bytecode from a buffer.
+    ///
+    /// Parses the object code contained in `data` and initializes the
+    /// [maps](crate::maps) defined in it. If the kernel supports [BTF](Btf)
+    /// debug info, it is automatically loaded from `/sys/kernel/btf/vmlinux`.
+    ///
+    /// For more loading options, see [EbpfLoader].
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use aya::{Ebpf, Btf};
+    /// use std::fs;
+    ///
+    /// let data = fs::read("file.o").unwrap();
+    /// // load the BTF data from /sys/kernel/btf/vmlinux
+    /// let bpf = Ebpf::load(&data)?;
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    pub fn load(data: &[u8]) -> Result<Self, EbpfError> {
+        EbpfLoader::new()
+            // .btf(Btf::from_sys_fs().ok().as_ref())
+            .load(data)
+    }
+    /// Returns a reference to the map with the given name.
+    ///
+    /// The returned type is mostly opaque. In order to do anything useful with it you need to
+    /// convert it to a [typed map](crate::maps).
+    ///
+    /// For more details and examples on maps and their usage, see the [maps module
+    /// documentation][crate::maps].
+    pub fn map(&self, name: &str) -> Option<&Map> {
+        self.maps.get(name)
+    }
+
+    /// Returns a mutable reference to the map with the given name.
+    ///
+    /// The returned type is mostly opaque. In order to do anything useful with it you need to
+    /// convert it to a [typed map](crate::maps).
+    ///
+    /// For more details and examples on maps and their usage, see the [maps module
+    /// documentation][crate::maps].
+    pub fn map_mut(&mut self, name: &str) -> Option<&mut Map> {
+        self.maps.get_mut(name)
+    }
+
+    /// Takes ownership of a map with the given name.
+    ///
+    /// Use this when borrowing with [`map`](crate::Ebpf::map) or [`map_mut`](crate::Ebpf::map_mut)
+    /// is not possible (e.g. when using the map from an async task). The returned
+    /// map will be closed on `Drop`, therefore the caller is responsible for
+    /// managing its lifetime.
+    ///
+    /// The returned type is mostly opaque. In order to do anything useful with it you need to
+    /// convert it to a [typed map](crate::maps).
+    ///
+    /// For more details and examples on maps and their usage, see the [maps module
+    /// documentation][crate::maps].
+    pub fn take_map(&mut self, name: &str) -> Option<Map> {
+        self.maps.remove(name)
+    }
+
+    /// An iterator over all the maps.
+    ///
+    /// # Examples
+    /// ```no_run
+    /// # let mut bpf = aya::Ebpf::load(&[])?;
+    /// for (name, map) in bpf.maps() {
+    ///     println!(
+    ///         "found map `{}`",
+    ///         name,
+    ///     );
+    /// }
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    pub fn maps(&self) -> impl Iterator<Item = (&str, &Map)> {
+        self.maps.iter().map(|(name, map)| (name.as_str(), map))
+    }
+
+    /// A mutable iterator over all the maps.
+    ///
+    /// # Examples
+    /// ```no_run
+    /// # use std::path::Path;
+    /// # #[derive(thiserror::Error, Debug)]
+    /// # enum Error {
+    /// #     #[error(transparent)]
+    /// #     Ebpf(#[from] aya::EbpfError),
+    /// #     #[error(transparent)]
+    /// #     Pin(#[from] aya::pin::PinError)
+    /// # }
+    /// # let mut bpf = aya::Ebpf::load(&[])?;
+    /// # let pin_path = Path::new("/tmp/pin_path");
+    /// for (_, map) in bpf.maps_mut() {
+    ///     map.pin(pin_path)?;
+    /// }
+    /// # Ok::<(), Error>(())
+    /// ```
+    pub fn maps_mut(&mut self) -> impl Iterator<Item = (&str, &mut Map)> {
+        self.maps.iter_mut().map(|(name, map)| (name.as_str(), map))
+    }
+    /// Returns a reference to the program with the given name.
+    ///
+    /// You can use this to inspect a program and its properties. To load and attach a program, use
+    /// [program_mut](Self::program_mut) instead.
+    ///
+    /// For more details on programs and their usage, see the [programs module
+    /// documentation](crate::programs).
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # let bpf = aya::Ebpf::load(&[])?;
+    /// let program = bpf.program("SSL_read").unwrap();
+    /// println!("program SSL_read is of type {:?}", program.prog_type());
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    pub fn program(&self, name: &str) -> Option<&Program> {
+        self.programs.get(name)
+    }
+
+    /// Returns a mutable reference to the program with the given name.
+    ///
+    /// Used to get a program before loading and attaching it. For more details on programs and
+    /// their usage, see the [programs module documentation](crate::programs).
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # let mut bpf = aya::Ebpf::load(&[])?;
+    /// use aya::programs::UProbe;
+    ///
+    /// let program: &mut UProbe = bpf.program_mut("SSL_read").unwrap().try_into()?;
+    /// program.load()?;
+    /// program.attach(Some("SSL_read"), 0, "libssl", None)?;
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    pub fn program_mut(&mut self, name: &str) -> Option<&mut Program> {
+        self.programs.get_mut(name)
+    }
+
+    /// An iterator over all the programs.
+    ///
+    /// # Examples
+    /// ```no_run
+    /// # let bpf = aya::Ebpf::load(&[])?;
+    /// for (name, program) in bpf.programs() {
+    ///     println!(
+    ///         "found program `{}` of type `{:?}`",
+    ///         name,
+    ///         program.prog_type()
+    ///     );
+    /// }
+    /// # Ok::<(), aya::EbpfError>(())
+    /// ```
+    pub fn programs(&self) -> impl Iterator<Item = (&str, &Program)> {
+        self.programs.iter().map(|(s, p)| (s.as_str(), p))
+    }
+
+    /// An iterator mutably referencing all of the programs.
+    ///
+    /// # Examples
+    /// ```no_run
+    /// # use std::path::Path;
+    /// # #[derive(thiserror::Error, Debug)]
+    /// # enum Error {
+    /// #     #[error(transparent)]
+    /// #     Ebpf(#[from] aya::EbpfError),
+    /// #     #[error(transparent)]
+    /// #     Pin(#[from] aya::pin::PinError)
+    /// # }
+    /// # let mut bpf = aya::Ebpf::load(&[])?;
+    /// # let pin_path = Path::new("/tmp/pin_path");
+    /// for (_, program) in bpf.programs_mut() {
+    ///     program.pin(pin_path)?;
+    /// }
+    /// # Ok::<(), Error>(())
+    /// ```
+    pub fn programs_mut(&mut self) -> impl Iterator<Item = (&str, &mut Program)> {
+        self.programs.iter_mut().map(|(s, p)| (s.as_str(), p))
+    }
+}
+
+/// The error type returned by [`Ebpf::load_file`] and [`Ebpf::load`].
+#[derive(Debug, Error)]
+pub enum EbpfError {
+    /// Error loading file
+    #[error("error loading {path}")]
+    FileError {
+        /// The file path
+        path: PathBuf,
+        #[source]
+        /// The original io::Error
+        error: io::Error,
+    },
+
+    /// Unexpected pinning type
+    #[error("unexpected pinning type {name}")]
+    UnexpectedPinningType {
+        /// The value encountered
+        name: u32,
+    },
+
+    /// Error parsing BPF object
+    #[error("error parsing BPF object: {0}")]
+    ParseError(#[from] ParseError),
+
+    /// Error parsing BTF object
+    #[error("BTF error: {0}")]
+    BtfError(#[from] BtfError),
+
+    /// Error performing relocations
+    #[error("error relocating function")]
+    RelocationError(#[from] EbpfRelocationError),
+
+    /// Error performing relocations
+    #[error("error relocating section")]
+    BtfRelocationError(#[from] BtfRelocationError),
+
+    /// No BTF parsed for object
+    #[error("no BTF parsed for object")]
+    NoBTF,
+
+    #[error("map error: {0}")]
+    /// A map error
+    MapError(#[from] MapError),
+
+    #[error("program error: {0}")]
+    /// A program error
+    ProgramError(#[from] ProgramError),
+}
+
+/// Marker trait for types that can safely be converted to and from byte slices.
+///
+/// # Safety
+/// This trait is unsafe because it is up to the implementor to ensure that the type is safe to
+/// convert to and from a byte slice. The implementor must ensure that the type is a valid
+/// representation of the data in the byte slice.
+pub unsafe trait Pod: Copy + 'static {}
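+//
+// As a sketch (illustrative; `PacketMeta` is not part of this crate), a user-defined
+// `#[repr(C)]` struct made up only of other `Pod` fields can opt in with an
+// `unsafe impl`, taking responsibility for the safety contract above:
+//
+//     #[repr(C)]
+//     #[derive(Clone, Copy)]
+//     struct PacketMeta { port: u16, proto: u8, _pad: u8 }
+//     unsafe impl Pod for PacketMeta {}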
+
+macro_rules! unsafe_impl_pod {
+    ($($struct_name:ident),+ $(,)?) => {
+        $(
+            unsafe impl Pod for $struct_name { }
+        )+
+    }
+}
+
+unsafe_impl_pod!(i8, u8, i16, u16, i32, u32, i64, u64, u128, i128);
+
+// It only makes sense that an array of POD types is itself POD
+unsafe impl<T: Pod, const N: usize> Pod for [T; N] {}
+/// Global data that can be exported to eBPF programs before they are loaded.
+///
+/// Valid global data includes `Pod` types and slices of `Pod` types. See also
+/// [EbpfLoader::set_global].
+pub struct GlobalData<'a> {
+    bytes: &'a [u8],
+}
+
+impl<'a, T: Pod> From<&'a [T]> for GlobalData<'a> {
+    fn from(s: &'a [T]) -> Self {
+        GlobalData {
+            bytes: bytes_of_slice(s),
+        }
+    }
+}
+
+impl<'a, T: Pod> From<&'a T> for GlobalData<'a> {
+    fn from(v: &'a T) -> Self {
+        GlobalData {
+            // Safety: v is Pod
+            bytes: unsafe { bytes_of(v) },
+        }
+    }
+}
+
+pub(crate) fn page_size() -> usize {
+    // TODO: query the real page size (e.g. via `libc::sysconf`); a 4 KiB page
+    // size is assumed for now.
+    4096
+}
+
+// bytes_of reinterprets a reference to a `T` as a byte slice
+pub(crate) unsafe fn bytes_of<T: Pod>(val: &T) -> &[u8] {
+    let size = mem::size_of::<T>();
+    slice::from_raw_parts(slice::from_ref(val).as_ptr().cast(), size)
+}
+
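+// For instance (a sketch; little-endian layout assumed), `bytes_of_slice(&[1u16, 2u16])`
+// yields the 4-byte slice `[1, 0, 2, 0]`.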
+pub(crate) fn bytes_of_slice<T: Pod>(val: &[T]) -> &[u8] {
+    let size = val.len().wrapping_mul(mem::size_of::<T>());
+    // Safety:
+    // Any alignment is allowed.
+    // The size is determined in this function.
+    // The Pod trait ensures the type is valid to cast to bytes.
+    unsafe { slice::from_raw_parts(val.as_ptr().cast(), size) }
+}

+ 96 - 0
aya/src/lib.rs

@@ -0,0 +1,96 @@
+#![allow(unused)]
+
+#[macro_use]
+extern crate log;
+
+mod bpf;
+pub mod maps;
+pub mod pin;
+pub mod programs;
+mod sys;
+pub mod util;
+
+use std::os::fd::{AsFd, BorrowedFd, OwnedFd};
+
+use aya_obj as obj;
+pub use bpf::*;
+pub use obj::btf::{Btf, BtfError};
+pub use object::Endianness;
+pub use programs::loaded_programs;
+// See https://github.com/rust-lang/rust/pull/124210; this structure exists to avoid crashing the
+// process when we try to close a fake file descriptor.
+#[derive(Debug)]
+struct MockableFd {
+    #[cfg(not(test))]
+    fd: OwnedFd,
+    #[cfg(test)]
+    fd: Option<OwnedFd>,
+}
+
+impl MockableFd {
+    #[cfg(test)]
+    const fn mock_signed_fd() -> i32 {
+        1337
+    }
+
+    #[cfg(test)]
+    const fn mock_unsigned_fd() -> u32 {
+        1337
+    }
+
+    #[cfg(not(test))]
+    fn from_fd(fd: OwnedFd) -> Self {
+        Self { fd }
+    }
+
+    #[cfg(test)]
+    fn from_fd(fd: OwnedFd) -> Self {
+        Self { fd: Some(fd) }
+    }
+
+    #[cfg(not(test))]
+    fn try_clone(&self) -> std::io::Result<Self> {
+        let Self { fd } = self;
+        let fd = fd.try_clone()?;
+        Ok(Self { fd })
+    }
+
+    #[cfg(test)]
+    fn try_clone(&self) -> std::io::Result<Self> {
+        let Self { fd } = self;
+        let fd = fd.as_ref().map(OwnedFd::try_clone).transpose()?;
+        Ok(Self { fd })
+    }
+}
+
+impl AsFd for MockableFd {
+    #[cfg(not(test))]
+    fn as_fd(&self) -> BorrowedFd<'_> {
+        let Self { fd } = self;
+        fd.as_fd()
+    }
+
+    #[cfg(test)]
+    fn as_fd(&self) -> BorrowedFd<'_> {
+        let Self { fd } = self;
+        fd.as_ref().unwrap().as_fd()
+    }
+}
+
+impl Drop for MockableFd {
+    #[cfg(not(test))]
+    fn drop(&mut self) {
+        // Intentional no-op.
+    }
+
+    #[cfg(test)]
+    fn drop(&mut self) {
+        use std::os::fd::AsRawFd as _;
+
+        let Self { fd } = self;
+        if fd.as_ref().unwrap().as_raw_fd() >= Self::mock_signed_fd() {
+            let fd: OwnedFd = fd.take().unwrap();
+            std::mem::forget(fd)
+        }
+    }
+}

+ 613 - 0
aya/src/maps/mod.rs

@@ -0,0 +1,613 @@
+pub mod perf;
+
+use core::mem;
+use std::{
+    ffi::CString,
+    fmt, io,
+    os::fd::{AsFd, BorrowedFd, OwnedFd},
+    path::Path,
+};
+
+use aya_obj::{
+    generated::bpf_map_info,
+    maps,
+    maps::{InvalidMapTypeError, PinningType},
+    parse_map_info, EbpfSectionKind,
+};
+use libc::{getrlimit, rlim_t, rlimit, RLIMIT_MEMLOCK, RLIM_INFINITY};
+pub use perf::PerfEventArray;
+use thiserror::Error;
+
+#[cfg(any(feature = "async_tokio", feature = "async_std"))]
+use crate::maps::perf::AsyncPerfEventArray;
+use crate::{
+    pin::PinError,
+    sys::{
+        bpf_create_map, bpf_get_object, bpf_map_freeze, bpf_map_get_fd_by_id,
+        bpf_map_get_info_by_fd, bpf_map_update_elem_ptr, bpf_pin_object, iter_map_ids,
+        SyscallError,
+    },
+    util::{bytes_of_bpf_name, KernelVersion},
+};
+
+#[derive(Error, Debug)]
+/// Errors occurring when working with maps
+pub enum MapError {
+    /// Invalid map type encountered
+    #[error("invalid map type {map_type}")]
+    InvalidMapType {
+        /// The map type
+        map_type: u32,
+    },
+
+    /// Invalid map name encountered
+    #[error("invalid map name `{name}`")]
+    InvalidName {
+        /// The map name
+        name: String,
+    },
+
+    /// Failed to create map
+    #[error("failed to create map `{name}` with code {code}")]
+    CreateError {
+        /// Map name
+        name: String,
+        /// Error code
+        code: i64,
+        #[source]
+        /// Original io::Error
+        io_error: io::Error,
+    },
+
+    /// Invalid key size
+    #[error("invalid key size {size}, expected {expected}")]
+    InvalidKeySize {
+        /// Size encountered
+        size: usize,
+        /// Size expected
+        expected: usize,
+    },
+
+    /// Invalid value size
+    #[error("invalid value size {size}, expected {expected}")]
+    InvalidValueSize {
+        /// Size encountered
+        size: usize,
+        /// Size expected
+        expected: usize,
+    },
+
+    /// Index is out of bounds
+    #[error("the index is {index} but `max_entries` is {max_entries}")]
+    OutOfBounds {
+        /// Index accessed
+        index: u32,
+        /// Map size
+        max_entries: u32,
+    },
+
+    /// Key not found
+    #[error("key not found")]
+    KeyNotFound,
+
+    /// Element not found
+    #[error("element not found")]
+    ElementNotFound,
+
+    /// Program not loaded
+    #[error("the program is not loaded")]
+    ProgramNotLoaded,
+
+    /// Syscall failed
+    #[error(transparent)]
+    SyscallError(#[from] SyscallError),
+
+    /// Could not pin map
+    #[error("map `{name:?}` requested pinning. pinning failed")]
+    PinError {
+        /// The map name
+        name: Option<String>,
+        /// The reason for the failure
+        #[source]
+        error: PinError,
+    },
+
+    /// Program IDs are not supported
+    #[error("program ids are not supported by the current kernel")]
+    ProgIdNotSupported,
+
+    /// Unsupported map type
+    #[error("Unsupported map type found {map_type}")]
+    Unsupported {
+        /// The map type
+        map_type: u32,
+    },
+}
+
+// Note that this is not just derived using #[from] because InvalidMapTypeError cannot implement
+// Error due to the fact that aya-obj is no_std and error_in_core is not stabilized
+// (https://github.com/rust-lang/rust/issues/103765).
+impl From<InvalidMapTypeError> for MapError {
+    fn from(e: InvalidMapTypeError) -> Self {
+        let InvalidMapTypeError { map_type } = e;
+        Self::InvalidMapType { map_type }
+    }
+}
+
+/// A map file descriptor.
+#[derive(Debug)]
+pub struct MapFd {
+    fd: crate::MockableFd,
+}
+
+impl MapFd {
+    fn from_fd(fd: OwnedFd) -> Self {
+        let fd = crate::MockableFd::from_fd(fd);
+        Self { fd }
+    }
+
+    fn try_clone(&self) -> io::Result<Self> {
+        let Self { fd } = self;
+        let fd = fd.try_clone()?;
+        Ok(Self { fd })
+    }
+}
+
+impl AsFd for MapFd {
+    fn as_fd(&self) -> BorrowedFd<'_> {
+        let Self { fd } = self;
+        fd.as_fd()
+    }
+}
+
+/// eBPF map types.
+#[derive(Debug)]
+pub enum Map {
+    /// An [`Array`] map.
+    Array(MapData),
+    /// A [`BloomFilter`] map.
+    BloomFilter(MapData),
+    /// A [`CpuMap`] map.
+    CpuMap(MapData),
+    /// A [`DevMap`] map.
+    DevMap(MapData),
+    /// A [`DevMapHash`] map.
+    DevMapHash(MapData),
+    /// A [`HashMap`] map.
+    HashMap(MapData),
+    /// A [`LpmTrie`] map.
+    LpmTrie(MapData),
+    /// A [`HashMap`] map that uses a LRU eviction policy.
+    LruHashMap(MapData),
+    /// A [`PerCpuArray`] map.
+    PerCpuArray(MapData),
+    /// A [`PerCpuHashMap`] map.
+    PerCpuHashMap(MapData),
+    /// A [`PerCpuHashMap`] map that uses a LRU eviction policy.
+    PerCpuLruHashMap(MapData),
+    /// A [`PerfEventArray`] map.
+    PerfEventArray(MapData),
+    /// A [`ProgramArray`] map.
+    ProgramArray(MapData),
+    /// A [`Queue`] map.
+    Queue(MapData),
+    /// A [`RingBuf`] map.
+    RingBuf(MapData),
+    /// A [`SockHash`] map
+    SockHash(MapData),
+    /// A [`SockMap`] map.
+    SockMap(MapData),
+    /// A [`Stack`] map.
+    Stack(MapData),
+    /// A [`StackTraceMap`] map.
+    StackTraceMap(MapData),
+    /// An unsupported map type.
+    Unsupported(MapData),
+    /// A [`XskMap`] map.
+    XskMap(MapData),
+}
+impl Map {
+    /// Returns the low level map type.
+    fn map_type(&self) -> u32 {
+        match self {
+            Self::Array(map) => map.obj.map_type(),
+            Self::BloomFilter(map) => map.obj.map_type(),
+            Self::CpuMap(map) => map.obj.map_type(),
+            Self::DevMap(map) => map.obj.map_type(),
+            Self::DevMapHash(map) => map.obj.map_type(),
+            Self::HashMap(map) => map.obj.map_type(),
+            Self::LpmTrie(map) => map.obj.map_type(),
+            Self::LruHashMap(map) => map.obj.map_type(),
+            Self::PerCpuArray(map) => map.obj.map_type(),
+            Self::PerCpuHashMap(map) => map.obj.map_type(),
+            Self::PerCpuLruHashMap(map) => map.obj.map_type(),
+            Self::PerfEventArray(map) => map.obj.map_type(),
+            Self::ProgramArray(map) => map.obj.map_type(),
+            Self::Queue(map) => map.obj.map_type(),
+            Self::RingBuf(map) => map.obj.map_type(),
+            Self::SockHash(map) => map.obj.map_type(),
+            Self::SockMap(map) => map.obj.map_type(),
+            Self::Stack(map) => map.obj.map_type(),
+            Self::StackTraceMap(map) => map.obj.map_type(),
+            Self::Unsupported(map) => map.obj.map_type(),
+            Self::XskMap(map) => map.obj.map_type(),
+        }
+    }
+    /// Pins the map to a BPF filesystem.
+    ///
+    /// When a map is pinned it will remain loaded until the corresponding file
+    /// is deleted. All parent directories in the given `path` must already exist.
+    pub fn pin<P: AsRef<Path>>(&self, path: P) -> Result<(), PinError> {
+        match self {
+            Self::Array(map) => map.pin(path),
+            Self::BloomFilter(map) => map.pin(path),
+            Self::CpuMap(map) => map.pin(path),
+            Self::DevMap(map) => map.pin(path),
+            Self::DevMapHash(map) => map.pin(path),
+            Self::HashMap(map) => map.pin(path),
+            Self::LpmTrie(map) => map.pin(path),
+            Self::LruHashMap(map) => map.pin(path),
+            Self::PerCpuArray(map) => map.pin(path),
+            Self::PerCpuHashMap(map) => map.pin(path),
+            Self::PerCpuLruHashMap(map) => map.pin(path),
+            Self::PerfEventArray(map) => map.pin(path),
+            Self::ProgramArray(map) => map.pin(path),
+            Self::Queue(map) => map.pin(path),
+            Self::RingBuf(map) => map.pin(path),
+            Self::SockHash(map) => map.pin(path),
+            Self::SockMap(map) => map.pin(path),
+            Self::Stack(map) => map.pin(path),
+            Self::StackTraceMap(map) => map.pin(path),
+            Self::Unsupported(map) => map.pin(path),
+            Self::XskMap(map) => map.pin(path),
+        }
+    }
+}
+/// A generic handle to a BPF map.
+///
+/// You should never need to use this unless you're implementing a new map type.
+#[derive(Debug)]
+pub struct MapData {
+    obj: maps::Map,
+    fd: MapFd,
+}
+
+impl MapData {
+    /// Creates a new map with the provided `name`.
+    pub fn create(
+        obj: maps::Map,
+        name: &str,
+        btf_fd: Option<BorrowedFd<'_>>,
+    ) -> Result<Self, MapError> {
+        let c_name = CString::new(name).map_err(|_| MapError::InvalidName { name: name.into() })?;
+
+        // TODO: outside of tests this should use the real kernel version:
+        // #[cfg(not(test))]
+        // let kernel_version = KernelVersion::current().unwrap();
+        // For now a fake version is always passed, as in the #[cfg(test)] build.
+        let kernel_version = KernelVersion::new(0xff, 0xff, 0xff);
+        let fd =
+            bpf_create_map(&c_name, &obj, btf_fd, kernel_version).map_err(|(code, io_error)| {
+                if kernel_version < KernelVersion::new(5, 11, 0) {
+                    maybe_warn_rlimit();
+                }
+
+                MapError::CreateError {
+                    name: name.into(),
+                    code,
+                    io_error,
+                }
+            })?;
+        log::info!("created map with fd: {:?}", fd);
+        Ok(Self {
+            obj,
+            fd: MapFd::from_fd(fd),
+        })
+    }
+
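+    /// Writes the initial contents of data sections (`.data`/`.rodata`) into the map
+    /// and freezes read-only (`.rodata`) maps so their contents can no longer change.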
+    pub(crate) fn finalize(&mut self) -> Result<(), MapError> {
+        let Self { obj, fd } = self;
+        if !obj.data().is_empty() && obj.section_kind() != EbpfSectionKind::Bss {
+            log::error!(
+                "map data is not empty, but section kind is not BSS, {:?}",
+                obj.section_kind()
+            );
+            let data = obj.data();
+            let value = u64::from_le_bytes(data[0..8].try_into().unwrap());
+            log::error!(
+                "bpf_map_update_elem_ptr, key_ptr: {:?}, value_ptr: {:?}, value: {}",
+                &0 as *const _,
+                obj.data_mut().as_mut_ptr(),
+                value
+            );
+            bpf_map_update_elem_ptr(fd.as_fd(), &0 as *const _, obj.data_mut().as_mut_ptr(), 0)
+                .map_err(|(_, io_error)| SyscallError {
+                    call: "bpf_map_update_elem",
+                    io_error,
+                })
+                .map_err(MapError::from)?;
+        }
+        if obj.section_kind() == EbpfSectionKind::Rodata {
+            bpf_map_freeze(fd.as_fd())
+                .map_err(|(_, io_error)| SyscallError {
+                    call: "bpf_map_freeze",
+                    io_error,
+                })
+                .map_err(MapError::from)?;
+        }
+        Ok(())
+    }
+    /// Allows the map to be pinned to the provided path.
+    ///
+    /// Any directories in the path provided should have been created by the caller.
+    /// The path must be on a BPF filesystem.
+    ///
+    /// # Errors
+    ///
+    /// Returns a [`PinError::SyscallError`] if the underlying syscall fails.
+    /// This may also happen if the path already exists, in which case the wrapped
+    /// [`std::io::Error`] kind will be [`std::io::ErrorKind::AlreadyExists`].
+    /// Returns a [`PinError::InvalidPinPath`] if the path provided cannot be
+    /// converted to a [`CString`].
+    ///
+    /// # Example
+    ///
+    /// ```no_run
+    /// # let mut bpf = aya::Ebpf::load(&[])?;
+    /// # use aya::maps::MapData;
+    ///
+    /// let mut map = MapData::from_pin("/sys/fs/bpf/my_map")?;
+    /// map.pin("/sys/fs/bpf/my_map2")?;
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    pub fn pin<P: AsRef<Path>>(&self, path: P) -> Result<(), PinError> {
+        use std::os::unix::ffi::OsStrExt as _;
+
+        let Self { fd, obj: _ } = self;
+        let path = path.as_ref();
+        let path_string = CString::new(path.as_os_str().as_bytes()).map_err(|error| {
+            PinError::InvalidPinPath {
+                path: path.to_path_buf(),
+                error,
+            }
+        })?;
+        bpf_pin_object(fd.as_fd(), &path_string).map_err(|(_, io_error)| SyscallError {
+            call: "BPF_OBJ_PIN",
+            io_error,
+        })?;
+        Ok(())
+    }
+    pub(crate) fn obj(&self) -> &maps::Map {
+        let Self { obj, fd: _ } = self;
+        obj
+    }
+
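+    /// Loads a map from a map id.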
+    pub fn from_id(id: u32) -> Result<Self, MapError> {
+        let fd = bpf_map_get_fd_by_id(id)?;
+        Self::from_fd(fd)
+    }
+    /// Loads a map from a file descriptor.
+    ///
+    /// If loading from a BPF filesystem (bpffs) you should use [`MapData::from_pin`](crate::maps::MapData::from_pin).
+    /// This API is intended for cases where you have received a valid BPF FD from some other means.
+    /// For example, you received an FD over Unix Domain Socket.
+    pub fn from_fd(fd: OwnedFd) -> Result<Self, MapError> {
+        let MapInfo(info) = MapInfo::new_from_fd(fd.as_fd())?;
+        Ok(Self {
+            obj: parse_map_info(info, PinningType::None),
+            fd: MapFd::from_fd(fd),
+        })
+    }
+
+    /// Returns the file descriptor of the map.
+    pub fn fd(&self) -> &MapFd {
+        let Self { obj: _, fd } = self;
+        fd
+    }
+}
+
+/// Raises a warning about rlimit. Should be used only if creating a map was not
+/// successful.
+fn maybe_warn_rlimit() {
+    let mut limit = mem::MaybeUninit::<rlimit>::uninit();
+    let ret = unsafe { getrlimit(RLIMIT_MEMLOCK, limit.as_mut_ptr()) };
+    if ret == 0 {
+        let limit = unsafe { limit.assume_init() };
+
+        if limit.rlim_cur == RLIM_INFINITY {
+            return;
+        }
+        struct HumanSize(rlim_t);
+
+        impl fmt::Display for HumanSize {
+            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                let &Self(size) = self;
+                if size < 1024 {
+                    write!(f, "{} bytes", size)
+                } else if size < 1024 * 1024 {
+                    write!(f, "{} KiB", size / 1024)
+                } else {
+                    write!(f, "{} MiB", size / 1024 / 1024)
+                }
+            }
+        }
+        warn!(
+            "RLIMIT_MEMLOCK value is {}, not RLIM_INFINITY; if experiencing problems with creating \
+            maps, try raising RLIMIT_MEMLOCK either to RLIM_INFINITY or to a higher value sufficient \
+            for the size of your maps",
+            HumanSize(limit.rlim_cur)
+        );
+    }
+}
+
+/// Provides information about a loaded map, like name, id and size.
+#[derive(Debug)]
+pub struct MapInfo(bpf_map_info);
+
+impl MapInfo {
+    fn new_from_fd(fd: BorrowedFd<'_>) -> Result<Self, MapError> {
+        let info = bpf_map_get_info_by_fd(fd.as_fd())?;
+        Ok(Self(info))
+    }
+
+    /// Loads map info from a map id.
+    pub fn from_id(id: u32) -> Result<Self, MapError> {
+        bpf_map_get_fd_by_id(id)
+            .map_err(MapError::from)
+            .and_then(|fd| Self::new_from_fd(fd.as_fd()))
+    }
+
+    /// The name of the map, limited to 16 bytes.
+    pub fn name(&self) -> &[u8] {
+        bytes_of_bpf_name(&self.0.name)
+    }
+
+    /// The name of the map as a &str. If the name is not valid unicode, None is returned.
+    pub fn name_as_str(&self) -> Option<&str> {
+        std::str::from_utf8(self.name()).ok()
+    }
+
+    /// The id for this map. Each map has a unique id.
+    pub fn id(&self) -> u32 {
+        self.0.id
+    }
+
+    /// The map type as defined by the linux kernel enum
+    /// [`bpf_map_type`](https://elixir.bootlin.com/linux/v6.4.4/source/include/uapi/linux/bpf.h#L905).
+    pub fn map_type(&self) -> u32 {
+        self.0.type_
+    }
+
+    /// The key size for this map.
+    pub fn key_size(&self) -> u32 {
+        self.0.key_size
+    }
+
+    /// The value size for this map.
+    pub fn value_size(&self) -> u32 {
+        self.0.value_size
+    }
+
+    /// The maximum number of entries in this map.
+    pub fn max_entries(&self) -> u32 {
+        self.0.max_entries
+    }
+
+    /// The flags for this map.
+    pub fn map_flags(&self) -> u32 {
+        self.0.map_flags
+    }
+
+    /// Returns a file descriptor referencing the map.
+    ///
+    /// The returned file descriptor can be closed at any time and doing so does
+    /// not influence the life cycle of the map.
+    pub fn fd(&self) -> Result<MapFd, MapError> {
+        let Self(info) = self;
+        let fd = bpf_map_get_fd_by_id(info.id)?;
+        Ok(MapFd::from_fd(fd))
+    }
+
+    /// Loads a map from a pinned path in bpffs.
+    pub fn from_pin<P: AsRef<Path>>(path: P) -> Result<Self, MapError> {
+        use std::os::unix::ffi::OsStrExt as _;
+
+        // TODO: avoid this unwrap by adding a new error variant.
+        let path_string = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
+        let fd = bpf_get_object(&path_string).map_err(|(_, io_error)| SyscallError {
+            call: "BPF_OBJ_GET",
+            io_error,
+        })?;
+
+        Self::new_from_fd(fd.as_fd())
+    }
+}
+
+/// Returns an iterator over all loaded bpf maps.
+///
+/// This differs from [`crate::Ebpf::maps`] since it will return all maps
+/// listed on the host system and not only maps for a specific [`crate::Ebpf`] instance.
+///
+/// # Example
+/// ```
+/// # use aya::maps::loaded_maps;
+///
+/// for m in loaded_maps() {
+///     match m {
+///         Ok(map) => println!("{:?}", map.name_as_str()),
+///         Err(e) => println!("Error iterating maps: {:?}", e),
+///     }
+/// }
+/// ```
+///
+/// # Errors
+///
+/// Returns [`MapError::SyscallError`] if any of the syscalls required to either get
+/// next map id, get the map fd, or the [`MapInfo`] fail. In cases where
+/// iteration can't be performed, for example the caller does not have the necessary privileges,
+/// a single item will be yielded containing the error that occurred.
+pub fn loaded_maps() -> impl Iterator<Item = Result<MapInfo, MapError>> {
+    iter_map_ids().map(|id| {
+        let id = id?;
+        MapInfo::from_id(id)
+    })
+}
+
+// Implements TryFrom<Map> for different map implementations. Different map implementations can be
+// constructed from different variants of the map enum. Also, the implementation may have type
+// parameters (which we assume all have the bound `Pod` and nothing else).
+macro_rules! impl_try_from_map {
+    // At the root the type parameters are marked as a single token tree which will be pasted into
+    // the invocation for each type. Note that the later patterns require that the token tree be
+    // zero or more comma separated idents wrapped in parens. Note that the tt metavar is used here
+    // rather than the repeated idents used later because the macro language does not allow one
+    // repetition to be pasted inside another.
+    ($ty_param:tt {
+        $($ty:ident $(from $($variant:ident)|+)?),+ $(,)?
+    }) => {
+        $(impl_try_from_map!(<$ty_param> $ty $(from $($variant)|+)?);)+
+    };
+    // Add the "from $variant" using $ty as the default if it is missing.
+    (<$ty_param:tt> $ty:ident) => {
+        impl_try_from_map!(<$ty_param> $ty from $ty);
+    };
+    // Dispatch for each of the lifetimes.
+    (
+        <($($ty_param:ident),*)> $ty:ident from $($variant:ident)|+
+    ) => {
+        impl_try_from_map!(<'a> ($($ty_param),*) $ty from $($variant)|+);
+        impl_try_from_map!(<'a mut> ($($ty_param),*) $ty from $($variant)|+);
+        impl_try_from_map!(<> ($($ty_param),*) $ty from $($variant)|+);
+    };
+    // An individual impl.
+    (
+        <$($l:lifetime $($m:ident)?)?>
+        ($($ty_param:ident),*)
+        $ty:ident from $($variant:ident)|+
+    ) => {
+        impl<$($l,)? $($ty_param: Pod),*> TryFrom<$(&$l $($m)?)? Map>
+            for $ty<$(&$l $($m)?)? MapData, $($ty_param),*>
+        {
+            type Error = MapError;
+
+            fn try_from(map: $(&$l $($m)?)? Map) -> Result<Self, Self::Error> {
+                match map {
+                    $(Map::$variant(map_data) => Self::new(map_data),)+
+                    map => Err(MapError::InvalidMapType {
+                        map_type: map.map_type()
+                    }),
+                }
+            }
+        }
+    };
+}
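+//
+// As a rough sketch (for illustration only), `impl_try_from_map!(() { PerfEventArray, })`
+// below expands to three impls along the lines of
+// `impl TryFrom<Map> for PerfEventArray<MapData>`,
+// `impl<'a> TryFrom<&'a Map> for PerfEventArray<&'a MapData>` and
+// `impl<'a> TryFrom<&'a mut Map> for PerfEventArray<&'a mut MapData>`, each matching
+// `Map::PerfEventArray(map_data)` and otherwise returning `MapError::InvalidMapType`.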
+
+#[cfg(any(feature = "async_tokio", feature = "async_std"))]
+#[cfg_attr(docsrs, doc(cfg(any(feature = "async_tokio", feature = "async_std"))))]
+impl_try_from_map!(() {
+    AsyncPerfEventArray from PerfEventArray,
+});
+
+impl_try_from_map!(() {
+    PerfEventArray,
+});

+ 186 - 0
aya/src/maps/perf/async_perf_event_array.rs

@@ -0,0 +1,186 @@
+use std::{
+    borrow::{Borrow, BorrowMut},
+    path::Path,
+};
+
+// See https://doc.rust-lang.org/cargo/reference/features.html#mutually-exclusive-features.
+//
+// We should eventually split async functionality out into separate crates "aya-async-tokio" and
+// "async-async-std". Presently we arbitrarily choose tokio over async-std when both are requested.
+#[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
+use async_io::Async;
+use bytes::BytesMut;
+#[cfg(feature = "async_tokio")]
+use tokio::io::unix::AsyncFd;
+
+use crate::maps::{
+    perf::{Events, PerfBufferError, PerfEventArray, PerfEventArrayBuffer},
+    MapData, MapError, PinError,
+};
+
+/// A `Future` based map that can be used to receive events from eBPF programs using the linux
+/// [`perf`](https://perf.wiki.kernel.org/index.php/Main_Page) API.
+///
+/// This is the async version of [`PerfEventArray`], which provides integration
+/// with [tokio](https://docs.rs/tokio) and [async-std](https:/docs.rs/async-std) and a nice `Future` based API.
+///
+/// To receive events you need to:
+/// * call [`AsyncPerfEventArray::open`]
+/// * call [`AsyncPerfEventArrayBuffer::read_events`] to read the events
+///
+/// # Minimum kernel version
+///
+/// The minimum kernel version required to use this feature is 4.3.
+///
+/// # Examples
+///
+/// ```no_run
+/// # #[derive(thiserror::Error, Debug)]
+/// # enum Error {
+/// #    #[error(transparent)]
+/// #    IO(#[from] std::io::Error),
+/// #    #[error(transparent)]
+/// #    Map(#[from] aya::maps::MapError),
+/// #    #[error(transparent)]
+/// #    Ebpf(#[from] aya::EbpfError),
+/// #    #[error(transparent)]
+/// #    PerfBuf(#[from] aya::maps::perf::PerfBufferError),
+/// # }
+/// # #[cfg(feature = "async_tokio")]
+/// # async fn try_main() -> Result<(), Error> {
+/// # let mut bpf = aya::Ebpf::load(&[])?;
+/// use aya::maps::perf::{AsyncPerfEventArray, PerfBufferError};
+/// use aya::util::online_cpus;
+/// use bytes::BytesMut;
+/// use tokio::task; // or async_std::task
+///
+/// // try to convert the PERF_ARRAY map to an AsyncPerfEventArray
+/// let mut perf_array = AsyncPerfEventArray::try_from(bpf.take_map("PERF_ARRAY").unwrap())?;
+///
+/// for cpu_id in online_cpus()? {
+///     // open a separate perf buffer for each cpu
+///     let mut buf = perf_array.open(cpu_id, None)?;
+///
+///     // process each perf buffer in a separate task
+///     task::spawn(async move {
+///         let mut buffers = (0..10)
+///             .map(|_| BytesMut::with_capacity(1024))
+///             .collect::<Vec<_>>();
+///
+///         loop {
+///             // wait for events
+///             let events = buf.read_events(&mut buffers).await?;
+///
+///             // events.read contains the number of events that have been read,
+///             // and is always <= buffers.len()
+///             for i in 0..events.read {
+///                 let buf = &mut buffers[i];
+///                 // process buf
+///             }
+///         }
+///
+///         Ok::<_, PerfBufferError>(())
+///     });
+/// }
+///
+/// # Ok(())
+/// # }
+/// ```
+#[doc(alias = "BPF_MAP_TYPE_PERF_EVENT_ARRAY")]
+pub struct AsyncPerfEventArray<T> {
+    perf_map: PerfEventArray<T>,
+}
+
+impl<T: BorrowMut<MapData>> AsyncPerfEventArray<T> {
+    /// Opens the perf buffer at the given index.
+    ///
+    /// The returned buffer will receive all the events eBPF programs send at the given index.
+    pub fn open(
+        &mut self,
+        index: u32,
+        page_count: Option<usize>,
+    ) -> Result<AsyncPerfEventArrayBuffer<T>, PerfBufferError> {
+        let Self { perf_map } = self;
+        let buf = perf_map.open(index, page_count)?;
+        #[cfg(feature = "async_tokio")]
+        let buf = AsyncFd::new(buf)?;
+        #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
+        let buf = Async::new(buf)?;
+        Ok(AsyncPerfEventArrayBuffer { buf })
+    }
+
+    /// Pins the map to a BPF filesystem.
+    ///
+    /// When a map is pinned it will remain loaded until the corresponding file
+    /// is deleted. All parent directories in the given `path` must already exist.
+    pub fn pin<P: AsRef<Path>>(&self, path: P) -> Result<(), PinError> {
+        self.perf_map.pin(path)
+    }
+}
+
+impl<T: Borrow<MapData>> AsyncPerfEventArray<T> {
+    pub(crate) fn new(map: T) -> Result<Self, MapError> {
+        Ok(Self {
+            perf_map: PerfEventArray::new(map)?,
+        })
+    }
+}
+
+/// A `Future` based ring buffer that can receive events from eBPF programs.
+///
+/// [`AsyncPerfEventArrayBuffer`] is a ring buffer that can receive events from eBPF programs that
+/// use `bpf_perf_event_output()`. It's returned by [`AsyncPerfEventArray::open`].
+///
+/// See the [`AsyncPerfEventArray` documentation](AsyncPerfEventArray) for an overview of how to
+/// use perf buffers.
+pub struct AsyncPerfEventArrayBuffer<T: BorrowMut<MapData>> {
+    #[cfg(not(any(feature = "async_tokio", feature = "async_std")))]
+    buf: PerfEventArrayBuffer<T>,
+
+    #[cfg(feature = "async_tokio")]
+    buf: AsyncFd<PerfEventArrayBuffer<T>>,
+
+    #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
+    buf: Async<PerfEventArrayBuffer<T>>,
+}
+
+impl<T: BorrowMut<MapData>> AsyncPerfEventArrayBuffer<T> {
+    /// Reads events from the buffer.
+    ///
+    /// This method reads events into the provided slice of buffers, filling
+    /// each buffer in order, stopping when there are no more events to read or
+    /// all the buffers have been filled.
+    ///
+    /// Returns the number of events read and the number of events lost. Events
+    /// are lost when user space doesn't read events fast enough and the ring
+    /// buffer fills up.
+    pub async fn read_events(
+        &mut self,
+        buffers: &mut [BytesMut],
+    ) -> Result<Events, PerfBufferError> {
+        let Self { buf } = self;
+        loop {
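+            // Wait until the underlying buffer is readable, then try to read. If the
+            // read yields no events (e.g. a spurious wakeup), loop and wait again.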
+            #[cfg(feature = "async_tokio")]
+            let mut guard = buf.readable_mut().await?;
+            #[cfg(feature = "async_tokio")]
+            let buf = guard.get_inner_mut();
+
+            #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
+            let buf = {
+                if !buf.get_ref().readable() {
+                    buf.readable().await?;
+                }
+                unsafe { buf.get_mut() }
+            };
+
+            let events = buf.read_events(buffers)?;
+            const EMPTY: Events = Events { read: 0, lost: 0 };
+            if events != EMPTY {
+                break Ok(events);
+            }
+
+            #[cfg(feature = "async_tokio")]
+            guard.clear_ready();
+        }
+    }
+}

+ 15 - 0
aya/src/maps/perf/mod.rs

@@ -0,0 +1,15 @@
+//! Ring buffer types used to receive events from eBPF programs using the linux
+//! `perf` API.
+//!
+//! See [`PerfEventArray`] and [`AsyncPerfEventArray`].
+#[cfg(any(feature = "async_tokio", feature = "async_std"))]
+#[cfg_attr(docsrs, doc(cfg(any(feature = "async_tokio", feature = "async_std"))))]
+mod async_perf_event_array;
+mod perf_buffer;
+mod perf_event_array;
+
+#[cfg(any(feature = "async_tokio", feature = "async_std"))]
+#[cfg_attr(docsrs, doc(cfg(any(feature = "async_tokio", feature = "async_std"))))]
+pub use async_perf_event_array::*;
+pub use perf_buffer::*;
+pub use perf_event_array::*;

+ 626 - 0
aya/src/maps/perf/perf_buffer.rs

@@ -0,0 +1,626 @@
+use std::{
+    ffi::c_void,
+    io, mem,
+    os::fd::{AsFd, BorrowedFd},
+    ptr, slice,
+    sync::atomic::{self, AtomicPtr, Ordering},
+};
+
+use aya_obj::generated::{
+    perf_event_header, perf_event_mmap_page,
+    perf_event_type::{PERF_RECORD_LOST, PERF_RECORD_SAMPLE},
+};
+use bytes::BytesMut;
+use libc::*;
+use thiserror::Error;
+
+use crate::{
+    sys::{
+        mmap,
+        perf_event::{perf_event_ioctl, perf_event_open_bpf},
+        SysResult,
+    },
+    PERF_EVENT_IOC_DISABLE, PERF_EVENT_IOC_ENABLE,
+};
+
+/// Perf buffer error.
+#[derive(Error, Debug)]
+pub enum PerfBufferError {
+    /// the page count value passed to [`PerfEventArray::open`](crate::maps::PerfEventArray::open) is invalid.
+    #[error("invalid page count {page_count}, the value must be a power of two")]
+    InvalidPageCount {
+        /// the page count
+        page_count: usize,
+    },
+
+    /// `perf_event_open` failed.
+    #[error("perf_event_open failed: {io_error}")]
+    OpenError {
+        /// the source of this error
+        #[source]
+        io_error: io::Error,
+    },
+
+    /// `mmap`-ping the buffer failed.
+    #[error("mmap failed: {io_error}")]
+    MMapError {
+        /// the source of this error
+        #[source]
+        io_error: io::Error,
+    },
+
+    /// The `PERF_EVENT_IOC_ENABLE` ioctl failed
+    #[error("PERF_EVENT_IOC_ENABLE failed: {io_error}")]
+    PerfEventEnableError {
+        #[source]
+        /// the source of this error
+        io_error: io::Error,
+    },
+
+    /// `read_events()` was called with no output buffers.
+    #[error("read_events() was called with no output buffers")]
+    NoBuffers,
+
+    /// `read_events()` was called with a buffer that is not large enough to
+    /// contain the next event in the perf buffer.
+    #[deprecated(
+        since = "0.10.8",
+        note = "read_events() now calls BytesMut::reserve() internally, so this error is never returned"
+    )]
+    #[error("the buffer needs to be of at least {size} bytes")]
+    MoreSpaceNeeded {
+        /// expected size
+        size: usize,
+    },
+
+    /// An IO error occurred.
+    #[error(transparent)]
+    IOError(#[from] io::Error),
+}
+
+/// Return type of `read_events()`.
+#[derive(Debug, PartialEq, Eq)]
+pub struct Events {
+    /// The number of events read.
+    pub read: usize,
+    /// The number of events lost.
+    pub lost: usize,
+}
+
+#[derive(Debug)]
+pub(crate) struct PerfBuffer {
+    buf: AtomicPtr<perf_event_mmap_page>,
+    size: usize,
+    page_size: usize,
+    fd: crate::MockableFd,
+}
+
+impl PerfBuffer {
+    pub(crate) fn open(
+        cpu_id: u32,
+        page_size: usize,
+        page_count: usize,
+    ) -> Result<Self, PerfBufferError> {
+        if !page_count.is_power_of_two() {
+            return Err(PerfBufferError::InvalidPageCount { page_count });
+        }
+
+        let fd = perf_event_open_bpf(cpu_id as i32)
+            .map_err(|(_, io_error)| PerfBufferError::OpenError { io_error })?;
+        let size = page_size * page_count;
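+        // The mapping is one metadata page (perf_event_mmap_page) followed by
+        // `page_count` data pages forming the ring buffer, hence `size + page_size`.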
+        let buf = unsafe {
+            mmap(
+                ptr::null_mut(),
+                size + page_size,
+                PROT_READ | PROT_WRITE,
+                MAP_SHARED,
+                fd.as_fd(),
+                0,
+            )
+        };
+        if buf == MAP_FAILED {
+            return Err(PerfBufferError::MMapError {
+                io_error: io::Error::last_os_error(),
+            });
+        }
+
+        let fd = crate::MockableFd::from_fd(fd);
+        let perf_buf = Self {
+            buf: AtomicPtr::new(buf as *mut perf_event_mmap_page),
+            size,
+            page_size,
+            fd,
+        };
+
+        perf_event_ioctl(perf_buf.fd.as_fd(), PERF_EVENT_IOC_ENABLE, 0)
+            .map_err(|(_, io_error)| PerfBufferError::PerfEventEnableError { io_error })?;
+
+        Ok(perf_buf)
+    }
+
+    pub(crate) fn readable(&self) -> bool {
+        let header = self.buf.load(Ordering::SeqCst);
+        let head = unsafe { (*header).data_head } as usize;
+        let tail = unsafe { (*header).data_tail } as usize;
+        head != tail
+    }
+
+    pub(crate) fn read_events(
+        &mut self,
+        buffers: &mut [BytesMut],
+    ) -> Result<Events, PerfBufferError> {
+        if buffers.is_empty() {
+            return Err(PerfBufferError::NoBuffers);
+        }
+        let header = self.buf.load(Ordering::SeqCst);
+        let base = header as usize + self.page_size;
+
+        let mut events = Events { read: 0, lost: 0 };
+        let mut buf_n = 0;
+
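+        // Copies `out_buf.len()` bytes starting at `start_off` out of the ring buffer,
+        // handling records that wrap around the end of the mmap'ed area.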
+        let fill_buf = |start_off, base, mmap_size, out_buf: &mut [u8]| {
+            let len = out_buf.len();
+
+            let end = (start_off + len) % mmap_size;
+            let start = start_off % mmap_size;
+
+            if start < end {
+                out_buf.copy_from_slice(unsafe {
+                    slice::from_raw_parts((base + start) as *const u8, len)
+                });
+            } else {
+                let size = mmap_size - start;
+                unsafe {
+                    out_buf[..size]
+                        .copy_from_slice(slice::from_raw_parts((base + start) as *const u8, size));
+                    out_buf[size..]
+                        .copy_from_slice(slice::from_raw_parts(base as *const u8, len - size));
+                }
+            }
+        };
+
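+        // Parses the record at `event_start`: for PERF_RECORD_SAMPLE the payload is
+        // copied into `buf`, for PERF_RECORD_LOST the lost-events count is extracted,
+        // and any other record type is skipped.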
+        let read_event = |event_start, event_type, base, buf: &mut BytesMut| {
+            let sample_size = match event_type {
+                x if x == PERF_RECORD_SAMPLE as u32 || x == PERF_RECORD_LOST as u32 => {
+                    let mut size = [0u8; mem::size_of::<u32>()];
+                    fill_buf(
+                        event_start + mem::size_of::<perf_event_header>(),
+                        base,
+                        self.size,
+                        &mut size,
+                    );
+                    u32::from_ne_bytes(size)
+                }
+                _ => return Ok(None),
+            } as usize;
+
+            let sample_start =
+                (event_start + mem::size_of::<perf_event_header>() + mem::size_of::<u32>())
+                    % self.size;
+
+            log::trace!(
+                "sample_start: {}, sample_size: {}",
+                sample_start,
+                sample_size
+            );
+            match event_type {
+                x if x == PERF_RECORD_SAMPLE as u32 => {
+                    buf.clear();
+                    buf.reserve(sample_size);
+                    unsafe { buf.set_len(sample_size) };
+
+                    fill_buf(sample_start, base, self.size, buf);
+
+                    Ok(Some((1, 0)))
+                }
+                x if x == PERF_RECORD_LOST as u32 => {
+                    let mut count = [0u8; mem::size_of::<u64>()];
+                    fill_buf(
+                        event_start + mem::size_of::<perf_event_header>() + mem::size_of::<u64>(),
+                        base,
+                        self.size,
+                        &mut count,
+                    );
+                    Ok(Some((0, u64::from_ne_bytes(count) as usize)))
+                }
+                _ => Ok(None),
+            }
+        };
+
+        let head = unsafe { (*header).data_head } as usize;
+        let mut tail = unsafe { (*header).data_tail } as usize;
+
+        log::trace!("head: {}, tail: {}", head, tail);
+
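+        // Consume the records between `data_tail` and `data_head`, filling one output
+        // buffer per sample and stopping when we run out of records or output buffers.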
+        let result = loop {
+            if head == tail {
+                break Ok(());
+            }
+            if buf_n == buffers.len() {
+                break Ok(());
+            }
+
+            let buf = &mut buffers[buf_n];
+
+            let event_start = tail % self.size;
+            let event =
+                unsafe { ptr::read_unaligned((base + event_start) as *const perf_event_header) };
+            let event_size = event.size as usize;
+            log::trace!(
+                "event_start: {}, event_size: {}, base: {:#x}, head: {:#x}, tail: {:#x}",
+                event_start,
+                event_size,
+                base,
+                head,
+                tail
+            );
+            match read_event(event_start, event.type_, base, buf) {
+                Ok(Some((read, lost))) => {
+                    if read > 0 {
+                        buf_n += 1;
+                        events.read += read;
+                    }
+                    events.lost += lost;
+                }
+                Ok(None) => { /* skip unknown event type */ }
+                Err(e) => {
+                    // we got an error and we didn't process any events, propagate the error
+                    // and give the caller a chance to increase buffers
+                    break Err(e);
+                }
+            }
+            tail += event_size;
+            tail %= self.size;
+        };
+
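+        // Publish the new tail so the kernel can reuse the consumed space; the fence
+        // ensures all the reads above complete before the store becomes visible.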
+        atomic::fence(Ordering::SeqCst);
+        unsafe { (*header).data_tail = tail as u64 };
+
+        result.map(|()| events)
+    }
+}
+
+impl AsFd for PerfBuffer {
+    fn as_fd(&self) -> BorrowedFd<'_> {
+        self.fd.as_fd()
+    }
+}
+
+impl Drop for PerfBuffer {
+    fn drop(&mut self) {
+        unsafe {
+            let _: SysResult<_> = perf_event_ioctl(self.fd.as_fd(), PERF_EVENT_IOC_DISABLE, 0);
+            munmap(
+                self.buf.load(Ordering::SeqCst) as *mut c_void,
+                self.size + self.page_size,
+            );
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::fmt::Debug;
+
+    use assert_matches::assert_matches;
+
+    use super::*;
+    use crate::sys::{
+        fake::{override_syscall, TEST_MMAP_RET},
+        Syscall,
+    };
+    #[repr(C)]
+    #[derive(Debug)]
+    struct Sample {
+        header: perf_event_header,
+        size: u32,
+    }
+
+    const PAGE_SIZE: usize = 4096;
+    union MMappedBuf {
+        mmap_page: perf_event_mmap_page,
+        data: [u8; PAGE_SIZE * 2],
+    }
+
+    fn fake_mmap(buf: &MMappedBuf) {
+        override_syscall(|call| match call {
+            Syscall::PerfEventOpen { .. } | Syscall::PerfEventIoctl { .. } => {
+                Ok(crate::MockableFd::mock_signed_fd().into())
+            }
+            call => panic!("unexpected syscall: {:?}", call),
+        });
+        TEST_MMAP_RET.with(|ret| *ret.borrow_mut() = buf as *const _ as *mut _);
+    }
+
+    #[test]
+    fn test_invalid_page_count() {
+        assert_matches!(
+            PerfBuffer::open(1, PAGE_SIZE, 0),
+            Err(PerfBufferError::InvalidPageCount { .. })
+        );
+        assert_matches!(
+            PerfBuffer::open(1, PAGE_SIZE, 3),
+            Err(PerfBufferError::InvalidPageCount { .. })
+        );
+        assert_matches!(
+            PerfBuffer::open(1, PAGE_SIZE, 5),
+            Err(PerfBufferError::InvalidPageCount { .. })
+        );
+    }
+
+    #[test]
+    fn test_no_out_bufs() {
+        let mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mmapped_buf);
+
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+        assert_matches!(buf.read_events(&mut []), Err(PerfBufferError::NoBuffers))
+    }
+
+    #[test]
+    #[cfg_attr(
+        miri,
+        ignore = "`unsafe { (*header).data_tail = tail as u64 };` is attempting a write access using using a tag that only grants SharedReadOnly permission"
+    )]
+    fn test_no_events() {
+        let mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mmapped_buf);
+
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+        let out_buf = BytesMut::with_capacity(4);
+        assert_eq!(
+            buf.read_events(&mut [out_buf]).unwrap(),
+            Events { read: 0, lost: 0 }
+        );
+    }
+
+    #[test]
+    #[cfg_attr(
+        miri,
+        ignore = "`ptr::write_unaligned(dst, value)` is attempting a write access but no exposed tags have suitable permission in the borrow stack for this location"
+    )]
+    fn test_read_first_lost() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mmapped_buf);
+
+        #[repr(C)]
+        #[derive(Debug)]
+        struct LostSamples {
+            header: perf_event_header,
+            id: u64,
+            count: u64,
+        }
+
+        let evt = LostSamples {
+            header: perf_event_header {
+                type_: PERF_RECORD_LOST as u32,
+                misc: 0,
+                size: mem::size_of::<LostSamples>() as u16,
+            },
+            id: 1,
+            count: 0xCAFEBABE,
+        };
+        write(&mut mmapped_buf, 0, evt);
+
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+        let out_buf = BytesMut::with_capacity(0);
+        let events = buf.read_events(&mut [out_buf]).unwrap();
+        assert_eq!(events.lost, 0xCAFEBABE);
+    }
+
+    #[repr(C)]
+    #[derive(Debug)]
+    struct PerfSample<T: Debug> {
+        s_hdr: Sample,
+        value: T,
+    }
+
+    fn write<T: Debug>(mmapped_buf: &mut MMappedBuf, offset: usize, value: T) -> usize {
+        let dst = (mmapped_buf as *const _ as usize + PAGE_SIZE + offset) as *const PerfSample<T>
+            as *mut T;
+        unsafe {
+            ptr::write_unaligned(dst, value);
+            mmapped_buf.mmap_page.data_head = (offset + mem::size_of::<T>()) as u64;
+            mmapped_buf.mmap_page.data_head as usize
+        }
+    }
+
+    fn write_sample<T: Debug>(mmapped_buf: &mut MMappedBuf, offset: usize, value: T) -> usize {
+        let sample = PerfSample {
+            s_hdr: Sample {
+                header: perf_event_header {
+                    type_: PERF_RECORD_SAMPLE as u32,
+                    misc: 0,
+                    size: mem::size_of::<PerfSample<T>>() as u16,
+                },
+                size: mem::size_of::<T>() as u32,
+            },
+            value,
+        };
+        write(mmapped_buf, offset, sample)
+    }
+
+    fn u32_from_buf(buf: &[u8]) -> u32 {
+        u32::from_ne_bytes(buf[..4].try_into().unwrap())
+    }
+
+    fn u64_from_buf(buf: &[u8]) -> u64 {
+        u64::from_ne_bytes(buf[..8].try_into().unwrap())
+    }
+
+    #[test]
+    #[cfg_attr(
+        miri,
+        ignore = "`ptr::write_unaligned(dst, value)` is attempting a write access but no exposed tags have suitable permission in the borrow stack for this location"
+    )]
+    fn test_read_first_sample() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mmapped_buf);
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+
+        write_sample(&mut mmapped_buf, 0, 0xCAFEBABEu32);
+
+        let mut out_bufs = [BytesMut::with_capacity(4)];
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 1 });
+        assert_eq!(u32_from_buf(&out_bufs[0]), 0xCAFEBABE);
+    }
+
+    #[test]
+    #[cfg_attr(
+        miri,
+        ignore = "`ptr::write_unaligned(dst, value)` is attempting a write access but no exposed tags have suitable permission in the borrow stack for this location"
+    )]
+    fn test_read_many_with_many_reads() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mmapped_buf);
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+
+        let next = write_sample(&mut mmapped_buf, 0, 0xCAFEBABEu32);
+        write_sample(&mut mmapped_buf, next, 0xBADCAFEu32);
+
+        let mut out_bufs = [BytesMut::with_capacity(4)];
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 1 });
+        assert_eq!(u32_from_buf(&out_bufs[0]), 0xCAFEBABE);
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 1 });
+        assert_eq!(u32_from_buf(&out_bufs[0]), 0xBADCAFE);
+    }
+
+    #[test]
+    #[cfg_attr(
+        miri,
+        ignore = "`ptr::write_unaligned(dst, value)` is attempting a write access but no exposed tags have suitable permission in the borrow stack for this location"
+    )]
+    fn test_read_many_with_one_read() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mmapped_buf);
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+
+        let next = write_sample(&mut mmapped_buf, 0, 0xCAFEBABEu32);
+        write_sample(&mut mmapped_buf, next, 0xBADCAFEu32);
+
+        let mut out_bufs = (0..3)
+            .map(|_| BytesMut::with_capacity(4))
+            .collect::<Vec<_>>();
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 2 });
+        assert_eq!(u32_from_buf(&out_bufs[0]), 0xCAFEBABE);
+        assert_eq!(u32_from_buf(&out_bufs[1]), 0xBADCAFE);
+    }
+
+    #[test]
+    #[cfg_attr(
+        miri,
+        ignore = "`ptr::write_unaligned(dst, value)` is attempting a write access but no exposed tags have suitable permission in the borrow stack for this location"
+    )]
+    fn test_read_last_sample() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mmapped_buf);
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+
+        let offset = PAGE_SIZE - mem::size_of::<PerfSample<u32>>();
+        unsafe {
+            mmapped_buf.mmap_page.data_tail = offset as u64;
+        }
+        write_sample(&mut mmapped_buf, offset, 0xCAFEBABEu32);
+
+        let mut out_bufs = [BytesMut::with_capacity(4)];
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 1 });
+        assert_eq!(u32_from_buf(&out_bufs[0]), 0xCAFEBABE);
+    }
+
+    #[test]
+    #[cfg_attr(
+        miri,
+        ignore = "`ptr::write_unaligned(dst, value)` is attempting a write access but no exposed tags have suitable permission in the borrow stack for this location"
+    )]
+    fn test_read_wrapping_sample_size() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mmapped_buf);
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+
+        let header = perf_event_header {
+            type_: PERF_RECORD_SAMPLE as u32,
+            misc: 0,
+            size: mem::size_of::<PerfSample<u64>>() as u16,
+        };
+
+        let offset = PAGE_SIZE - mem::size_of::<perf_event_header>() - 2;
+        unsafe {
+            mmapped_buf.mmap_page.data_tail = offset as u64;
+        }
+
+        write(&mut mmapped_buf, offset, header);
+        write(&mut mmapped_buf, PAGE_SIZE - 2, 0x0004u16);
+        write(&mut mmapped_buf, 0, 0x0000u16);
+        write(&mut mmapped_buf, 2, 0xBAADCAFEu32);
+
+        let mut out_bufs = [BytesMut::with_capacity(8)];
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 1 });
+        assert_eq!(u32_from_buf(&out_bufs[0]), 0xBAADCAFE);
+    }
+
+    #[test]
+    #[cfg_attr(
+        miri,
+        ignore = "`ptr::write_unaligned(dst, value)` is attempting a write access but no exposed tags have suitable permission in the borrow stack for this location"
+    )]
+    fn test_read_wrapping_value() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mmapped_buf);
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+
+        let sample = PerfSample {
+            s_hdr: Sample {
+                header: perf_event_header {
+                    type_: PERF_RECORD_SAMPLE as u32,
+                    misc: 0,
+                    size: mem::size_of::<PerfSample<u64>>() as u16,
+                },
+                size: mem::size_of::<u64>() as u32,
+            },
+            value: 0xCAFEBABEu32,
+        };
+
+        let offset = PAGE_SIZE - mem::size_of::<PerfSample<u32>>();
+        unsafe {
+            mmapped_buf.mmap_page.data_tail = offset as u64;
+        }
+        write(&mut mmapped_buf, offset, sample);
+        write(&mut mmapped_buf, 0, 0xBAADCAFEu32);
+
+        let mut out_bufs = [BytesMut::with_capacity(8)];
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 1 });
+        assert_eq!(u64_from_buf(&out_bufs[0]), 0xBAADCAFECAFEBABE);
+    }
+}

+ 210 - 0
aya/src/maps/perf/perf_event_array.rs

@@ -0,0 +1,210 @@
+//! A map that can be used to receive events from eBPF programs using the linux [`perf`] API
+//!
+//! [`perf`]: https://perf.wiki.kernel.org/index.php/Main_Page
+use std::{
+    borrow::{Borrow, BorrowMut},
+    ops::Deref,
+    os::fd::{AsFd, AsRawFd, BorrowedFd, RawFd},
+    path::Path,
+    sync::Arc,
+};
+
+use bytes::BytesMut;
+
+use crate::{
+    maps::{
+        perf::{Events, PerfBuffer, PerfBufferError},
+        MapData, MapError, PinError,
+    },
+    sys::bpf_map_update_elem,
+    util::page_size,
+};
+
+/// A ring buffer that can receive events from eBPF programs.
+///
+/// [`PerfEventArrayBuffer`] is a ring buffer that can receive events from eBPF
+/// programs that use `bpf_perf_event_output()`. It's returned by [`PerfEventArray::open`].
+///
+/// See the [`PerfEventArray` documentation](PerfEventArray) for an overview of how to use
+/// perf buffers.
+pub struct PerfEventArrayBuffer<T> {
+    _map: Arc<T>,
+    buf: PerfBuffer,
+}
+
+impl<T: BorrowMut<MapData>> PerfEventArrayBuffer<T> {
+    /// Returns true if the buffer contains events that haven't been read.
+    pub fn readable(&self) -> bool {
+        self.buf.readable()
+    }
+
+    /// Reads events from the buffer.
+    ///
+    /// This method reads events into the provided slice of buffers, filling
+    /// each buffer in order, stopping when there are no more events to read or
+    /// all the buffers have been filled.
+    ///
+    /// Returns the number of events read and the number of events lost. Events
+    /// are lost when user space doesn't read events fast enough and the ring
+    /// buffer fills up.
+    ///
+    /// # Errors
+    ///
+    /// [`PerfBufferError::NoBuffers`] is returned when `out_bufs` is empty.
+    pub fn read_events(&mut self, out_bufs: &mut [BytesMut]) -> Result<Events, PerfBufferError> {
+        self.buf.read_events(out_bufs)
+    }
+}
+
+impl<T: BorrowMut<MapData>> AsFd for PerfEventArrayBuffer<T> {
+    fn as_fd(&self) -> BorrowedFd<'_> {
+        self.buf.as_fd()
+    }
+}
+
+impl<T: BorrowMut<MapData>> AsRawFd for PerfEventArrayBuffer<T> {
+    fn as_raw_fd(&self) -> RawFd {
+        self.buf.as_fd().as_raw_fd()
+    }
+}
+
+/// A map that can be used to receive events from eBPF programs using the linux [`perf`] API.
+///
+/// Each element of a [`PerfEventArray`] is a separate [`PerfEventArrayBuffer`] which can be used
+/// to receive events sent by eBPF programs that use `bpf_perf_event_output()`.
+///
+/// To receive events you need to:
+/// * call [`PerfEventArray::open`]
+/// * poll the returned [`PerfEventArrayBuffer`] to be notified when events are
+///   inserted in the buffer
+/// * call [`PerfEventArrayBuffer::read_events`] to read the events
+///
+/// # Minimum kernel version
+///
+/// The minimum kernel version required to use this feature is 4.3.
+///
+/// # Examples
+///
+/// A common way to use a perf array is to have one perf buffer for each
+/// available CPU:
+///
+/// ```no_run
+/// # use aya::maps::perf::PerfEventArrayBuffer;
+/// # use aya::maps::MapData;
+/// # use std::borrow::BorrowMut;
+/// # struct Poll<T> { _t: std::marker::PhantomData<T> };
+/// # impl<T: BorrowMut<MapData>> Poll<T> {
+/// #    fn poll_readable(&self) -> &mut [PerfEventArrayBuffer<T>] {
+/// #        &mut []
+/// #    }
+/// # }
+/// # fn poll_buffers<T: BorrowMut<MapData>>(bufs: Vec<PerfEventArrayBuffer<T>>) -> Poll<T> {
+/// #    Poll { _t: std::marker::PhantomData }
+/// # }
+/// # #[derive(thiserror::Error, Debug)]
+/// # enum Error {
+/// #    #[error(transparent)]
+/// #    IO(#[from] std::io::Error),
+/// #    #[error(transparent)]
+/// #    Map(#[from] aya::maps::MapError),
+/// #    #[error(transparent)]
+/// #    Ebpf(#[from] aya::EbpfError),
+/// #    #[error(transparent)]
+/// #    PerfBuf(#[from] aya::maps::perf::PerfBufferError),
+/// # }
+/// # let mut bpf = aya::Ebpf::load(&[])?;
+/// use aya::maps::PerfEventArray;
+/// use aya::util::online_cpus;
+/// use bytes::BytesMut;
+///
+/// let mut perf_array = PerfEventArray::try_from(bpf.map_mut("EVENTS").unwrap())?;
+///
+/// // eBPF programs are going to write to the EVENTS perf array, using the id of the CPU they're
+/// // running on as the array index.
+/// let mut perf_buffers = Vec::new();
+/// for cpu_id in online_cpus()? {
+///     // this perf buffer will receive events generated on the CPU with id cpu_id
+///     perf_buffers.push(perf_array.open(cpu_id, None)?);
+/// }
+///
+/// let mut out_bufs = [BytesMut::with_capacity(1024)];
+///
+/// // poll the buffers to know when they have queued events
+/// let poll = poll_buffers(perf_buffers);
+/// loop {
+///     for read_buf in poll.poll_readable() {
+///         read_buf.read_events(&mut out_bufs)?;
+///         // process out_bufs
+///     }
+/// }
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Polling and avoiding lost events
+///
+/// In the example above, the implementations of `poll_buffers()` and `poll.poll_readable()` are not
+/// given. [`PerfEventArrayBuffer`] implements the [`AsRawFd`] trait, so you can implement polling
+/// using any crate that can poll file descriptors, like [epoll], [mio] etc.
+///
+/// Perf buffers are internally implemented as ring buffers. If your eBPF programs produce large
+/// amounts of data, in order not to lose events you might want to process each
+/// [`PerfEventArrayBuffer`] on a different thread.
+///
+/// # Async
+///
+/// If you are using [tokio] or [async-std], you should use `AsyncPerfEventArray` which
+/// efficiently integrates with those and provides a nicer `Future` based API.
+///
+/// [`perf`]: https://perf.wiki.kernel.org/index.php/Main_Page
+/// [epoll]: https://docs.rs/epoll
+/// [mio]: https://docs.rs/mio
+/// [tokio]: https://docs.rs/tokio
+/// [async-std]: https://docs.rs/async-std
+#[doc(alias = "BPF_MAP_TYPE_PERF_EVENT_ARRAY")]
+pub struct PerfEventArray<T> {
+    map: Arc<T>,
+    page_size: usize,
+}
+
+impl<T: Borrow<MapData>> PerfEventArray<T> {
+    pub(crate) fn new(map: T) -> Result<Self, MapError> {
+        Ok(Self {
+            map: Arc::new(map),
+            page_size: page_size(),
+        })
+    }
+
+    /// Pins the map to a BPF filesystem.
+    ///
+    /// When a map is pinned it will remain loaded until the corresponding file
+    /// is deleted. All parent directories in the given `path` must already exist.
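+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, assuming `/sys/fs/bpf` is a mounted BPF filesystem and that the
+    /// loaded object contains a perf array named `EVENTS` (both names are illustrative):
+    ///
+    /// ```no_run
+    /// # #[derive(thiserror::Error, Debug)]
+    /// # enum Error {
+    /// #    #[error(transparent)]
+    /// #    Ebpf(#[from] aya::EbpfError),
+    /// #    #[error(transparent)]
+    /// #    Map(#[from] aya::maps::MapError),
+    /// #    #[error(transparent)]
+    /// #    Pin(#[from] aya::pin::PinError),
+    /// # }
+    /// # let mut bpf = aya::Ebpf::load(&[])?;
+    /// use aya::maps::PerfEventArray;
+    ///
+    /// let perf_array = PerfEventArray::try_from(bpf.map_mut("EVENTS").unwrap())?;
+    /// perf_array.pin("/sys/fs/bpf/events")?;
+    /// # Ok::<(), Error>(())
+    /// ```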
+    pub fn pin<P: AsRef<Path>>(&self, path: P) -> Result<(), PinError> {
+        let data: &MapData = self.map.deref().borrow();
+        data.pin(path)
+    }
+}
+
+impl<T: BorrowMut<MapData>> PerfEventArray<T> {
+    /// Opens the perf buffer at the given index.
+    ///
+    /// The returned buffer will receive all the events eBPF programs send at the given index.
+    pub fn open(
+        &mut self,
+        index: u32,
+        page_count: Option<usize>,
+    ) -> Result<PerfEventArrayBuffer<T>, PerfBufferError> {
+        // FIXME: keep track of open buffers
+
+        let map_data: &MapData = self.map.deref().borrow();
+        let map_fd = map_data.fd().as_fd();
+        let buf = PerfBuffer::open(index, self.page_size, page_count.unwrap_or(2))?;
+        bpf_map_update_elem(map_fd, Some(&index), &buf.as_fd().as_raw_fd(), 0)
+            .map_err(|(_, io_error)| io_error)?;
+
+        Ok(PerfEventArrayBuffer {
+            buf,
+            _map: self.map.clone(),
+        })
+    }
+}

+ 31 - 0
aya/src/pin.rs

@@ -0,0 +1,31 @@
+//! Pinning BPF objects to the BPF filesystem.
+
+use std::string::String;
+
+use thiserror::Error;
+
+use crate::sys::SyscallError;
+
+/// An error occurred while working with a pinned BPF object.
+#[derive(Error, Debug)]
+pub enum PinError {
+    /// The object FD is not known by Aya.
+    #[error("the BPF object `{name}`'s FD is not known")]
+    NoFd {
+        /// Object name.
+        name: String,
+    },
+    /// The path for the BPF object is not valid.
+    #[error("invalid pin path `{}`", path.display())]
+    InvalidPinPath {
+        /// The path.
+        path: std::path::PathBuf,
+
+        #[source]
+        /// The source error.
+        error: std::ffi::NulError,
+    },
+    /// An error occurred while making a syscall.
+    #[error(transparent)]
+    SyscallError(#[from] SyscallError),
+}

+ 192 - 0
aya/src/programs/extension.rs

@@ -0,0 +1,192 @@
+use std::os::fd::{AsFd, BorrowedFd, OwnedFd};
+
+use aya_obj::{
+    btf::{Btf, BtfKind},
+    generated::{bpf_attach_type::BPF_CGROUP_INET_INGRESS, bpf_prog_type::BPF_PROG_TYPE_EXT},
+};
+use object::Endianness;
+use thiserror::Error;
+
+use crate::{
+    programs::{
+        links::{define_link_wrapper, FdLink, FdLinkId},
+        load_program, ProgramData, ProgramError, ProgramFd,
+    },
+    sys,
+    sys::{bpf_link_create, LinkTarget, SyscallError},
+};
+
+/// The type returned when loading or attaching an [`Extension`] fails.
+#[derive(Debug, Error)]
+pub enum ExtensionError {
+    /// Target BPF program does not have BTF loaded to the kernel.
+    #[error("target BPF program does not have BTF loaded to the kernel")]
+    NoBTF,
+}
+
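+/// An extension program (`BPF_PROG_TYPE_EXT`) that can replace a function in another,
+/// already loaded eBPF program. Both the extension and the target program must have
+/// BTF information loaded into the kernel; see [`Extension::load`].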
+#[derive(Debug)]
+#[doc(alias = "BPF_PROG_TYPE_EXT")]
+pub struct Extension {
+    pub(crate) data: ProgramData<ExtensionLink>,
+}
+
+impl Extension {
+    /// Returns the name of the program.
+    pub fn name(&self) -> Option<String> {
+        self.data.name.clone()
+    }
+
+    /// Loads the extension inside the kernel.
+    ///
+    /// Prepares the code included in the extension to replace the code of the function
+    /// `func_name` within the eBPF program represented by the `program` file descriptor.
+    /// This requires that both the [`Extension`] and `program` have had their BTF
+    /// loaded into the kernel.
+    ///
+    /// The BPF verifier requires that we specify the target program and function name
+    /// at load time, so it can identify that the program and target are BTF compatible
+    /// and to enforce this constraint when programs are attached.
+    ///
+    /// The extension code will be loaded but inactive until it's attached.
+    /// There are no restrictions on what functions may be replaced, so you could replace
+    /// the main entry point of your program with an extension.
+    pub fn load(&mut self, program: ProgramFd, func_name: &str) -> Result<(), ProgramError> {
+        let (btf_fd, btf_id) = get_btf_info(program.as_fd(), func_name)?;
+
+        self.data.attach_btf_obj_fd = Some(btf_fd);
+        self.data.attach_prog_fd = Some(program);
+        self.data.attach_btf_id = Some(btf_id);
+        load_program(BPF_PROG_TYPE_EXT, &mut self.data)
+    }
+
+    /// Attaches the extension.
+    ///
+    /// Attaches the extension to the program and function name specified at load time,
+    /// effectively replacing the original target function.
+    ///
+    /// The returned value can be used to detach the extension and restore the
+    /// original function, see [Extension::detach].
+    pub fn attach(&mut self) -> Result<ExtensionLinkId, ProgramError> {
+        let prog_fd = self.fd()?;
+        let prog_fd = prog_fd.as_fd();
+        let target_fd = self
+            .data
+            .attach_prog_fd
+            .as_ref()
+            .ok_or(ProgramError::NotLoaded)?;
+        let target_fd = target_fd.as_fd();
+        let btf_id = self.data.attach_btf_id.ok_or(ProgramError::NotLoaded)?;
+        // the attach type must be set as 0, which is bpf_attach_type::BPF_CGROUP_INET_INGRESS
+        let link_fd = bpf_link_create(
+            prog_fd,
+            LinkTarget::Fd(target_fd),
+            BPF_CGROUP_INET_INGRESS,
+            Some(btf_id),
+            0,
+        )
+        .map_err(|(_, io_error)| SyscallError {
+            call: "bpf_link_create",
+            io_error,
+        })?;
+        self.data
+            .links
+            .insert(ExtensionLink::new(FdLink::new(link_fd)))
+    }
+
+    /// Attaches the extension to another program.
+    ///
+    /// Attaches the extension to a program and/or function other than the one provided
+    /// at load time. You may only attach to another program/function if the BTF
+    /// type signature is identical to that which was verified on load. Attempting to
+    /// attach to an invalid program/function will result in an error.
+    ///
+    /// Once attached, the extension effectively replaces the original target function.
+    ///
+    /// The returned value can be used to detach the extension and restore the
+    /// original function, see [Extension::detach].
+    pub fn attach_to_program(
+        &mut self,
+        program: &ProgramFd,
+        func_name: &str,
+    ) -> Result<ExtensionLinkId, ProgramError> {
+        let target_fd = program.as_fd();
+        let (_, btf_id) = get_btf_info(target_fd, func_name)?;
+        let prog_fd = self.fd()?;
+        let prog_fd = prog_fd.as_fd();
+        // the attach type must be set as 0, which is bpf_attach_type::BPF_CGROUP_INET_INGRESS
+        let link_fd = bpf_link_create(
+            prog_fd,
+            LinkTarget::Fd(target_fd),
+            BPF_CGROUP_INET_INGRESS,
+            Some(btf_id),
+            0,
+        )
+        .map_err(|(_, io_error)| SyscallError {
+            call: "bpf_link_create",
+            io_error,
+        })?;
+        self.data
+            .links
+            .insert(ExtensionLink::new(FdLink::new(link_fd)))
+    }
+
+    /// Detaches the extension.
+    ///
+    /// Detaching restores the original code overridden by the extension program.
+    /// See [Extension::attach].
+    pub fn detach(&mut self, link_id: ExtensionLinkId) -> Result<(), ProgramError> {
+        self.data.links.remove(link_id)
+    }
+
+    /// Takes ownership of the link referenced by the provided link_id.
+    ///
+    /// The link will be detached on `Drop` and the caller is now responsible
+    /// for managing its lifetime.
+    pub fn take_link(&mut self, link_id: ExtensionLinkId) -> Result<ExtensionLink, ProgramError> {
+        self.data.take_link(link_id)
+    }
+}
+
+/// Retrieves the FD of the BTF object for the provided `prog_fd` and the BTF ID of the function
+/// with the name `func_name` within that BTF object.
+fn get_btf_info(prog_fd: BorrowedFd<'_>, func_name: &str) -> Result<(OwnedFd, u32), ProgramError> {
+    // retrieve program information
+    let info = sys::bpf_prog_get_info_by_fd(prog_fd, &mut [])?;
+
+    // btf_id refers to the ID of the program btf that was loaded with bpf(BPF_BTF_LOAD)
+    if info.btf_id == 0 {
+        return Err(ProgramError::ExtensionError(ExtensionError::NoBTF));
+    }
+
+    // the bpf fd of the BTF object
+    let btf_fd = sys::bpf_btf_get_fd_by_id(info.btf_id)?;
+
+    // we need to read the btf bytes into a buffer but we don't know the size ahead of time.
+    // assume 4kb. if this is too small we can resize based on the size obtained in the response.
+    let mut buf = vec![0u8; 4096];
+    loop {
+        let info = sys::btf_obj_get_info_by_fd(btf_fd.as_fd(), &mut buf)?;
+        let btf_size = info.btf_size as usize;
+        if btf_size > buf.len() {
+            buf.resize(btf_size, 0u8);
+            continue;
+        }
+        buf.truncate(btf_size);
+        break;
+    }
+
+    let btf = Btf::parse(&buf, Endianness::default()).map_err(ProgramError::Btf)?;
+
+    let btf_id = btf
+        .id_by_type_name_kind(func_name, BtfKind::Func)
+        .map_err(ProgramError::Btf)?;
+
+    Ok((btf_fd, btf_id))
+}
+
+define_link_wrapper!(
+    /// The link used by [Extension] programs.
+    ExtensionLink,
+    /// The type returned by [Extension::attach]. Can be passed to [Extension::detach].
+    ExtensionLinkId,
+    FdLink,
+    FdLinkId
+);

+ 177 - 0
aya/src/programs/kprobe.rs

@@ -0,0 +1,177 @@
+use std::{
+    ffi::OsStr,
+    io,
+    os::fd::AsFd,
+    path::{Path, PathBuf},
+    vec::Vec,
+};
+
+use aya_obj::{
+    generated::{bpf_insn, bpf_link_type, bpf_prog_type::BPF_PROG_TYPE_KPROBE},
+    obj,
+};
+use thiserror::Error;
+
+use crate::{
+    programs::{
+        links::{define_link_wrapper, FdLink, LinkError},
+        load_program,
+        perf_attach::{PerfLinkIdInner, PerfLinkInner},
+        probe::{attach, ProbeKind},
+        ProgramData, ProgramError,
+    },
+    sys::bpf_link_get_info_by_fd,
+    VerifierLogLevel,
+};
+
+define_link_wrapper!(
+    /// The link used by [KProbe] programs.
+    KProbeLink,
+    /// The type returned by [KProbe::attach]. Can be passed to [KProbe::detach].
+    KProbeLinkId,
+    PerfLinkInner,
+    PerfLinkIdInner
+);
+
+/// The type returned when attaching a [`KProbe`] fails.
+#[derive(Debug, Error)]
+pub enum KProbeError {
+    /// Error detaching from debugfs
+    #[error("`{filename}`")]
+    FileError {
+        /// The file name
+        filename: PathBuf,
+        /// The [`io::Error`] returned from the file operation
+        #[source]
+        io_error: io::Error,
+    },
+}
+
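+/// A kernel probe program.
+///
+/// Kernel probes run when the kernel function they are attached to is entered
+/// (`kprobe`) or returns (`kretprobe`).
+///
+/// # Example
+///
+/// A minimal sketch of loading and attaching a kprobe; the program name
+/// `intercept_wakeups` and the target function `try_to_wake_up` are purely
+/// illustrative:
+///
+/// ```no_run
+/// # let mut bpf = aya::Ebpf::load(&[])?;
+/// use aya::programs::KProbe;
+///
+/// let program: &mut KProbe = bpf.program_mut("intercept_wakeups").unwrap().try_into()?;
+/// program.load()?;
+/// program.attach("try_to_wake_up", 0)?;
+/// # Ok::<(), aya::EbpfError>(())
+/// ```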
+#[derive(Debug)]
+#[doc(alias = "BPF_PROG_TYPE_KPROBE")]
+pub struct KProbe {
+    pub(crate) data: ProgramData<KProbeLink>,
+    pub(crate) kind: ProbeKind,
+}
+
+impl KProbe {
+    /// Loads the program inside the kernel.
+    pub fn load(&mut self) -> Result<(), ProgramError> {
+        load_program(BPF_PROG_TYPE_KPROBE, &mut self.data)
+    }
+
+    /// Returns the name of the program.
+    pub fn name(&self) -> Option<String> {
+        self.data.name.clone()
+    }
+
+    /// Returns the instructions of the program.
+    #[allow(unused)]
+    pub fn inst(&mut self) -> Result<Vec<bpf_insn>, ProgramError> {
+        let ProgramData {
+            name,
+            obj,
+            fd,
+            links: _,
+            expected_attach_type,
+            attach_btf_obj_fd,
+            attach_btf_id,
+            attach_prog_fd,
+            btf_fd,
+            verifier_log_level,
+            path: _,
+            flags,
+        } = &self.data;
+        let obj = obj.as_ref().unwrap();
+        let (
+            obj::Program {
+                license,
+                kernel_version,
+                ..
+            },
+            obj::Function {
+                instructions,
+                func_info,
+                line_info,
+                func_info_rec_size,
+                line_info_rec_size,
+                ..
+            },
+        ) = obj;
+        Ok(instructions.clone())
+    }
+
+    /// Returns `KProbe` if the program is a `kprobe`, or `KRetProbe` if the
+    /// program is a `kretprobe`.
+    pub fn kind(&self) -> ProbeKind {
+        self.kind
+    }
+
+    /// Attaches the program.
+    ///
+    /// Attaches the probe to the given function name inside the kernel. If
+    /// `offset` is non-zero, it is added to the address of the target
+    /// function.
+    ///
+    /// If the program is a `kprobe`, it is attached to the *start* address of the target function.
+    /// Conversely, if the program is a `kretprobe`, it is attached to the return address of the
+    /// target function.
+    ///
+    /// The returned value can be used to detach from the given function, see [KProbe::detach].
+    pub fn attach<T: AsRef<OsStr>>(
+        &mut self,
+        fn_name: T,
+        offset: u64,
+    ) -> Result<KProbeLinkId, ProgramError> {
+        attach(&mut self.data, self.kind, fn_name.as_ref(), offset, None)
+    }
+
+    /// Detaches the program.
+    ///
+    /// See [KProbe::attach].
+    pub fn detach(&mut self, link_id: KProbeLinkId) -> Result<(), ProgramError> {
+        self.data.links.remove(link_id)
+    }
+
+    /// Takes ownership of the link referenced by the provided link_id.
+    ///
+    /// The link will be detached on `Drop` and the caller is now responsible
+    /// for managing its lifetime.
+    pub fn take_link(&mut self, link_id: KProbeLinkId) -> Result<KProbeLink, ProgramError> {
+        // self.data.take_link(link_id)
+        unimplemented!("take_link")
+    }
+
+    /// Creates a program from a pinned entry on a bpffs.
+    ///
+    /// Existing links will not be populated. To work with existing links you should use [`crate::programs::links::PinnedLink`].
+    ///
+    /// On drop, any managed links are detached and the program is unloaded. This will not result in
+    /// the program being unloaded from the kernel if it is still pinned.
+    pub fn from_pin<P: AsRef<Path>>(path: P, kind: ProbeKind) -> Result<Self, ProgramError> {
+        let data = ProgramData::from_pinned_path(path, VerifierLogLevel::default())?;
+        Ok(Self { data, kind })
+    }
+}
+
+impl TryFrom<KProbeLink> for FdLink {
+    type Error = LinkError;
+
+    fn try_from(value: KProbeLink) -> Result<Self, Self::Error> {
+        if let PerfLinkInner::FdLink(fd) = value.into_inner() {
+            Ok(fd)
+        } else {
+            Err(LinkError::InvalidLink)
+        }
+    }
+}
+
+impl TryFrom<FdLink> for KProbeLink {
+    type Error = LinkError;
+
+    fn try_from(fd_link: FdLink) -> Result<Self, Self::Error> {
+        let info = bpf_link_get_info_by_fd(fd_link.fd.as_fd())?;
+        if info.type_ == (bpf_link_type::BPF_LINK_TYPE_KPROBE_MULTI as u32) {
+            return Ok(Self::new(PerfLinkInner::FdLink(fd_link)));
+        }
+        Err(LinkError::InvalidLink)
+    }
+}

+ 290 - 0
aya/src/programs/links.rs

@@ -0,0 +1,290 @@
+use std::{
+    collections::{hash_map::Entry, HashMap},
+    ffi::CString,
+    io,
+    os::fd::{AsFd, AsRawFd, OwnedFd, RawFd},
+    path::{Path, PathBuf},
+};
+
+use thiserror::Error;
+
+/// A Link.
+pub trait Link: core::fmt::Debug + 'static {
+    /// Unique Id
+    type Id: core::fmt::Debug + core::hash::Hash + Eq + PartialEq;
+
+    /// Returns the link id
+    fn id(&self) -> Self::Id;
+
+    /// Detaches the link.
+    fn detach(self) -> Result<(), ProgramError>;
+}
+
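+/// Keeps track of the links attached by a program; any links still present are
+/// detached when the map is dropped.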
+#[derive(Debug)]
+pub(crate) struct LinkMap<T: Link> {
+    links: HashMap<T::Id, T>,
+}
+
+impl<T: Link> LinkMap<T> {
+    pub(crate) fn new() -> Self {
+        Self {
+            links: HashMap::new(),
+        }
+    }
+
+    pub(crate) fn insert(&mut self, link: T) -> Result<T::Id, ProgramError> {
+        let id = link.id();
+
+        match self.links.entry(link.id()) {
+            Entry::Occupied(_) => return Err(ProgramError::AlreadyAttached),
+            Entry::Vacant(e) => e.insert(link),
+        };
+
+        Ok(id)
+    }
+
+    pub(crate) fn remove(&mut self, link_id: T::Id) -> Result<(), ProgramError> {
+        self.links
+            .remove(&link_id)
+            .ok_or(ProgramError::NotAttached)?
+            .detach()
+    }
+
+    pub(crate) fn remove_all(&mut self) -> Result<(), ProgramError> {
+        for (_, link) in self.links.drain() {
+            link.detach()?;
+        }
+        Ok(())
+    }
+
+    pub(crate) fn forget(&mut self, link_id: T::Id) -> Result<T, ProgramError> {
+        self.links.remove(&link_id).ok_or(ProgramError::NotAttached)
+    }
+}
+
+impl<T: Link> Drop for LinkMap<T> {
+    fn drop(&mut self) {
+        let _ = self.remove_all();
+    }
+}
+
+/// The identifier of an `FdLink`.
+#[derive(Debug, Hash, Eq, PartialEq)]
+pub struct FdLinkId(pub(crate) RawFd);
+
+/// A file descriptor link.
+///
+/// Fd links are returned directly when attaching some program types (for
+/// instance [`crate::programs::cgroup_skb::CgroupSkb`]), or can be obtained by
+/// converting other link types (see the `TryFrom` implementations).
+///
+/// An important property of fd links is that they can be pinned. Pinning
+/// can be used to keep a link attached "in the background" even after the program
+/// that has created the link terminates.
+///
+/// # Example
+///
+/// ```no_run
+/// # let mut bpf = Ebpf::load_file("ebpf_programs.o")?;
+/// use aya::{Ebpf, programs::{links::FdLink, KProbe}};
+///
+/// let program: &mut KProbe = bpf.program_mut("intercept_wakeups").unwrap().try_into()?;
+/// program.load()?;
+/// let link_id = program.attach("try_to_wake_up", 0)?;
+/// let link = program.take_link(link_id).unwrap();
+/// let fd_link: FdLink = link.try_into().unwrap();
+/// fd_link.pin("/sys/fs/bpf/intercept_wakeups_link").unwrap();
+///
+/// # Ok::<(), aya::EbpfError>(())
+/// ```
+#[derive(Debug)]
+pub struct FdLink {
+    pub(crate) fd: OwnedFd,
+}
+impl FdLink {
+    pub(crate) fn new(fd: OwnedFd) -> Self {
+        Self { fd }
+    }
+    /// Pins the link to a BPF file system.
+    ///
+    /// When a link is pinned it will remain attached even after the link instance is dropped,
+    /// and will only be detached once the pinned file is removed. To unpin, see [`PinnedLink::unpin()`].
+    ///
+    /// The parent directories in the provided path must already exist before calling this method,
+    /// and must be on a BPF file system (bpffs).
+    ///
+    /// # Example
+    /// ```no_run
+    /// # use aya::programs::{links::FdLink, Extension};
+    /// # use std::convert::TryInto;
+    /// # #[derive(thiserror::Error, Debug)]
+    /// # enum Error {
+    /// #     #[error(transparent)]
+    /// #     Ebpf(#[from] aya::EbpfError),
+    /// #     #[error(transparent)]
+    /// #     Pin(#[from] aya::pin::PinError),
+    /// #     #[error(transparent)]
+    /// #     Program(#[from] aya::programs::ProgramError)
+    /// # }
+    /// # let mut bpf = aya::Ebpf::load(&[])?;
+    /// # let prog: &mut Extension = bpf.program_mut("example").unwrap().try_into()?;
+    /// let link_id = prog.attach()?;
+    /// let owned_link = prog.take_link(link_id)?;
+    /// let fd_link: FdLink = owned_link.into();
+    /// let pinned_link = fd_link.pin("/sys/fs/bpf/example")?;
+    /// # Ok::<(), Error>(())
+    /// ```
+    pub fn pin<P: AsRef<Path>>(self, path: P) -> Result<PinnedLink, PinError> {
+        use std::os::unix::ffi::OsStrExt as _;
+
+        let path = path.as_ref();
+        let path_string = CString::new(path.as_os_str().as_bytes()).map_err(|error| {
+            PinError::InvalidPinPath {
+                path: path.into(),
+                error,
+            }
+        })?;
+        bpf_pin_object(self.fd.as_fd(), &path_string).map_err(|(_, io_error)| SyscallError {
+            call: "BPF_OBJ_PIN",
+            io_error,
+        })?;
+        Ok(PinnedLink::new(path.into(), self))
+    }
+}
+
+impl Link for FdLink {
+    type Id = FdLinkId;
+
+    fn id(&self) -> Self::Id {
+        FdLinkId(self.fd.as_raw_fd())
+    }
+
+    fn detach(self) -> Result<(), ProgramError> {
+        // detach is a noop since it consumes self. once self is consumed, drop will be triggered
+        // and the link will be detached.
+        //
+        // Other links don't need to do this since they use define_link_wrapper!, but FdLink is a
+        // bit special in that it defines a custom ::new() so it can't use the macro.
+        Ok(())
+    }
+}
+
+#[derive(Error, Debug)]
+/// Errors from operations on links.
+pub enum LinkError {
+    /// Invalid link.
+    #[error("Invalid link")]
+    InvalidLink,
+    /// Syscall failed.
+    #[error(transparent)]
+    SyscallError(#[from] SyscallError),
+}
+
+/// A pinned file descriptor link.
+///
+/// This link has been pinned to the BPF filesystem. On drop, the file descriptor that backs
+/// this link will be closed. Whether or not the program remains attached is dependent
+/// on the presence of the file in BPFFS.
+#[derive(Debug)]
+pub struct PinnedLink {
+    inner: FdLink,
+    path: PathBuf,
+}
+
+impl PinnedLink {
+    fn new(path: PathBuf, link: FdLink) -> Self {
+        Self { inner: link, path }
+    }
+
+    /// Creates a [`crate::programs::links::PinnedLink`] from a valid path on bpffs.
+    pub fn from_pin<P: AsRef<Path>>(path: P) -> Result<Self, LinkError> {
+        use std::os::unix::ffi::OsStrExt as _;
+
+        // TODO: avoid this unwrap by adding a new error variant.
+        let path_string = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
+        let fd = bpf_get_object(&path_string).map_err(|(_, io_error)| {
+            LinkError::SyscallError(SyscallError {
+                call: "BPF_OBJ_GET",
+                io_error,
+            })
+        })?;
+        Ok(Self::new(path.as_ref().to_path_buf(), FdLink::new(fd)))
+    }
+
+    /// Removes the pinned link from the filesystem and returns an [`FdLink`].
+    pub fn unpin(self) -> Result<FdLink, io::Error> {
+        std::fs::remove_file(self.path)?;
+        Ok(self.inner)
+    }
+}
+
+macro_rules! define_link_wrapper {
+    (#[$doc1:meta] $wrapper:ident, #[$doc2:meta] $wrapper_id:ident, $base:ident, $base_id:ident) => {
+        #[$doc2]
+        #[derive(Debug, Hash, Eq, PartialEq)]
+        pub struct $wrapper_id($base_id);
+
+        #[$doc1]
+        #[derive(Debug)]
+        pub struct $wrapper(Option<$base>);
+
+        #[allow(dead_code)]
+        // allow dead code since currently XDP is the only consumer of inner and
+        // into_inner
+        impl $wrapper {
+            fn new(base: $base) -> $wrapper {
+                $wrapper(Some(base))
+            }
+
+            fn inner(&self) -> &$base {
+                self.0.as_ref().unwrap()
+            }
+
+            fn into_inner(mut self) -> $base {
+                self.0.take().unwrap()
+            }
+        }
+
+        impl Drop for $wrapper {
+            fn drop(&mut self) {
+                use crate::programs::links::Link;
+
+                if let Some(base) = self.0.take() {
+                    let _ = base.detach();
+                }
+            }
+        }
+
+        impl $crate::programs::Link for $wrapper {
+            type Id = $wrapper_id;
+
+            fn id(&self) -> Self::Id {
+                $wrapper_id(self.0.as_ref().unwrap().id())
+            }
+
+            fn detach(mut self) -> Result<(), ProgramError> {
+                self.0.take().unwrap().detach()
+            }
+        }
+
+        impl From<$base> for $wrapper {
+            fn from(b: $base) -> $wrapper {
+                $wrapper(Some(b))
+            }
+        }
+
+        impl From<$wrapper> for $base {
+            fn from(mut w: $wrapper) -> $base {
+                w.0.take().unwrap()
+            }
+        }
+    };
+}
+
+pub(crate) use define_link_wrapper;
+
+use crate::{
+    pin::PinError,
+    programs::ProgramError,
+    sys::{bpf_get_object, bpf_pin_object, SyscallError},
+};

+ 710 - 0
aya/src/programs/mod.rs

@@ -0,0 +1,710 @@
+pub mod extension;
+pub mod kprobe;
+pub mod links;
+mod perf_attach;
+pub mod probe;
+pub mod uprobe;
+mod utils;
+
+use core::num::NonZeroU32;
+use std::{
+    ffi::CString,
+    io,
+    os::fd::{AsFd, BorrowedFd, OwnedFd},
+    path::{Path, PathBuf},
+    string::String,
+    sync::Arc,
+    vec,
+    vec::Vec,
+};
+
+use aya_obj::{
+    btf::BtfError,
+    generated::{bpf_attach_type, bpf_prog_info, bpf_prog_type},
+    obj, VerifierLog,
+};
+use thiserror::Error;
+
+pub use crate::programs::{
+    extension::{Extension, ExtensionError},
+    kprobe::{KProbe, KProbeError},
+    uprobe::{UProbe, UProbeError},
+};
+use crate::{
+    maps::MapError,
+    pin::PinError,
+    programs::{
+        links::{Link, LinkMap},
+        utils::get_fdinfo,
+    },
+    sys::{
+        bpf_btf_get_fd_by_id, bpf_get_object, bpf_load_program, bpf_prog_get_fd_by_id,
+        bpf_prog_get_info_by_fd, iter_prog_ids, retry_with_verifier_logs, EbpfLoadProgramAttrs,
+        SyscallError,
+    },
+    util::{bytes_of_bpf_name, KernelVersion},
+    VerifierLogLevel,
+};
+
+/// Error type returned when working with programs.
+#[derive(Debug, Error)]
+pub enum ProgramError {
+    /// The program is already loaded.
+    #[error("the program is already loaded")]
+    AlreadyLoaded,
+
+    /// The program is not loaded.
+    #[error("the program is not loaded")]
+    NotLoaded,
+
+    /// The program is already attached.
+    #[error("the program was already attached")]
+    AlreadyAttached,
+
+    /// The program is not attached.
+    #[error("the program is not attached")]
+    NotAttached,
+
+    /// Loading the program failed.
+    #[error("the BPF_PROG_LOAD syscall failed. Verifier output: {verifier_log}")]
+    LoadError {
+        /// The [`io::Error`] returned by the `BPF_PROG_LOAD` syscall.
+        #[source]
+        io_error: io::Error,
+        /// The error log produced by the kernel verifier.
+        verifier_log: VerifierLog,
+    },
+
+    /// A syscall failed.
+    #[error(transparent)]
+    SyscallError(#[from] SyscallError),
+
+    /// The network interface does not exist.
+    #[error("unknown network interface {name}")]
+    UnknownInterface {
+        /// interface name
+        name: String,
+    },
+
+    /// The program is not of the expected type.
+    #[error("unexpected program type")]
+    UnexpectedProgramType,
+
+    /// A map error occurred while loading or attaching a program.
+    #[error(transparent)]
+    MapError(#[from] MapError),
+
+    /// An error occurred while working with a [`KProbe`].
+    #[error(transparent)]
+    KProbeError(#[from] KProbeError),
+
+    /// An error occurred while working with an [`UProbe`].
+    #[error(transparent)]
+    UProbeError(#[from] UProbeError),
+
+    // /// An error occurred while working with a [`TracePoint`].
+    // #[error(transparent)]
+    // TracePointError(#[from] TracePointError),
+    //
+    // /// An error occurred while working with a [`SocketFilter`].
+    // #[error(transparent)]
+    // SocketFilterError(#[from] SocketFilterError),
+    //
+    // /// An error occurred while working with an [`Xdp`] program.
+    // #[error(transparent)]
+    // XdpError(#[from] XdpError),
+    //
+    // /// An error occurred while working with a TC program.
+    // #[error(transparent)]
+    // TcError(#[from] TcError),
+    /// An error occurred while working with an [`Extension`] program.
+    #[error(transparent)]
+    ExtensionError(#[from] ExtensionError),
+
+    /// An error occurred while working with BTF.
+    #[error(transparent)]
+    Btf(#[from] BtfError),
+
+    /// The program name is invalid.
+    #[error("the program name `{name}` is invalid")]
+    InvalidName {
+        /// program name
+        name: String,
+    },
+
+    /// An error occurred while working with IO.
+    #[error(transparent)]
+    IOError(#[from] io::Error),
+}
+
+/// A [`Program`] file descriptor.
+#[derive(Debug)]
+pub struct ProgramFd(OwnedFd);
+
+impl ProgramFd {
+    /// Creates a new instance that shares the same underlying file description as [`self`].
+    pub fn try_clone(&self) -> io::Result<Self> {
+        let Self(inner) = self;
+        let inner = inner.try_clone()?;
+        Ok(Self(inner))
+    }
+}
+
+impl AsFd for ProgramFd {
+    fn as_fd(&self) -> BorrowedFd<'_> {
+        let Self(fd) = self;
+        fd.as_fd()
+    }
+}
+
+macro_rules! impl_fd {
+    ($($struct_name:ident),+ $(,)?) => {
+        $(
+            impl $struct_name {
+                /// Returns the file descriptor of this Program.
+                pub fn fd(&self) -> Result<&ProgramFd, ProgramError> {
+                    self.data.fd()
+                }
+            }
+        )+
+    }
+}
+impl_fd!(KProbe, Extension,);
+
+macro_rules! impl_program_unload {
+    ($($struct_name:ident),+ $(,)?) => {
+        $(
+            impl $struct_name {
+                /// Unloads the program from the kernel.
+                ///
+                /// Links will be detached before unloading the program.  Note
+                /// that owned links obtained using `take_link()` will not be
+                /// detached.
+                pub fn unload(&mut self) -> Result<(), ProgramError> {
+                    info!("Unloading program for {:?}", self.name());
+                    unload_program(&mut self.data)
+                }
+            }
+
+            impl Drop for $struct_name {
+                fn drop(&mut self) {
+                    let _ = self.unload();
+                }
+            }
+        )+
+    }
+}
+
+impl_program_unload!(KProbe, Extension,);
+
+macro_rules! impl_program_pin{
+    ($($struct_name:ident),+ $(,)?) => {
+        $(
+            impl $struct_name {
+                /// Pins the program to a BPF filesystem.
+                ///
+                /// When a BPF object is pinned to a BPF filesystem it will remain loaded after
+                /// Aya has unloaded the program.
+                /// To remove the program, the file on the BPF filesystem must be removed.
+                /// Any directories in the path provided should have been created by the caller.
+                pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<(), PinError> {
+                    // self.data.path = Some(path.as_ref().to_path_buf());
+                    // pin_program(&self.data, path)
+                    log::error!("Pinning a program is not yet implemented.");
+                    unimplemented!("Pins the program to a BPF filesystem.")
+                }
+
+                /// Removes the pinned link from the filesystem.
+                pub fn unpin(self) -> Result<(), io::Error> {
+                    // if let Some(path) = self.data.path.take() {
+                    //     std::fs::remove_file(path)?;
+                    // }
+                    // Ok(())
+                    unimplemented!("Removes the pinned link from the filesystem.")
+                }
+            }
+        )+
+    }
+}
+
+impl_program_pin!(KProbe, Extension,);
+
+/// Returns information about a loaded program with the [`ProgramInfo`] structure.
+///
+/// This information is populated at load time by the kernel and can be used
+/// to correlate a given [`Program`] to its corresponding [`ProgramInfo`]
+/// metadata.
+macro_rules! impl_info {
+    ($($struct_name:ident),+ $(,)?) => {
+        $(
+            impl $struct_name {
+                /// Returns information about this program as reported by the kernel.
+                pub fn info(&self) -> Result<ProgramInfo, ProgramError> {
+                    let ProgramFd(fd) = self.fd()?;
+
+                    ProgramInfo::new_from_fd(fd.as_fd())
+                }
+            }
+        )+
+    }
+}
+
+impl_info!(KProbe, Extension,);
+
+macro_rules! impl_from_pin {
+    ($($struct_name:ident),+ $(,)?) => {
+        $(
+            impl $struct_name {
+                /// Creates a program from a pinned entry on a bpffs.
+                ///
+                /// Existing links will not be populated. To work with existing links you should use [`crate::programs::links::PinnedLink`].
+                ///
+                /// On drop, any managed links are detached and the program is unloaded. This will not result in
+                /// the program being unloaded from the kernel if it is still pinned.
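+                ///
+                /// # Example
+                ///
+                /// A minimal sketch (the pin path is illustrative and assumes an
+                /// [`Extension`] program was previously pinned there):
+                ///
+                /// ```no_run
+                /// use aya::programs::Extension;
+                ///
+                /// # fn example() -> Result<(), aya::programs::ProgramError> {
+                /// let _ext = Extension::from_pin("/sys/fs/bpf/my_extension")?;
+                /// # Ok(())
+                /// # }
+                /// ```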
+                pub fn from_pin<P: AsRef<Path>>(path: P) -> Result<Self, ProgramError> {
+                    let data = ProgramData::from_pinned_path(path, VerifierLogLevel::default())?;
+                    Ok(Self { data })
+                }
+            }
+        )+
+    }
+}
+
+// Use impl_from_pin if the program doesn't require additional data
+impl_from_pin!(Extension,);
+
+macro_rules! impl_try_from_program {
+    ($($ty:ident),+ $(,)?) => {
+        $(
+            impl<'a> TryFrom<&'a Program> for &'a $ty {
+                type Error = ProgramError;
+
+                fn try_from(program: &'a Program) -> Result<&'a $ty, ProgramError> {
+                    match program {
+                        Program::$ty(p) => Ok(p),
+                        _ => Err(ProgramError::UnexpectedProgramType),
+                    }
+                }
+            }
+
+            impl<'a> TryFrom<&'a mut Program> for &'a mut $ty {
+                type Error = ProgramError;
+
+                fn try_from(program: &'a mut Program) -> Result<&'a mut $ty, ProgramError> {
+                    match program {
+                        Program::$ty(p) => Ok(p),
+                        _ => Err(ProgramError::UnexpectedProgramType),
+                    }
+                }
+            }
+        )+
+    }
+}
+impl_try_from_program!(KProbe, Extension,);
+
+/// eBPF program type.
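+///
+/// # Example
+///
+/// A minimal sketch of narrowing the enum to a concrete program type (the
+/// `narrow` helper and its input are illustrative):
+///
+/// ```no_run
+/// use aya::programs::{KProbe, Program};
+///
+/// # fn narrow(prog: &Program) -> Result<(), aya::programs::ProgramError> {
+/// let kprobe: &KProbe = prog.try_into()?;
+/// println!("{:?}", kprobe.info()?.name_as_str());
+/// # Ok(())
+/// # }
+/// ```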
+#[derive(Debug)]
+pub enum Program {
+    /// A [`KProbe`] program
+    KProbe(KProbe),
+    /// A [`Extension`] program
+    Extension(Extension),
+}
+
+impl Program {
+    /// Returns the low level program type.
+    pub fn prog_type(&self) -> bpf_prog_type {
+        use aya_obj::generated::bpf_prog_type::*;
+        match self {
+            Self::KProbe(_) => BPF_PROG_TYPE_KPROBE,
+            Self::Extension(_) => BPF_PROG_TYPE_EXT,
+        }
+    }
+
+    /// Pin the program to the provided path
+    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<(), PinError> {
+        match self {
+            Self::KProbe(p) => p.pin(path),
+            Self::Extension(p) => p.pin(path),
+        }
+    }
+
+    /// Unloads the program from the kernel.
+    pub fn unload(self) -> Result<(), ProgramError> {
+        match self {
+            Self::KProbe(mut p) => p.unload(),
+            Self::Extension(mut p) => p.unload(),
+        }
+    }
+
+    /// Returns the file descriptor of a program.
+    ///
+    /// Can be used to add a program to a [`crate::maps::ProgramArray`] or attach an [`Extension`] program.
+    pub fn fd(&self) -> Result<&ProgramFd, ProgramError> {
+        match self {
+            Self::KProbe(p) => p.fd(),
+            Self::Extension(p) => p.fd(),
+        }
+    }
+    /// Returns information about a loaded program with the [`ProgramInfo`] structure.
+    ///
+    /// This information is populated at load time by the kernel and can be used
+    /// to get kernel details for a given [`Program`].
+    pub fn info(&self) -> Result<ProgramInfo, ProgramError> {
+        match self {
+            Self::KProbe(p) => p.info(),
+            Self::Extension(p) => p.info(),
+        }
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct ProgramData<T: Link> {
+    pub(crate) name: Option<String>,
+    pub(crate) obj: Option<(obj::Program, obj::Function)>,
+    pub(crate) fd: Option<ProgramFd>,
+    pub(crate) links: LinkMap<T>,
+    pub(crate) expected_attach_type: Option<bpf_attach_type>,
+    pub(crate) attach_btf_obj_fd: Option<OwnedFd>,
+    pub(crate) attach_btf_id: Option<u32>,
+    pub(crate) attach_prog_fd: Option<ProgramFd>,
+    pub(crate) btf_fd: Option<Arc<OwnedFd>>,
+    pub(crate) verifier_log_level: VerifierLogLevel,
+    pub(crate) path: Option<PathBuf>,
+    pub(crate) flags: u32,
+}
+
+impl<T: Link> ProgramData<T> {
+    pub(crate) fn new(
+        name: Option<String>,
+        obj: (obj::Program, obj::Function),
+        btf_fd: Option<Arc<OwnedFd>>,
+        verifier_log_level: VerifierLogLevel,
+    ) -> Self {
+        Self {
+            name,
+            obj: Some(obj),
+            fd: None,
+            links: LinkMap::new(),
+            expected_attach_type: None,
+            attach_btf_obj_fd: None,
+            attach_btf_id: None,
+            attach_prog_fd: None,
+            btf_fd,
+            verifier_log_level,
+            path: None,
+            flags: 0,
+        }
+    }
+    pub(crate) fn from_bpf_prog_info(
+        name: Option<String>,
+        fd: OwnedFd,
+        path: &Path,
+        info: bpf_prog_info,
+        verifier_log_level: VerifierLogLevel,
+    ) -> Result<Self, ProgramError> {
+        let attach_btf_id = if info.attach_btf_id > 0 {
+            Some(info.attach_btf_id)
+        } else {
+            None
+        };
+        let attach_btf_obj_fd = (info.attach_btf_obj_id != 0)
+            .then(|| bpf_btf_get_fd_by_id(info.attach_btf_obj_id))
+            .transpose()?;
+
+        Ok(Self {
+            name,
+            obj: None,
+            fd: Some(ProgramFd(fd)),
+            links: LinkMap::new(),
+            expected_attach_type: None,
+            attach_btf_obj_fd,
+            attach_btf_id,
+            attach_prog_fd: None,
+            btf_fd: None,
+            verifier_log_level,
+            path: Some(path.to_path_buf()),
+            flags: 0,
+        })
+    }
+
+    pub(crate) fn from_pinned_path<P: AsRef<Path>>(
+        path: P,
+        verifier_log_level: VerifierLogLevel,
+    ) -> Result<Self, ProgramError> {
+        use std::os::unix::ffi::OsStrExt as _;
+
+        // TODO: avoid this unwrap by adding a new error variant.
+        let path_string = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
+        let fd = bpf_get_object(&path_string).map_err(|(_, io_error)| SyscallError {
+            call: "bpf_obj_get",
+            io_error,
+        })?;
+
+        let info = ProgramInfo::new_from_fd(fd.as_fd())?;
+        let name = info.name_as_str().map(|s| s.to_string());
+        Self::from_bpf_prog_info(name, fd, path.as_ref(), info.0, verifier_log_level)
+    }
+}
+
+impl<T: Link> ProgramData<T> {
+    fn fd(&self) -> Result<&ProgramFd, ProgramError> {
+        self.fd.as_ref().ok_or(ProgramError::NotLoaded)
+    }
+
+    pub(crate) fn take_link(&mut self, link_id: T::Id) -> Result<T, ProgramError> {
+        self.links.forget(link_id)
+    }
+}
+
+/// Provides information about a loaded program, like name, id and statistics
+#[derive(Debug)]
+pub struct ProgramInfo(bpf_prog_info);
+
+impl ProgramInfo {
+    fn new_from_fd(fd: BorrowedFd<'_>) -> Result<Self, ProgramError> {
+        let info = bpf_prog_get_info_by_fd(fd, &mut [])?;
+        Ok(Self(info))
+    }
+
+    /// The name of the program as provided when it was loaded. This is limited to 16 bytes.
+    pub fn name(&self) -> &[u8] {
+        bytes_of_bpf_name(&self.0.name)
+    }
+
+    /// The name of the program as a &str. If the name is not valid UTF-8, None is returned.
+    pub fn name_as_str(&self) -> Option<&str> {
+        core::str::from_utf8(self.name()).ok()
+    }
+
+    /// The id for this program. Each program has a unique id.
+    pub fn id(&self) -> u32 {
+        self.0.id
+    }
+
+    /// The program tag.
+    ///
+    /// The program tag is a SHA sum of the program's instructions which can be used as an
+    /// alternative to [`Self::id()`]. A program's id can vary every time it's loaded or unloaded, but the tag
+    /// will remain the same.
+    pub fn tag(&self) -> u64 {
+        u64::from_be_bytes(self.0.tag)
+    }
+
+    /// The program type as defined by the linux kernel enum
+    /// [`bpf_prog_type`](https://elixir.bootlin.com/linux/v6.4.4/source/include/uapi/linux/bpf.h#L948).
+    pub fn program_type(&self) -> u32 {
+        self.0.type_
+    }
+
+    /// Returns true if the program is defined with a GPL-compatible license.
+    pub fn gpl_compatible(&self) -> bool {
+        self.0.gpl_compatible() != 0
+    }
+
+    /// The ids of the maps used by the program.
+    pub fn map_ids(&self) -> Result<Vec<u32>, ProgramError> {
+        let ProgramFd(fd) = self.fd()?;
+        let mut map_ids = vec![0u32; self.0.nr_map_ids as usize];
+
+        bpf_prog_get_info_by_fd(fd.as_fd(), &mut map_ids)?;
+
+        Ok(map_ids)
+    }
+
+    /// The btf id for the program.
+    pub fn btf_id(&self) -> Option<NonZeroU32> {
+        NonZeroU32::new(self.0.btf_id)
+    }
+
+    /// The size in bytes of the program's translated eBPF bytecode, which is
+    /// the bytecode after it has been passed through the verifier where it was
+    /// possibly modified by the kernel.
+    pub fn size_translated(&self) -> u32 {
+        self.0.xlated_prog_len
+    }
+
+    /// The size in bytes of the program's JIT-compiled machine code.
+    pub fn size_jitted(&self) -> u32 {
+        self.0.jited_prog_len
+    }
+
+    /// How much memory in bytes has been allocated and locked for the program.
+    pub fn memory_locked(&self) -> Result<u32, ProgramError> {
+        get_fdinfo(self.fd()?.as_fd(), "memlock")
+    }
+
+    /// The number of verified instructions in the program.
+    ///
+    /// This may be less than the total number of instructions in the compiled
+    /// program due to dead code elimination in the verifier.
+    pub fn verified_instruction_count(&self) -> u32 {
+        self.0.verified_insns
+    }
+
+    // The time the program was loaded.
+    // pub fn loaded_at(&self) -> SystemTime {
+    //     boot_time() + Duration::from_nanos(self.0.load_time)
+    // }
+
+    /// Returns a file descriptor referencing the program.
+    ///
+    /// The returned file descriptor can be closed at any time and doing so does
+    /// not influence the life cycle of the program.
+    pub fn fd(&self) -> Result<ProgramFd, ProgramError> {
+        let Self(info) = self;
+        let fd = bpf_prog_get_fd_by_id(info.id)?;
+        Ok(ProgramFd(fd))
+    }
+
+    /// Loads a program from a pinned path in bpffs.
+    pub fn from_pin<P: AsRef<Path>>(path: P) -> Result<Self, ProgramError> {
+        // use std::os::unix::ffi::OsStrExt as _;
+
+        // TODO: avoid this unwrap by adding a new error variant.
+        // let path_string = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
+        // let fd = bpf_get_object(&path_string).map_err(|(_, io_error)| SyscallError {
+        //     call: "BPF_OBJ_GET",
+        //     io_error,
+        // })?;
+        //
+        // let info = bpf_prog_get_info_by_fd(fd.as_fd(), &mut [])?;
+        // Ok(Self(info))
+        unimplemented!("Loads a program from a pinned path in bpffs")
+    }
+}
+
+fn unload_program<T: Link>(data: &mut ProgramData<T>) -> Result<(), ProgramError> {
+    data.links.remove_all()?;
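+    // Taking the `ProgramFd` out and dropping it closes the underlying `OwnedFd`;
+    // the kernel frees the program once no other references (links, pins, other
+    // fds) keep it alive.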
+    data.fd
+        .take()
+        .ok_or(ProgramError::NotLoaded)
+        .map(|ProgramFd { .. }| ())
+}
+
+fn load_program<T: Link>(
+    prog_type: bpf_prog_type,
+    data: &mut ProgramData<T>,
+) -> Result<(), ProgramError> {
+    let ProgramData {
+        name,
+        obj,
+        fd,
+        links: _,
+        expected_attach_type,
+        attach_btf_obj_fd,
+        attach_btf_id,
+        attach_prog_fd,
+        btf_fd,
+        verifier_log_level,
+        path: _,
+        flags,
+    } = data;
+    if fd.is_some() {
+        return Err(ProgramError::AlreadyLoaded);
+    }
+    if obj.is_none() {
+        // This program was loaded from a pin in bpffs
+        return Err(ProgramError::AlreadyLoaded);
+    }
+    let obj = obj.as_ref().unwrap();
+    let (
+        obj::Program {
+            license,
+            kernel_version,
+            ..
+        },
+        obj::Function {
+            instructions,
+            func_info,
+            line_info,
+            func_info_rec_size,
+            line_info_rec_size,
+            ..
+        },
+    ) = obj;
+
+    let target_kernel_version =
+        kernel_version.unwrap_or_else(|| KernelVersion::current().unwrap().code());
+
+    let prog_name = if let Some(name) = name {
+        let mut name = name.clone();
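+        // BPF_OBJ_NAME_LEN is 16 bytes including the trailing NUL, so the name
+        // passed to the kernel is truncated to at most 15 bytes.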
+        if name.len() > 15 {
+            name.truncate(15);
+        }
+        let prog_name = CString::new(name.clone())
+            .map_err(|_| ProgramError::InvalidName { name: name.clone() })?;
+        Some(prog_name)
+    } else {
+        None
+    };
+
+    let attr = EbpfLoadProgramAttrs {
+        name: prog_name,
+        ty: prog_type,
+        insns: instructions,
+        license,
+        kernel_version: target_kernel_version,
+        expected_attach_type: *expected_attach_type,
+        prog_btf_fd: btf_fd.as_ref().map(|f| f.as_fd()),
+        attach_btf_obj_fd: attach_btf_obj_fd.as_ref().map(|fd| fd.as_fd()),
+        attach_btf_id: *attach_btf_id,
+        attach_prog_fd: attach_prog_fd.as_ref().map(|fd| fd.as_fd()),
+        func_info_rec_size: *func_info_rec_size,
+        func_info: func_info.clone(),
+        line_info_rec_size: *line_info_rec_size,
+        line_info: line_info.clone(),
+        flags: *flags,
+    };
+
+    let (ret, verifier_log) = retry_with_verifier_logs(10, |logger| {
+        bpf_load_program(&attr, logger, *verifier_log_level)
+    });
+
+    match ret {
+        Ok(prog_fd) => {
+            *fd = Some(ProgramFd(prog_fd));
+            Ok(())
+        }
+        Err((_, io_error)) => Err(ProgramError::LoadError {
+            io_error,
+            verifier_log,
+        }),
+    }
+}
+
+/// Returns an iterator over all loaded bpf programs.
+///
+/// This differs from [`crate::Ebpf::programs`] since it will return all programs
+/// listed on the host system and not only the programs of a specific [`crate::Ebpf`] instance.
+///
+/// # Example
+/// ```
+/// # use aya::programs::loaded_programs;
+///
+/// for p in loaded_programs() {
+///     match p {
+///         Ok(program) => println!("{}", String::from_utf8_lossy(program.name())),
+///         Err(e) => println!("Error iterating programs: {:?}", e),
+///     }
+/// }
+/// ```
+///
+/// # Errors
+///
+/// Returns [`ProgramError::SyscallError`] if any of the syscalls required to get the
+/// next program id, the program fd, or the [`ProgramInfo`] fails. In cases where
+/// iteration can't be performed, for example because the caller does not have the necessary privileges,
+/// a single item will be yielded containing the error that occurred.
+pub fn loaded_programs() -> impl Iterator<Item = Result<ProgramInfo, ProgramError>> {
+    iter_prog_ids()
+        .map(|id| {
+            let id = id?;
+            bpf_prog_get_fd_by_id(id)
+        })
+        .map(|fd| {
+            let fd = fd?;
+            bpf_prog_get_info_by_fd(fd.as_fd(), &mut [])
+        })
+        .map(|result| result.map(ProgramInfo).map_err(Into::into))
+}

+ 122 - 0
aya/src/programs/perf_attach.rs

@@ -0,0 +1,122 @@
+use std::os::fd::{AsFd, AsRawFd, BorrowedFd, OwnedFd, RawFd};
+
+use aya_obj::generated::bpf_attach_type::BPF_PERF_EVENT;
+
+use crate::{
+    bpf::{FEATURES, PERF_EVENT_IOC_DISABLE, PERF_EVENT_IOC_ENABLE, PERF_EVENT_IOC_SET_BPF},
+    programs::{
+        links::{FdLink, Link},
+        probe::{detach_debug_fs, ProbeEvent},
+        ProgramError,
+    },
+    sys::{bpf_link_create, perf_event::perf_event_ioctl, LinkTarget, SysResult, SyscallError},
+};
+
+#[derive(Debug, Hash, Eq, PartialEq)]
+pub(crate) enum PerfLinkIdInner {
+    FdLinkId(<FdLink as Link>::Id),
+    PerfLinkId(<PerfLink as Link>::Id),
+}
+
+#[derive(Debug)]
+pub(crate) enum PerfLinkInner {
+    FdLink(FdLink),
+    PerfLink(PerfLink),
+}
+
+impl Link for PerfLinkInner {
+    type Id = PerfLinkIdInner;
+
+    fn id(&self) -> Self::Id {
+        match self {
+            Self::FdLink(link) => PerfLinkIdInner::FdLinkId(link.id()),
+            Self::PerfLink(link) => PerfLinkIdInner::PerfLinkId(link.id()),
+        }
+    }
+
+    fn detach(self) -> Result<(), ProgramError> {
+        match self {
+            Self::FdLink(link) => link.detach(),
+            Self::PerfLink(link) => link.detach(),
+        }
+    }
+}
+
+/// The identifier of a PerfLink.
+#[derive(Debug, Hash, Eq, PartialEq)]
+pub struct PerfLinkId(RawFd);
+
+/// The attachment type of PerfEvent programs.
+#[derive(Debug)]
+pub struct PerfLink {
+    perf_fd: OwnedFd,
+    event: Option<ProbeEvent>,
+}
+
+impl Link for PerfLink {
+    type Id = PerfLinkId;
+
+    fn id(&self) -> Self::Id {
+        PerfLinkId(self.perf_fd.as_raw_fd())
+    }
+
+    fn detach(self) -> Result<(), ProgramError> {
+        let Self { perf_fd, event } = self;
+        let _: SysResult<_> = perf_event_ioctl(perf_fd.as_fd(), PERF_EVENT_IOC_DISABLE, 0);
+        info!(
+            "perf_link_detach: perf_fd: {:?}, event: {:?}",
+            perf_fd, event
+        );
+        if let Some(event) = event {
+            info!("perf_link_detach: detaching debugfs event: {:?}", event);
+            let _: Result<_, _> = detach_debug_fs(event);
+        }
+        Ok(())
+    }
+}
+
+pub(crate) fn perf_attach(
+    prog_fd: BorrowedFd<'_>,
+    fd: OwnedFd,
+) -> Result<PerfLinkInner, ProgramError> {
+    info!("perf_attach: prog_fd: {:?}, fd: {:?}", prog_fd, fd);
+    if FEATURES.bpf_perf_link() {
+        let link_fd = bpf_link_create(prog_fd, LinkTarget::Fd(fd.as_fd()), BPF_PERF_EVENT, None, 0)
+            .map_err(|(_, io_error)| SyscallError {
+                call: "bpf_link_create",
+                io_error,
+            })?;
+        Ok(PerfLinkInner::FdLink(FdLink::new(link_fd)))
+    } else {
+        perf_attach_either(prog_fd, fd, None)
+    }
+}
+
+pub(crate) fn perf_attach_debugfs(
+    prog_fd: BorrowedFd<'_>,
+    fd: OwnedFd,
+    event: ProbeEvent,
+) -> Result<PerfLinkInner, ProgramError> {
+    perf_attach_either(prog_fd, fd, Some(event))
+}
+
+fn perf_attach_either(
+    prog_fd: BorrowedFd<'_>,
+    fd: OwnedFd,
+    event: Option<ProbeEvent>,
+) -> Result<PerfLinkInner, ProgramError> {
+    perf_event_ioctl(fd.as_fd(), PERF_EVENT_IOC_SET_BPF, prog_fd.as_raw_fd()).map_err(
+        |(_, io_error)| SyscallError {
+            call: "PERF_EVENT_IOC_SET_BPF",
+            io_error,
+        },
+    )?;
+    perf_event_ioctl(fd.as_fd(), PERF_EVENT_IOC_ENABLE, 0).map_err(|(_, io_error)| {
+        SyscallError {
+            call: "PERF_EVENT_IOC_ENABLE",
+            io_error,
+        }
+    })?;
+
+    Ok(PerfLinkInner::PerfLink(PerfLink { perf_fd: fd, event }))
+}

+ 238 - 0
aya/src/programs/probe.rs

@@ -0,0 +1,238 @@
+use std::{
+    ffi::{OsStr, OsString},
+    format, fs,
+    fs::OpenOptions,
+    io,
+    io::Write,
+    os::fd::{AsFd, OwnedFd},
+    path::{Path, PathBuf},
+    string::String,
+};
+
+use libc::pid_t;
+
+use crate::{
+    programs::{
+        kprobe::KProbeError,
+        links::Link,
+        perf_attach::{perf_attach, PerfLinkInner},
+        uprobe::UProbeError,
+        utils::find_tracefs_path,
+        ProgramData, ProgramError,
+    },
+    sys::{perf_event::perf_event_open_probe, SyscallError},
+    util::KernelVersion,
+};
+
+/// Kind of probe program
+#[derive(Debug, Copy, Clone)]
+pub enum ProbeKind {
+    /// Kernel probe
+    KProbe,
+    /// Kernel return probe
+    KRetProbe,
+    /// User space probe
+    UProbe,
+    /// User space return probe
+    URetProbe,
+}
+
+impl ProbeKind {
+    fn pmu(&self) -> &'static str {
+        match *self {
+            Self::KProbe | Self::KRetProbe => "kprobe",
+            Self::UProbe | Self::URetProbe => "uprobe",
+        }
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct ProbeEvent {
+    kind: ProbeKind,
+    event_alias: String,
+}
+
+pub(crate) fn attach<T: Link + From<PerfLinkInner>>(
+    program_data: &mut ProgramData<T>,
+    kind: ProbeKind,
+    // NB: the meaning of this argument is different for kprobe/kretprobe and uprobe/uretprobe; in
+    // the kprobe case it is the name of the function to attach to, in the uprobe case it is a path
+    // to the binary or library.
+    //
+    // TODO: consider encoding the type and the argument in the [`ProbeKind`] enum instead of a
+    // separate argument.
+    fn_name: &OsStr,
+    offset: u64,
+    pid: Option<pid_t>,
+) -> Result<T::Id, ProgramError> {
+    // https://github.com/torvalds/linux/commit/e12f03d7031a977356e3d7b75a68c2185ff8d155
+    // Use debugfs to create probe
+    let prog_fd = program_data.fd()?;
+    let prog_fd = prog_fd.as_fd();
+    let link = if KernelVersion::current().unwrap() < KernelVersion::new(4, 17, 0) {
+        // let (fd, event_alias) = create_as_trace_point(kind, fn_name, offset, pid)?;
+        // perf_attach_debugfs(prog_fd, fd, ProbeEvent { kind, event_alias })
+        unimplemented!("The kernel version is too old to support perf events for probes")
+    } else {
+        let fd = create_as_probe(kind, fn_name, offset, pid)?;
+        perf_attach(prog_fd, fd)
+    }?;
+    program_data.links.insert(T::from(link))
+}
+
+fn create_as_probe(
+    kind: ProbeKind,
+    fn_name: &OsStr,
+    offset: u64,
+    pid: Option<pid_t>,
+) -> Result<OwnedFd, ProgramError> {
+    info!(
+        "create_as_probe: kind: {:?}, fn_name: {:?}, offset: {}, pid: {:?}",
+        kind, fn_name, offset, pid
+    );
+    use ProbeKind::*;
+
+    let perf_ty = match kind {
+        KProbe | KRetProbe => read_sys_fs_perf_type(kind.pmu())
+            .map_err(|(filename, io_error)| KProbeError::FileError { filename, io_error })?,
+        UProbe | URetProbe => read_sys_fs_perf_type(kind.pmu())
+            .map_err(|(filename, io_error)| UProbeError::FileError { filename, io_error })?,
+    };
+
+    let ret_bit = match kind {
+        KRetProbe => Some(
+            read_sys_fs_perf_ret_probe(kind.pmu())
+                .map_err(|(filename, io_error)| KProbeError::FileError { filename, io_error })?,
+        ),
+        URetProbe => Some(
+            read_sys_fs_perf_ret_probe(kind.pmu())
+                .map_err(|(filename, io_error)| UProbeError::FileError { filename, io_error })?,
+        ),
+        _ => None,
+    };
+
+    perf_event_open_probe(perf_ty, ret_bit, fn_name, offset, pid).map_err(|(_code, io_error)| {
+        SyscallError {
+            call: "perf_event_open",
+            io_error,
+        }
+        .into()
+    })
+}
+
+pub(crate) fn detach_debug_fs(event: ProbeEvent) -> Result<(), ProgramError> {
+    use ProbeKind::*;
+
+    let tracefs = find_tracefs_path()?;
+
+    let ProbeEvent {
+        kind,
+        event_alias: _,
+    } = &event;
+    let kind = *kind;
+    let result = delete_probe_event(tracefs, event);
+
+    result.map_err(|(filename, io_error)| match kind {
+        KProbe | KRetProbe => KProbeError::FileError { filename, io_error }.into(),
+        UProbe | URetProbe => UProbeError::FileError { filename, io_error }.into(),
+    })
+}
+
+fn delete_probe_event(tracefs: &Path, event: ProbeEvent) -> Result<(), (PathBuf, io::Error)> {
+    use std::os::unix::ffi::OsStrExt as _;
+
+    let ProbeEvent { kind, event_alias } = event;
+    let events_file_name = tracefs.join(format!("{}_events", kind.pmu()));
+
+    fs::read(&events_file_name)
+        .and_then(|events| {
+            let found = lines(&events).any(|line| {
+                let mut line = line.as_bytes();
+                // See [`create_probe_event`] and the documentation:
+                //
+                // https://docs.kernel.org/trace/kprobetrace.html
+                //
+                // https://docs.kernel.org/trace/uprobetracer.html
+                loop {
+                    match line.split_first() {
+                        None => break false,
+                        Some((b, rest)) => {
+                            line = rest;
+                            if *b == b'/' {
+                                break line.starts_with(event_alias.as_bytes());
+                            }
+                        }
+                    }
+                }
+            });
+
+            if found {
+                OpenOptions::new()
+                    .append(true)
+                    .open(&events_file_name)
+                    .and_then(|mut events_file| {
+                        let mut rm = OsString::new();
+                        rm.push("-:");
+                        rm.push(event_alias);
+                        rm.push("\n");
+
+                        events_file.write_all(rm.as_bytes())
+                    })
+            } else {
+                Ok(())
+            }
+        })
+        .map_err(|e| (events_file_name, e))
+}
+
+pub(crate) fn lines(bytes: &[u8]) -> impl Iterator<Item = &OsStr> {
+    use std::os::unix::ffi::OsStrExt as _;
+
+    bytes.as_ref().split(|b| b == &b'\n').map(|mut line| {
+        while let [stripped @ .., c] = line {
+            if c.is_ascii_whitespace() {
+                line = stripped;
+                continue;
+            }
+            break;
+        }
+        OsStr::from_bytes(line)
+    })
+}
+
+fn read_sys_fs_perf_type(pmu: &str) -> Result<u32, (PathBuf, io::Error)> {
+    // let file = format!("/sys/bus/event_source/devices/{}/type", pmu);
+    // let res = unsafe { extern_read_sys_fs_perf_type(&file) }.unwrap();
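+    // NOTE: the PMU type is currently hard-coded for the target environment
+    // instead of being read from /sys/bus/event_source/devices/<pmu>/type
+    // (see the commented-out code above and below).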
+    let res = 6;
+    Ok(res)
+    // fs::read_to_string(&file)
+    //     .and_then(|perf_ty| {
+    //         perf_ty
+    //             .trim()
+    //             .parse::<u32>()
+    //             .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
+    //     })
+    //     .map_err(|e| (file, e))
+}
+
+fn read_sys_fs_perf_ret_probe(pmu: &str) -> Result<u32, (PathBuf, io::Error)> {
+    // let file = Path::new("/sys/bus/event_source/devices")
+    //     .join(pmu)
+    //     .join("format/retprobe");
+    //
+    // fs::read_to_string(&file)
+    //     .and_then(|data| {
+    //         let mut parts = data.trim().splitn(2, ':').skip(1);
+    //         let config = parts
+    //             .next()
+    //             .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "invalid format"))?;
+    //
+    //         config
+    //             .parse::<u32>()
+    //             .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
+    //     })
+    //     .map_err(|e| (file, e))
+    // let file = format!("/sys/bus/event_source/devices/{}/format/retprobe", pmu);
+    // let res = unsafe { extern_read_sys_fs_perf_ret_probe(&file) }.unwrap();
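+    // NOTE: the retprobe config bit is currently hard-coded to 0 instead of
+    // being parsed from format/retprobe (see the commented-out code above).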
+    Ok(0)
+}

+ 79 - 0
aya/src/programs/uprobe.rs

@@ -0,0 +1,79 @@
+use std::{boxed::Box, error::Error, io, path::PathBuf, string::String, sync::Arc};
+
+use thiserror::Error;
+
+use crate::programs::{
+    links::define_link_wrapper,
+    perf_attach::{PerfLinkIdInner, PerfLinkInner},
+    probe::ProbeKind,
+    ProgramData, ProgramError,
+};
+const LD_SO_CACHE_FILE: &str = "/etc/ld.so.cache";
+const LD_SO_CACHE_HEADER_OLD: &str = "ld.so-1.7.0\0";
+const LD_SO_CACHE_HEADER_NEW: &str = "glibc-ld.so.cache1.1";
+
+/// A user space probe.
+///
+/// User probes are eBPF programs that can be attached to any userspace
+/// function. They can be of two kinds:
+///
+/// - `uprobe`: attached to the *start* of the target functions
+/// - `uretprobe`: attached to the *return address* of the target functions
+#[derive(Debug)]
+#[doc(alias = "BPF_PROG_TYPE_KPROBE")]
+pub struct UProbe {
+    pub(crate) data: ProgramData<UProbeLink>,
+    pub(crate) kind: ProbeKind,
+}
+
+impl UProbe {
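+    /// Returns the name of the program, if one was provided when it was loaded.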
+    pub fn name(&self) -> Option<String> {
+        self.data.name.clone()
+    }
+}
+/// The type returned when attaching an [`UProbe`] fails.
+#[derive(Debug, Error)]
+pub enum UProbeError {
+    /// There was an error parsing `/etc/ld.so.cache`.
+    #[error("error reading `{}` file", LD_SO_CACHE_FILE)]
+    InvalidLdSoCache {
+        /// the original [`io::Error`]
+        #[source]
+        io_error: Arc<io::Error>,
+    },
+
+    /// The target program could not be found.
+    #[error("could not resolve uprobe target `{path}`")]
+    InvalidTarget {
+        /// path to target
+        path: PathBuf,
+    },
+
+    /// There was an error resolving the target symbol.
+    #[error("error resolving symbol")]
+    SymbolError {
+        /// symbol name
+        symbol: String,
+        /// the original error
+        #[source]
+        error: Box<dyn Error + Send + Sync>,
+    },
+
+    /// There was an error accessing `filename`.
+    #[error("`{filename}`")]
+    FileError {
+        /// The file name
+        filename: PathBuf,
+        /// The [`io::Error`] returned from the file operation
+        #[source]
+        io_error: io::Error,
+    },
+}
+define_link_wrapper!(
+    /// The link used by [UProbe] programs.
+    UProbeLink,
+    /// The type returned by [UProbe::attach]. Can be passed to [UProbe::detach].
+    UProbeLinkId,
+    PerfLinkInner,
+    PerfLinkIdInner
+);

+ 55 - 0
aya/src/programs/utils.rs

@@ -0,0 +1,55 @@
+use std::{
+    fs::File,
+    io,
+    io::{BufRead, BufReader},
+    os::fd::{AsRawFd, BorrowedFd},
+    path::Path,
+};
+
+use crate::programs::ProgramError;
+
+/// Get the specified information from a file descriptor's fdinfo.
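+/// Lines in `/proc/self/fdinfo/<fd>` have the form `key:\tvalue` (for example
+/// `memlock:\t4096`), so the value is parsed from the text after the last tab.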
+pub(crate) fn get_fdinfo(fd: BorrowedFd<'_>, key: &str) -> Result<u32, ProgramError> {
+    let info = File::open(format!("/proc/self/fdinfo/{}", fd.as_raw_fd()))?;
+    let reader = BufReader::new(info);
+    for line in reader.lines() {
+        let line = line?;
+        if !line.contains(key) {
+            continue;
+        }
+
+        let (_key, val) = line.rsplit_once('\t').unwrap();
+
+        return Ok(val.parse().unwrap());
+    }
+    Ok(0)
+}
+
+/// Find tracefs filesystem path.
+pub(crate) fn find_tracefs_path() -> Result<&'static Path, ProgramError> {
+    lazy_static::lazy_static! {
+        static ref TRACE_FS: Option<&'static Path> = {
+            let known_mounts = [
+                Path::new("/sys/kernel/tracing"),
+                Path::new("/sys/kernel/debug/tracing"),
+            ];
+
+            for mount in known_mounts {
+                // Check that the mount point exists and is not empty
+                // Documented here: (https://www.kernel.org/doc/Documentation/trace/ftrace.txt)
+                // In some cases, tracefs will only mount at /sys/kernel/debug/tracing
+                // but, the kernel will still create the directory /sys/kernel/tracing.
+                // The user may be expected to manually mount the directory in order for it to
+                // exist in /sys/kernel/tracing according to the documentation.
+                if mount.exists() && mount.read_dir().ok()?.next().is_some() {
+                    return Some(mount);
+                }
+            }
+            None
+        };
+    }
+
+    TRACE_FS
+        .as_deref()
+        .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "tracefs not found").into())
+}

+ 789 - 0
aya/src/sys/bpf.rs

@@ -0,0 +1,789 @@
+use core::{
+    cmp,
+    ffi::{c_char, CStr},
+    mem, slice,
+};
+use std::{ffi::CString, format, io, iter, os::fd::*, string::String, vec, vec::Vec};
+
+use assert_matches::assert_matches;
+use aya_obj::{
+    btf::*,
+    copy_instructions,
+    generated::*,
+    maps::{bpf_map_def, LegacyMap},
+    EbpfSectionKind, VerifierLog,
+};
+use libc::{ENOENT, ENOSPC};
+
+use crate::{
+    bpf::BPF_OBJ_NAME_LEN,
+    maps::MapData,
+    sys::{syscall, SysResult, Syscall, SyscallError},
+    util::KernelVersion,
+    Pod, VerifierLogLevel,
+};
+
+pub(crate) fn is_btf_supported() -> bool {
+    let mut btf = Btf::new();
+    let name_offset = btf.add_string("int");
+    let int_type = BtfType::Int(Int::new(name_offset, 4, IntEncoding::Signed, 0));
+    btf.add_type(int_type);
+    let btf_bytes = btf.to_bytes();
+    bpf_load_btf(btf_bytes.as_slice(), &mut [], Default::default()).is_ok()
+}
+
+pub(crate) fn bpf_load_btf(
+    raw_btf: &[u8],
+    log_buf: &mut [u8],
+    verifier_log_level: VerifierLogLevel,
+) -> SysResult<OwnedFd> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let u = unsafe { &mut attr.__bindgen_anon_7 };
+    u.btf = raw_btf.as_ptr() as *const _ as u64;
+    u.btf_size = mem::size_of_val(raw_btf) as u32;
+    if !log_buf.is_empty() {
+        u.btf_log_level = verifier_log_level.bits();
+        u.btf_log_buf = log_buf.as_mut_ptr() as u64;
+        u.btf_log_size = log_buf.len() as u32;
+    }
+    // SAFETY: `BPF_BTF_LOAD` returns a newly created fd.
+    unsafe { fd_sys_bpf(bpf_cmd::BPF_BTF_LOAD, &mut attr) }
+}
+
+// SAFETY: only use for bpf_cmd that return a new file descriptor on success.
+unsafe fn fd_sys_bpf(cmd: bpf_cmd, attr: &mut bpf_attr) -> SysResult<OwnedFd> {
+    let fd = sys_bpf(cmd, attr)?;
+    let fd = fd.try_into().map_err(|_| {
+        (
+            fd,
+            io::Error::new(
+                io::ErrorKind::InvalidData,
+                format!("{cmd:?}: invalid fd returned: {fd}"),
+            ),
+        )
+    })?;
+    Ok(OwnedFd::from_raw_fd(fd))
+}
+
+pub(crate) fn is_btf_func_supported() -> bool {
+    let mut btf = Btf::new();
+    let name_offset = btf.add_string("int");
+    let int_type = BtfType::Int(Int::new(name_offset, 4, IntEncoding::Signed, 0));
+    let int_type_id = btf.add_type(int_type);
+
+    let a_name = btf.add_string("a");
+    let b_name = btf.add_string("b");
+    let params = vec![
+        BtfParam {
+            name_offset: a_name,
+            btf_type: int_type_id,
+        },
+        BtfParam {
+            name_offset: b_name,
+            btf_type: int_type_id,
+        },
+    ];
+    let func_proto = BtfType::FuncProto(FuncProto::new(params, int_type_id));
+    let func_proto_type_id = btf.add_type(func_proto);
+
+    let add = btf.add_string("inc");
+    let func = BtfType::Func(Func::new(add, func_proto_type_id, FuncLinkage::Static));
+    btf.add_type(func);
+
+    let btf_bytes = btf.to_bytes();
+
+    bpf_load_btf(btf_bytes.as_slice(), &mut [], Default::default()).is_ok()
+}
+
+pub(crate) fn is_btf_func_global_supported() -> bool {
+    let mut btf = Btf::new();
+    let name_offset = btf.add_string("int");
+    let int_type = BtfType::Int(Int::new(name_offset, 4, IntEncoding::Signed, 0));
+    let int_type_id = btf.add_type(int_type);
+
+    let a_name = btf.add_string("a");
+    let b_name = btf.add_string("b");
+    let params = vec![
+        BtfParam {
+            name_offset: a_name,
+            btf_type: int_type_id,
+        },
+        BtfParam {
+            name_offset: b_name,
+            btf_type: int_type_id,
+        },
+    ];
+    let func_proto = BtfType::FuncProto(FuncProto::new(params, int_type_id));
+    let func_proto_type_id = btf.add_type(func_proto);
+
+    let add = btf.add_string("inc");
+    let func = BtfType::Func(Func::new(add, func_proto_type_id, FuncLinkage::Global));
+    btf.add_type(func);
+
+    let btf_bytes = btf.to_bytes();
+
+    bpf_load_btf(btf_bytes.as_slice(), &mut [], Default::default()).is_ok()
+}
+
+pub(crate) fn is_btf_datasec_supported() -> bool {
+    let mut btf = Btf::new();
+    let name_offset = btf.add_string("int");
+    let int_type = BtfType::Int(Int::new(name_offset, 4, IntEncoding::Signed, 0));
+    let int_type_id = btf.add_type(int_type);
+
+    let name_offset = btf.add_string("foo");
+    let var_type = BtfType::Var(Var::new(name_offset, int_type_id, VarLinkage::Static));
+    let var_type_id = btf.add_type(var_type);
+
+    let name_offset = btf.add_string(".data");
+    let variables = vec![DataSecEntry {
+        btf_type: var_type_id,
+        offset: 0,
+        size: 4,
+    }];
+    let datasec_type = BtfType::DataSec(DataSec::new(name_offset, variables, 4));
+    btf.add_type(datasec_type);
+
+    let btf_bytes = btf.to_bytes();
+
+    bpf_load_btf(btf_bytes.as_slice(), &mut [], Default::default()).is_ok()
+}
+
+pub(crate) fn is_btf_enum64_supported() -> bool {
+    let mut btf = Btf::new();
+    let name_offset = btf.add_string("enum64");
+
+    let enum_64_type = BtfType::Enum64(Enum64::new(
+        name_offset,
+        true,
+        vec![BtfEnum64::new(btf.add_string("a"), 1)],
+    ));
+    btf.add_type(enum_64_type);
+
+    let btf_bytes = btf.to_bytes();
+
+    bpf_load_btf(btf_bytes.as_slice(), &mut [], Default::default()).is_ok()
+}
+
+pub(crate) fn is_btf_float_supported() -> bool {
+    let mut btf = Btf::new();
+    let name_offset = btf.add_string("float");
+    let float_type = BtfType::Float(Float::new(name_offset, 16));
+    btf.add_type(float_type);
+
+    let btf_bytes = btf.to_bytes();
+
+    bpf_load_btf(btf_bytes.as_slice(), &mut [], Default::default()).is_ok()
+}
+
+pub(crate) fn is_btf_decl_tag_supported() -> bool {
+    let mut btf = Btf::new();
+    let name_offset = btf.add_string("int");
+    let int_type = BtfType::Int(Int::new(name_offset, 4, IntEncoding::Signed, 0));
+    let int_type_id = btf.add_type(int_type);
+
+    let name_offset = btf.add_string("foo");
+    let var_type = BtfType::Var(Var::new(name_offset, int_type_id, VarLinkage::Static));
+    let var_type_id = btf.add_type(var_type);
+
+    let name_offset = btf.add_string("decl_tag");
+    let decl_tag = BtfType::DeclTag(DeclTag::new(name_offset, var_type_id, -1));
+    btf.add_type(decl_tag);
+
+    let btf_bytes = btf.to_bytes();
+
+    bpf_load_btf(btf_bytes.as_slice(), &mut [], Default::default()).is_ok()
+}
+
+pub(crate) fn is_btf_type_tag_supported() -> bool {
+    let mut btf = Btf::new();
+
+    let int_type = BtfType::Int(Int::new(0, 4, IntEncoding::Signed, 0));
+    let int_type_id = btf.add_type(int_type);
+
+    let name_offset = btf.add_string("int");
+    let type_tag = BtfType::TypeTag(TypeTag::new(name_offset, int_type_id));
+    let type_tag_type = btf.add_type(type_tag);
+
+    btf.add_type(BtfType::Ptr(Ptr::new(0, type_tag_type)));
+
+    let btf_bytes = btf.to_bytes();
+
+    bpf_load_btf(btf_bytes.as_slice(), &mut [], Default::default()).is_ok()
+}
+
+pub(crate) fn retry_with_verifier_logs<T>(
+    max_retries: usize,
+    f: impl Fn(&mut [u8]) -> SysResult<T>,
+) -> (SysResult<T>, VerifierLog) {
+    const MIN_LOG_BUF_SIZE: usize = 1024 * 10;
+    const MAX_LOG_BUF_SIZE: usize = (u32::MAX >> 8) as usize;
+
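+    // The closure is first called with an empty log buffer. On the first failure
+    // (or whenever the kernel reports ENOSPC because the log buffer is too small)
+    // the buffer is grown 10x, clamped to [MIN_LOG_BUF_SIZE, MAX_LOG_BUF_SIZE],
+    // and the call is retried so the verifier log can be captured for error
+    // reporting.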
+    let mut log_buf = Vec::new();
+    let mut retries = 0;
+    loop {
+        let ret = f(log_buf.as_mut_slice());
+        if retries != max_retries {
+            if let Err((_, io_error)) = &ret {
+                if retries == 0 || io_error.raw_os_error() == Some(ENOSPC) {
+                    let len = (log_buf.capacity() * 10).clamp(MIN_LOG_BUF_SIZE, MAX_LOG_BUF_SIZE);
+                    log_buf.resize(len, 0);
+                    if let Some(first) = log_buf.first_mut() {
+                        *first = 0;
+                    }
+                    retries += 1;
+                    continue;
+                }
+            }
+        }
+        if let Some(pos) = log_buf.iter().position(|b| *b == 0) {
+            log_buf.truncate(pos);
+        }
+        let log_buf = String::from_utf8(log_buf).unwrap();
+
+        break (ret, VerifierLog::new(log_buf));
+    }
+}
+
+pub(crate) fn bpf_create_map(
+    name: &CStr,
+    def: &aya_obj::Map,
+    btf_fd: Option<BorrowedFd<'_>>,
+    kernel_version: KernelVersion,
+) -> SysResult<OwnedFd> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    let u = unsafe { &mut attr.__bindgen_anon_1 };
+    u.map_type = def.map_type();
+    u.key_size = def.key_size();
+    u.value_size = def.value_size();
+    u.max_entries = def.max_entries();
+    u.map_flags = def.map_flags();
+
+    info!("bpf_create_map, name: {:?}, map_type: {:?}, key_size: {:?}, value_size: {:?}, max_entries: {:?}", name, u.map_type, u.key_size, u.value_size, u.max_entries);
+
+    if let aya_obj::Map::Btf(m) = def {
+        use aya_obj::generated::bpf_map_type::*;
+
+        // Mimic https://github.com/libbpf/libbpf/issues/355
+        // Currently a bunch of (usually pretty specialized) BPF maps do not support
+        // specifying BTF types for the key and value.
+        match u.map_type.try_into() {
+            Ok(BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+            | Ok(BPF_MAP_TYPE_CGROUP_ARRAY)
+            | Ok(BPF_MAP_TYPE_STACK_TRACE)
+            | Ok(BPF_MAP_TYPE_ARRAY_OF_MAPS)
+            | Ok(BPF_MAP_TYPE_HASH_OF_MAPS)
+            | Ok(BPF_MAP_TYPE_DEVMAP)
+            | Ok(BPF_MAP_TYPE_DEVMAP_HASH)
+            | Ok(BPF_MAP_TYPE_CPUMAP)
+            | Ok(BPF_MAP_TYPE_XSKMAP)
+            | Ok(BPF_MAP_TYPE_SOCKMAP)
+            | Ok(BPF_MAP_TYPE_SOCKHASH)
+            | Ok(BPF_MAP_TYPE_QUEUE)
+            | Ok(BPF_MAP_TYPE_STACK)
+            | Ok(BPF_MAP_TYPE_RINGBUF) => {
+                u.btf_key_type_id = 0;
+                u.btf_value_type_id = 0;
+                u.btf_fd = 0;
+            }
+            _ => {
+                u.btf_key_type_id = m.def.btf_key_type_id;
+                u.btf_value_type_id = m.def.btf_value_type_id;
+                u.btf_fd = btf_fd.map(|fd| fd.as_raw_fd()).unwrap_or_default() as u32;
+            }
+        }
+    }
+
+    // https://github.com/torvalds/linux/commit/ad5b177bd73f5107d97c36f56395c4281fb6f089
+    // The map name was added as a parameter in kernel 4.15+ so we skip adding it on
+    // older kernels for compatibility
+    if kernel_version >= KernelVersion::new(4, 15, 0) {
+        // u.map_name is 16 bytes max and must be NULL terminated
+        let name_len = cmp::min(name.to_bytes().len(), BPF_OBJ_NAME_LEN - 1);
+        u.map_name[..name_len]
+            .copy_from_slice(unsafe { slice::from_raw_parts(name.as_ptr(), name_len) });
+    }
+
+    // SAFETY: BPF_MAP_CREATE returns a new file descriptor.
+    unsafe { fd_sys_bpf(bpf_cmd::BPF_MAP_CREATE, &mut attr) }
+}
+
+pub(crate) fn bpf_map_update_elem_ptr<K, V>(
+    fd: BorrowedFd<'_>,
+    key: *const K,
+    value: *mut V,
+    flags: u64,
+) -> SysResult<i64> {
+    log::info!("update key-value for map, it fd is: {}", fd.as_raw_fd());
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    let u = unsafe { &mut attr.__bindgen_anon_2 };
+    u.map_fd = fd.as_raw_fd() as u32;
+    u.key = key as u64;
+    u.__bindgen_anon_1.value = value as u64;
+    u.flags = flags;
+
+    sys_bpf(bpf_cmd::BPF_MAP_UPDATE_ELEM, &mut attr)
+}
+
+pub(crate) fn bpf_map_update_elem<K: Pod, V: Pod>(
+    fd: BorrowedFd<'_>,
+    key: Option<&K>,
+    value: &V,
+    flags: u64,
+) -> SysResult<i64> {
+    // log::warn!("insert key-value for map, it fd is: {}",fd.as_raw_fd());
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    let u = unsafe { &mut attr.__bindgen_anon_2 };
+    u.map_fd = fd.as_raw_fd() as u32;
+    if let Some(key) = key {
+        u.key = key as *const _ as u64;
+    }
+    u.__bindgen_anon_1.value = value as *const _ as u64;
+    u.flags = flags;
+
+    sys_bpf(bpf_cmd::BPF_MAP_UPDATE_ELEM, &mut attr)
+}
+// since kernel 5.2
+pub(crate) fn bpf_map_freeze(fd: BorrowedFd<'_>) -> SysResult<i64> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let u = unsafe { &mut attr.__bindgen_anon_2 };
+    u.map_fd = fd.as_raw_fd() as u32;
+    sys_bpf(bpf_cmd::BPF_MAP_FREEZE, &mut attr)
+}
+
+pub(crate) fn bpf_map_get_info_by_fd(fd: BorrowedFd<'_>) -> Result<bpf_map_info, SyscallError> {
+    bpf_obj_get_info_by_fd(fd, |_| {})
+}
+
+pub(crate) fn iter_map_ids() -> impl Iterator<Item = Result<u32, SyscallError>> {
+    iter_obj_ids(bpf_cmd::BPF_MAP_GET_NEXT_ID, "bpf_map_get_next_id")
+}
+
+fn iter_obj_ids(
+    cmd: bpf_cmd,
+    name: &'static str,
+) -> impl Iterator<Item = Result<u32, SyscallError>> {
+    let mut current_id = Some(0);
+    iter::from_fn(move || {
+        let next_id = {
+            let current_id = current_id?;
+            bpf_obj_get_next_id(current_id, cmd, name).transpose()
+        };
+        current_id = next_id.as_ref().and_then(|next_id| match next_id {
+            Ok(next_id) => Some(*next_id),
+            Err(SyscallError { .. }) => None,
+        });
+        next_id
+    })
+}
+
+fn bpf_obj_get_next_id(
+    id: u32,
+    cmd: bpf_cmd,
+    name: &'static str,
+) -> Result<Option<u32>, SyscallError> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let u = unsafe { &mut attr.__bindgen_anon_6 };
+    u.__bindgen_anon_1.start_id = id;
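+    // The BPF_*_GET_NEXT_ID commands fill `next_id` with the id that follows
+    // `start_id`; ENOENT indicates there are no more objects to iterate.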
+    match sys_bpf(cmd, &mut attr) {
+        Ok(code) => {
+            assert_eq!(code, 0);
+            Ok(Some(unsafe { attr.__bindgen_anon_6.next_id }))
+        }
+        Err((code, io_error)) => {
+            assert_eq!(code, -1);
+            if io_error.raw_os_error() == Some(ENOENT) {
+                Ok(None)
+            } else {
+                Err(SyscallError {
+                    call: name,
+                    io_error,
+                })
+            }
+        }
+    }
+}
+pub(crate) fn iter_prog_ids() -> impl Iterator<Item = Result<u32, SyscallError>> {
+    iter_obj_ids(bpf_cmd::BPF_PROG_GET_NEXT_ID, "bpf_prog_get_next_id")
+}
+
+pub(crate) fn bpf_map_get_fd_by_id(map_id: u32) -> Result<OwnedFd, SyscallError> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    attr.__bindgen_anon_6.__bindgen_anon_1.map_id = map_id;
+
+    // SAFETY: BPF_MAP_GET_FD_BY_ID returns a new file descriptor.
+    unsafe { fd_sys_bpf(bpf_cmd::BPF_MAP_GET_FD_BY_ID, &mut attr) }.map_err(|(code, io_error)| {
+        assert_eq!(code, -1);
+        SyscallError {
+            call: "bpf_map_get_fd_by_id",
+            io_error,
+        }
+    })
+}
+
+pub(crate) fn bpf_btf_get_fd_by_id(id: u32) -> Result<OwnedFd, SyscallError> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    attr.__bindgen_anon_6.__bindgen_anon_1.btf_id = id;
+
+    // SAFETY: BPF_BTF_GET_FD_BY_ID returns a new file descriptor.
+    unsafe { fd_sys_bpf(bpf_cmd::BPF_BTF_GET_FD_BY_ID, &mut attr) }.map_err(|(code, io_error)| {
+        assert_eq!(code, -1);
+        SyscallError {
+            call: "bpf_btf_get_fd_by_id",
+            io_error,
+        }
+    })
+}
+
+pub(crate) fn bpf_pin_object(fd: BorrowedFd<'_>, path: &CStr) -> SysResult<i64> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let u = unsafe { &mut attr.__bindgen_anon_4 };
+    u.bpf_fd = fd.as_raw_fd() as u32;
+    u.pathname = path.as_ptr() as u64;
+    sys_bpf(bpf_cmd::BPF_OBJ_PIN, &mut attr)
+}
+
+pub(crate) fn btf_obj_get_info_by_fd(
+    fd: BorrowedFd<'_>,
+    buf: &mut [u8],
+) -> Result<bpf_btf_info, SyscallError> {
+    bpf_obj_get_info_by_fd(fd, |info: &mut bpf_btf_info| {
+        info.btf = buf.as_mut_ptr() as _;
+        info.btf_size = buf.len() as _;
+    })
+}
+
+pub(crate) fn bpf_prog_get_info_by_fd(
+    fd: BorrowedFd<'_>,
+    map_ids: &mut [u32],
+) -> Result<bpf_prog_info, SyscallError> {
+    bpf_obj_get_info_by_fd(fd, |info: &mut bpf_prog_info| {
+        info.nr_map_ids = map_ids.len() as _;
+        info.map_ids = map_ids.as_mut_ptr() as _;
+    })
+}
+fn bpf_obj_get_info_by_fd<T, F: FnOnce(&mut T)>(
+    fd: BorrowedFd<'_>,
+    init: F,
+) -> Result<T, SyscallError> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let mut info = unsafe { mem::zeroed() };
+
+    init(&mut info);
+
+    attr.info.bpf_fd = fd.as_raw_fd() as u32;
+    attr.info.info = &info as *const _ as u64;
+    attr.info.info_len = mem::size_of_val(&info) as u32;
+
+    match sys_bpf(bpf_cmd::BPF_OBJ_GET_INFO_BY_FD, &mut attr) {
+        Ok(code) => {
+            assert_eq!(code, 0);
+            Ok(info)
+        }
+        Err((code, io_error)) => {
+            assert_eq!(code, -1);
+            Err(SyscallError {
+                call: "bpf_obj_get_info_by_fd",
+                io_error,
+            })
+        }
+    }
+}
+
+pub(crate) fn bpf_prog_get_fd_by_id(prog_id: u32) -> Result<OwnedFd, SyscallError> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    attr.__bindgen_anon_6.__bindgen_anon_1.prog_id = prog_id;
+
+    // SAFETY: BPF_PROG_GET_FD_BY_ID returns a new file descriptor.
+    unsafe { fd_sys_bpf(bpf_cmd::BPF_PROG_GET_FD_BY_ID, &mut attr) }.map_err(|(code, io_error)| {
+        assert_eq!(code, -1);
+        SyscallError {
+            call: "bpf_prog_get_fd_by_id",
+            io_error,
+        }
+    })
+}
+
+pub(crate) fn bpf_get_object(path: &CStr) -> SysResult<OwnedFd> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let u = unsafe { &mut attr.__bindgen_anon_4 };
+    u.pathname = path.as_ptr() as u64;
+    // SAFETY: BPF_OBJ_GET returns a new file descriptor.
+    unsafe { fd_sys_bpf(bpf_cmd::BPF_OBJ_GET, &mut attr) }
+}
+
+pub(crate) struct EbpfLoadProgramAttrs<'a> {
+    pub(crate) name: Option<CString>,
+    pub(crate) ty: bpf_prog_type,
+    pub(crate) insns: &'a [bpf_insn],
+    pub(crate) license: &'a CStr,
+    pub(crate) kernel_version: u32,
+    pub(crate) expected_attach_type: Option<bpf_attach_type>,
+    pub(crate) prog_btf_fd: Option<BorrowedFd<'a>>,
+    pub(crate) attach_btf_obj_fd: Option<BorrowedFd<'a>>,
+    pub(crate) attach_btf_id: Option<u32>,
+    pub(crate) attach_prog_fd: Option<BorrowedFd<'a>>,
+    pub(crate) func_info_rec_size: usize,
+    pub(crate) func_info: FuncSecInfo,
+    pub(crate) line_info_rec_size: usize,
+    pub(crate) line_info: LineSecInfo,
+    pub(crate) flags: u32,
+}
+
+pub(crate) fn bpf_load_program(
+    aya_attr: &EbpfLoadProgramAttrs<'_>,
+    log_buf: &mut [u8],
+    verifier_log_level: VerifierLogLevel,
+) -> SysResult<OwnedFd> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    let u = unsafe { &mut attr.__bindgen_anon_3 };
+
+    if let Some(prog_name) = &aya_attr.name {
+        let mut name: [c_char; 16] = [0; 16];
+        let name_bytes = prog_name.to_bytes();
+        let len = cmp::min(name.len(), name_bytes.len());
+        name[..len].copy_from_slice(unsafe {
+            slice::from_raw_parts(name_bytes.as_ptr() as *const c_char, len)
+        });
+        u.prog_name = name;
+    }
+
+    u.prog_flags = aya_attr.flags;
+    u.prog_type = aya_attr.ty as u32;
+    if let Some(v) = aya_attr.expected_attach_type {
+        u.expected_attach_type = v as u32;
+    }
+    u.insns = aya_attr.insns.as_ptr() as u64;
+    u.insn_cnt = aya_attr.insns.len() as u32;
+    u.license = aya_attr.license.as_ptr() as u64;
+    u.kern_version = aya_attr.kernel_version;
+
+    // these must be allocated here to ensure the slice outlives the pointer
+    // so .as_ptr below won't point to garbage
+    let line_info_buf = aya_attr.line_info.line_info_bytes();
+    let func_info_buf = aya_attr.func_info.func_info_bytes();
+
+    if let Some(btf_fd) = aya_attr.prog_btf_fd {
+        u.prog_btf_fd = btf_fd.as_raw_fd() as u32;
+        if aya_attr.line_info_rec_size > 0 {
+            u.line_info = line_info_buf.as_ptr() as *const _ as u64;
+            u.line_info_cnt = aya_attr.line_info.len() as u32;
+            u.line_info_rec_size = aya_attr.line_info_rec_size as u32;
+        }
+        if aya_attr.func_info_rec_size > 0 {
+            u.func_info = func_info_buf.as_ptr() as *const _ as u64;
+            u.func_info_cnt = aya_attr.func_info.len() as u32;
+            u.func_info_rec_size = aya_attr.func_info_rec_size as u32;
+        }
+    }
+    if !log_buf.is_empty() {
+        u.log_level = verifier_log_level.bits();
+        u.log_buf = log_buf.as_mut_ptr() as u64;
+        u.log_size = log_buf.len() as u32;
+    }
+    if let Some(v) = aya_attr.attach_btf_obj_fd {
+        u.__bindgen_anon_1.attach_btf_obj_fd = v.as_raw_fd() as _;
+    }
+    if let Some(v) = aya_attr.attach_prog_fd {
+        u.__bindgen_anon_1.attach_prog_fd = v.as_raw_fd() as u32;
+    }
+
+    if let Some(v) = aya_attr.attach_btf_id {
+        u.attach_btf_id = v;
+    }
+    bpf_prog_load(&mut attr)
+}
+
+pub(crate) fn bpf_link_get_info_by_fd(fd: BorrowedFd<'_>) -> Result<bpf_link_info, SyscallError> {
+    bpf_obj_get_info_by_fd(fd, |_| {})
+}
+
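+/// What a BPF link attaches to: either an already-open target fd or a network
+/// interface index.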
+pub(crate) enum LinkTarget<'f> {
+    Fd(BorrowedFd<'f>),
+    IfIndex(u32),
+}
+
+// since kernel 5.7
+pub(crate) fn bpf_link_create(
+    prog_fd: BorrowedFd<'_>,
+    target: LinkTarget<'_>,
+    attach_type: bpf_attach_type,
+    btf_id: Option<u32>,
+    flags: u32,
+) -> SysResult<OwnedFd> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    attr.link_create.__bindgen_anon_1.prog_fd = prog_fd.as_raw_fd() as u32;
+
+    match target {
+        LinkTarget::Fd(fd) => {
+            attr.link_create.__bindgen_anon_2.target_fd = fd.as_raw_fd() as u32;
+        }
+        LinkTarget::IfIndex(ifindex) => {
+            attr.link_create.__bindgen_anon_2.target_ifindex = ifindex;
+        }
+    };
+    attr.link_create.attach_type = attach_type as u32;
+    attr.link_create.flags = flags;
+    if let Some(btf_id) = btf_id {
+        attr.link_create.__bindgen_anon_3.target_btf_id = btf_id;
+    }
+
+    // SAFETY: BPF_LINK_CREATE returns a new file descriptor.
+    unsafe { fd_sys_bpf(bpf_cmd::BPF_LINK_CREATE, &mut attr) }
+}
+
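+/// Probes whether `BPF_PROG_LOAD` accepts a `prog_name`: loads a trivial
+/// "return 0" socket filter with a name set and reports whether the kernel
+/// accepted it.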
+pub(crate) fn is_prog_name_supported() -> bool {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let u = unsafe { &mut attr.__bindgen_anon_3 };
+    let mut name: [c_char; 16] = [0; 16];
+    let cstring = CString::new("aya_name_check").unwrap();
+    let name_bytes = cstring.to_bytes();
+    let len = cmp::min(name.len(), name_bytes.len());
+    name[..len].copy_from_slice(unsafe {
+        slice::from_raw_parts(name_bytes.as_ptr() as *const c_char, len)
+    });
+    u.prog_name = name;
+
+    let prog: &[u8] = &[
+        0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov64 r0 = 0
+        0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
+    ];
+
+    let gpl = b"GPL\0";
+    u.license = gpl.as_ptr() as u64;
+
+    let insns = copy_instructions(prog).unwrap();
+    u.insn_cnt = insns.len() as u32;
+    u.insns = insns.as_ptr() as u64;
+    u.prog_type = bpf_prog_type::BPF_PROG_TYPE_SOCKET_FILTER as u32;
+
+    bpf_prog_load(&mut attr).is_ok()
+}
+
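+/// Probes for the `bpf_probe_read_kernel` helper by loading a minimal
+/// tracepoint program that calls helper 113 and checking that the load
+/// succeeds.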
+pub(crate) fn is_probe_read_kernel_supported() -> bool {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let u = unsafe { &mut attr.__bindgen_anon_3 };
+
+    let prog: &[u8] = &[
+        0xbf, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r1 = r10
+        0x07, 0x01, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, // r1 -= 8
+        0xb7, 0x02, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, // r2 = 8
+        0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r3 = 0
+        0x85, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, // call 113
+        0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
+    ];
+
+    let gpl = b"GPL\0";
+    u.license = gpl.as_ptr() as u64;
+
+    let insns = copy_instructions(prog).unwrap();
+    u.insn_cnt = insns.len() as u32;
+    u.insns = insns.as_ptr() as u64;
+    u.prog_type = bpf_prog_type::BPF_PROG_TYPE_TRACEPOINT as u32;
+
+    bpf_prog_load(&mut attr).is_ok()
+}
+
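+/// Probes support for global data: creates a small array map and loads a
+/// program that writes into its value through a `ld_imm64` map instruction.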
+pub(crate) fn is_bpf_global_data_supported() -> bool {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let u = unsafe { &mut attr.__bindgen_anon_3 };
+
+    let prog: &[u8] = &[
+        0x18, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ld_pseudo r1, 0x2, 0x0
+        0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, //
+        0x7a, 0x01, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, // stdw [r1 + 0x0], 0x2a
+        0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov64 r0 = 0
+        0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
+    ];
+
+    let mut insns = copy_instructions(prog).unwrap();
+
+    let map = MapData::create(
+        aya_obj::Map::Legacy(LegacyMap {
+            def: bpf_map_def {
+                map_type: bpf_map_type::BPF_MAP_TYPE_ARRAY as u32,
+                key_size: 4,
+                value_size: 32,
+                max_entries: 1,
+                ..Default::default()
+            },
+            section_index: 0,
+            section_kind: EbpfSectionKind::Maps,
+            symbol_index: None,
+            data: Vec::new(),
+        }),
+        "aya_global",
+        None,
+    );
+
+    if let Ok(map) = map {
+        insns[0].imm = map.fd().as_fd().as_raw_fd();
+
+        let gpl = b"GPL\0";
+        u.license = gpl.as_ptr() as u64;
+        u.insn_cnt = insns.len() as u32;
+        u.insns = insns.as_ptr() as u64;
+        u.prog_type = bpf_prog_type::BPF_PROG_TYPE_SOCKET_FILTER as u32;
+
+        bpf_prog_load(&mut attr).is_ok()
+    } else {
+        false
+    }
+}
+/// Tests whether CpuMap, DevMap and DevMapHash support program ids
+pub(crate) fn is_prog_id_supported(map_type: bpf_map_type) -> bool {
+    assert_matches!(
+        map_type,
+        bpf_map_type::BPF_MAP_TYPE_CPUMAP
+            | bpf_map_type::BPF_MAP_TYPE_DEVMAP
+            | bpf_map_type::BPF_MAP_TYPE_DEVMAP_HASH
+    );
+
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let u = unsafe { &mut attr.__bindgen_anon_1 };
+
+    u.map_type = map_type as u32;
+    u.key_size = 4;
+    u.value_size = 8; // 4 for CPU ID, 8 for CPU ID + prog ID
+    u.max_entries = 1;
+    u.map_flags = 0;
+
+    // SAFETY: BPF_MAP_CREATE returns a new file descriptor.
+    let fd = unsafe { fd_sys_bpf(bpf_cmd::BPF_MAP_CREATE, &mut attr) };
+    let fd = fd.map(crate::MockableFd::from_fd);
+    fd.is_ok()
+}
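+
+/// Probes for BPF cookie support by loading a kprobe program that calls the
+/// `bpf_get_attach_cookie` helper.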
+pub(crate) fn is_bpf_cookie_supported() -> bool {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let u = unsafe { &mut attr.__bindgen_anon_3 };
+
+    let prog: &[u8] = &[
+        0x85, 0x00, 0x00, 0x00, 0xae, 0x00, 0x00, 0x00, // call bpf_get_attach_cookie
+        0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
+    ];
+
+    let gpl = b"GPL\0";
+    u.license = gpl.as_ptr() as u64;
+
+    let insns = copy_instructions(prog).unwrap();
+    u.insn_cnt = insns.len() as u32;
+    u.insns = insns.as_ptr() as u64;
+    u.prog_type = bpf_prog_type::BPF_PROG_TYPE_KPROBE as u32;
+
+    bpf_prog_load(&mut attr).is_ok()
+}
+fn bpf_prog_load(attr: &mut bpf_attr) -> SysResult<OwnedFd> {
+    // SAFETY: BPF_PROG_LOAD returns a new file descriptor.
+    unsafe { fd_sys_bpf(bpf_cmd::BPF_PROG_LOAD, attr) }
+}
+
+fn sys_bpf(cmd: bpf_cmd, attr: &mut bpf_attr) -> SysResult<i64> {
+    syscall(Syscall::Ebpf { cmd, attr })
+}

+ 21 - 0
aya/src/sys/fake.rs

@@ -0,0 +1,21 @@
+// Everything in this module is only used from tests, so gate the imports as
+// well to avoid unused-import warnings in non-test builds.
+#[cfg(test)]
+use std::{cell::RefCell, ffi::c_void, io, ptr};
+
+#[cfg(test)]
+use super::{SysResult, Syscall};
+
+#[cfg(test)]
+type SyscallFn = unsafe fn(Syscall<'_>) -> SysResult<i64>;
+
+#[cfg(test)]
+thread_local! {
+    pub(crate) static TEST_SYSCALL: RefCell<SyscallFn> = RefCell::new(test_syscall);
+    pub(crate) static TEST_MMAP_RET: RefCell<*mut c_void> = const { RefCell::new(ptr::null_mut()) };
+}
+
+#[cfg(test)]
+unsafe fn test_syscall(_call: Syscall<'_>) -> SysResult<i64> {
+    Err((-1, io::Error::from_raw_os_error(libc::EINVAL)))
+}
+
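+/// Replaces the syscall implementation used by the `syscall` shim for the
+/// current test thread.
+///
+/// A minimal usage sketch (test code only); a non-capturing closure coerces to
+/// the required `fn` pointer:
+///
+/// ```ignore
+/// override_syscall(|_| Ok(0));
+/// // every bpf-related syscall on this thread now reports success
+/// ```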
+#[cfg(test)]
+pub(crate) fn override_syscall(call: unsafe fn(Syscall<'_>) -> SysResult<i64>) {
+    TEST_SYSCALL.with(|test_impl| *test_impl.borrow_mut() = call);
+}

+ 131 - 0
aya/src/sys/mod.rs

@@ -0,0 +1,131 @@
+pub(crate) mod bpf;
+pub(crate) mod fake;
+pub(crate) mod perf_event;
+
+use core::{ffi::c_int, mem};
+use std::{
+    ffi::c_void,
+    io,
+    os::fd::{AsRawFd, BorrowedFd},
+};
+
+use aya_obj::generated::{bpf_attr, bpf_cmd, perf_event_attr};
+pub(crate) use bpf::*;
+use libc::{pid_t, SYS_bpf, SYS_perf_event_open};
+use thiserror::Error;
+
+#[cfg(test)]
+use crate::sys::fake::{TEST_MMAP_RET, TEST_SYSCALL};
+
+pub(crate) type SysResult<T> = Result<T, (i64, io::Error)>;
+
+pub(crate) enum Syscall<'a> {
+    Ebpf {
+        cmd: bpf_cmd,
+        attr: &'a mut bpf_attr,
+    },
+    PerfEventOpen {
+        attr: perf_event_attr,
+        pid: pid_t,
+        cpu: i32,
+        group: i32,
+        flags: u32,
+    },
+    PerfEventIoctl {
+        fd: BorrowedFd<'a>,
+        request: c_int,
+        arg: c_int,
+    },
+}
+impl std::fmt::Debug for Syscall<'_> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::Ebpf { cmd, attr: _ } => f
+                .debug_struct("Syscall::Ebpf")
+                .field("cmd", cmd)
+                .field("attr", &format_args!("_"))
+                .finish(),
+            Self::PerfEventOpen {
+                attr: _,
+                pid,
+                cpu,
+                group,
+                flags,
+            } => f
+                .debug_struct("Syscall::PerfEventOpen")
+                .field("attr", &format_args!("_"))
+                .field("pid", pid)
+                .field("cpu", cpu)
+                .field("group", group)
+                .field("flags", flags)
+                .finish(),
+            Self::PerfEventIoctl { fd, request, arg } => f
+                .debug_struct("Syscall::PerfEventIoctl")
+                .field("fd", fd)
+                .field("request", request)
+                .field("arg", arg)
+                .finish(),
+        }
+    }
+}
+#[derive(Debug, Error)]
+#[error("`{call}` failed")]
+pub struct SyscallError {
+    /// The name of the syscall which failed.
+    pub call: &'static str,
+    /// The [`io::Error`] returned by the syscall.
+    #[source]
+    pub io_error: io::Error,
+}
+
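+/// Single entry point for the raw syscalls issued by the `sys` module. In test
+/// builds the call is forwarded to the thread-local override installed via
+/// `fake::override_syscall`; otherwise it goes straight to `libc`.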
+fn syscall(call: Syscall<'_>) -> SysResult<i64> {
+    #[cfg(test)]
+    return TEST_SYSCALL.with(|test_impl| unsafe { test_impl.borrow()(call) });
+
+    #[cfg_attr(test, allow(unreachable_code))]
+    {
+        log::info!("syscall: {:?}", call);
+        let ret = unsafe {
+            match call {
+                Syscall::Ebpf { cmd, attr } => {
+                    libc::syscall(SYS_bpf, cmd, attr, mem::size_of::<bpf_attr>())
+                }
+                Syscall::PerfEventOpen {
+                    attr,
+                    pid,
+                    cpu,
+                    group,
+                    flags,
+                } => libc::syscall(SYS_perf_event_open, &attr, pid, cpu, group, flags),
+                Syscall::PerfEventIoctl { fd, request, arg } => {
+                    let ret = libc::ioctl(fd.as_raw_fd(), request.try_into().unwrap(), arg);
+                    // `libc::ioctl` returns i32 on x86_64 while `libc::syscall` returns i64.
+                    #[allow(clippy::useless_conversion)]
+                    ret.into()
+                }
+            }
+        };
+
+        // `libc::syscall` returns i32 on armv7.
+        #[allow(clippy::useless_conversion)]
+        match ret.into() {
+            ret @ 0.. => Ok(ret),
+            ret => Err((ret, io::Error::last_os_error())),
+        }
+    }
+}
+
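+/// Thin wrapper around `libc::mmap`; in test builds it returns the pointer
+/// staged in `TEST_MMAP_RET` instead of mapping anything.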
+#[cfg_attr(test, allow(unused_variables))]
+pub(crate) unsafe fn mmap(
+    addr: *mut c_void,
+    len: usize,
+    prot: c_int,
+    flags: c_int,
+    fd: BorrowedFd<'_>,
+    offset: libc::off_t,
+) -> *mut c_void {
+    #[cfg(not(test))]
+    return libc::mmap(addr, len, prot, flags, fd.as_raw_fd(), offset);
+    #[cfg(test)]
+    TEST_MMAP_RET.with(|ret| *ret.borrow())
+}

+ 113 - 0
aya/src/sys/perf_event.rs

@@ -0,0 +1,113 @@
+use core::{ffi::c_int, mem};
+use std::{
+    ffi::{CString, OsStr},
+    format, io,
+    os::fd::{BorrowedFd, FromRawFd, OwnedFd},
+};
+
+use aya_obj::generated::{
+    perf_event_attr, perf_event_sample_format::PERF_SAMPLE_RAW,
+    perf_sw_ids::PERF_COUNT_SW_BPF_OUTPUT, perf_type_id::PERF_TYPE_SOFTWARE, PERF_FLAG_FD_CLOEXEC,
+};
+use libc::pid_t;
+
+use crate::sys::{syscall, SysResult, Syscall};
+
+#[allow(clippy::too_many_arguments)]
+pub(crate) fn perf_event_open(
+    perf_type: u32,
+    config: u64,
+    pid: pid_t,
+    cpu: c_int,
+    sample_period: u64,
+    sample_frequency: Option<u64>,
+    wakeup: bool,
+    inherit: bool,
+    flags: u32,
+) -> SysResult<OwnedFd> {
+    let mut attr = unsafe { mem::zeroed::<perf_event_attr>() };
+
+    attr.config = config;
+    attr.size = mem::size_of::<perf_event_attr>() as u32;
+    attr.type_ = perf_type;
+    attr.sample_type = PERF_SAMPLE_RAW as u64;
+    attr.set_inherit(if inherit { 1 } else { 0 });
+    attr.__bindgen_anon_2.wakeup_events = u32::from(wakeup);
+
+    if let Some(frequency) = sample_frequency {
+        attr.set_freq(1);
+        attr.__bindgen_anon_1.sample_freq = frequency;
+    } else {
+        attr.__bindgen_anon_1.sample_period = sample_period;
+    }
+
+    perf_event_sys(attr, pid, cpu, flags)
+}
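+
+/// Opens a perf event on a kprobe/uprobe PMU: `config1` carries a pointer to
+/// the probe name, `config2` the offset, and `ret_bit` (when set) selects the
+/// return-probe variant.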
+pub(crate) fn perf_event_open_probe(
+    ty: u32,
+    ret_bit: Option<u32>,
+    name: &OsStr,
+    offset: u64,
+    pid: Option<pid_t>,
+) -> SysResult<OwnedFd> {
+    use std::os::unix::ffi::OsStrExt as _;
+
+    let mut attr = unsafe { mem::zeroed::<perf_event_attr>() };
+
+    if let Some(ret_bit) = ret_bit {
+        attr.config = 1 << ret_bit;
+    }
+
+    let c_name = CString::new(name.as_bytes()).unwrap();
+
+    attr.size = mem::size_of::<perf_event_attr>() as u32;
+    attr.type_ = ty;
+    attr.__bindgen_anon_3.config1 = c_name.as_ptr() as u64;
+    attr.__bindgen_anon_4.config2 = offset;
+
+    let cpu = if pid.is_some() { -1 } else { 0 };
+    let pid = pid.unwrap_or(-1);
+
+    perf_event_sys(attr, pid, cpu, PERF_FLAG_FD_CLOEXEC)
+}
+
+pub(crate) fn perf_event_open_bpf(cpu: c_int) -> SysResult<OwnedFd> {
+    perf_event_open(
+        PERF_TYPE_SOFTWARE as u32,
+        PERF_COUNT_SW_BPF_OUTPUT as u64,
+        -1,
+        cpu,
+        1,
+        None,
+        true,
+        false,
+        PERF_FLAG_FD_CLOEXEC,
+    )
+}
+fn perf_event_sys(attr: perf_event_attr, pid: pid_t, cpu: i32, flags: u32) -> SysResult<OwnedFd> {
+    let fd = syscall(Syscall::PerfEventOpen {
+        attr,
+        pid,
+        cpu,
+        group: -1,
+        flags,
+    })?;
+
+    let fd = fd.try_into().map_err(|_| {
+        (
+            fd,
+            io::Error::new(
+                io::ErrorKind::InvalidData,
+                format!("perf_event_open: invalid fd returned: {fd}"),
+            ),
+        )
+    })?;
+
+    // SAFETY: perf_event_open returns a new file descriptor on success.
+    unsafe { Ok(OwnedFd::from_raw_fd(fd)) }
+}
+
+pub(crate) fn perf_event_ioctl(fd: BorrowedFd<'_>, request: c_int, arg: c_int) -> SysResult<i64> {
+    let call = Syscall::PerfEventIoctl { fd, request, arg };
+    syscall(call)
+}

+ 177 - 0
aya/src/util.rs

@@ -0,0 +1,177 @@
+use core::{
+    slice,
+    str::{FromStr, Utf8Error},
+};
+use std::io;
+
+/// Represents a kernel version in major.minor.release form.
+// Adapted from https://docs.rs/procfs/latest/procfs/sys/kernel/struct.Version.html.
+#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd)]
+pub struct KernelVersion {
+    pub(crate) major: u8,
+    pub(crate) minor: u8,
+    pub(crate) patch: u16,
+}
+
+#[derive(thiserror::Error, Debug)]
+enum CurrentKernelVersionError {
+    #[error("failed to read kernel version")]
+    IO(#[from] io::Error),
+    #[error("failed to parse kernel version")]
+    ParseError(String),
+    #[error("kernel version string is not valid UTF-8")]
+    Utf8(#[from] Utf8Error),
+}
+
+impl KernelVersion {
+    /// Constructor.
+    pub fn new(major: u8, minor: u8, patch: u16) -> Self {
+        Self {
+            major,
+            minor,
+            patch,
+        }
+    }
+    /// Returns the kernel version of the currently running kernel.
+    ///
+    /// Currently stubbed to report `0xff.0xff.0xff` (i.e. "newest possible");
+    /// the real lookup is left commented out below.
+    pub fn current() -> Result<Self, &'static str> {
+        // Self::get_kernel_version()
+        Ok(Self::new(0xff, 0xff, 0xff))
+    }
+
+    pub fn code(self) -> u32 {
+        let Self {
+            major,
+            minor,
+            mut patch,
+        } = self;
+
+        // Certain LTS kernels went above the "max" 255 patch so
+        // backports were done to cap the patch version
+        let max_patch = match (major, minor) {
+            // On 4.4 + 4.9, any patch 257 or above was hardcoded to 255.
+            // See: https://github.com/torvalds/linux/commit/a15813a +
+            // https://github.com/torvalds/linux/commit/42efb098
+            (4, 4 | 9) => 257,
+            // On 4.14, any patch 252 or above was hardcoded to 255.
+            // See: https://github.com/torvalds/linux/commit/e131e0e
+            (4, 14) => 252,
+            // On 4.19, any patch 222 or above was hardcoded to 255.
+            // See: https://github.com/torvalds/linux/commit/a256aac
+            (4, 19) => 222,
+            // For other kernels (i.e., newer LTS kernels as other
+            // ones won't reach 255+ patches) clamp it to 255. See:
+            // https://github.com/torvalds/linux/commit/9b82f13e
+            _ => 255,
+        };
+
+        // anything greater or equal to `max_patch` is hardcoded to
+        // 255.
+        if patch >= max_patch {
+            patch = 255;
+        }
+
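+        // Same packing as the kernel's KERNEL_VERSION(a, b, c) macro,
+        // e.g. 5.15.0 -> 0x050f00.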
+        (u32::from(major) << 16) + (u32::from(minor) << 8) + u32::from(patch)
+    }
+}
+
+/// Include bytes from a file for use in a subsequent [`crate::Ebpf::load`].
+///
+/// This macro differs from the standard `include_bytes!` macro since it also ensures that
+/// the bytes are correctly aligned to be parsed as an ELF binary. This avoids some nasty
+/// compilation errors when the resulting byte array does not have the correct alignment.
+///
+/// # Examples
+/// ```ignore
+/// use aya::{Ebpf, include_bytes_aligned};
+///
+/// let mut bpf = Ebpf::load(include_bytes_aligned!(
+///     "/path/to/bpf.o"
+/// ))?;
+///
+/// # Ok::<(), aya::EbpfError>(())
+/// ```
+#[macro_export]
+macro_rules! include_bytes_aligned {
+    ($path:expr) => {{
+        #[repr(align(32))]
+        pub struct Aligned32;
+
+        #[repr(C)]
+        pub struct Aligned<Bytes: ?Sized> {
+            pub _align: [Aligned32; 0],
+            pub bytes: Bytes,
+        }
+
+        const ALIGNED: &Aligned<[u8]> = &Aligned {
+            _align: [],
+            bytes: *include_bytes!($path),
+        };
+
+        &ALIGNED.bytes
+    }};
+}
+
+pub(crate) fn bytes_of_bpf_name(bpf_name: &[core::ffi::c_char; 16]) -> &[u8] {
+    let length = bpf_name
+        .iter()
+        .rposition(|ch| *ch != 0)
+        .map(|pos| pos + 1)
+        .unwrap_or(0);
+    unsafe { slice::from_raw_parts(bpf_name.as_ptr() as *const _, length) }
+}
+
+const ONLINE_CPUS: &str = "/sys/devices/system/cpu/online";
+pub(crate) const POSSIBLE_CPUS: &str = "/sys/devices/system/cpu/possible";
+
+/// Get the list of possible cpus.
+///
+/// See `/sys/devices/system/cpu/possible`. Currently stubbed to report a
+/// single CPU (index 0); the sysfs-based lookup is left commented out below.
+pub(crate) fn possible_cpus() -> Result<Vec<u32>, io::Error> {
+    // let data = fs::read_to_string(POSSIBLE_CPUS)?;
+    // parse_cpu_ranges(data.trim()).map_err(|_| {
+    //     io::Error::new(
+    //         io::ErrorKind::Other,
+    //         format!("unexpected {POSSIBLE_CPUS} format"),
+    //     )
+    // })
+    Ok(vec![0])
+}
+
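+/// Parses a sysfs CPU list such as `"0-3,5"` into the individual CPU indices
+/// (`[0, 1, 2, 3, 5]`).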
+fn parse_cpu_ranges(data: &str) -> Result<Vec<u32>, ()> {
+    let mut cpus = Vec::new();
+    for range in data.split(',') {
+        cpus.extend({
+            match range
+                .splitn(2, '-')
+                .map(u32::from_str)
+                .collect::<Result<Vec<_>, _>>()
+                .map_err(|_| ())?
+                .as_slice()
+            {
+                &[] | &[_, _, _, ..] => return Err(()),
+                &[start] => start..=start,
+                &[start, end] => start..=end,
+            }
+        })
+    }
+
+    Ok(cpus)
+}
+
+/// Returns the numeric IDs of the CPUs currently online.
+///
+/// Currently stubbed to report a single CPU (index 0); the sysfs-based lookup
+/// is left commented out below.
+pub fn online_cpus() -> Result<Vec<u32>, io::Error> {
+    // let data = fs::read_to_string(ONLINE_CPUS)?;
+    // parse_cpu_ranges(data.trim()).map_err(|_| {
+    //     io::Error::new(
+    //         io::ErrorKind::Other,
+    //         format!("unexpected {ONLINE_CPUS} format"),
+    //     )
+    // })
+    Ok(vec![0])
+}
+
+pub(crate) fn page_size() -> usize {
+    // The real implementation asks libc for sysconf(_SC_PAGESIZE); here a
+    // 4 KiB page size is assumed.
+    // (unsafe { sysconf(_SC_PAGESIZE) }) as usize
+    4096
+}

+ 4 - 0
rustfmt.toml

@@ -0,0 +1,4 @@
+group_imports="StdExternalCrate"
+reorder_imports=true
+imports_granularity="Crate"
+unstable_features = true

Some files were not shown because too many files changed in this diff