
Initial commit

Alessandro Decina 4 years ago
commit 15be301f8c

+ 2 - 0
.gitignore

@@ -0,0 +1,2 @@
+Cargo.lock
+target/

+ 12 - 0
Cargo.toml

@@ -0,0 +1,12 @@
+[package]
+name = "aya"
+version = "0.1.0"
+authors = ["Alessandro Decina <[email protected]>"]
+edition = "2018"
+
+[dependencies]
+libc = "0.2"
+thiserror = "1"
+object = "0.23"
+bytes = "1"
+lazy_static = "1"

+ 64 - 0
scripts/gen-bindings

@@ -0,0 +1,64 @@
+#!/usr/bin/env sh
+
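+# Usage: gen-bindings <libbpf-dir> <output-dir>
+#
+# Regenerates bpf_bindings.rs and perf_bindings.rs in <output-dir> by
+# running bindgen over libbpf's UAPI bpf.h and the local perf wrapper
+# header.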
+LIBBPF_DIR=$1
+OUTPUT_DIR=$2
+
+if test -z "$LIBBPF_DIR"; then
+    echo "error: no libbpf dir provided"
+    exit 1
+fi
+
+if test -z "$OUTPUT_DIR"; then
+    echo "error: no output dir provided"
+    exit 1
+fi
+
+BPF_TYPES="\
+    bpf_cmd \
+    bpf_insn \
+    bpf_attr \
+    bpf_map_type \
+    bpf_prog_type \
+    bpf_attach_type
+    "
+
+BPF_VARS="\
+    BPF_PSEUDO_.*
+    "
+
+PERF_TYPES="\
+    perf_event_attr \
+    perf_sw_ids \
+    perf_event_sample_format \
+    perf_event_mmap_page \
+    perf_event_header \
+    perf_type_id \
+    perf_event_type
+    "
+
+PERF_VARS="\
+    PERF_FLAG_.* \
+    PERF_EVENT_.*
+    "
+
+bindgen "$LIBBPF_DIR/include/uapi/linux/bpf.h" \
+    --no-layout-tests \
+    --default-enum-style moduleconsts \
+    $(for ty in $BPF_TYPES; do
+        echo --whitelist-type "$ty"
+    done) \
+    $(for var in $BPF_VARS; do
+        echo --whitelist-var "$var"
+    done) \
+    > "$OUTPUT_DIR/bpf_bindings.rs"
+
+bindgen include/perf_wrapper.h \
+    --no-layout-tests \
+    --default-enum-style moduleconsts \
+    $(for ty in $PERF_TYPES; do
+        echo --whitelist-type "$ty"
+    done) \
+    $(for var in $PERF_VARS; do
+        echo --whitelist-var "$var"
+    done) \
+    > "$OUTPUT_DIR/perf_bindings.rs"

+ 187 - 0
src/bpf.rs

@@ -0,0 +1,187 @@
+use std::collections::HashMap;
+
+use thiserror::Error;
+
+use crate::{
+    generated::bpf_insn,
+    maps::{Map, MapError},
+    obj::{relocate, Object, ParseError, RelocationError},
+    programs::{KProbe, Program, ProgramData, ProgramError, SocketFilter, TracePoint, UProbe, Xdp},
+    syscalls::bpf_map_update_elem_ptr,
+};
+
+pub use object::Pod;
+
+unsafe impl object::Pod for bpf_insn {}
+
+pub(crate) const BPF_OBJ_NAME_LEN: usize = 16;
+
+/* FIXME: these are arch dependent */
+pub(crate) const PERF_EVENT_IOC_ENABLE: libc::c_ulong = 9216;
+pub(crate) const PERF_EVENT_IOC_DISABLE: libc::c_ulong = 9217;
+pub(crate) const PERF_EVENT_IOC_SET_BPF: libc::c_ulong = 1074013192;
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct bpf_map_def {
+    pub(crate) map_type: u32,
+    pub(crate) key_size: u32,
+    pub(crate) value_size: u32,
+    pub(crate) max_entries: u32,
+    pub(crate) map_flags: u32,
+}
+
+unsafe impl object::Pod for bpf_map_def {}
+
+#[derive(Debug)]
+pub struct Bpf {
+    maps: HashMap<String, Map>,
+    programs: HashMap<String, Program>,
+}
+
+impl Bpf {
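+    /// Parses `data` as an eBPF ELF object, creates and initializes the
+    /// maps it defines, applies relocations, and wraps each program in
+    /// its typed `Program` variant.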
+    pub fn load(data: &[u8]) -> Result<Bpf, BpfError> {
+        let mut obj = Object::parse(data)?;
+
+        let mut maps = Vec::new();
+        for (_, obj) in obj.maps.drain() {
+            let mut map = Map { obj, fd: None };
+            let fd = map.create()?;
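+            // Seed the map with its ELF section data; .bss is skipped as
+            // it is implicitly zero-initialized.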
+            if !map.obj.data.is_empty() && map.obj.name != ".bss" {
+                bpf_map_update_elem_ptr(fd, &0 as *const _, map.obj.data.as_ptr(), 0)
+                    .map_err(|(code, io_error)| MapError::UpdateElementFailed { code, io_error })?;
+            }
+            maps.push(map);
+        }
+
+        relocate(&mut obj, maps.as_slice())?;
+
+        let programs = obj
+            .programs
+            .drain()
+            .map(|(name, obj)| {
+                let kind = obj.kind;
+                let data = ProgramData {
+                    obj,
+                    name: name.clone(),
+                    fd: None,
+                    links: Vec::new(),
+                };
+                let program = match kind {
+                    crate::obj::ProgramKind::KProbe => Program::KProbe(KProbe { data }),
+                    crate::obj::ProgramKind::UProbe => Program::UProbe(UProbe { data }),
+                    crate::obj::ProgramKind::TracePoint => Program::TracePoint(TracePoint { data }),
+                    crate::obj::ProgramKind::Xdp => Program::Xdp(Xdp { data }),
+                };
+
+                (name, program)
+            })
+            .collect();
+
+        Ok(Bpf {
+            maps: maps
+                .drain(..)
+                .map(|map| (map.obj.name.clone(), map))
+                .collect(),
+            programs,
+        })
+    }
+
+    pub fn map(&self, name: &str) -> Option<&Map> {
+        self.maps.get(name)
+    }
+
+    pub fn map_mut(&mut self, name: &str) -> Option<&mut Map> {
+        self.maps.get_mut(name)
+    }
+
+    pub fn program(&self, name: &str) -> Option<&Program> {
+        self.programs.get(name)
+    }
+
+    pub fn program_mut(&mut self, name: &str) -> Option<&mut Program> {
+        self.programs.get_mut(name)
+    }
+
+    pub fn kprobe(&self, name: &str) -> Option<&KProbe> {
+        match self.programs.get(name) {
+            Some(Program::KProbe(kprobe)) => Some(kprobe),
+            _ => None,
+        }
+    }
+
+    pub fn kprobe_mut(&mut self, name: &str) -> Option<&mut KProbe> {
+        match self.programs.get_mut(name) {
+            Some(Program::KProbe(kprobe)) => Some(kprobe),
+            _ => None,
+        }
+    }
+
+    pub fn uprobe(&self, name: &str) -> Option<&UProbe> {
+        match self.programs.get(name) {
+            Some(Program::UProbe(uprobe)) => Some(uprobe),
+            _ => None,
+        }
+    }
+
+    pub fn uprobe_mut(&mut self, name: &str) -> Option<&mut UProbe> {
+        match self.programs.get_mut(name) {
+            Some(Program::UProbe(uprobe)) => Some(uprobe),
+            _ => None,
+        }
+    }
+
+    pub fn trace_point(&self, name: &str) -> Option<&TracePoint> {
+        match self.programs.get(name) {
+            Some(Program::TracePoint(trace_point)) => Some(trace_point),
+            _ => None,
+        }
+    }
+
+    pub fn trace_point_mut(&mut self, name: &str) -> Option<&mut TracePoint> {
+        match self.programs.get_mut(name) {
+            Some(Program::TracePoint(trace_point)) => Some(trace_point),
+            _ => None,
+        }
+    }
+
+    pub fn socket_filter(&self, name: &str) -> Option<&SocketFilter> {
+        match self.programs.get(name) {
+            Some(Program::SocketFilter(socket_filter)) => Some(socket_filter),
+            _ => None,
+        }
+    }
+
+    pub fn socket_filter_mut(&mut self, name: &str) -> Option<&mut SocketFilter> {
+        match self.programs.get_mut(name) {
+            Some(Program::SocketFilter(socket_filter)) => Some(socket_filter),
+            _ => None,
+        }
+    }
+
+    pub fn xdp(&self, name: &str) -> Option<&Xdp> {
+        match self.programs.get(name) {
+            Some(Program::Xdp(xdp)) => Some(xdp),
+            _ => None,
+        }
+    }
+
+    pub fn xdp_mut(&mut self, name: &str) -> Option<&mut Xdp> {
+        match self.programs.get_mut(name) {
+            Some(Program::Xdp(xdp)) => Some(xdp),
+            _ => None,
+        }
+    }
+}
+
+#[derive(Debug, Error)]
+pub enum BpfError {
+    #[error("error parsing BPF object: {0}")]
+    ParseError(#[from] ParseError),
+    #[error("error relocating BPF object: {0}")]
+    RelocationError(#[from] RelocationError),
+    #[error("map error: {0}")]
+    MapError(#[from] MapError),
+    #[error("program error: {0}")]
+    ProgramError(#[from] ProgramError),
+}
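
A minimal usage sketch of the loader above. The object path and program name are hypothetical, and the attach step lives in src/programs.rs (part of this commit, not shown in this excerpt):

    use aya::Bpf;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        // Read a compiled eBPF ELF object (path is a placeholder).
        let data = std::fs::read("probe.o")?;
        // Parse the object, create and initialize maps, apply relocations.
        let mut bpf = Bpf::load(&data)?;
        // Typed accessors return None if the name is missing or the
        // program is of a different kind.
        if let Some(_kprobe) = bpf.kprobe_mut("intercept_open") {
            // kprobe.attach(...) would go here (see src/programs.rs).
        }
        Ok(())
    }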

+ 542 - 0
src/generated/bpf_bindings.rs

@@ -0,0 +1,542 @@
+/* automatically generated by rust-bindgen 0.55.1 */
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct __BindgenBitfieldUnit<Storage, Align> {
+    storage: Storage,
+    align: [Align; 0],
+}
+impl<Storage, Align> __BindgenBitfieldUnit<Storage, Align> {
+    #[inline]
+    pub const fn new(storage: Storage) -> Self {
+        Self { storage, align: [] }
+    }
+}
+impl<Storage, Align> __BindgenBitfieldUnit<Storage, Align>
+where
+    Storage: AsRef<[u8]> + AsMut<[u8]>,
+{
+    #[inline]
+    pub fn get_bit(&self, index: usize) -> bool {
+        debug_assert!(index / 8 < self.storage.as_ref().len());
+        let byte_index = index / 8;
+        let byte = self.storage.as_ref()[byte_index];
+        let bit_index = if cfg!(target_endian = "big") {
+            7 - (index % 8)
+        } else {
+            index % 8
+        };
+        let mask = 1 << bit_index;
+        byte & mask == mask
+    }
+    #[inline]
+    pub fn set_bit(&mut self, index: usize, val: bool) {
+        debug_assert!(index / 8 < self.storage.as_ref().len());
+        let byte_index = index / 8;
+        let byte = &mut self.storage.as_mut()[byte_index];
+        let bit_index = if cfg!(target_endian = "big") {
+            7 - (index % 8)
+        } else {
+            index % 8
+        };
+        let mask = 1 << bit_index;
+        if val {
+            *byte |= mask;
+        } else {
+            *byte &= !mask;
+        }
+    }
+    #[inline]
+    pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+        let mut val = 0;
+        for i in 0..(bit_width as usize) {
+            if self.get_bit(i + bit_offset) {
+                let index = if cfg!(target_endian = "big") {
+                    bit_width as usize - 1 - i
+                } else {
+                    i
+                };
+                val |= 1 << index;
+            }
+        }
+        val
+    }
+    #[inline]
+    pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+        for i in 0..(bit_width as usize) {
+            let mask = 1 << i;
+            let val_bit_is_set = val & mask == mask;
+            let index = if cfg!(target_endian = "big") {
+                bit_width as usize - 1 - i
+            } else {
+                i
+            };
+            self.set_bit(index + bit_offset, val_bit_is_set);
+        }
+    }
+}
+pub const BPF_PSEUDO_MAP_FD: u32 = 1;
+pub const BPF_PSEUDO_MAP_VALUE: u32 = 2;
+pub const BPF_PSEUDO_BTF_ID: u32 = 3;
+pub const BPF_PSEUDO_CALL: u32 = 1;
+pub type __u8 = ::std::os::raw::c_uchar;
+pub type __s16 = ::std::os::raw::c_short;
+pub type __s32 = ::std::os::raw::c_int;
+pub type __u32 = ::std::os::raw::c_uint;
+pub type __u64 = ::std::os::raw::c_ulonglong;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_insn {
+    pub code: __u8,
+    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize], u8>,
+    pub off: __s16,
+    pub imm: __s32,
+}
+impl bpf_insn {
+    #[inline]
+    pub fn dst_reg(&self) -> __u8 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u8) }
+    }
+    #[inline]
+    pub fn set_dst_reg(&mut self, val: __u8) {
+        unsafe {
+            let val: u8 = ::std::mem::transmute(val);
+            self._bitfield_1.set(0usize, 4u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn src_reg(&self) -> __u8 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) }
+    }
+    #[inline]
+    pub fn set_src_reg(&mut self, val: __u8) {
+        unsafe {
+            let val: u8 = ::std::mem::transmute(val);
+            self._bitfield_1.set(4usize, 4u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize], u8> {
+        let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize], u8> =
+            Default::default();
+        __bindgen_bitfield_unit.set(0usize, 4u8, {
+            let dst_reg: u8 = unsafe { ::std::mem::transmute(dst_reg) };
+            dst_reg as u64
+        });
+        __bindgen_bitfield_unit.set(4usize, 4u8, {
+            let src_reg: u8 = unsafe { ::std::mem::transmute(src_reg) };
+            src_reg as u64
+        });
+        __bindgen_bitfield_unit
+    }
+}
+pub mod bpf_cmd {
+    pub type Type = ::std::os::raw::c_uint;
+    pub const BPF_MAP_CREATE: Type = 0;
+    pub const BPF_MAP_LOOKUP_ELEM: Type = 1;
+    pub const BPF_MAP_UPDATE_ELEM: Type = 2;
+    pub const BPF_MAP_DELETE_ELEM: Type = 3;
+    pub const BPF_MAP_GET_NEXT_KEY: Type = 4;
+    pub const BPF_PROG_LOAD: Type = 5;
+    pub const BPF_OBJ_PIN: Type = 6;
+    pub const BPF_OBJ_GET: Type = 7;
+    pub const BPF_PROG_ATTACH: Type = 8;
+    pub const BPF_PROG_DETACH: Type = 9;
+    pub const BPF_PROG_TEST_RUN: Type = 10;
+    pub const BPF_PROG_GET_NEXT_ID: Type = 11;
+    pub const BPF_MAP_GET_NEXT_ID: Type = 12;
+    pub const BPF_PROG_GET_FD_BY_ID: Type = 13;
+    pub const BPF_MAP_GET_FD_BY_ID: Type = 14;
+    pub const BPF_OBJ_GET_INFO_BY_FD: Type = 15;
+    pub const BPF_PROG_QUERY: Type = 16;
+    pub const BPF_RAW_TRACEPOINT_OPEN: Type = 17;
+    pub const BPF_BTF_LOAD: Type = 18;
+    pub const BPF_BTF_GET_FD_BY_ID: Type = 19;
+    pub const BPF_TASK_FD_QUERY: Type = 20;
+    pub const BPF_MAP_LOOKUP_AND_DELETE_ELEM: Type = 21;
+    pub const BPF_MAP_FREEZE: Type = 22;
+    pub const BPF_BTF_GET_NEXT_ID: Type = 23;
+    pub const BPF_MAP_LOOKUP_BATCH: Type = 24;
+    pub const BPF_MAP_LOOKUP_AND_DELETE_BATCH: Type = 25;
+    pub const BPF_MAP_UPDATE_BATCH: Type = 26;
+    pub const BPF_MAP_DELETE_BATCH: Type = 27;
+    pub const BPF_LINK_CREATE: Type = 28;
+    pub const BPF_LINK_UPDATE: Type = 29;
+    pub const BPF_LINK_GET_FD_BY_ID: Type = 30;
+    pub const BPF_LINK_GET_NEXT_ID: Type = 31;
+    pub const BPF_ENABLE_STATS: Type = 32;
+    pub const BPF_ITER_CREATE: Type = 33;
+    pub const BPF_LINK_DETACH: Type = 34;
+    pub const BPF_PROG_BIND_MAP: Type = 35;
+}
+pub mod bpf_map_type {
+    pub type Type = ::std::os::raw::c_uint;
+    pub const BPF_MAP_TYPE_UNSPEC: Type = 0;
+    pub const BPF_MAP_TYPE_HASH: Type = 1;
+    pub const BPF_MAP_TYPE_ARRAY: Type = 2;
+    pub const BPF_MAP_TYPE_PROG_ARRAY: Type = 3;
+    pub const BPF_MAP_TYPE_PERF_EVENT_ARRAY: Type = 4;
+    pub const BPF_MAP_TYPE_PERCPU_HASH: Type = 5;
+    pub const BPF_MAP_TYPE_PERCPU_ARRAY: Type = 6;
+    pub const BPF_MAP_TYPE_STACK_TRACE: Type = 7;
+    pub const BPF_MAP_TYPE_CGROUP_ARRAY: Type = 8;
+    pub const BPF_MAP_TYPE_LRU_HASH: Type = 9;
+    pub const BPF_MAP_TYPE_LRU_PERCPU_HASH: Type = 10;
+    pub const BPF_MAP_TYPE_LPM_TRIE: Type = 11;
+    pub const BPF_MAP_TYPE_ARRAY_OF_MAPS: Type = 12;
+    pub const BPF_MAP_TYPE_HASH_OF_MAPS: Type = 13;
+    pub const BPF_MAP_TYPE_DEVMAP: Type = 14;
+    pub const BPF_MAP_TYPE_SOCKMAP: Type = 15;
+    pub const BPF_MAP_TYPE_CPUMAP: Type = 16;
+    pub const BPF_MAP_TYPE_XSKMAP: Type = 17;
+    pub const BPF_MAP_TYPE_SOCKHASH: Type = 18;
+    pub const BPF_MAP_TYPE_CGROUP_STORAGE: Type = 19;
+    pub const BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: Type = 20;
+    pub const BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: Type = 21;
+    pub const BPF_MAP_TYPE_QUEUE: Type = 22;
+    pub const BPF_MAP_TYPE_STACK: Type = 23;
+    pub const BPF_MAP_TYPE_SK_STORAGE: Type = 24;
+    pub const BPF_MAP_TYPE_DEVMAP_HASH: Type = 25;
+    pub const BPF_MAP_TYPE_STRUCT_OPS: Type = 26;
+    pub const BPF_MAP_TYPE_RINGBUF: Type = 27;
+    pub const BPF_MAP_TYPE_INODE_STORAGE: Type = 28;
+    pub const BPF_MAP_TYPE_TASK_STORAGE: Type = 29;
+}
+pub mod bpf_prog_type {
+    pub type Type = ::std::os::raw::c_uint;
+    pub const BPF_PROG_TYPE_UNSPEC: Type = 0;
+    pub const BPF_PROG_TYPE_SOCKET_FILTER: Type = 1;
+    pub const BPF_PROG_TYPE_KPROBE: Type = 2;
+    pub const BPF_PROG_TYPE_SCHED_CLS: Type = 3;
+    pub const BPF_PROG_TYPE_SCHED_ACT: Type = 4;
+    pub const BPF_PROG_TYPE_TRACEPOINT: Type = 5;
+    pub const BPF_PROG_TYPE_XDP: Type = 6;
+    pub const BPF_PROG_TYPE_PERF_EVENT: Type = 7;
+    pub const BPF_PROG_TYPE_CGROUP_SKB: Type = 8;
+    pub const BPF_PROG_TYPE_CGROUP_SOCK: Type = 9;
+    pub const BPF_PROG_TYPE_LWT_IN: Type = 10;
+    pub const BPF_PROG_TYPE_LWT_OUT: Type = 11;
+    pub const BPF_PROG_TYPE_LWT_XMIT: Type = 12;
+    pub const BPF_PROG_TYPE_SOCK_OPS: Type = 13;
+    pub const BPF_PROG_TYPE_SK_SKB: Type = 14;
+    pub const BPF_PROG_TYPE_CGROUP_DEVICE: Type = 15;
+    pub const BPF_PROG_TYPE_SK_MSG: Type = 16;
+    pub const BPF_PROG_TYPE_RAW_TRACEPOINT: Type = 17;
+    pub const BPF_PROG_TYPE_CGROUP_SOCK_ADDR: Type = 18;
+    pub const BPF_PROG_TYPE_LWT_SEG6LOCAL: Type = 19;
+    pub const BPF_PROG_TYPE_LIRC_MODE2: Type = 20;
+    pub const BPF_PROG_TYPE_SK_REUSEPORT: Type = 21;
+    pub const BPF_PROG_TYPE_FLOW_DISSECTOR: Type = 22;
+    pub const BPF_PROG_TYPE_CGROUP_SYSCTL: Type = 23;
+    pub const BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: Type = 24;
+    pub const BPF_PROG_TYPE_CGROUP_SOCKOPT: Type = 25;
+    pub const BPF_PROG_TYPE_TRACING: Type = 26;
+    pub const BPF_PROG_TYPE_STRUCT_OPS: Type = 27;
+    pub const BPF_PROG_TYPE_EXT: Type = 28;
+    pub const BPF_PROG_TYPE_LSM: Type = 29;
+    pub const BPF_PROG_TYPE_SK_LOOKUP: Type = 30;
+}
+pub mod bpf_attach_type {
+    pub type Type = ::std::os::raw::c_uint;
+    pub const BPF_CGROUP_INET_INGRESS: Type = 0;
+    pub const BPF_CGROUP_INET_EGRESS: Type = 1;
+    pub const BPF_CGROUP_INET_SOCK_CREATE: Type = 2;
+    pub const BPF_CGROUP_SOCK_OPS: Type = 3;
+    pub const BPF_SK_SKB_STREAM_PARSER: Type = 4;
+    pub const BPF_SK_SKB_STREAM_VERDICT: Type = 5;
+    pub const BPF_CGROUP_DEVICE: Type = 6;
+    pub const BPF_SK_MSG_VERDICT: Type = 7;
+    pub const BPF_CGROUP_INET4_BIND: Type = 8;
+    pub const BPF_CGROUP_INET6_BIND: Type = 9;
+    pub const BPF_CGROUP_INET4_CONNECT: Type = 10;
+    pub const BPF_CGROUP_INET6_CONNECT: Type = 11;
+    pub const BPF_CGROUP_INET4_POST_BIND: Type = 12;
+    pub const BPF_CGROUP_INET6_POST_BIND: Type = 13;
+    pub const BPF_CGROUP_UDP4_SENDMSG: Type = 14;
+    pub const BPF_CGROUP_UDP6_SENDMSG: Type = 15;
+    pub const BPF_LIRC_MODE2: Type = 16;
+    pub const BPF_FLOW_DISSECTOR: Type = 17;
+    pub const BPF_CGROUP_SYSCTL: Type = 18;
+    pub const BPF_CGROUP_UDP4_RECVMSG: Type = 19;
+    pub const BPF_CGROUP_UDP6_RECVMSG: Type = 20;
+    pub const BPF_CGROUP_GETSOCKOPT: Type = 21;
+    pub const BPF_CGROUP_SETSOCKOPT: Type = 22;
+    pub const BPF_TRACE_RAW_TP: Type = 23;
+    pub const BPF_TRACE_FENTRY: Type = 24;
+    pub const BPF_TRACE_FEXIT: Type = 25;
+    pub const BPF_MODIFY_RETURN: Type = 26;
+    pub const BPF_LSM_MAC: Type = 27;
+    pub const BPF_TRACE_ITER: Type = 28;
+    pub const BPF_CGROUP_INET4_GETPEERNAME: Type = 29;
+    pub const BPF_CGROUP_INET6_GETPEERNAME: Type = 30;
+    pub const BPF_CGROUP_INET4_GETSOCKNAME: Type = 31;
+    pub const BPF_CGROUP_INET6_GETSOCKNAME: Type = 32;
+    pub const BPF_XDP_DEVMAP: Type = 33;
+    pub const BPF_CGROUP_INET_SOCK_RELEASE: Type = 34;
+    pub const BPF_XDP_CPUMAP: Type = 35;
+    pub const BPF_SK_LOOKUP: Type = 36;
+    pub const BPF_XDP: Type = 37;
+    pub const __MAX_BPF_ATTACH_TYPE: Type = 38;
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union bpf_attr {
+    pub __bindgen_anon_1: bpf_attr__bindgen_ty_1,
+    pub __bindgen_anon_2: bpf_attr__bindgen_ty_2,
+    pub batch: bpf_attr__bindgen_ty_3,
+    pub __bindgen_anon_3: bpf_attr__bindgen_ty_4,
+    pub __bindgen_anon_4: bpf_attr__bindgen_ty_5,
+    pub __bindgen_anon_5: bpf_attr__bindgen_ty_6,
+    pub test: bpf_attr__bindgen_ty_7,
+    pub __bindgen_anon_6: bpf_attr__bindgen_ty_8,
+    pub info: bpf_attr__bindgen_ty_9,
+    pub query: bpf_attr__bindgen_ty_10,
+    pub raw_tracepoint: bpf_attr__bindgen_ty_11,
+    pub __bindgen_anon_7: bpf_attr__bindgen_ty_12,
+    pub task_fd_query: bpf_attr__bindgen_ty_13,
+    pub link_create: bpf_attr__bindgen_ty_14,
+    pub link_update: bpf_attr__bindgen_ty_15,
+    pub link_detach: bpf_attr__bindgen_ty_16,
+    pub enable_stats: bpf_attr__bindgen_ty_17,
+    pub iter_create: bpf_attr__bindgen_ty_18,
+    pub prog_bind_map: bpf_attr__bindgen_ty_19,
+    _bindgen_union_align: [u64; 15usize],
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_1 {
+    pub map_type: __u32,
+    pub key_size: __u32,
+    pub value_size: __u32,
+    pub max_entries: __u32,
+    pub map_flags: __u32,
+    pub inner_map_fd: __u32,
+    pub numa_node: __u32,
+    pub map_name: [::std::os::raw::c_char; 16usize],
+    pub map_ifindex: __u32,
+    pub btf_fd: __u32,
+    pub btf_key_type_id: __u32,
+    pub btf_value_type_id: __u32,
+    pub btf_vmlinux_value_type_id: __u32,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_2 {
+    pub map_fd: __u32,
+    pub key: __u64,
+    pub __bindgen_anon_1: bpf_attr__bindgen_ty_2__bindgen_ty_1,
+    pub flags: __u64,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union bpf_attr__bindgen_ty_2__bindgen_ty_1 {
+    pub value: __u64,
+    pub next_key: __u64,
+    _bindgen_union_align: u64,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_3 {
+    pub in_batch: __u64,
+    pub out_batch: __u64,
+    pub keys: __u64,
+    pub values: __u64,
+    pub count: __u32,
+    pub map_fd: __u32,
+    pub elem_flags: __u64,
+    pub flags: __u64,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_4 {
+    pub prog_type: __u32,
+    pub insn_cnt: __u32,
+    pub insns: __u64,
+    pub license: __u64,
+    pub log_level: __u32,
+    pub log_size: __u32,
+    pub log_buf: __u64,
+    pub kern_version: __u32,
+    pub prog_flags: __u32,
+    pub prog_name: [::std::os::raw::c_char; 16usize],
+    pub prog_ifindex: __u32,
+    pub expected_attach_type: __u32,
+    pub prog_btf_fd: __u32,
+    pub func_info_rec_size: __u32,
+    pub func_info: __u64,
+    pub func_info_cnt: __u32,
+    pub line_info_rec_size: __u32,
+    pub line_info: __u64,
+    pub line_info_cnt: __u32,
+    pub attach_btf_id: __u32,
+    pub __bindgen_anon_1: bpf_attr__bindgen_ty_4__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union bpf_attr__bindgen_ty_4__bindgen_ty_1 {
+    pub attach_prog_fd: __u32,
+    pub attach_btf_obj_fd: __u32,
+    _bindgen_union_align: u32,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_5 {
+    pub pathname: __u64,
+    pub bpf_fd: __u32,
+    pub file_flags: __u32,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_6 {
+    pub target_fd: __u32,
+    pub attach_bpf_fd: __u32,
+    pub attach_type: __u32,
+    pub attach_flags: __u32,
+    pub replace_bpf_fd: __u32,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_7 {
+    pub prog_fd: __u32,
+    pub retval: __u32,
+    pub data_size_in: __u32,
+    pub data_size_out: __u32,
+    pub data_in: __u64,
+    pub data_out: __u64,
+    pub repeat: __u32,
+    pub duration: __u32,
+    pub ctx_size_in: __u32,
+    pub ctx_size_out: __u32,
+    pub ctx_in: __u64,
+    pub ctx_out: __u64,
+    pub flags: __u32,
+    pub cpu: __u32,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_8 {
+    pub __bindgen_anon_1: bpf_attr__bindgen_ty_8__bindgen_ty_1,
+    pub next_id: __u32,
+    pub open_flags: __u32,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union bpf_attr__bindgen_ty_8__bindgen_ty_1 {
+    pub start_id: __u32,
+    pub prog_id: __u32,
+    pub map_id: __u32,
+    pub btf_id: __u32,
+    pub link_id: __u32,
+    _bindgen_union_align: u32,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_9 {
+    pub bpf_fd: __u32,
+    pub info_len: __u32,
+    pub info: __u64,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_10 {
+    pub target_fd: __u32,
+    pub attach_type: __u32,
+    pub query_flags: __u32,
+    pub attach_flags: __u32,
+    pub prog_ids: __u64,
+    pub prog_cnt: __u32,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_11 {
+    pub name: __u64,
+    pub prog_fd: __u32,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_12 {
+    pub btf: __u64,
+    pub btf_log_buf: __u64,
+    pub btf_size: __u32,
+    pub btf_log_size: __u32,
+    pub btf_log_level: __u32,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_13 {
+    pub pid: __u32,
+    pub fd: __u32,
+    pub flags: __u32,
+    pub buf_len: __u32,
+    pub buf: __u64,
+    pub prog_id: __u32,
+    pub fd_type: __u32,
+    pub probe_offset: __u64,
+    pub probe_addr: __u64,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_14 {
+    pub prog_fd: __u32,
+    pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_1,
+    pub attach_type: __u32,
+    pub flags: __u32,
+    pub __bindgen_anon_2: bpf_attr__bindgen_ty_14__bindgen_ty_2,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union bpf_attr__bindgen_ty_14__bindgen_ty_1 {
+    pub target_fd: __u32,
+    pub target_ifindex: __u32,
+    _bindgen_union_align: u32,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union bpf_attr__bindgen_ty_14__bindgen_ty_2 {
+    pub target_btf_id: __u32,
+    pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_1,
+    _bindgen_union_align: [u64; 2usize],
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_1 {
+    pub iter_info: __u64,
+    pub iter_info_len: __u32,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_15 {
+    pub link_fd: __u32,
+    pub new_prog_fd: __u32,
+    pub flags: __u32,
+    pub old_prog_fd: __u32,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_16 {
+    pub link_fd: __u32,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_17 {
+    pub type_: __u32,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_18 {
+    pub link_fd: __u32,
+    pub flags: __u32,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct bpf_attr__bindgen_ty_19 {
+    pub prog_fd: __u32,
+    pub map_fd: __u32,
+    pub flags: __u32,
+}
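
These bindings are private to the crate (see src/lib.rs below). A small in-crate sketch of the generated bitfield accessors on bpf_insn, purely illustrative:

    use crate::generated::bpf_insn;

    fn demo_insn() {
        // dst_reg and src_reg share one byte through the bitfield unit.
        let mut insn = bpf_insn {
            code: 0,
            _bitfield_1: bpf_insn::new_bitfield_1(1, 2), // dst_reg = 1, src_reg = 2
            off: 0,
            imm: 0,
        };
        assert_eq!(insn.dst_reg(), 1);
        insn.set_src_reg(10); // 4-bit field, valid values are 0..=15
        assert_eq!(insn.src_reg(), 10);
    }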

+ 9 - 0
src/generated/mod.rs

@@ -0,0 +1,9 @@
+#![allow(dead_code, non_camel_case_types, non_snake_case)]
+
+// FIXME: generate for x86_64 and aarch64
+
+mod bpf_bindings;
+mod perf_bindings;
+
+pub use bpf_bindings::*;
+pub use perf_bindings::*;

+ 917 - 0
src/generated/perf_bindings.rs

@@ -0,0 +1,917 @@
+/* automatically generated by rust-bindgen 0.55.1 */
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct __BindgenBitfieldUnit<Storage, Align> {
+    storage: Storage,
+    align: [Align; 0],
+}
+impl<Storage, Align> __BindgenBitfieldUnit<Storage, Align> {
+    #[inline]
+    pub const fn new(storage: Storage) -> Self {
+        Self { storage, align: [] }
+    }
+}
+impl<Storage, Align> __BindgenBitfieldUnit<Storage, Align>
+where
+    Storage: AsRef<[u8]> + AsMut<[u8]>,
+{
+    #[inline]
+    pub fn get_bit(&self, index: usize) -> bool {
+        debug_assert!(index / 8 < self.storage.as_ref().len());
+        let byte_index = index / 8;
+        let byte = self.storage.as_ref()[byte_index];
+        let bit_index = if cfg!(target_endian = "big") {
+            7 - (index % 8)
+        } else {
+            index % 8
+        };
+        let mask = 1 << bit_index;
+        byte & mask == mask
+    }
+    #[inline]
+    pub fn set_bit(&mut self, index: usize, val: bool) {
+        debug_assert!(index / 8 < self.storage.as_ref().len());
+        let byte_index = index / 8;
+        let byte = &mut self.storage.as_mut()[byte_index];
+        let bit_index = if cfg!(target_endian = "big") {
+            7 - (index % 8)
+        } else {
+            index % 8
+        };
+        let mask = 1 << bit_index;
+        if val {
+            *byte |= mask;
+        } else {
+            *byte &= !mask;
+        }
+    }
+    #[inline]
+    pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+        let mut val = 0;
+        for i in 0..(bit_width as usize) {
+            if self.get_bit(i + bit_offset) {
+                let index = if cfg!(target_endian = "big") {
+                    bit_width as usize - 1 - i
+                } else {
+                    i
+                };
+                val |= 1 << index;
+            }
+        }
+        val
+    }
+    #[inline]
+    pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+        for i in 0..(bit_width as usize) {
+            let mask = 1 << i;
+            let val_bit_is_set = val & mask == mask;
+            let index = if cfg!(target_endian = "big") {
+                bit_width as usize - 1 - i
+            } else {
+                i
+            };
+            self.set_bit(index + bit_offset, val_bit_is_set);
+        }
+    }
+}
+pub const PERF_FLAG_FD_NO_GROUP: u32 = 1;
+pub const PERF_FLAG_FD_OUTPUT: u32 = 2;
+pub const PERF_FLAG_PID_CGROUP: u32 = 4;
+pub const PERF_FLAG_FD_CLOEXEC: u32 = 8;
+pub type __u8 = ::std::os::raw::c_uchar;
+pub type __u16 = ::std::os::raw::c_ushort;
+pub type __s32 = ::std::os::raw::c_int;
+pub type __u32 = ::std::os::raw::c_uint;
+pub type __s64 = ::std::os::raw::c_longlong;
+pub type __u64 = ::std::os::raw::c_ulonglong;
+pub mod perf_type_id {
+    pub type Type = ::std::os::raw::c_uint;
+    pub const PERF_TYPE_HARDWARE: Type = 0;
+    pub const PERF_TYPE_SOFTWARE: Type = 1;
+    pub const PERF_TYPE_TRACEPOINT: Type = 2;
+    pub const PERF_TYPE_HW_CACHE: Type = 3;
+    pub const PERF_TYPE_RAW: Type = 4;
+    pub const PERF_TYPE_BREAKPOINT: Type = 5;
+    pub const PERF_TYPE_MAX: Type = 6;
+}
+pub mod perf_sw_ids {
+    pub type Type = ::std::os::raw::c_uint;
+    pub const PERF_COUNT_SW_CPU_CLOCK: Type = 0;
+    pub const PERF_COUNT_SW_TASK_CLOCK: Type = 1;
+    pub const PERF_COUNT_SW_PAGE_FAULTS: Type = 2;
+    pub const PERF_COUNT_SW_CONTEXT_SWITCHES: Type = 3;
+    pub const PERF_COUNT_SW_CPU_MIGRATIONS: Type = 4;
+    pub const PERF_COUNT_SW_PAGE_FAULTS_MIN: Type = 5;
+    pub const PERF_COUNT_SW_PAGE_FAULTS_MAJ: Type = 6;
+    pub const PERF_COUNT_SW_ALIGNMENT_FAULTS: Type = 7;
+    pub const PERF_COUNT_SW_EMULATION_FAULTS: Type = 8;
+    pub const PERF_COUNT_SW_DUMMY: Type = 9;
+    pub const PERF_COUNT_SW_BPF_OUTPUT: Type = 10;
+    pub const PERF_COUNT_SW_MAX: Type = 11;
+}
+pub mod perf_event_sample_format {
+    pub type Type = ::std::os::raw::c_ulong;
+    pub const PERF_SAMPLE_IP: Type = 1;
+    pub const PERF_SAMPLE_TID: Type = 2;
+    pub const PERF_SAMPLE_TIME: Type = 4;
+    pub const PERF_SAMPLE_ADDR: Type = 8;
+    pub const PERF_SAMPLE_READ: Type = 16;
+    pub const PERF_SAMPLE_CALLCHAIN: Type = 32;
+    pub const PERF_SAMPLE_ID: Type = 64;
+    pub const PERF_SAMPLE_CPU: Type = 128;
+    pub const PERF_SAMPLE_PERIOD: Type = 256;
+    pub const PERF_SAMPLE_STREAM_ID: Type = 512;
+    pub const PERF_SAMPLE_RAW: Type = 1024;
+    pub const PERF_SAMPLE_BRANCH_STACK: Type = 2048;
+    pub const PERF_SAMPLE_REGS_USER: Type = 4096;
+    pub const PERF_SAMPLE_STACK_USER: Type = 8192;
+    pub const PERF_SAMPLE_WEIGHT: Type = 16384;
+    pub const PERF_SAMPLE_DATA_SRC: Type = 32768;
+    pub const PERF_SAMPLE_IDENTIFIER: Type = 65536;
+    pub const PERF_SAMPLE_TRANSACTION: Type = 131072;
+    pub const PERF_SAMPLE_REGS_INTR: Type = 262144;
+    pub const PERF_SAMPLE_PHYS_ADDR: Type = 524288;
+    pub const PERF_SAMPLE_AUX: Type = 1048576;
+    pub const PERF_SAMPLE_CGROUP: Type = 2097152;
+    pub const PERF_SAMPLE_MAX: Type = 4194304;
+    pub const __PERF_SAMPLE_CALLCHAIN_EARLY: Type = 9223372036854775808;
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct perf_event_attr {
+    pub type_: __u32,
+    pub size: __u32,
+    pub config: __u64,
+    pub __bindgen_anon_1: perf_event_attr__bindgen_ty_1,
+    pub sample_type: __u64,
+    pub read_format: __u64,
+    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize], u32>,
+    pub __bindgen_anon_2: perf_event_attr__bindgen_ty_2,
+    pub bp_type: __u32,
+    pub __bindgen_anon_3: perf_event_attr__bindgen_ty_3,
+    pub __bindgen_anon_4: perf_event_attr__bindgen_ty_4,
+    pub branch_sample_type: __u64,
+    pub sample_regs_user: __u64,
+    pub sample_stack_user: __u32,
+    pub clockid: __s32,
+    pub sample_regs_intr: __u64,
+    pub aux_watermark: __u32,
+    pub sample_max_stack: __u16,
+    pub __reserved_2: __u16,
+    pub aux_sample_size: __u32,
+    pub __reserved_3: __u32,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union perf_event_attr__bindgen_ty_1 {
+    pub sample_period: __u64,
+    pub sample_freq: __u64,
+    _bindgen_union_align: u64,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union perf_event_attr__bindgen_ty_2 {
+    pub wakeup_events: __u32,
+    pub wakeup_watermark: __u32,
+    _bindgen_union_align: u32,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union perf_event_attr__bindgen_ty_3 {
+    pub bp_addr: __u64,
+    pub kprobe_func: __u64,
+    pub uprobe_path: __u64,
+    pub config1: __u64,
+    _bindgen_union_align: u64,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union perf_event_attr__bindgen_ty_4 {
+    pub bp_len: __u64,
+    pub kprobe_addr: __u64,
+    pub probe_offset: __u64,
+    pub config2: __u64,
+    _bindgen_union_align: u64,
+}
+impl perf_event_attr {
+    #[inline]
+    pub fn disabled(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_disabled(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(0usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn inherit(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_inherit(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(1usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn pinned(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_pinned(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(2usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn exclusive(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_exclusive(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(3usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn exclude_user(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_exclude_user(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(4usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn exclude_kernel(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_exclude_kernel(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(5usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn exclude_hv(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_exclude_hv(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(6usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn exclude_idle(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_exclude_idle(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(7usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn mmap(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_mmap(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(8usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn comm(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_comm(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(9usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn freq(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_freq(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(10usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn inherit_stat(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_inherit_stat(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(11usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn enable_on_exec(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_enable_on_exec(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(12usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn task(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_task(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(13usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn watermark(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_watermark(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(14usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn precise_ip(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(15usize, 2u8) as u64) }
+    }
+    #[inline]
+    pub fn set_precise_ip(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(15usize, 2u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn mmap_data(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(17usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_mmap_data(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(17usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn sample_id_all(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(18usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_sample_id_all(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(18usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn exclude_host(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(19usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_exclude_host(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(19usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn exclude_guest(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(20usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_exclude_guest(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(20usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn exclude_callchain_kernel(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(21usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_exclude_callchain_kernel(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(21usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn exclude_callchain_user(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(22usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_exclude_callchain_user(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(22usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn mmap2(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(23usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_mmap2(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(23usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn comm_exec(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_comm_exec(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(24usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn use_clockid(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(25usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_use_clockid(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(25usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn context_switch(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(26usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_context_switch(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(26usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn write_backward(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(27usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_write_backward(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(27usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn namespaces(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(28usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_namespaces(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(28usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn ksymbol(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(29usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_ksymbol(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(29usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn bpf_event(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(30usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_bpf_event(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(30usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn aux_output(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_aux_output(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(31usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn cgroup(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_cgroup(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(32usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn __reserved_1(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(33usize, 31u8) as u64) }
+    }
+    #[inline]
+    pub fn set___reserved_1(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(33usize, 31u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn new_bitfield_1(
+        disabled: __u64,
+        inherit: __u64,
+        pinned: __u64,
+        exclusive: __u64,
+        exclude_user: __u64,
+        exclude_kernel: __u64,
+        exclude_hv: __u64,
+        exclude_idle: __u64,
+        mmap: __u64,
+        comm: __u64,
+        freq: __u64,
+        inherit_stat: __u64,
+        enable_on_exec: __u64,
+        task: __u64,
+        watermark: __u64,
+        precise_ip: __u64,
+        mmap_data: __u64,
+        sample_id_all: __u64,
+        exclude_host: __u64,
+        exclude_guest: __u64,
+        exclude_callchain_kernel: __u64,
+        exclude_callchain_user: __u64,
+        mmap2: __u64,
+        comm_exec: __u64,
+        use_clockid: __u64,
+        context_switch: __u64,
+        write_backward: __u64,
+        namespaces: __u64,
+        ksymbol: __u64,
+        bpf_event: __u64,
+        aux_output: __u64,
+        cgroup: __u64,
+        __reserved_1: __u64,
+    ) -> __BindgenBitfieldUnit<[u8; 8usize], u32> {
+        let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize], u32> =
+            Default::default();
+        __bindgen_bitfield_unit.set(0usize, 1u8, {
+            let disabled: u64 = unsafe { ::std::mem::transmute(disabled) };
+            disabled as u64
+        });
+        __bindgen_bitfield_unit.set(1usize, 1u8, {
+            let inherit: u64 = unsafe { ::std::mem::transmute(inherit) };
+            inherit as u64
+        });
+        __bindgen_bitfield_unit.set(2usize, 1u8, {
+            let pinned: u64 = unsafe { ::std::mem::transmute(pinned) };
+            pinned as u64
+        });
+        __bindgen_bitfield_unit.set(3usize, 1u8, {
+            let exclusive: u64 = unsafe { ::std::mem::transmute(exclusive) };
+            exclusive as u64
+        });
+        __bindgen_bitfield_unit.set(4usize, 1u8, {
+            let exclude_user: u64 = unsafe { ::std::mem::transmute(exclude_user) };
+            exclude_user as u64
+        });
+        __bindgen_bitfield_unit.set(5usize, 1u8, {
+            let exclude_kernel: u64 = unsafe { ::std::mem::transmute(exclude_kernel) };
+            exclude_kernel as u64
+        });
+        __bindgen_bitfield_unit.set(6usize, 1u8, {
+            let exclude_hv: u64 = unsafe { ::std::mem::transmute(exclude_hv) };
+            exclude_hv as u64
+        });
+        __bindgen_bitfield_unit.set(7usize, 1u8, {
+            let exclude_idle: u64 = unsafe { ::std::mem::transmute(exclude_idle) };
+            exclude_idle as u64
+        });
+        __bindgen_bitfield_unit.set(8usize, 1u8, {
+            let mmap: u64 = unsafe { ::std::mem::transmute(mmap) };
+            mmap as u64
+        });
+        __bindgen_bitfield_unit.set(9usize, 1u8, {
+            let comm: u64 = unsafe { ::std::mem::transmute(comm) };
+            comm as u64
+        });
+        __bindgen_bitfield_unit.set(10usize, 1u8, {
+            let freq: u64 = unsafe { ::std::mem::transmute(freq) };
+            freq as u64
+        });
+        __bindgen_bitfield_unit.set(11usize, 1u8, {
+            let inherit_stat: u64 = unsafe { ::std::mem::transmute(inherit_stat) };
+            inherit_stat as u64
+        });
+        __bindgen_bitfield_unit.set(12usize, 1u8, {
+            let enable_on_exec: u64 = unsafe { ::std::mem::transmute(enable_on_exec) };
+            enable_on_exec as u64
+        });
+        __bindgen_bitfield_unit.set(13usize, 1u8, {
+            let task: u64 = unsafe { ::std::mem::transmute(task) };
+            task as u64
+        });
+        __bindgen_bitfield_unit.set(14usize, 1u8, {
+            let watermark: u64 = unsafe { ::std::mem::transmute(watermark) };
+            watermark as u64
+        });
+        __bindgen_bitfield_unit.set(15usize, 2u8, {
+            let precise_ip: u64 = unsafe { ::std::mem::transmute(precise_ip) };
+            precise_ip as u64
+        });
+        __bindgen_bitfield_unit.set(17usize, 1u8, {
+            let mmap_data: u64 = unsafe { ::std::mem::transmute(mmap_data) };
+            mmap_data as u64
+        });
+        __bindgen_bitfield_unit.set(18usize, 1u8, {
+            let sample_id_all: u64 = unsafe { ::std::mem::transmute(sample_id_all) };
+            sample_id_all as u64
+        });
+        __bindgen_bitfield_unit.set(19usize, 1u8, {
+            let exclude_host: u64 = unsafe { ::std::mem::transmute(exclude_host) };
+            exclude_host as u64
+        });
+        __bindgen_bitfield_unit.set(20usize, 1u8, {
+            let exclude_guest: u64 = unsafe { ::std::mem::transmute(exclude_guest) };
+            exclude_guest as u64
+        });
+        __bindgen_bitfield_unit.set(21usize, 1u8, {
+            let exclude_callchain_kernel: u64 =
+                unsafe { ::std::mem::transmute(exclude_callchain_kernel) };
+            exclude_callchain_kernel as u64
+        });
+        __bindgen_bitfield_unit.set(22usize, 1u8, {
+            let exclude_callchain_user: u64 =
+                unsafe { ::std::mem::transmute(exclude_callchain_user) };
+            exclude_callchain_user as u64
+        });
+        __bindgen_bitfield_unit.set(23usize, 1u8, {
+            let mmap2: u64 = unsafe { ::std::mem::transmute(mmap2) };
+            mmap2 as u64
+        });
+        __bindgen_bitfield_unit.set(24usize, 1u8, {
+            let comm_exec: u64 = unsafe { ::std::mem::transmute(comm_exec) };
+            comm_exec as u64
+        });
+        __bindgen_bitfield_unit.set(25usize, 1u8, {
+            let use_clockid: u64 = unsafe { ::std::mem::transmute(use_clockid) };
+            use_clockid as u64
+        });
+        __bindgen_bitfield_unit.set(26usize, 1u8, {
+            let context_switch: u64 = unsafe { ::std::mem::transmute(context_switch) };
+            context_switch as u64
+        });
+        __bindgen_bitfield_unit.set(27usize, 1u8, {
+            let write_backward: u64 = unsafe { ::std::mem::transmute(write_backward) };
+            write_backward as u64
+        });
+        __bindgen_bitfield_unit.set(28usize, 1u8, {
+            let namespaces: u64 = unsafe { ::std::mem::transmute(namespaces) };
+            namespaces as u64
+        });
+        __bindgen_bitfield_unit.set(29usize, 1u8, {
+            let ksymbol: u64 = unsafe { ::std::mem::transmute(ksymbol) };
+            ksymbol as u64
+        });
+        __bindgen_bitfield_unit.set(30usize, 1u8, {
+            let bpf_event: u64 = unsafe { ::std::mem::transmute(bpf_event) };
+            bpf_event as u64
+        });
+        __bindgen_bitfield_unit.set(31usize, 1u8, {
+            let aux_output: u64 = unsafe { ::std::mem::transmute(aux_output) };
+            aux_output as u64
+        });
+        __bindgen_bitfield_unit.set(32usize, 1u8, {
+            let cgroup: u64 = unsafe { ::std::mem::transmute(cgroup) };
+            cgroup as u64
+        });
+        __bindgen_bitfield_unit.set(33usize, 31u8, {
+            let __reserved_1: u64 = unsafe { ::std::mem::transmute(__reserved_1) };
+            __reserved_1 as u64
+        });
+        __bindgen_bitfield_unit
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct perf_event_mmap_page {
+    pub version: __u32,
+    pub compat_version: __u32,
+    pub lock: __u32,
+    pub index: __u32,
+    pub offset: __s64,
+    pub time_enabled: __u64,
+    pub time_running: __u64,
+    pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1,
+    pub pmc_width: __u16,
+    pub time_shift: __u16,
+    pub time_mult: __u32,
+    pub time_offset: __u64,
+    pub time_zero: __u64,
+    pub size: __u32,
+    pub __reserved: [__u8; 948usize],
+    pub data_head: __u64,
+    pub data_tail: __u64,
+    pub data_offset: __u64,
+    pub data_size: __u64,
+    pub aux_head: __u64,
+    pub aux_tail: __u64,
+    pub aux_offset: __u64,
+    pub aux_size: __u64,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union perf_event_mmap_page__bindgen_ty_1 {
+    pub capabilities: __u64,
+    pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1,
+    _bindgen_union_align: u64,
+}
+#[repr(C)]
+#[repr(align(8))]
+#[derive(Debug, Copy, Clone)]
+pub struct perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 {
+    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize], u64>,
+}
+impl perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 {
+    #[inline]
+    pub fn cap_bit0(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_cap_bit0(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(0usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn cap_bit0_is_deprecated(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_cap_bit0_is_deprecated(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(1usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn cap_user_rdpmc(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_cap_user_rdpmc(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(2usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn cap_user_time(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_cap_user_time(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(3usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn cap_user_time_zero(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) }
+    }
+    #[inline]
+    pub fn set_cap_user_time_zero(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(4usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn cap_____res(&self) -> __u64 {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 59u8) as u64) }
+    }
+    #[inline]
+    pub fn set_cap_____res(&mut self, val: __u64) {
+        unsafe {
+            let val: u64 = ::std::mem::transmute(val);
+            self._bitfield_1.set(5usize, 59u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn new_bitfield_1(
+        cap_bit0: __u64,
+        cap_bit0_is_deprecated: __u64,
+        cap_user_rdpmc: __u64,
+        cap_user_time: __u64,
+        cap_user_time_zero: __u64,
+        cap_____res: __u64,
+    ) -> __BindgenBitfieldUnit<[u8; 8usize], u64> {
+        let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize], u64> =
+            Default::default();
+        __bindgen_bitfield_unit.set(0usize, 1u8, {
+            let cap_bit0: u64 = unsafe { ::std::mem::transmute(cap_bit0) };
+            cap_bit0 as u64
+        });
+        __bindgen_bitfield_unit.set(1usize, 1u8, {
+            let cap_bit0_is_deprecated: u64 =
+                unsafe { ::std::mem::transmute(cap_bit0_is_deprecated) };
+            cap_bit0_is_deprecated as u64
+        });
+        __bindgen_bitfield_unit.set(2usize, 1u8, {
+            let cap_user_rdpmc: u64 = unsafe { ::std::mem::transmute(cap_user_rdpmc) };
+            cap_user_rdpmc as u64
+        });
+        __bindgen_bitfield_unit.set(3usize, 1u8, {
+            let cap_user_time: u64 = unsafe { ::std::mem::transmute(cap_user_time) };
+            cap_user_time as u64
+        });
+        __bindgen_bitfield_unit.set(4usize, 1u8, {
+            let cap_user_time_zero: u64 = unsafe { ::std::mem::transmute(cap_user_time_zero) };
+            cap_user_time_zero as u64
+        });
+        __bindgen_bitfield_unit.set(5usize, 59u8, {
+            let cap_____res: u64 = unsafe { ::std::mem::transmute(cap_____res) };
+            cap_____res as u64
+        });
+        __bindgen_bitfield_unit
+    }
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct perf_event_header {
+    pub type_: __u32,
+    pub misc: __u16,
+    pub size: __u16,
+}
+pub mod perf_event_type {
+    pub type Type = ::std::os::raw::c_uint;
+    pub const PERF_RECORD_MMAP: Type = 1;
+    pub const PERF_RECORD_LOST: Type = 2;
+    pub const PERF_RECORD_COMM: Type = 3;
+    pub const PERF_RECORD_EXIT: Type = 4;
+    pub const PERF_RECORD_THROTTLE: Type = 5;
+    pub const PERF_RECORD_UNTHROTTLE: Type = 6;
+    pub const PERF_RECORD_FORK: Type = 7;
+    pub const PERF_RECORD_READ: Type = 8;
+    pub const PERF_RECORD_SAMPLE: Type = 9;
+    pub const PERF_RECORD_MMAP2: Type = 10;
+    pub const PERF_RECORD_AUX: Type = 11;
+    pub const PERF_RECORD_ITRACE_START: Type = 12;
+    pub const PERF_RECORD_LOST_SAMPLES: Type = 13;
+    pub const PERF_RECORD_SWITCH: Type = 14;
+    pub const PERF_RECORD_SWITCH_CPU_WIDE: Type = 15;
+    pub const PERF_RECORD_NAMESPACES: Type = 16;
+    pub const PERF_RECORD_KSYMBOL: Type = 17;
+    pub const PERF_RECORD_BPF_EVENT: Type = 18;
+    pub const PERF_RECORD_CGROUP: Type = 19;
+    pub const PERF_RECORD_MAX: Type = 20;
+}

+ 13 - 0
src/lib.rs

@@ -0,0 +1,13 @@
+#![deny(clippy::all)]
+
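+//! A library for loading and working with eBPF programs and maps, built on
+//! top of the bpf and perf_event syscalls.
+//!
+//! A minimal usage sketch: read a pre-compiled eBPF object file and hand it
+//! to `Bpf::load`. The file name `probe.o` is a placeholder.
+//!
+//! ```no_run
+//! use aya::Bpf;
+//!
+//! let data = std::fs::read("probe.o").unwrap();
+//! let bpf = Bpf::load(&data).unwrap();
+//! ```
+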
+#[macro_use]
+extern crate lazy_static;
+
+mod bpf;
+mod generated;
+pub mod maps;
+mod obj;
+pub mod programs;
+mod syscalls;
+
+pub use bpf::*;

+ 725 - 0
src/maps/hash_map.rs

@@ -0,0 +1,725 @@
+use std::{convert::TryFrom, marker::PhantomData, mem};
+
+use crate::{
+    generated::bpf_map_type::BPF_MAP_TYPE_HASH,
+    syscalls::{
+        bpf_map_delete_elem, bpf_map_get_next_key, bpf_map_lookup_and_delete_elem,
+        bpf_map_lookup_elem, bpf_map_update_elem,
+    },
+};
+
+use super::{Map, MapError};
+use crate::Pod;
+
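+/// A typed view over a BPF_MAP_TYPE_HASH map, with keys of type `K` and
+/// values of type `V`. Key and value sizes are checked against the map
+/// definition when the view is constructed.
+///
+/// A minimal sketch, assuming `map` is a `Map` that has already been
+/// created and is backed by a hash map:
+///
+/// ```no_run
+/// # use std::convert::TryFrom;
+/// # fn sketch(map: &mut aya::maps::Map) -> Result<(), aya::maps::MapError> {
+/// let mut hm = aya::maps::HashMap::<_, u32, u32>::try_from(map)?;
+/// hm.insert(1, 42, 0)?;
+/// let _value = unsafe { hm.get(&1, 0)? };
+/// # Ok(())
+/// # }
+/// ```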
+pub struct HashMap<T: AsRef<Map>, K, V> {
+    inner: T,
+    _k: PhantomData<K>,
+    _v: PhantomData<V>,
+}
+
+impl<T: AsRef<Map>, K: Pod, V: Pod> HashMap<T, K, V> {
+    pub fn new(map: T) -> Result<HashMap<T, K, V>, MapError> {
+        let inner = map.as_ref();
+        let map_type = inner.obj.def.map_type;
+        if map_type != BPF_MAP_TYPE_HASH {
+            return Err(MapError::InvalidMapType {
+                map_type: map_type as u32,
+            });
+        }
+        let size = mem::size_of::<K>();
+        let expected = inner.obj.def.key_size as usize;
+        if size != expected {
+            return Err(MapError::InvalidKeySize { size, expected });
+        }
+
+        let size = mem::size_of::<V>();
+        let expected = inner.obj.def.value_size as usize;
+        if size != expected {
+            return Err(MapError::InvalidValueSize { size, expected });
+        }
+
+        Ok(HashMap {
+            inner: map,
+            _k: PhantomData,
+            _v: PhantomData,
+        })
+    }
+
+    pub unsafe fn get(&self, key: &K, flags: u64) -> Result<Option<V>, MapError> {
+        let fd = self.inner.as_ref().fd_or_err()?;
+        bpf_map_lookup_elem(fd, key, flags)
+            .map_err(|(code, io_error)| MapError::LookupElementFailed { code, io_error })
+    }
+
+    pub unsafe fn iter(&self) -> MapIter<'_, T, K, V> {
+        MapIter::new(self)
+    }
+
+    pub unsafe fn keys(&self) -> MapKeys<'_, T, K, V> {
+        MapKeys::new(self)
+    }
+}
+
+impl<T: AsRef<Map> + AsMut<Map>, K: Pod, V: Pod> HashMap<T, K, V> {
+    pub fn insert(&mut self, key: K, value: V, flags: u64) -> Result<(), MapError> {
+        let fd = self.inner.as_ref().fd_or_err()?;
+        bpf_map_update_elem(fd, &key, &value, flags)
+            .map_err(|(code, io_error)| MapError::UpdateElementFailed { code, io_error })?;
+        Ok(())
+    }
+
+    pub unsafe fn pop(&mut self, key: &K) -> Result<Option<V>, MapError> {
+        let fd = self.inner.as_ref().fd_or_err()?;
+        bpf_map_lookup_and_delete_elem(fd, key)
+            .map_err(|(code, io_error)| MapError::LookupAndDeleteElementFailed { code, io_error })
+    }
+
+    pub fn remove(&mut self, key: &K) -> Result<(), MapError> {
+        let fd = self.inner.as_ref().fd_or_err()?;
+        bpf_map_delete_elem(fd, key)
+            .map(|_| ())
+            .map_err(|(code, io_error)| MapError::DeleteElementFailed { code, io_error })
+    }
+}
+
+impl<'a, K: Pod, V: Pod> TryFrom<&'a Map> for HashMap<&'a Map, K, V> {
+    type Error = MapError;
+
+    fn try_from(inner: &'a Map) -> Result<HashMap<&'a Map, K, V>, MapError> {
+        HashMap::new(inner)
+    }
+}
+
+impl<'a, K: Pod, V: Pod> TryFrom<&'a mut Map> for HashMap<&'a mut Map, K, V> {
+    type Error = MapError;
+
+    fn try_from(inner: &'a mut Map) -> Result<HashMap<&'a mut Map, K, V>, MapError> {
+        HashMap::new(inner)
+    }
+}
+
+pub struct MapKeys<'coll, T: AsRef<Map>, K: Clone + Pod, V: Clone + Pod> {
+    map: &'coll HashMap<T, K, V>,
+    err: bool,
+    key: Option<K>,
+}
+
+impl<'coll, T: AsRef<Map>, K: Clone + Pod, V: Clone + Pod> MapKeys<'coll, T, K, V> {
+    fn new(map: &'coll HashMap<T, K, V>) -> MapKeys<'coll, T, K, V> {
+        MapKeys {
+            map,
+            err: false,
+            key: None,
+        }
+    }
+}
+
+impl<T: AsRef<Map>, K: Clone + Pod, V: Clone + Pod> Iterator for MapKeys<'_, T, K, V> {
+    type Item = Result<K, MapError>;
+
+    fn next(&mut self) -> Option<Result<K, MapError>> {
+        if self.err {
+            return None;
+        }
+
+        let fd = match self.map.inner.as_ref().fd_or_err() {
+            Ok(fd) => fd,
+            Err(e) => {
+                self.err = true;
+                return Some(Err(e));
+            }
+        };
+
+        match bpf_map_get_next_key(fd, self.key.as_ref()) {
+            Ok(Some(key)) => {
+                self.key = Some(key);
+                Some(Ok(key))
+            }
+            Ok(None) => {
+                self.key = None;
+                None
+            }
+            Err((code, io_error)) => {
+                self.err = true;
+                Some(Err(MapError::GetNextKeyFailed { code, io_error }))
+            }
+        }
+    }
+}
+
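+/// Iterator over the entries of a hash map, built on top of `MapKeys`. Keys
+/// that disappear between the BPF_MAP_GET_NEXT_KEY and BPF_MAP_LOOKUP_ELEM
+/// calls, for example because another program deletes them concurrently,
+/// are silently skipped.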
+pub struct MapIter<'coll, T: AsRef<Map>, K: Clone + Pod, V: Clone + Pod> {
+    inner: MapKeys<'coll, T, K, V>,
+}
+
+impl<'coll, T: AsRef<Map>, K: Clone + Pod, V: Clone + Pod> MapIter<'coll, T, K, V> {
+    fn new(map: &'coll HashMap<T, K, V>) -> MapIter<'coll, T, K, V> {
+        MapIter {
+            inner: MapKeys::new(map),
+        }
+    }
+}
+
+impl<T: AsRef<Map>, K: Clone + Pod, V: Clone + Pod> Iterator for MapIter<'_, T, K, V> {
+    type Item = Result<(K, V), MapError>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            match self.inner.next() {
+                Some(Ok(key)) => {
+                    let value = unsafe { self.inner.map.get(&key, 0) };
+                    match value {
+                        Ok(None) => continue,
+                        Ok(Some(value)) => return Some(Ok((key, value))),
+                        Err(e) => return Some(Err(e)),
+                    }
+                }
+                Some(Err(e)) => return Some(Err(e)),
+                None => return None,
+            }
+        }
+    }
+}
+
+impl AsRef<Map> for &Map {
+    fn as_ref(&self) -> &Map {
+        self
+    }
+}
+
+impl AsRef<Map> for &mut Map {
+    fn as_ref(&self) -> &Map {
+        self
+    }
+}
+
+impl AsMut<Map> for &mut Map {
+    fn as_mut(&mut self) -> &mut Map {
+        self
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::io;
+
+    use libc::{EFAULT, ENOENT};
+
+    use crate::{
+        bpf_map_def,
+        generated::{
+            bpf_attr, bpf_cmd,
+            bpf_map_type::{BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERF_EVENT_ARRAY},
+        },
+        obj,
+        syscalls::{override_syscall, SysResult, Syscall},
+    };
+
+    use super::*;
+
+    fn new_obj_map(name: &str) -> obj::Map {
+        obj::Map {
+            name: name.to_string(),
+            def: bpf_map_def {
+                map_type: BPF_MAP_TYPE_HASH,
+                key_size: 4,
+                value_size: 4,
+                max_entries: 1024,
+                map_flags: 0,
+            },
+            section_index: 0,
+            data: Vec::new(),
+        }
+    }
+
+    fn sys_error(value: i32) -> SysResult {
+        Err((-1, io::Error::from_raw_os_error(value)))
+    }
+
+    #[test]
+    fn test_wrong_key_size() {
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: None,
+        };
+        assert!(matches!(
+            HashMap::<_, u8, u32>::new(&map),
+            Err(MapError::InvalidKeySize {
+                size: 1,
+                expected: 4
+            })
+        ));
+    }
+
+    #[test]
+    fn test_wrong_value_size() {
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: None,
+        };
+        assert!(matches!(
+            HashMap::<_, u32, u16>::new(&map),
+            Err(MapError::InvalidValueSize {
+                size: 2,
+                expected: 4
+            })
+        ));
+    }
+
+    #[test]
+    fn test_try_from_wrong_map() {
+        let map = Map {
+            obj: obj::Map {
+                name: "TEST".to_string(),
+                def: bpf_map_def {
+                    map_type: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+                    key_size: 4,
+                    value_size: 4,
+                    max_entries: 1024,
+                    map_flags: 0,
+                },
+                section_index: 0,
+                data: Vec::new(),
+            },
+            fd: None,
+        };
+
+        assert!(matches!(
+            HashMap::<_, u32, u32>::try_from(&map),
+            Err(MapError::InvalidMapType { .. })
+        ));
+    }
+
+    #[test]
+    fn test_try_from_ok() {
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: None,
+        };
+        assert!(HashMap::<_, u32, u32>::try_from(&map).is_ok())
+    }
+
+    #[test]
+    fn test_insert_not_created() {
+        let mut map = Map {
+            obj: new_obj_map("TEST"),
+            fd: None,
+        };
+        let mut hm = HashMap::<_, u32, u32>::new(&mut map).unwrap();
+
+        assert!(matches!(
+            hm.insert(1, 42, 0),
+            Err(MapError::NotCreated { .. })
+        ));
+    }
+
+    #[test]
+    fn test_insert_syscall_error() {
+        override_syscall(|_| sys_error(EFAULT));
+
+        let mut map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let mut hm = HashMap::<_, u32, u32>::new(&mut map).unwrap();
+
+        assert!(matches!(
+            hm.insert(1, 42, 0),
+            Err(MapError::UpdateElementFailed { code: -1, io_error }) if io_error.raw_os_error() == Some(EFAULT)
+        ));
+    }
+
+    #[test]
+    fn test_insert_ok() {
+        override_syscall(|call| match call {
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_UPDATE_ELEM,
+                ..
+            } => Ok(1),
+            _ => sys_error(EFAULT),
+        });
+
+        let mut map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let mut hm = HashMap::<_, u32, u32>::new(&mut map).unwrap();
+
+        assert!(hm.insert(1, 42, 0).is_ok());
+    }
+
+    #[test]
+    fn test_remove_not_created() {
+        let mut map = Map {
+            obj: new_obj_map("TEST"),
+            fd: None,
+        };
+        let mut hm = HashMap::<_, u32, u32>::new(&mut map).unwrap();
+
+        assert!(matches!(hm.remove(&1), Err(MapError::NotCreated { .. })));
+    }
+
+    #[test]
+    fn test_remove_syscall_error() {
+        override_syscall(|_| sys_error(EFAULT));
+
+        let mut map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let mut hm = HashMap::<_, u32, u32>::new(&mut map).unwrap();
+
+        assert!(matches!(
+            hm.remove(&1),
+            Err(MapError::DeleteElementFailed { code: -1, io_error }) if io_error.raw_os_error() == Some(EFAULT)
+        ));
+    }
+
+    #[test]
+    fn test_remove_ok() {
+        override_syscall(|call| match call {
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_DELETE_ELEM,
+                ..
+            } => Ok(1),
+            _ => sys_error(EFAULT),
+        });
+
+        let mut map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let mut hm = HashMap::<_, u32, u32>::new(&mut map).unwrap();
+
+        assert!(hm.remove(&1).is_ok());
+    }
+
+    #[test]
+    fn test_get_not_created() {
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: None,
+        };
+        let hm = HashMap::<_, u32, u32>::new(&map).unwrap();
+
+        assert!(matches!(
+            unsafe { hm.get(&1, 0) },
+            Err(MapError::NotCreated { .. })
+        ));
+    }
+
+    #[test]
+    fn test_get_syscall_error() {
+        override_syscall(|_| sys_error(EFAULT));
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let hm = HashMap::<_, u32, u32>::new(&map).unwrap();
+
+        assert!(matches!(
+            unsafe { hm.get(&1, 0) },
+            Err(MapError::LookupElementFailed { code: -1, io_error }) if io_error.raw_os_error() == Some(EFAULT)
+        ));
+    }
+
+    #[test]
+    fn test_get_not_found() {
+        override_syscall(|call| match call {
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_LOOKUP_ELEM,
+                ..
+            } => sys_error(ENOENT),
+            _ => sys_error(EFAULT),
+        });
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let hm = HashMap::<_, u32, u32>::new(&map).unwrap();
+
+        assert!(matches!(unsafe { hm.get(&1, 0) }, Ok(None)));
+    }
+
+    #[test]
+    fn test_pop_not_created() {
+        let mut map = Map {
+            obj: new_obj_map("TEST"),
+            fd: None,
+        };
+        let mut hm = HashMap::<_, u32, u32>::new(&mut map).unwrap();
+
+        assert!(matches!(
+            unsafe { hm.pop(&1) },
+            Err(MapError::NotCreated { .. })
+        ));
+    }
+
+    #[test]
+    fn test_pop_syscall_error() {
+        override_syscall(|_| sys_error(EFAULT));
+        let mut map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let mut hm = HashMap::<_, u32, u32>::new(&mut map).unwrap();
+
+        assert!(matches!(
+            unsafe { hm.pop(&1) },
+            Err(MapError::LookupAndDeleteElementFailed { code: -1, io_error }) if io_error.raw_os_error() == Some(EFAULT)
+        ));
+    }
+
+    #[test]
+    fn test_pop_not_found() {
+        override_syscall(|call| match call {
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_LOOKUP_AND_DELETE_ELEM,
+                ..
+            } => sys_error(ENOENT),
+            _ => sys_error(EFAULT),
+        });
+        let mut map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let mut hm = HashMap::<_, u32, u32>::new(&mut map).unwrap();
+
+        assert!(matches!(unsafe { hm.pop(&1) }, Ok(None)));
+    }
+
+    fn bpf_key<T: Copy>(attr: &bpf_attr) -> Option<T> {
+        match unsafe { attr.__bindgen_anon_2.key } as *const T {
+            p if p.is_null() => None,
+            p => Some(unsafe { *p }),
+        }
+    }
+
+    fn set_next_key<T: Copy>(attr: &bpf_attr, next: T) {
+        let key = unsafe { attr.__bindgen_anon_2.__bindgen_anon_1.next_key } as *const T as *mut T;
+        unsafe { *key = next };
+    }
+
+    fn set_ret<T: Copy>(attr: &bpf_attr, ret: T) {
+        let value = unsafe { attr.__bindgen_anon_2.__bindgen_anon_1.value } as *const T as *mut T;
+        unsafe { *value = ret };
+    }
+
+    #[test]
+    fn test_keys_empty() {
+        override_syscall(|call| match call {
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_GET_NEXT_KEY,
+                ..
+            } => sys_error(ENOENT),
+            _ => sys_error(EFAULT),
+        });
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let hm = HashMap::<_, u32, u32>::new(&map).unwrap();
+        let keys = unsafe { hm.keys() }.collect::<Result<Vec<_>, _>>();
+        assert!(matches!(keys, Ok(ks) if ks.is_empty()))
+    }
+
+    fn get_next_key(attr: &bpf_attr) -> SysResult {
+        match bpf_key(&attr) {
+            None => set_next_key(&attr, 10),
+            Some(10) => set_next_key(&attr, 20),
+            Some(20) => set_next_key(&attr, 30),
+            Some(30) => return sys_error(ENOENT),
+            Some(_) => return sys_error(EFAULT),
+        };
+
+        Ok(1)
+    }
+
+    fn lookup_elem(attr: &bpf_attr) -> SysResult {
+        match bpf_key(&attr) {
+            Some(10) => set_ret(&attr, 100),
+            Some(20) => set_ret(&attr, 200),
+            Some(30) => set_ret(&attr, 300),
+            Some(_) => return sys_error(ENOENT),
+            None => return sys_error(EFAULT),
+        };
+
+        Ok(1)
+    }
+
+    #[test]
+    fn test_keys() {
+        override_syscall(|call| match call {
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_GET_NEXT_KEY,
+                attr,
+            } => get_next_key(&attr),
+            _ => sys_error(EFAULT),
+        });
+
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let hm = HashMap::<_, u32, u32>::new(&map).unwrap();
+
+        let keys = unsafe { hm.keys() }.collect::<Result<Vec<_>, _>>().unwrap();
+        assert_eq!(&keys, &[10, 20, 30])
+    }
+
+    #[test]
+    fn test_keys_error() {
+        override_syscall(|call| match call {
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_GET_NEXT_KEY,
+                attr,
+            } => {
+                match bpf_key(&attr) {
+                    None => set_next_key(&attr, 10),
+                    Some(10) => set_next_key(&attr, 20),
+                    Some(_) => return sys_error(EFAULT),
+                };
+
+                Ok(1)
+            }
+            _ => sys_error(EFAULT),
+        });
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let hm = HashMap::<_, u32, u32>::new(&map).unwrap();
+
+        let mut keys = unsafe { hm.keys() };
+        assert!(matches!(keys.next(), Some(Ok(10))));
+        assert!(matches!(keys.next(), Some(Ok(20))));
+        assert!(matches!(keys.next(), Some(Err(MapError::GetNextKeyFailed { .. }))));
+        assert!(matches!(keys.next(), None));
+    }
+
+    #[test]
+    fn test_iter() {
+        override_syscall(|call| match call {
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_GET_NEXT_KEY,
+                attr,
+            } => get_next_key(&attr),
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_LOOKUP_ELEM,
+                attr,
+            } => lookup_elem(&attr),
+            _ => sys_error(EFAULT),
+        });
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let hm = HashMap::<_, u32, u32>::new(&map).unwrap();
+        let items = unsafe { hm.iter() }.collect::<Result<Vec<_>, _>>().unwrap();
+        assert_eq!(&items, &[(10, 100), (20, 200), (30, 300)])
+    }
+
+    #[test]
+    fn test_iter_key_deleted() {
+        override_syscall(|call| match call {
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_GET_NEXT_KEY,
+                attr,
+            } => get_next_key(&attr),
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_LOOKUP_ELEM,
+                attr,
+            } => {
+                match bpf_key(&attr) {
+                    Some(10) => set_ret(&attr, 100),
+                    Some(20) => return sys_error(ENOENT),
+                    Some(30) => set_ret(&attr, 300),
+                    Some(_) => return sys_error(ENOENT),
+                    None => return sys_error(EFAULT),
+                };
+
+                Ok(1)
+            }
+            _ => sys_error(EFAULT),
+        });
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let hm = HashMap::<_, u32, u32>::new(&map).unwrap();
+
+        let items = unsafe { hm.iter() }.collect::<Result<Vec<_>, _>>().unwrap();
+        assert_eq!(&items, &[(10, 100), (30, 300)])
+    }
+
+    #[test]
+    fn test_iter_key_error() {
+        override_syscall(|call| match call {
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_GET_NEXT_KEY,
+                attr,
+            } => {
+                match bpf_key(&attr) {
+                    None => set_next_key(&attr, 10),
+                    Some(10) => set_next_key(&attr, 20),
+                    Some(20) => return sys_error(EFAULT),
+                    Some(30) => return sys_error(ENOENT),
+                    Some(_) => panic!(),
+                };
+
+                Ok(1)
+            }
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_LOOKUP_ELEM,
+                attr,
+            } => lookup_elem(&attr),
+            _ => sys_error(EFAULT),
+        });
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let hm = HashMap::<_, u32, u32>::new(&map).unwrap();
+
+        let mut iter = unsafe { hm.iter() };
+        assert!(matches!(iter.next(), Some(Ok((10, 100)))));
+        assert!(matches!(iter.next(), Some(Ok((20, 200)))));
+        assert!(matches!(iter.next(), Some(Err(MapError::GetNextKeyFailed { .. }))));
+        assert!(matches!(iter.next(), None));
+    }
+
+    #[test]
+    fn test_iter_value_error() {
+        override_syscall(|call| match call {
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_GET_NEXT_KEY,
+                attr,
+            } => get_next_key(&attr),
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_LOOKUP_ELEM,
+                attr,
+            } => {
+                match bpf_key(&attr) {
+                    Some(10) => set_ret(&attr, 100),
+                    Some(20) => return sys_error(EFAULT),
+                    Some(30) => set_ret(&attr, 300),
+                    Some(_) => return sys_error(ENOENT),
+                    None => return sys_error(EFAULT),
+                };
+
+                Ok(1)
+            }
+            _ => sys_error(EFAULT),
+        });
+        let map = Map {
+            obj: new_obj_map("TEST"),
+            fd: Some(42),
+        };
+        let hm = HashMap::<_, u32, u32>::new(&map).unwrap();
+
+        let mut iter = unsafe { hm.iter() };
+        assert!(matches!(iter.next(), Some(Ok((10, 100)))));
+        assert!(matches!(iter.next(), Some(Err(MapError::LookupElementFailed { .. }))));
+        assert!(matches!(iter.next(), Some(Ok((30, 300)))));
+        assert!(matches!(iter.next(), None));
+    }
+}

+ 164 - 0
src/maps/mod.rs

@@ -0,0 +1,164 @@
+use std::{ffi::CString, io};
+use thiserror::Error;
+
+use crate::{obj, syscalls::bpf_create_map, RawFd};
+
+mod hash_map;
+pub use hash_map::*;
+
+mod perf_map;
+pub use perf_map::*;
+
+#[derive(Error, Debug)]
+pub enum MapError {
+    #[error("invalid map type {map_type}")]
+    InvalidMapType { map_type: u32 },
+
+    #[error("invalid map name `{name}`")]
+    InvalidName { name: String },
+
+    #[error("the map `{name}` has not been created")]
+    NotCreated { name: String },
+
+    #[error("the map `{name}` has already been created")]
+    AlreadyCreated { name: String },
+
+    #[error("failed to create map `{name}`: {code}")]
+    CreateFailed {
+        name: String,
+        code: i64,
+        io_error: io::Error,
+    },
+
+    #[error("invalid key size {size}, expected {expected}")]
+    InvalidKeySize { size: usize, expected: usize },
+
+    #[error("invalid value size {size}, expected {expected}")]
+    InvalidValueSize { size: usize, expected: usize },
+
+    #[error("the BPF_MAP_UPDATE_ELEM syscall failed with code {code} io_error {io_error}")]
+    UpdateElementFailed { code: i64, io_error: io::Error },
+
+    #[error("the BPF_MAP_LOOKUP_ELEM syscall failed with code {code} io_error {io_error}")]
+    LookupElementFailed { code: i64, io_error: io::Error },
+
+    #[error("the BPF_MAP_DELETE_ELEM syscall failed with code {code} io_error {io_error}")]
+    DeleteElementFailed { code: i64, io_error: io::Error },
+
+    #[error(
+        "the BPF_MAP_LOOKUP_AND_DELETE_ELEM syscall failed with code {code} io_error {io_error}"
+    )]
+    LookupAndDeleteElementFailed { code: i64, io_error: io::Error },
+
+    #[error("the BPF_MAP_GET_NEXT_KEY syscall failed with code {code} io_error {io_error}")]
+    GetNextKeyFailed { code: i64, io_error: io::Error },
+}
+
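+/// An eBPF map: the parsed map definition plus, once `create` has been
+/// called, the file descriptor of the corresponding kernel object. Typed
+/// access typically goes through the wrappers (`HashMap`, `PerfMap`) rather
+/// than through `Map` itself.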
+#[derive(Debug)]
+pub struct Map {
+    pub(crate) obj: obj::Map,
+    pub(crate) fd: Option<RawFd>,
+}
+
+impl Map {
+    pub fn create(&mut self) -> Result<RawFd, MapError> {
+        let name = self.obj.name.clone();
+        if self.fd.is_some() {
+            return Err(MapError::AlreadyCreated { name: name.clone() });
+        }
+
+        let c_name =
+            CString::new(name.clone()).map_err(|_| MapError::InvalidName { name: name.clone() })?;
+
+        let fd = bpf_create_map(&c_name, &self.obj.def).map_err(|(code, io_error)| {
+            MapError::CreateFailed {
+                name,
+                code,
+                io_error,
+            }
+        })? as RawFd;
+
+        self.fd = Some(fd);
+
+        Ok(fd)
+    }
+
+    pub(crate) fn fd_or_err(&self) -> Result<RawFd, MapError> {
+        self.fd.ok_or_else(|| MapError::NotCreated {
+            name: self.obj.name.clone(),
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use libc::EFAULT;
+
+    use crate::{
+        bpf_map_def,
+        generated::{bpf_cmd, bpf_map_type::BPF_MAP_TYPE_HASH},
+        syscalls::{override_syscall, Syscall},
+    };
+
+    use super::*;
+
+    fn new_obj_map(name: &str) -> obj::Map {
+        obj::Map {
+            name: name.to_string(),
+            def: bpf_map_def {
+                map_type: BPF_MAP_TYPE_HASH,
+                key_size: 4,
+                value_size: 4,
+                max_entries: 1024,
+                map_flags: 0,
+            },
+            section_index: 0,
+            data: Vec::new(),
+        }
+    }
+
+    fn new_map(name: &str) -> Map {
+        Map {
+            obj: new_obj_map(name),
+            fd: None,
+        }
+    }
+
+    #[test]
+    fn test_create() {
+        override_syscall(|call| match call {
+            Syscall::Bpf {
+                cmd: bpf_cmd::BPF_MAP_CREATE,
+                ..
+            } => Ok(42),
+            _ => Err((-1, io::Error::from_raw_os_error(EFAULT))),
+        });
+
+        let mut map = new_map("foo");
+        assert!(matches!(map.create(), Ok(42)));
+        assert_eq!(map.fd, Some(42));
+        assert!(matches!(map.create(), Err(MapError::AlreadyCreated { .. })));
+    }
+
+    #[test]
+    fn test_create_failed() {
+        override_syscall(|_| Err((-42, io::Error::from_raw_os_error(EFAULT))));
+
+        let mut map = new_map("foo");
+        let ret = map.create();
+        assert!(matches!(ret, Err(MapError::CreateFailed { .. })));
+        if let Err(MapError::CreateFailed {
+            name,
+            code,
+            io_error,
+        }) = ret
+        {
+            assert_eq!(name, "foo");
+            assert_eq!(code, -42);
+            assert_eq!(io_error.raw_os_error(), Some(EFAULT));
+        }
+        assert_eq!(map.fd, None);
+    }
+}

+ 700 - 0
src/maps/perf_map.rs

@@ -0,0 +1,700 @@
+use std::{
+    convert::TryFrom,
+    ffi::c_void,
+    fs, io, mem, ptr, slice,
+    str::FromStr,
+    sync::atomic::{self, AtomicPtr, Ordering},
+};
+
+use bytes::BytesMut;
+use libc::{
+    c_int, close, munmap, sysconf, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE, _SC_PAGESIZE,
+};
+use thiserror::Error;
+
+use crate::{
+    generated::{
+        bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_header, perf_event_mmap_page,
+        perf_event_type::*,
+    },
+    maps::{Map, MapError},
+    syscalls::{bpf_map_update_elem, perf_event_ioctl, perf_event_open},
+    RawFd, PERF_EVENT_IOC_DISABLE, PERF_EVENT_IOC_ENABLE,
+};
+
+const ONLINE_CPUS: &str = "/sys/devices/system/cpu/online";
+
+#[derive(Error, Debug)]
+pub enum PerfBufferError {
+    #[error("invalid page count {page_count}, the value must be a power of two")]
+    InvalidPageCount { page_count: usize },
+
+    #[error("perf_event_open failed: {io_error}")]
+    OpenFailed {
+        #[source]
+        io_error: io::Error,
+    },
+
+    #[error("mmap failed: {io_error}")]
+    MMapFailed {
+        #[source]
+        io_error: io::Error,
+    },
+
+    #[error("PERF_EVENT_IOC_ENABLE failed: {io_error}")]
+    PerfEventEnableFailed {
+        #[source]
+        io_error: io::Error,
+    },
+
+    #[error("read_events() was called with no output buffers")]
+    NoBuffers,
+
+    #[error("the buffer needs to be of at least {size} bytes")]
+    MoreSpaceNeeded { size: usize },
+}
+
+#[derive(Debug, PartialEq)]
+pub struct Events {
+    pub read: usize,
+    pub lost: usize,
+}
+
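+// a perf ring buffer mmap'ed for a single CPU: `buf` points at the mapped
+// region and `size` is the size of the data area, excluding the leading
+// metadata page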
+struct PerfBuffer {
+    buf: AtomicPtr<perf_event_mmap_page>,
+    size: usize,
+    page_size: usize,
+    fd: RawFd,
+}
+
+impl PerfBuffer {
+    fn open(
+        cpu_id: u32,
+        page_size: usize,
+        page_count: usize,
+    ) -> Result<PerfBuffer, PerfBufferError> {
+        if !page_count.is_power_of_two() {
+            return Err(PerfBufferError::InvalidPageCount { page_count });
+        }
+
+        let fd = perf_event_open(cpu_id as i32)
+            .map_err(|(_, io_error)| PerfBufferError::OpenFailed { io_error })?
+            as RawFd;
+        let size = page_size * page_count;
+        let buf = unsafe {
+            mmap(
+                ptr::null_mut(),
+                size + page_size,
+                PROT_READ | PROT_WRITE,
+                MAP_SHARED,
+                fd,
+                0,
+            )
+        };
+        if buf == MAP_FAILED {
+            return Err(PerfBufferError::MMapFailed {
+                io_error: io::Error::last_os_error(),
+            });
+        }
+
+        let perf_buf = PerfBuffer {
+            buf: AtomicPtr::new(buf as *mut perf_event_mmap_page),
+            fd,
+            size,
+            page_size,
+        };
+
+        perf_event_ioctl(fd, PERF_EVENT_IOC_ENABLE, 0)
+            .map_err(|(_, io_error)| PerfBufferError::PerfEventEnableFailed { io_error })?;
+
+        Ok(perf_buf)
+    }
+
+    pub fn read_events(&mut self, buffers: &mut [BytesMut]) -> Result<Events, PerfBufferError> {
+        if buffers.is_empty() {
+            return Err(PerfBufferError::NoBuffers);
+        }
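+        // the mmap'ed region starts with one metadata page (the
+        // perf_event_mmap_page holding data_head and data_tail); the ring
+        // buffer data starts one page in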
+        let header = self.buf.load(Ordering::SeqCst);
+        let base = header as usize + self.page_size;
+
+        let mut events = Events { read: 0, lost: 0 };
+        let mut buf_n = 0;
+
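+        // copy out_buf.len() bytes starting at start_off, handling records
+        // that wrap around the end of the ring buffer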
+        let fill_buf = |start_off, base, mmap_size, out_buf: &mut [u8]| {
+            let len = out_buf.len();
+
+            let end = (start_off + len) % mmap_size;
+            let start = start_off % mmap_size;
+
+            if start < end {
+                out_buf.copy_from_slice(unsafe {
+                    slice::from_raw_parts((base + start) as *const u8, len)
+                });
+            } else {
+                let size = mmap_size - start;
+                unsafe {
+                    out_buf[..size]
+                        .copy_from_slice(slice::from_raw_parts((base + start) as *const u8, size));
+                    out_buf[size..]
+                        .copy_from_slice(slice::from_raw_parts(base as *const u8, len - size));
+                }
+            }
+        };
+
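+        // process a single record: PERF_RECORD_SAMPLE payloads are copied
+        // into buf and count as read, PERF_RECORD_LOST records only bump the
+        // lost counter, and any other record type is skipped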
+        let read_event = |event_start, event_type, base, buf: &mut BytesMut| {
+            let sample_size = match event_type {
+                PERF_RECORD_SAMPLE | PERF_RECORD_LOST => {
+                    let mut size = [0u8; mem::size_of::<u32>()];
+                    fill_buf(
+                        event_start + mem::size_of::<perf_event_header>(),
+                        base,
+                        self.size,
+                        &mut size,
+                    );
+                    u32::from_ne_bytes(size)
+                }
+                _ => return Ok(None),
+            } as usize;
+
+            let sample_start =
+                (event_start + mem::size_of::<perf_event_header>() + mem::size_of::<u32>())
+                    % self.size;
+
+            match event_type {
+                PERF_RECORD_SAMPLE => {
+                    buf.clear();
+                    if sample_size > buf.capacity() {
+                        return Err(PerfBufferError::MoreSpaceNeeded { size: sample_size });
+                    }
+
+                    unsafe { buf.set_len(sample_size) };
+
+                    fill_buf(sample_start, base, self.size, buf);
+
+                    Ok(Some((1, 0)))
+                }
+                PERF_RECORD_LOST => {
+                    let mut count = [0u8; mem::size_of::<u64>()];
+                    fill_buf(
+                        event_start + mem::size_of::<perf_event_header>() + mem::size_of::<u64>(),
+                        base,
+                        self.size,
+                        &mut count,
+                    );
+                    Ok(Some((0, u64::from_ne_bytes(count) as usize)))
+                }
+                _ => Ok(None),
+            }
+        };
+
+        let head = unsafe { (*header).data_head } as usize;
+        let mut tail = unsafe { (*header).data_tail } as usize;
+        while head != tail {
+            if buf_n == buffers.len() {
+                break;
+            }
+
+            let buf = &mut buffers[buf_n];
+
+            let event_start = tail % self.size;
+            let event =
+                unsafe { ptr::read_unaligned((base + event_start) as *const perf_event_header) };
+            let event_size = event.size as usize;
+
+            match read_event(event_start, event.type_, base, buf) {
+                Ok(Some((read, lost))) => {
+                    if read > 0 {
+                        buf_n += 1;
+                        events.read += read;
+                    }
+                    events.lost += lost;
+                }
+                Ok(None) => { /* skip unknown event type */ }
+                Err(PerfBufferError::MoreSpaceNeeded { .. }) if events.read > 0 => {
+                    // we have already read some events, so return those; the
+                    // next read_events() call will hit the same error unless
+                    // the caller grows the output buffers
+                    break;
+                }
+                Err(e) => {
+                    // we got an error and we didn't read any events; propagate
+                    // the error and give the caller a chance to grow the
+                    // output buffers
+                    atomic::fence(Ordering::SeqCst);
+                    unsafe { (*header).data_tail = tail as u64 };
+                    return Err(e);
+                }
+            }
+            tail += event_size;
+        }
+
+        atomic::fence(Ordering::SeqCst);
+        unsafe { (*header).data_tail = tail as u64 };
+
+        Ok(events)
+    }
+}
+
+impl Drop for PerfBuffer {
+    fn drop(&mut self) {
+        unsafe {
+            let _ = perf_event_ioctl(self.fd, PERF_EVENT_IOC_DISABLE, 0);
+            munmap(
+                self.buf.load(Ordering::SeqCst) as *mut c_void,
+                self.size + self.page_size,
+            );
+            close(self.fd);
+        }
+    }
+}
+
+#[derive(Error, Debug)]
+pub enum PerfMapError {
+    #[error("error parsing /sys/devices/system/cpu/online")]
+    InvalidOnlineCpuFile,
+
+    #[error("no CPUs specified")]
+    NoCpus,
+
+    #[error("invalid cpu {cpu_id}")]
+    InvalidCpu { cpu_id: u32 },
+
+    #[error("map error: {map_error}")]
+    MapError {
+        #[from]
+        map_error: MapError,
+    },
+
+    #[error("perf buffer error: {buf_error}")]
+    PerfBufferError {
+        #[from]
+        buf_error: PerfBufferError,
+    },
+
+    #[error("bpf_map_update_elem failed: {io_error}")]
+    UpdateElementFailed {
+        #[source]
+        io_error: io::Error,
+    },
+}
+
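+/// A view over a BPF_MAP_TYPE_PERF_EVENT_ARRAY map: one perf ring buffer is
+/// opened per CPU, and each buffer's file descriptor is stored in the map so
+/// that eBPF programs can output events to it.
+///
+/// A minimal read loop sketch, assuming `map` is a created perf event array
+/// and that CPU 0 is online; waiting on `cpu_file_descriptors()` before
+/// reading is elided:
+///
+/// ```no_run
+/// # use std::convert::TryFrom;
+/// use bytes::BytesMut;
+/// # fn sketch(map: &aya::maps::Map) -> Result<(), aya::maps::PerfMapError> {
+/// let mut perf_map = aya::maps::PerfMap::try_from(map)?;
+/// let mut buffers = vec![BytesMut::with_capacity(1024)];
+/// let events = perf_map.read_cpu_events(0, &mut buffers)?;
+/// println!("read {} events, lost {}", events.read, events.lost);
+/// # Ok(())
+/// # }
+/// ```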
+pub struct PerfMap<'map> {
+    map: &'map Map,
+    cpu_fds: Vec<(u32, RawFd)>,
+    buffers: Vec<Option<PerfBuffer>>,
+}
+
+impl<'map> PerfMap<'map> {
+    pub fn new(
+        map: &'map Map,
+        cpu_ids: Option<Vec<u32>>,
+        page_count: Option<usize>,
+    ) -> Result<PerfMap<'map>, PerfMapError> {
+        let map_type = map.obj.def.map_type;
+        if map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY {
+            return Err(MapError::InvalidMapType {
+                map_type: map_type as u32,
+            }.into());
+        }
+
+        let mut cpu_ids = match cpu_ids {
+            Some(ids) => ids,
+            None => online_cpus().map_err(|_| PerfMapError::InvalidOnlineCpuFile)?,
+        };
+        if cpu_ids.is_empty() {
+            return Err(PerfMapError::NoCpus);
+        }
+        cpu_ids.sort();
+        // buffers is indexed by absolute cpu id in read_cpu_events(), so it
+        // must cover 0..=max_cpu even when the smallest requested cpu id is
+        // greater than zero
+        let max_cpu = *cpu_ids.last().unwrap();
+        let mut buffers = (0..=max_cpu).map(|_| None).collect::<Vec<_>>();
+
+        let map_fd = map.fd_or_err()?;
+        let page_size = unsafe { sysconf(_SC_PAGESIZE) } as usize;
+
+        let mut cpu_fds = Vec::new();
+        for cpu_id in &cpu_ids {
+            let buf = PerfBuffer::open(*cpu_id, page_size, page_count.unwrap_or(2))?;
+            bpf_map_update_elem(map_fd, cpu_id, &buf.fd, 0)
+                .map_err(|(_, io_error)| PerfMapError::UpdateElementFailed { io_error })?;
+            cpu_fds.push((*cpu_id, buf.fd));
+            buffers[*cpu_id as usize] = Some(buf);
+        }
+
+        Ok(PerfMap {
+            map,
+            cpu_fds,
+            buffers,
+        })
+    }
+
+    pub fn cpu_file_descriptors(&self) -> &[(u32, RawFd)] {
+        self.cpu_fds.as_slice()
+    }
+
+    pub fn read_cpu_events(
+        &mut self,
+        cpu_id: u32,
+        buffers: &mut [BytesMut],
+    ) -> Result<Events, PerfMapError> {
+        let buf = match self.buffers.get_mut(cpu_id as usize) {
+            None | Some(None) => return Err(PerfMapError::InvalidCpu { cpu_id }),
+            Some(Some(buf)) => buf,
+        };
+
+        Ok(buf.read_events(buffers)?)
+    }
+}
+
+impl<'inner> TryFrom<&'inner Map> for PerfMap<'inner> {
+    type Error = PerfMapError;
+
+    fn try_from(inner: &'inner Map) -> Result<PerfMap<'inner>, PerfMapError> {
+        PerfMap::new(inner, None, None)
+    }
+}
+
+fn online_cpus() -> Result<Vec<u32>, ()> {
+    let data = fs::read_to_string(ONLINE_CPUS).map_err(|_| ())?;
+    parse_online_cpus(data.trim())
+}
+
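+// parses the kernel's cpu list format: comma separated entries, where each
+// entry is either a single cpu id ("3") or an inclusive range ("0-7")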
+fn parse_online_cpus(data: &str) -> Result<Vec<u32>, ()> {
+    let mut cpus = Vec::new();
+    for range in data.split(',') {
+        cpus.extend({
+            match range
+                .splitn(2, '-')
+                .map(u32::from_str)
+                .collect::<Result<Vec<_>, _>>()
+                .map_err(|_| ())?
+                .as_slice()
+            {
+                &[] | &[_, _, _, ..] => return Err(()),
+                &[start] => start..=start,
+                &[start, end] => start..=end,
+            }
+        })
+    }
+
+    Ok(cpus)
+}
+
+#[cfg_attr(test, allow(unused_variables))]
+unsafe fn mmap(
+    addr: *mut c_void,
+    len: usize,
+    prot: c_int,
+    flags: c_int,
+    fd: i32,
+    offset: i64,
+) -> *mut c_void {
+    #[cfg(not(test))]
+    return libc::mmap(addr, len, prot, flags, fd, offset);
+
+    #[cfg(test)]
+    use crate::syscalls::TEST_MMAP_RET;
+
+    #[cfg(test)]
+    TEST_MMAP_RET.with(|ret| *ret.borrow())
+}
+
+#[derive(Debug)]
+#[repr(C)]
+pub struct Sample {
+    header: perf_event_header,
+    pub size: u32,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct LostSamples {
+    header: perf_event_header,
+    pub id: u64,
+    pub count: u64,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{
+        generated::perf_event_mmap_page,
+        syscalls::{override_syscall, Syscall, TEST_MMAP_RET},
+    };
+
+    use std::{convert::TryInto, fmt::Debug, iter::FromIterator, mem};
+
+    #[test]
+    fn test_parse_online_cpus() {
+        assert_eq!(parse_online_cpus("0").unwrap(), vec![0]);
+        assert_eq!(parse_online_cpus("0,1").unwrap(), vec![0, 1]);
+        assert_eq!(parse_online_cpus("0,1,2").unwrap(), vec![0, 1, 2]);
+        assert_eq!(parse_online_cpus("0-7").unwrap(), Vec::from_iter(0..=7));
+        assert_eq!(parse_online_cpus("0-3,4-7").unwrap(), Vec::from_iter(0..=7));
+        assert_eq!(parse_online_cpus("0-5,6,7").unwrap(), Vec::from_iter(0..=7));
+        assert!(parse_online_cpus("").is_err());
+        assert!(parse_online_cpus("0-1,2-").is_err());
+        assert!(parse_online_cpus("foo").is_err());
+    }
+
+    const PAGE_SIZE: usize = 4096;
+    union MMappedBuf {
+        mmap_page: perf_event_mmap_page,
+        data: [u8; PAGE_SIZE * 2],
+    }
+
+    fn fake_mmap(buf: &mut MMappedBuf) {
+        override_syscall(|call| match call {
+            Syscall::PerfEventOpen { .. } | Syscall::PerfEventIoctl { .. } => Ok(42),
+            _ => panic!(),
+        });
+        TEST_MMAP_RET.with(|ret| *ret.borrow_mut() = buf as *const _ as *mut _);
+    }
+
+    #[test]
+    fn test_invalid_page_count() {
+        assert!(matches!(
+            PerfBuffer::open(1, PAGE_SIZE, 0),
+            Err(PerfBufferError::InvalidPageCount { .. })
+        ));
+        assert!(matches!(
+            PerfBuffer::open(1, PAGE_SIZE, 3),
+            Err(PerfBufferError::InvalidPageCount { .. })
+        ));
+        assert!(matches!(
+            PerfBuffer::open(1, PAGE_SIZE, 5),
+            Err(PerfBufferError::InvalidPageCount { .. })
+        ));
+    }
+
+    #[test]
+    fn test_no_out_bufs() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mut mmapped_buf);
+
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+        assert!(matches!(
+            buf.read_events(&mut []),
+            Err(PerfBufferError::NoBuffers)
+        ))
+    }
+
+    #[test]
+    fn test_no_events() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mut mmapped_buf);
+
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+        let out_buf = BytesMut::with_capacity(4);
+        assert_eq!(
+            buf.read_events(&mut [out_buf]).unwrap(),
+            Events { read: 0, lost: 0 }
+        );
+    }
+
+    #[test]
+    fn test_read_first_lost() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mut mmapped_buf);
+
+        let evt = LostSamples {
+            header: perf_event_header {
+                type_: PERF_RECORD_LOST,
+                misc: 0,
+                size: mem::size_of::<LostSamples>() as u16,
+            },
+            id: 1,
+            count: 0xCAFEBABE,
+        };
+        write(&mut mmapped_buf, 0, evt);
+
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+        let out_buf = BytesMut::with_capacity(0);
+        let events = buf.read_events(&mut [out_buf]).unwrap();
+        assert_eq!(events.lost, 0xCAFEBABE);
+    }
+
+    #[repr(C)]
+    #[derive(Debug)]
+    struct PerfSample<T: Debug> {
+        s_hdr: Sample,
+        value: T,
+    }
+
+    fn write<T: Debug>(mmapped_buf: &mut MMappedBuf, offset: usize, value: T) -> usize {
+        let dst = (mmapped_buf as *const _ as usize + PAGE_SIZE + offset) as *const PerfSample<T>
+            as *mut T;
+        unsafe {
+            ptr::write_unaligned(dst, value);
+            mmapped_buf.mmap_page.data_head = (offset + mem::size_of::<T>()) as u64;
+            mmapped_buf.mmap_page.data_head as usize
+        }
+    }
+
+    fn write_sample<T: Debug>(mmapped_buf: &mut MMappedBuf, offset: usize, value: T) -> usize {
+        let sample = PerfSample {
+            s_hdr: Sample {
+                header: perf_event_header {
+                    type_: PERF_RECORD_SAMPLE,
+                    misc: 0,
+                    size: mem::size_of::<PerfSample<T>>() as u16,
+                },
+                size: mem::size_of::<T>() as u32,
+            },
+            value,
+        };
+        write(mmapped_buf, offset, sample)
+    }
+
+    fn u32_from_buf(buf: &[u8]) -> u32 {
+        u32::from_ne_bytes(buf[..4].try_into().unwrap())
+    }
+
+    fn u64_from_buf(buf: &[u8]) -> u64 {
+        u64::from_ne_bytes(buf[..8].try_into().unwrap())
+    }
+
+    #[test]
+    fn test_read_first_sample() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mut mmapped_buf);
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+
+        write_sample(&mut mmapped_buf, 0, 0xCAFEBABEu32);
+
+        let mut out_bufs = [BytesMut::with_capacity(4)];
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 1 });
+        assert_eq!(u32_from_buf(&out_bufs[0]), 0xCAFEBABE);
+    }
+
+    #[test]
+    fn test_read_many_with_many_reads() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mut mmapped_buf);
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+
+        let next = write_sample(&mut mmapped_buf, 0, 0xCAFEBABEu32);
+        write_sample(&mut mmapped_buf, next, 0xBADCAFEu32);
+
+        let mut out_bufs = [BytesMut::with_capacity(4)];
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 1 });
+        assert_eq!(u32_from_buf(&out_bufs[0]), 0xCAFEBABE);
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 1 });
+        assert_eq!(u32_from_buf(&out_bufs[0]), 0xBADCAFE);
+    }
+
+    #[test]
+    fn test_read_many_with_one_read() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mut mmapped_buf);
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+
+        let next = write_sample(&mut mmapped_buf, 0, 0xCAFEBABEu32);
+        write_sample(&mut mmapped_buf, next, 0xBADCAFEu32);
+
+        let mut out_bufs = (0..3)
+            .map(|_| BytesMut::with_capacity(4))
+            .collect::<Vec<_>>();
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 2 });
+        assert_eq!(u32_from_buf(&out_bufs[0]), 0xCAFEBABE);
+        assert_eq!(u32_from_buf(&out_bufs[1]), 0xBADCAFE);
+    }
+
+    #[test]
+    fn test_read_last_sample() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mut mmapped_buf);
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+
+        let offset = PAGE_SIZE - mem::size_of::<PerfSample<u32>>();
+        mmapped_buf.mmap_page.data_tail = offset as u64;
+        write_sample(&mut mmapped_buf, offset, 0xCAFEBABEu32);
+
+        let mut out_bufs = [BytesMut::with_capacity(4)];
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 1 });
+        assert_eq!(u32_from_buf(&out_bufs[0]), 0xCAFEBABE);
+    }
+
+    #[test]
+    fn test_read_wrapping_sample_size() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mut mmapped_buf);
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+
+        let header = perf_event_header {
+            type_: PERF_RECORD_SAMPLE,
+            misc: 0,
+            size: mem::size_of::<PerfSample<u64>>() as u16,
+        };
+
+        let offset = PAGE_SIZE - mem::size_of::<perf_event_header>() - 2;
+        mmapped_buf.mmap_page.data_tail = offset as u64;
+        write(&mut mmapped_buf, offset, header);
+        write(&mut mmapped_buf, PAGE_SIZE - 2, 0x0004u16);
+        write(&mut mmapped_buf, 0, 0x0000u16);
+        write(&mut mmapped_buf, 2, 0xBAADCAFEu32);
+
+        let mut out_bufs = [BytesMut::with_capacity(8)];
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 1 });
+        assert_eq!(u32_from_buf(&out_bufs[0]), 0xBAADCAFE);
+    }
+
+    #[test]
+    fn test_read_wrapping_value() {
+        let mut mmapped_buf = MMappedBuf {
+            data: [0; PAGE_SIZE * 2],
+        };
+        fake_mmap(&mut mmapped_buf);
+        let mut buf = PerfBuffer::open(1, PAGE_SIZE, 1).unwrap();
+
+        let sample = PerfSample {
+            s_hdr: Sample {
+                header: perf_event_header {
+                    type_: PERF_RECORD_SAMPLE,
+                    misc: 0,
+                    size: mem::size_of::<PerfSample<u64>>() as u16,
+                },
+                size: mem::size_of::<u64>() as u32,
+            },
+            value: 0xCAFEBABEu32,
+        };
+
+        let offset = PAGE_SIZE - mem::size_of::<PerfSample<u32>>();
+        mmapped_buf.mmap_page.data_tail = offset as u64;
+        write(&mut mmapped_buf, offset, sample);
+        write(&mut mmapped_buf, 0, 0xBAADCAFEu32);
+
+        let mut out_bufs = [BytesMut::with_capacity(8)];
+
+        let events = buf.read_events(&mut out_bufs).unwrap();
+        assert_eq!(events, Events { lost: 0, read: 1 });
+        assert_eq!(u64_from_buf(&out_bufs[0]), 0xBAADCAFECAFEBABE);
+    }
+}

+ 414 - 0
src/obj/mod.rs

@@ -0,0 +1,414 @@
+mod relocation;
+
+use object::{
+    pod,
+    read::{Object as ElfObject, ObjectSection, Section},
+    Endianness, ObjectSymbol, ObjectSymbolTable, SectionIndex, SymbolIndex,
+};
+use std::{
+    collections::HashMap,
+    convert::{TryFrom, TryInto},
+    ffi::{CStr, CString},
+    mem,
+    str::FromStr,
+};
+use thiserror::Error;
+
+pub use self::relocation::{relocate, RelocationError};
+
+use crate::{
+    bpf_map_def,
+    generated::{bpf_insn, bpf_map_type::BPF_MAP_TYPE_ARRAY},
+    obj::relocation::{Relocation, Symbol},
+};
+
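+// magic value in the ELF "version" section that is treated as matching any
+// kernel version, rather than a specific LINUX_VERSION_CODE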
+const KERNEL_VERSION_ANY: u32 = 0xFFFF_FFFE;
+
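+/// An eBPF object file parsed from ELF: the license and kernel version it
+/// declares, its maps and programs, and the relocation and symbol
+/// information needed to link them.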
+#[derive(Debug, Clone)]
+pub struct Object {
+    pub(crate) endianness: Endianness,
+    pub license: CString,
+    pub kernel_version: KernelVersion,
+    pub(crate) maps: HashMap<String, Map>,
+    pub(crate) programs: HashMap<String, Program>,
+    pub(crate) relocations: HashMap<SectionIndex, Vec<Relocation>>,
+    pub(crate) symbol_table: HashMap<SymbolIndex, Symbol>,
+}
+
+#[derive(Debug, Clone)]
+pub struct Map {
+    pub(crate) name: String,
+    pub(crate) def: bpf_map_def,
+    pub(crate) section_index: usize,
+    pub(crate) data: Vec<u8>,
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct Program {
+    pub(crate) license: CString,
+    pub(crate) kernel_version: KernelVersion,
+    pub(crate) instructions: Vec<bpf_insn>,
+    pub(crate) kind: ProgramKind,
+    pub(crate) section_index: SectionIndex,
+}
+
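+/// The kind of an eBPF program, parsed from strings like "kprobe" or "xdp"
+/// (see the FromStr impl below), which typically come from ELF section
+/// names.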
+#[derive(Debug, Copy, Clone)]
+pub enum ProgramKind {
+    KProbe,
+    UProbe,
+    Xdp,
+    TracePoint,
+}
+
+impl FromStr for ProgramKind {
+    type Err = ParseError;
+
+    fn from_str(kind: &str) -> Result<ProgramKind, ParseError> {
+        use ProgramKind::*;
+        Ok(match kind {
+            "kprobe" => KProbe,
+            "uprobe" => UProbe,
+            "xdp" => Xdp,
+            "trace_point" => TracePoint,
+            _ => {
+                return Err(ParseError::InvalidProgramKind {
+                    kind: kind.to_string(),
+                })
+            }
+        })
+    }
+}
+
+impl Object {
+    pub(crate) fn parse(data: &[u8]) -> Result<Object, ParseError> {
+        let obj = object::read::File::parse(data).map_err(|source| ParseError::Error { source })?;
+        let endianness = obj.endianness();
+
+        let section = obj
+            .section_by_name("license")
+            .ok_or(ParseError::MissingLicense)?;
+        let license = parse_license(BPFSection::try_from(&section)?.data)?;
+
+        let section = obj
+            .section_by_name("version")
+            .ok_or(ParseError::MissingKernelVersion)?;
+        let kernel_version = parse_version(BPFSection::try_from(&section)?.data, endianness)?;
+
+        let mut bpf_obj = Object {
+            endianness: endianness.into(),
+            license,
+            kernel_version,
+            maps: HashMap::new(),
+            programs: HashMap::new(),
+            relocations: HashMap::new(),
+            symbol_table: HashMap::new(),
+        };
+
+        for s in obj.sections() {
+            parse_section(&mut bpf_obj, BPFSection::try_from(&s)?)?;
+        }
+
+        if let Some(symbol_table) = obj.symbol_table() {
+            for symbol in symbol_table.symbols() {
+                bpf_obj.symbol_table.insert(
+                    symbol.index(),
+                    Symbol {
+                        name: symbol.name().ok().map(String::from),
+                        section_index: symbol.section().index(),
+                        address: symbol.address(),
+                    },
+                );
+            }
+        }
+
+        Ok(bpf_obj)
+    }
+}
+
+#[derive(Debug, Clone, Error)]
+pub enum ParseError {
+    #[error("error parsing ELF data")]
+    Error {
+        #[source]
+        source: object::read::Error,
+    },
+
+    #[error("no license specified")]
+    MissingLicense,
+
+    #[error("invalid license `{data:?}`: missing NULL terminator")]
+    MissingLicenseNullTerminator { data: Vec<u8> },
+
+    #[error("invalid license `{data:?}`")]
+    InvalidLicense { data: Vec<u8> },
+
+    #[error("missing kernel version")]
+    MissingKernelVersion,
+
+    #[error("invalid kernel version `{data:?}`")]
+    InvalidKernelVersion { data: Vec<u8> },
+
+    #[error("error parsing section with index {index}")]
+    SectionError {
+        index: usize,
+        #[source]
+        source: object::read::Error,
+    },
+
+    #[error("unsupported relocation")]
+    UnsupportedRelocationKind,
+
+    #[error("invalid program kind `{kind}`")]
+    InvalidProgramKind { kind: String },
+
+    #[error("error parsing program `{name}`")]
+    InvalidProgramCode { name: String },
+
+    #[error("error parsing map `{name}`")]
+    InvalidMapDefinition { name: String },
+}
+
+struct BPFSection<'s> {
+    index: SectionIndex,
+    name: &'s str,
+    data: &'s [u8],
+    relocations: Vec<Relocation>,
+}
+
+impl<'data, 'file, 's> TryFrom<&'s Section<'data, 'file>> for BPFSection<'s> {
+    type Error = ParseError;
+
+    fn try_from(section: &'s Section) -> Result<BPFSection<'s>, ParseError> {
+        let index = section.index();
+        let map_err = |source| ParseError::SectionError {
+            index: index.0,
+            source,
+        };
+        Ok(BPFSection {
+            index,
+            name: section.name().map_err(map_err)?,
+            data: section.data().map_err(map_err)?,
+            relocations: section
+                .relocations()
+                .map(|(offset, r)| {
+                    Ok(Relocation {
+                        kind: r.kind(),
+                        target: r.target(),
+                        addend: r.addend(),
+                        offset,
+                    })
+                })
+                .collect::<Result<Vec<_>, _>>()?,
+        })
+    }
+}
+
+fn parse_license(data: &[u8]) -> Result<CString, ParseError> {
+    if data.len() < 2 {
+        return Err(ParseError::InvalidLicense {
+            data: data.to_vec(),
+        });
+    }
+    if data[data.len() - 1] != 0 {
+        return Err(ParseError::MissingLicenseNullTerminator {
+            data: data.to_vec(),
+        });
+    }
+
+    Ok(CStr::from_bytes_with_nul(data)
+        .map_err(|_| ParseError::InvalidLicense {
+            data: data.to_vec(),
+        })?
+        .to_owned())
+}
+
+fn parse_version(data: &[u8], endianness: object::Endianness) -> Result<KernelVersion, ParseError> {
+    let data = match data.len() {
+        4 => data.try_into().unwrap(),
+        _ => {
+            return Err(ParseError::InvalidKernelVersion {
+                data: data.to_vec(),
+            })
+        }
+    };
+
+    let v = match endianness {
+        object::Endianness::Big => u32::from_be_bytes(data),
+        object::Endianness::Little => u32::from_le_bytes(data),
+    };
+
+    Ok(match v {
+        KERNEL_VERSION_ANY => KernelVersion::Any,
+        v => KernelVersion::Version(v),
+    })
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum KernelVersion {
+    Version(u32),
+    Any,
+}
+
+impl From<KernelVersion> for u32 {
+    fn from(version: KernelVersion) -> u32 {
+        match version {
+            KernelVersion::Any => KERNEL_VERSION_ANY,
+            KernelVersion::Version(v) => v,
+        }
+    }
+}
+
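+// builds a `Map` from a section: `.bss`/`.data*`/`.rodata*` sections become
+// single-entry array maps holding the section contents, while `maps/`
+// sections are parsed as `bpf_map_def` structs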
+fn parse_map(section: &BPFSection, name: &str) -> Result<Map, ParseError> {
+    let (def, data) = if name == ".bss" || name.starts_with(".data") || name.starts_with(".rodata")
+    {
+        let def = bpf_map_def {
+            map_type: BPF_MAP_TYPE_ARRAY,
+            key_size: mem::size_of::<u32>() as u32,
+            value_size: section.data.len() as u32,
+            max_entries: 1,
+            map_flags: 0, /* FIXME: set rodata readonly */
+        };
+        (def, section.data.to_vec())
+    } else {
+        (parse_map_def(name, section.data)?, Vec::new())
+    };
+
+    Ok(Map {
+        section_index: section.index.0,
+        name: name.to_string(),
+        def,
+        data,
+    })
+}
+
+fn parse_map_def(name: &str, data: &[u8]) -> Result<bpf_map_def, ParseError> {
+    let (def, rest) =
+        pod::from_bytes::<bpf_map_def>(data).map_err(|_| ParseError::InvalidMapDefinition {
+            name: name.to_string(),
+        })?;
+    if !rest.is_empty() {
+        return Err(ParseError::InvalidMapDefinition {
+            name: name.to_string(),
+        });
+    }
+
+    Ok(*def)
+}
+
+fn parse_program(bpf: &Object, section: &BPFSection, ty: &str) -> Result<Program, ParseError> {
+    let (code, rest) = pod::slice_from_bytes::<bpf_insn>(
+        section.data,
+        section.data.len() / mem::size_of::<bpf_insn>(),
+    )
+    .map_err(|_| ParseError::InvalidProgramCode {
+        name: section.name.to_string(),
+    })?;
+
+    if !rest.is_empty() {
+        return Err(ParseError::InvalidProgramCode {
+            name: section.name.to_string(),
+        });
+    }
+
+    Ok(Program {
+        section_index: section.index,
+        license: bpf.license.clone(),
+        kernel_version: bpf.kernel_version,
+        instructions: code.to_vec(),
+        kind: ProgramKind::from_str(ty)?,
+    })
+}
+
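+// dispatches a section by name: data sections and `maps/` sections become
+// maps; `kprobe/`, `uprobe/`, `xdp/` and `trace_point/` sections become
+// programs, with their relocations recorded for later patching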
+fn parse_section(bpf: &mut Object, section: BPFSection) -> Result<(), ParseError> {
+    let parts = section.name.split("/").collect::<Vec<_>>();
+
+    match parts.as_slice() {
+        &[name] if name == ".bss" || name.starts_with(".data") || name.starts_with(".rodata") => {
+            bpf.maps
+                .insert(name.to_string(), parse_map(&section, name)?);
+        }
+        &["maps", name] => {
+            bpf.maps
+                .insert(name.to_string(), parse_map(&section, name)?);
+        }
+        &[ty @ "kprobe", name]
+        | &[ty @ "uprobe", name]
+        | &[ty @ "xdp", name]
+        | &[ty @ "trace_point", name] => {
+            bpf.programs
+                .insert(name.to_string(), parse_program(bpf, &section, ty)?);
+            if !section.relocations.is_empty() {
+                bpf.relocations.insert(section.index, section.relocations);
+            }
+        }
+
+        _ => {}
+    }
+
+    Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use object::Endianness;
+
+    #[test]
+    fn test_parse_generic_error() {
+        assert!(matches!(
+            Object::parse(&b"foo"[..]),
+            Err(ParseError::Error { .. })
+        ))
+    }
+
+    #[test]
+    fn test_parse_license() {
+        assert!(matches!(
+            parse_license(b""),
+            Err(ParseError::InvalidLicense { .. })
+        ));
+
+        assert!(matches!(
+            parse_license(b"\0"),
+            Err(ParseError::InvalidLicense { .. })
+        ));
+
+        assert!(matches!(
+            parse_license(b"GPL"),
+            Err(ParseError::MissingLicenseNullTerminator { .. })
+        ));
+
+        assert_eq!(parse_license(b"GPL\0").unwrap().to_str().unwrap(), "GPL");
+    }
+
+    #[test]
+    fn test_parse_version() {
+        assert!(matches!(
+            parse_version(b"", Endianness::Little),
+            Err(ParseError::InvalidKernelVersion { .. })
+        ));
+
+        assert!(matches!(
+            parse_version(b"123", Endianness::Little),
+            Err(ParseError::InvalidKernelVersion { .. })
+        ));
+
+        assert_eq!(
+            parse_version(&0xFFFF_FFFEu32.to_le_bytes(), Endianness::Little)
+                .expect("failed to parse magic version"),
+            KernelVersion::Any
+        );
+
+        assert_eq!(
+            parse_version(&0xFFFF_FFFEu32.to_be_bytes(), Endianness::Big)
+                .expect("failed to parse magic version"),
+            KernelVersion::Any
+        );
+
+        assert_eq!(
+            parse_version(&1234u32.to_le_bytes(), Endianness::Little)
+                .expect("failed to parse magic version"),
+            KernelVersion::Version(1234)
+        );
+    }
+}

+ 108 - 0
src/obj/relocation.rs

@@ -0,0 +1,108 @@
+use std::collections::HashMap;
+
+use object::{RelocationKind, RelocationTarget, SectionIndex};
+use thiserror::Error;
+
+use super::Object;
+use crate::{
+    generated::{bpf_insn, BPF_PSEUDO_MAP_FD, BPF_PSEUDO_MAP_VALUE},
+    maps::Map,
+};
+
+#[derive(Debug, Clone, Error)]
+pub enum RelocationError {
+    #[error("unknown symbol, index `{index}`")]
+    UnknownSymbol { index: usize },
+
+    #[error("unknown symbol section, index `{index}`")]
+    UnknownSymbolSection { index: usize },
+
+    #[error("section `{section_index}` not found, referenced by symbol `{}`",
+            .symbol_name.clone().unwrap_or_else(|| .symbol_index.to_string()))]
+    RelocationSectionNotFound {
+        section_index: usize,
+        symbol_index: usize,
+        symbol_name: Option<String>,
+    },
+
+    #[error("the map `{name}` at section `{section_index}` has not been created")]
+    MapNotCreated { section_index: usize, name: String },
+
+    #[error("invalid relocation offset `{offset}`")]
+    InvalidRelocationOffset { offset: u64 },
+}
+
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct Relocation {
+    pub(crate) kind: RelocationKind,
+    pub(crate) target: RelocationTarget,
+    pub(crate) offset: u64,
+    pub(crate) addend: i64,
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct Symbol {
+    pub(crate) section_index: Option<SectionIndex>,
+    pub(crate) name: Option<String>,
+    pub(crate) address: u64,
+}
+
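+/// Applies map relocations to the programs in `obj`: every relocation that
+/// targets a map symbol is resolved against `maps`, and the corresponding
+/// instruction is patched with the fd of the map created earlier.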
+pub fn relocate(obj: &mut Object, maps: &[Map]) -> Result<(), RelocationError> {
+    let maps_by_section = maps
+        .iter()
+        .map(|map| (map.obj.section_index, map))
+        .collect::<HashMap<_, _>>();
+
+    for program in obj.programs.values_mut() {
+        if let Some(relocations) = obj.relocations.get(&program.section_index) {
+            for rel in relocations {
+                match rel.target {
+                    RelocationTarget::Symbol(index) => {
+                        let sym = obj
+                            .symbol_table
+                            .get(&index)
+                            .ok_or(RelocationError::UnknownSymbol { index: index.0 })?;
+
+                        let section_index = sym
+                            .section_index
+                            .ok_or(RelocationError::UnknownSymbolSection { index: index.0 })?;
+
+                        let map = maps_by_section.get(&section_index.0).ok_or(
+                            RelocationError::RelocationSectionNotFound {
+                                symbol_index: index.0,
+                                symbol_name: sym.name.clone(),
+                                section_index: section_index.0,
+                            },
+                        )?;
+
+                        let map_fd = map.fd.ok_or_else(|| RelocationError::MapNotCreated {
+                            name: map.obj.name.clone(),
+                            section_index: section_index.0,
+                        })?;
+
+                        let instructions = &mut program.instructions;
+                        let ins_index =
+                            (rel.offset / std::mem::size_of::<bpf_insn>() as u64) as usize;
+                        if ins_index >= instructions.len() {
+                            return Err(RelocationError::InvalidRelocationOffset {
+                                offset: rel.offset,
+                            });
+                        }
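+                        // map loads are ldimm64 (two slot) instructions:
+                        // src_reg selects fd vs map-value semantics, the
+                        // first slot's imm holds the map fd and, for map
+                        // values, the second slot's imm holds the offset of
+                        // the symbol inside the map value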
+                        if !map.obj.data.is_empty() {
+                            instructions[ins_index].set_src_reg(BPF_PSEUDO_MAP_VALUE as u8);
+                            instructions[ins_index + 1].imm =
+                                instructions[ins_index].imm + sym.address as i32;
+                        } else {
+                            instructions[ins_index].set_src_reg(BPF_PSEUDO_MAP_FD as u8);
+                        }
+                        instructions[ins_index].imm = map_fd;
+                    }
+                    RelocationTarget::Section(_index) => {}
+                    RelocationTarget::Absolute => todo!(),
+                }
+            }
+        }
+    }
+
+    Ok(())
+}

+ 262 - 0
src/programs/mod.rs

@@ -0,0 +1,262 @@
+mod perf_attach;
+mod probe;
+mod socket_filter;
+mod trace_point;
+mod xdp;
+
+use libc::ENOSPC;
+use perf_attach::*;
+pub use probe::*;
+pub use socket_filter::*;
+pub use trace_point::*;
+pub use xdp::*;
+
+use std::{
+    cell::RefCell,
+    cmp,
+    ffi::CStr,
+    io,
+    os::raw::c_uint,
+    path::PathBuf,
+    rc::{Rc, Weak},
+};
+
+use thiserror::Error;
+
+use crate::{obj, syscalls::bpf_load_program, RawFd};
+#[derive(Debug, Error)]
+pub enum ProgramError {
+    #[error("the program {program} is already loaded")]
+    AlreadyLoaded { program: String },
+
+    #[error("the program {program} is not loaded")]
+    NotLoaded { program: String },
+
+    #[error("the BPF_PROG_LOAD syscall for `{program}` failed: {io_error}\nVerifier output:\n{verifier_log}")]
+    LoadFailed {
+        program: String,
+        io_error: io::Error,
+        verifier_log: String,
+    },
+
+    #[error("FIXME")]
+    AlreadyDetached,
+
+    #[error("the perf_event_open syscall failed: {io_error}")]
+    PerfEventOpenFailed { io_error: io::Error },
+
+    #[error("PERF_EVENT_IOC_SET_BPF/PERF_EVENT_IOC_ENABLE failed: {io_error}")]
+    PerfEventAttachFailed { io_error: io::Error },
+
+    #[error("the program {program} is not attached")]
+    NotAttached { program: String },
+
+    #[error("error attaching {program}: BPF_LINK_CREATE failed with {io_error}")]
+    BpfLinkCreateFailed {
+        program: String,
+        #[source]
+        io_error: io::Error,
+    },
+
+    #[error("unkown network interface {name}")]
+    UnkownInterface { name: String },
+
+    #[error("error reading ld.so.cache file")]
+    InvalidLdSoCache { error_kind: io::ErrorKind },
+
+    #[error("could not resolve uprobe target {path}")]
+    InvalidUprobeTarget { path: PathBuf },
+
+    #[error("error resolving symbol: {error}")]
+    UprobeSymbolError { symbol: String, error: String },
+
+    #[error("setsockopt SO_ATTACH_BPF failed: {io_error}")]
+    SocketFilterError { io_error: io::Error },
+
+    #[error("{message}")]
+    Other { message: String },
+}
+
+#[derive(Debug)]
+pub(crate) struct ProgramData {
+    pub(crate) name: String,
+    pub(crate) obj: obj::Program,
+    pub(crate) fd: Option<RawFd>,
+    pub(crate) links: Vec<Rc<RefCell<dyn Link>>>,
+}
+
+#[derive(Debug)]
+pub enum Program {
+    KProbe(KProbe),
+    UProbe(UProbe),
+    TracePoint(TracePoint),
+    SocketFilter(SocketFilter),
+    Xdp(Xdp),
+}
+
+impl Program {
+    pub fn load(&mut self) -> Result<(), ProgramError> {
+        load_program(self.prog_type(), self.data_mut())
+    }
+
+    fn prog_type(&self) -> c_uint {
+        use crate::generated::bpf_prog_type::*;
+        match self {
+            Program::KProbe(_) => BPF_PROG_TYPE_KPROBE,
+            Program::UProbe(_) => BPF_PROG_TYPE_KPROBE,
+            Program::TracePoint(_) => BPF_PROG_TYPE_TRACEPOINT,
+            Program::SocketFilter(_) => BPF_PROG_TYPE_SOCKET_FILTER,
+            Program::Xdp(_) => BPF_PROG_TYPE_XDP,
+        }
+    }
+
+    fn data_mut(&mut self) -> &mut ProgramData {
+        match self {
+            Program::KProbe(p) => &mut p.data,
+            Program::UProbe(p) => &mut p.data,
+            Program::TracePoint(p) => &mut p.data,
+            Program::SocketFilter(p) => &mut p.data,
+            Program::Xdp(p) => &mut p.data,
+        }
+    }
+}
+
+impl ProgramData {
+    fn fd_or_err(&self) -> Result<RawFd, ProgramError> {
+        self.fd.ok_or(ProgramError::NotLoaded {
+            program: self.name.clone(),
+        })
+    }
+}
+
+impl Drop for ProgramData {
+    fn drop(&mut self) {}
+}
+
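+// the kernel rejects verifier log buffers larger than this (UINT_MAX >> 8)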
+const MAX_LOG_BUF_SIZE: usize = (std::u32::MAX >> 8) as usize;
+
+pub struct VerifierLog {
+    buf: Vec<u8>,
+}
+
+impl VerifierLog {
+    fn new() -> VerifierLog {
+        VerifierLog { buf: Vec::new() }
+    }
+
+    pub(crate) fn buf(&mut self) -> &mut Vec<u8> {
+        &mut self.buf
+    }
+
+    fn grow(&mut self) {
+        self.buf.reserve(cmp::max(
+            1024 * 4,
+            cmp::min(MAX_LOG_BUF_SIZE, self.buf.capacity() * 2),
+        ));
+        self.buf.resize(self.buf.capacity(), 0);
+    }
+
+    fn reset(&mut self) {
+        if !self.buf.is_empty() {
+            self.buf[0] = 0;
+        }
+    }
+
+    fn truncate(&mut self) {
+        if self.buf.is_empty() {
+            return;
+        }
+
+        let pos = self
+            .buf
+            .iter()
+            .position(|b| *b == 0)
+            .unwrap_or(self.buf.len() - 1);
+        self.buf.truncate(pos + 1);
+    }
+
+    pub fn as_c_str(&self) -> Option<&CStr> {
+        if self.buf.is_empty() {
+            None
+        } else {
+            Some(CStr::from_bytes_with_nul(&self.buf).unwrap())
+        }
+    }
+}
+
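+// loads a program with BPF_PROG_LOAD; the first attempt runs without a
+// verifier log buffer, and on failure (or whenever the kernel returns
+// ENOSPC) the buffer is grown and the load retried, up to three attempts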
+fn load_program(prog_type: c_uint, data: &mut ProgramData) -> Result<(), ProgramError> {
+    let ProgramData { obj, fd, name, .. } = data;
+    if fd.is_some() {
+        return Err(ProgramError::AlreadyLoaded {
+            program: name.to_string(),
+        });
+    }
+    let crate::obj::Program {
+        instructions,
+        license,
+        kernel_version,
+        ..
+    } = obj;
+
+    let mut ret = Ok(1);
+    let mut log_buf = VerifierLog::new();
+    for i in 0..3 {
+        log_buf.reset();
+
+        ret = match bpf_load_program(
+            prog_type,
+            instructions,
+            license,
+            (*kernel_version).into(),
+            &mut log_buf,
+        ) {
+            Ok(prog_fd) => {
+                *fd = Some(prog_fd as RawFd);
+                return Ok(());
+            }
+            Err((_, io_error)) if i == 0 || io_error.raw_os_error() == Some(ENOSPC) => {
+                log_buf.grow();
+                continue;
+            }
+            x => x,
+        };
+    }
+
+    if let Err((_, io_error)) = ret {
+        log_buf.truncate();
+        return Err(ProgramError::LoadFailed {
+            program: name.clone(),
+            io_error,
+            verifier_log: log_buf.as_c_str().unwrap().to_string_lossy().to_string(),
+        });
+    }
+
+    Ok(())
+}
+
+pub trait Link: std::fmt::Debug {
+    fn detach(&mut self) -> Result<(), ProgramError>;
+}
+
+#[derive(Debug)]
+pub(crate) struct LinkRef<T: Link> {
+    inner: Weak<RefCell<T>>,
+}
+
+impl<T: Link> LinkRef<T> {
+    fn new(inner: &Rc<RefCell<T>>) -> LinkRef<T> {
+        LinkRef {
+            inner: Rc::downgrade(inner),
+        }
+    }
+}
+
+impl<T: Link> Link for LinkRef<T> {
+    fn detach(&mut self) -> Result<(), ProgramError> {
+        if let Some(inner) = self.inner.upgrade() {
+            inner.borrow_mut().detach()
+        } else {
+            Err(ProgramError::AlreadyDetached)
+        }
+    }
+}

+ 46 - 0
src/programs/perf_attach.rs

@@ -0,0 +1,46 @@
+use std::{cell::RefCell, rc::Rc};
+
+use libc::close;
+
+use crate::{
+    syscalls::perf_event_ioctl, RawFd, PERF_EVENT_IOC_DISABLE, PERF_EVENT_IOC_ENABLE,
+    PERF_EVENT_IOC_SET_BPF,
+};
+
+use super::{Link, LinkRef, ProgramData, ProgramError};
+
+#[derive(Debug)]
+struct PerfLink {
+    perf_fd: Option<RawFd>,
+}
+
+impl Link for PerfLink {
+    fn detach(&mut self) -> Result<(), ProgramError> {
+        if let Some(fd) = self.perf_fd.take() {
+            let _ = perf_event_ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
+            unsafe { close(fd) };
+            Ok(())
+        } else {
+            Err(ProgramError::AlreadyDetached)
+        }
+    }
+}
+
+impl Drop for PerfLink {
+    fn drop(&mut self) {
+        let _ = self.detach();
+    }
+}
+
+pub(crate) fn perf_attach(data: &mut ProgramData, fd: RawFd) -> Result<impl Link, ProgramError> {
+    let link = Rc::new(RefCell::new(PerfLink { perf_fd: Some(fd) }));
+    data.links.push(link.clone());
+
+    let prog_fd = data.fd_or_err()?;
+    perf_event_ioctl(fd, PERF_EVENT_IOC_SET_BPF, prog_fd)
+        .map_err(|(_, io_error)| ProgramError::PerfEventAttachFailed { io_error })?;
+    perf_event_ioctl(fd, PERF_EVENT_IOC_ENABLE, 0)
+        .map_err(|(_, io_error)| ProgramError::PerfEventAttachFailed { io_error })?;
+
+    Ok(LinkRef::new(&link))
+}

+ 326 - 0
src/programs/probe.rs

@@ -0,0 +1,326 @@
+use libc::pid_t;
+use object::{Object, ObjectSymbol};
+use std::{
+    ffi::CStr,
+    fs,
+    io::{self, BufRead, Cursor, Read},
+    mem,
+    os::raw::c_char,
+    path::{Path, PathBuf},
+};
+use thiserror::Error;
+
+use crate::{
+    generated::bpf_prog_type::BPF_PROG_TYPE_KPROBE,
+    programs::{load_program, ProgramData, ProgramError},
+    syscalls::perf_event_open_probe,
+};
+
+use super::{perf_attach, Link};
+
+lazy_static! {
+    static ref LD_SO_CACHE: Result<LdSoCache, io::Error> = LdSoCache::load("/etc/ld.so.cache");
+}
+const LD_SO_CACHE_HEADER: &str = "glibc-ld.so.cache1.1";
+
+#[derive(Debug)]
+pub struct KProbe {
+    pub(crate) data: ProgramData,
+}
+
+#[derive(Debug)]
+pub struct UProbe {
+    pub(crate) data: ProgramData,
+}
+
+impl KProbe {
+    pub fn load(&mut self) -> Result<(), ProgramError> {
+        load_program(BPF_PROG_TYPE_KPROBE, &mut self.data)
+    }
+
+    pub fn name(&self) -> String {
+        self.data.name.to_string()
+    }
+
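+    /// Attaches the program to the kernel function `fn_name`, `offset` bytes
+    /// into it, optionally limited to events from `pid`. A minimal usage
+    /// sketch, assuming `kprobe` is a loaded `KProbe` and with an
+    /// illustrative target symbol:
+    ///
+    /// ```ignore
+    /// kprobe.load()?;
+    /// let _link = kprobe.attach("try_to_wake_up", 0, None)?;
+    /// ```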
+    pub fn attach(
+        &mut self,
+        fn_name: &str,
+        offset: u64,
+        pid: Option<pid_t>,
+    ) -> Result<impl Link, ProgramError> {
+        attach(&mut self.data, ProbeKind::KProbe, fn_name, offset, pid)
+    }
+}
+
+impl UProbe {
+    pub fn load(&mut self) -> Result<(), ProgramError> {
+        load_program(BPF_PROG_TYPE_KPROBE, &mut self.data)
+    }
+
+    pub fn name(&self) -> String {
+        self.data.name.to_string()
+    }
+
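+    /// Attaches the program to `target`, which may be an absolute path, a
+    /// library name resolved via ld.so.cache, or a library mapped by `pid`.
+    /// A minimal usage sketch, assuming `uprobe` is a loaded `UProbe`:
+    ///
+    /// ```ignore
+    /// uprobe.load()?;
+    /// let _link = uprobe.attach(Some("malloc"), 0, "libc", None)?;
+    /// ```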
+    pub fn attach<T: AsRef<Path>>(
+        &mut self,
+        fn_name: Option<&str>,
+        offset: u64,
+        target: T,
+        pid: Option<pid_t>,
+    ) -> Result<impl Link, ProgramError> {
+        let target = target.as_ref();
+        let target_str = &*target.as_os_str().to_string_lossy();
+
+        let mut path = if let Some(pid) = pid {
+            find_lib_in_proc_maps(pid, &target_str).map_err(|io_error| ProgramError::Other {
+                message: format!("error parsing /proc/{}/maps: {}", pid, io_error),
+            })?
+        } else {
+            None
+        };
+
+        if path.is_none() {
+            path = if target.is_absolute() {
+                Some(target_str)
+            } else {
+                let cache =
+                    LD_SO_CACHE
+                        .as_ref()
+                        .map_err(|io_error| ProgramError::InvalidLdSoCache {
+                            error_kind: io_error.kind(),
+                        })?;
+                cache.resolve(target_str)
+            }
+            .map(String::from)
+        };
+
+        let path = path.ok_or(ProgramError::InvalidUprobeTarget {
+            path: target.to_owned(),
+        })?;
+
+        let sym_offset = if let Some(fn_name) = fn_name {
+            resolve_symbol(&path, fn_name).map_err(|error| ProgramError::UprobeSymbolError {
+                symbol: fn_name.to_string(),
+                error: error.to_string(),
+            })?
+        } else {
+            0
+        };
+
+        attach(
+            &mut self.data,
+            ProbeKind::UProbe,
+            &path,
+            sym_offset + offset,
+            pid,
+        )
+    }
+}
+
+enum ProbeKind {
+    KProbe,
+    KRetProbe,
+    UProbe,
+    URetProbe,
+}
+
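+// reads the probe PMU type (and the retprobe bit for return probes) from
+// sysfs, opens a probe perf event and attaches the program to it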
+fn attach(
+    program_data: &mut ProgramData,
+    kind: ProbeKind,
+    name: &str,
+    offset: u64,
+    pid: Option<pid_t>,
+) -> Result<impl Link, ProgramError> {
+    use ProbeKind::*;
+
+    let perf_ty = read_sys_fs_perf_type(match kind {
+        KProbe | KRetProbe => "kprobe",
+        UProbe | URetProbe => "uprobe",
+    })?;
+    let ret_bit = match kind {
+        KRetProbe => Some(read_sys_fs_perf_ret_probe("kprobe")?),
+        URetProbe => Some(read_sys_fs_perf_ret_probe("uprobe")?),
+        _ => None,
+    };
+
+    let fd = perf_event_open_probe(perf_ty, ret_bit, name, offset, pid)
+        .map_err(|(_code, io_error)| ProgramError::PerfEventOpenFailed { io_error })?
+        as i32;
+
+    perf_attach(program_data, fd)
+}
+
+fn proc_maps_libs(pid: pid_t) -> Result<Vec<(String, String)>, io::Error> {
+    let maps_file = format!("/proc/{}/maps", pid);
+    let data = fs::read_to_string(maps_file)?;
+
+    Ok(data
+        .lines()
+        .filter_map(|line| {
+            let line = line.split_whitespace().last()?;
+            if line.starts_with('/') {
+                let path = PathBuf::from(line);
+                let key = path.file_name().unwrap().to_string_lossy().into_owned();
+                Some((key, path.to_string_lossy().to_string()))
+            } else {
+                None
+            }
+        })
+        .collect())
+}
+
+fn find_lib_in_proc_maps(pid: pid_t, lib: &str) -> Result<Option<String>, io::Error> {
+    let libs = proc_maps_libs(pid)?;
+
+    let ret = if lib.contains(".so") {
+        libs.iter().find(|(k, _)| k.as_str().starts_with(lib))
+    } else {
+        let lib = lib.to_string();
+        let lib1 = lib.clone() + ".so";
+        let lib2 = lib + "-";
+        libs.iter()
+            .find(|(k, _)| k.starts_with(&lib1) || k.starts_with(&lib2))
+    };
+
+    Ok(ret.map(|(_, v)| v.clone()))
+}
+
+#[derive(Debug)]
+pub(crate) struct CacheEntry {
+    key: String,
+    value: String,
+    flags: i32,
+}
+
+#[derive(Debug)]
+pub(crate) struct LdSoCache {
+    entries: Vec<CacheEntry>,
+}
+
+impl LdSoCache {
+    pub fn load<T: AsRef<Path>>(path: T) -> Result<Self, io::Error> {
+        let data = fs::read(path)?;
+        Self::parse(&data)
+    }
+
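+    // parses the glibc ld.so.cache "1.1" format: after the header come the
+    // entry count, the string table length and a few reserved words, then one
+    // record per entry (flags plus key and value offsets pointing into the
+    // string table at the end of the file)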
+    fn parse(data: &[u8]) -> Result<Self, io::Error> {
+        let mut cursor = Cursor::new(data);
+
+        let read_u32 = |cursor: &mut Cursor<_>| -> Result<u32, io::Error> {
+            let mut buf = [0u8; mem::size_of::<u32>()];
+            cursor.read_exact(&mut buf)?;
+
+            Ok(u32::from_ne_bytes(buf))
+        };
+
+        let read_i32 = |cursor: &mut Cursor<_>| -> Result<i32, io::Error> {
+            let mut buf = [0u8; mem::size_of::<i32>()];
+            cursor.read_exact(&mut buf)?;
+
+            Ok(i32::from_ne_bytes(buf))
+        };
+
+        let mut buf = [0u8; LD_SO_CACHE_HEADER.len()];
+        cursor.read_exact(&mut buf)?;
+        let header = std::str::from_utf8(&buf).map_err(|_| {
+            io::Error::new(io::ErrorKind::InvalidData, "invalid ld.so.cache header")
+        })?;
+        if header != LD_SO_CACHE_HEADER {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "invalid ld.so.cache header",
+            ));
+        }
+
+        let num_entries = read_u32(&mut cursor)?;
+        let _str_tab_len = read_u32(&mut cursor)?;
+        cursor.consume(5 * mem::size_of::<u32>());
+
+        let mut entries = Vec::new();
+        for _ in 0..num_entries {
+            let flags = read_i32(&mut cursor)?;
+            let k_pos = read_u32(&mut cursor)? as usize;
+            let v_pos = read_u32(&mut cursor)? as usize;
+            cursor.consume(12);
+            let key =
+                unsafe { CStr::from_ptr(cursor.get_ref()[k_pos..].as_ptr() as *const c_char) }
+                    .to_string_lossy()
+                    .into_owned();
+            let value =
+                unsafe { CStr::from_ptr(cursor.get_ref()[v_pos..].as_ptr() as *const c_char) }
+                    .to_string_lossy()
+                    .into_owned();
+            entries.push(CacheEntry { key, value, flags });
+        }
+
+        Ok(LdSoCache { entries })
+    }
+
+    pub fn resolve(&self, lib: &str) -> Option<&str> {
+        let lib = if !lib.contains(".so") {
+            lib.to_string() + ".so"
+        } else {
+            lib.to_string()
+        };
+        self.entries
+            .iter()
+            .find(|entry| entry.key.starts_with(&lib))
+            .map(|entry| entry.value.as_str())
+    }
+}
+
+#[derive(Error, Debug)]
+enum ResolveSymbolError {
+    #[error("io error {0}")]
+    Io(#[from] io::Error),
+
+    #[error("error parsing ELF {0}")]
+    Object(#[from] object::Error),
+
+    #[error("unknown symbol {0}")]
+    Unknown(String),
+}
+
+fn resolve_symbol(path: &str, symbol: &str) -> Result<u64, ResolveSymbolError> {
+    let data = fs::read(path)?;
+    let obj = object::read::File::parse(&data)?;
+
+    obj.dynamic_symbols()
+        .chain(obj.symbols())
+        .find(|sym| sym.name().map(|name| name == symbol).unwrap_or(false))
+        .map(|s| s.address())
+        .ok_or_else(|| ResolveSymbolError::Unknown(symbol.to_string()))
+}
+
+pub fn read_sys_fs_perf_type(pmu: &str) -> Result<u32, ProgramError> {
+    let file = format!("/sys/bus/event_source/devices/{}/type", pmu);
+
+    let perf_ty = fs::read_to_string(&file).map_err(|e| ProgramError::Other {
+        message: format!("error parsing {}: {}", file, e),
+    })?;
+    let perf_ty = perf_ty
+        .trim()
+        .parse::<u32>()
+        .map_err(|e| ProgramError::Other {
+            message: format!("error parsing {}: {}", file, e),
+        })?;
+
+    Ok(perf_ty)
+}
+
+pub fn read_sys_fs_perf_ret_probe(pmu: &str) -> Result<u32, ProgramError> {
+    let file = format!("/sys/bus/event_source/devices/{}/format/retprobe", pmu);
+
+    let data = fs::read_to_string(&file).map_err(|e| ProgramError::Other {
+        message: format!("error parsing {}: {}", file, e),
+    })?;
+
+    let mut parts = data.trim().splitn(2, ":").skip(1);
+    let config = parts.next().ok_or(ProgramError::Other {
+        message: format!("error parsing {}: `{}'", file, data),
+    })?;
+    config.parse::<u32>().map_err(|e| ProgramError::Other {
+        message: format!("error parsing {}: {}", file, e),
+    })
+}

+ 39 - 0
src/programs/socket_filter.rs

@@ -0,0 +1,39 @@
+use std::{io, mem, os::unix::prelude::RawFd};
+
+use libc::{setsockopt, SOL_SOCKET, SO_ATTACH_BPF};
+
+use crate::generated::bpf_prog_type::BPF_PROG_TYPE_SOCKET_FILTER;
+
+use super::{load_program, ProgramData, ProgramError};
+
+#[derive(Debug)]
+pub struct SocketFilter {
+    pub(crate) data: ProgramData,
+}
+
+impl SocketFilter {
+    pub fn load(&mut self) -> Result<(), ProgramError> {
+        load_program(BPF_PROG_TYPE_SOCKET_FILTER, &mut self.data)
+    }
+
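+    /// Attaches the filter to `socket` with setsockopt(SO_ATTACH_BPF).
+    /// A minimal usage sketch, assuming `filter` is a loaded `SocketFilter`
+    /// and `sock` an already open socket fd:
+    ///
+    /// ```ignore
+    /// filter.load()?;
+    /// filter.attach(sock)?;
+    /// ```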
+    pub fn attach(&self, socket: RawFd) -> Result<(), ProgramError> {
+        let prog_fd = self.data.fd_or_err()?;
+
+        let ret = unsafe {
+            setsockopt(
+                socket,
+                SOL_SOCKET,
+                SO_ATTACH_BPF,
+                &prog_fd as *const _ as *const _,
+                mem::size_of::<RawFd>() as u32,
+            )
+        };
+        if ret < 0 {
+            return Err(ProgramError::SocketFilterError {
+                io_error: io::Error::last_os_error(),
+            });
+        }
+
+        Ok(())
+    }
+}

+ 40 - 0
src/programs/trace_point.rs

@@ -0,0 +1,40 @@
+use std::fs;
+
+use crate::{
+    generated::bpf_prog_type::BPF_PROG_TYPE_TRACEPOINT, syscalls::perf_event_open_trace_point,
+};
+
+use super::{load_program, perf_attach, Link, ProgramData, ProgramError};
+
+#[derive(Debug)]
+pub struct TracePoint {
+    pub(crate) data: ProgramData,
+}
+
+impl TracePoint {
+    pub fn load(&mut self) -> Result<(), ProgramError> {
+        load_program(BPF_PROG_TYPE_TRACEPOINT, &mut self.data)
+    }
+
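+    /// Attaches to the trace point identified by `category` and `name`, as
+    /// listed under /sys/kernel/debug/tracing/events. A minimal usage
+    /// sketch, assuming `tp` is a loaded `TracePoint`:
+    ///
+    /// ```ignore
+    /// tp.load()?;
+    /// let _link = tp.attach("syscalls", "sys_enter_openat")?;
+    /// ```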
+    pub fn attach(&mut self, category: &str, name: &str) -> Result<impl Link, ProgramError> {
+        let id = read_sys_fs_trace_point_id(category, name)?;
+        let fd = perf_event_open_trace_point(id)
+            .map_err(|(_code, io_error)| ProgramError::PerfEventOpenFailed { io_error })?
+            as i32;
+
+        perf_attach(&mut self.data, fd)
+    }
+}
+
+fn read_sys_fs_trace_point_id(category: &str, name: &str) -> Result<u32, ProgramError> {
+    let file = format!("/sys/kernel/debug/tracing/events/{}/{}/id", category, name);
+
+    let id = fs::read_to_string(&file).map_err(|e| ProgramError::Other {
+        message: format!("error parsing {}: {}", file, e),
+    })?;
+    let id = id.trim().parse::<u32>().map_err(|e| ProgramError::Other {
+        message: format!("error parsing {}: {}", file, e),
+    })?;
+
+    Ok(id)
+}

+ 45 - 0
src/programs/xdp.rs

@@ -0,0 +1,45 @@
+use std::ffi::CString;
+
+use libc::if_nametoindex;
+
+use crate::generated::{bpf_attach_type::BPF_XDP, bpf_prog_type::BPF_PROG_TYPE_XDP};
+use crate::syscalls::bpf_link_create;
+use crate::RawFd;
+
+use super::{load_program, ProgramData, ProgramError};
+
+#[derive(Debug)]
+pub struct Xdp {
+    pub(crate) data: ProgramData,
+}
+
+impl Xdp {
+    pub fn load(&mut self) -> Result<(), ProgramError> {
+        load_program(BPF_PROG_TYPE_XDP, &mut self.data)
+    }
+
+    pub fn name(&self) -> String {
+        self.data.name.to_string()
+    }
+
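+    /// Attaches the program to `interface` using BPF_LINK_CREATE. A minimal
+    /// usage sketch, assuming `xdp` is a loaded `Xdp` program:
+    ///
+    /// ```ignore
+    /// xdp.load()?;
+    /// xdp.attach("eth0")?;
+    /// ```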
+    pub fn attach(&self, interface: &str) -> Result<(), ProgramError> {
+        let prog_fd = self.data.fd_or_err()?;
+
+        let c_interface = CString::new(interface).unwrap();
+        let if_index = unsafe { if_nametoindex(c_interface.as_ptr()) } as RawFd;
+        if if_index == 0 {
+            return Err(ProgramError::UnknownInterface {
+                name: interface.to_string(),
+            });
+        }
+
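+        // keep the returned link fd open (leaked for now): closing the last
+        // fd referencing a BPF link detaches the program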
+        let _link_fd = bpf_link_create(prog_fd, if_index, BPF_XDP, 0).map_err(|(_, io_error)| {
+            ProgramError::BpfLinkCreateFailed {
+                program: self.name(),
+                io_error,
+            }
+        })?;
+
+        Ok(())
+    }
+}

+ 180 - 0
src/syscalls/bpf.rs

@@ -0,0 +1,180 @@
+use std::{
+    cmp,
+    ffi::CStr,
+    io,
+    mem::{self, MaybeUninit},
+    slice,
+};
+
+use libc::{c_long, c_uint, ENOENT};
+
+use crate::{
+    bpf_map_def,
+    generated::{bpf_attach_type, bpf_attr, bpf_cmd, bpf_insn},
+    programs::VerifierLog,
+    syscalls::SysResult,
+    RawFd, BPF_OBJ_NAME_LEN,
+};
+
+use super::{syscall, Syscall};
+
+pub(crate) fn bpf_create_map(name: &CStr, def: &bpf_map_def) -> SysResult {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    let u = unsafe { &mut attr.__bindgen_anon_1 };
+    u.map_type = def.map_type;
+    u.key_size = def.key_size;
+    u.value_size = def.value_size;
+    u.max_entries = def.max_entries;
+    u.map_flags = def.map_flags;
+
+    // u.map_name is 16 bytes max and must be NULL terminated
+    let name_len = cmp::min(name.to_bytes().len(), BPF_OBJ_NAME_LEN - 1);
+    u.map_name[..name_len]
+        .copy_from_slice(unsafe { slice::from_raw_parts(name.as_ptr(), name_len) });
+
+    sys_bpf(bpf_cmd::BPF_MAP_CREATE, &attr)
+}
+
+pub(crate) fn bpf_load_program(
+    ty: c_uint,
+    insns: &[bpf_insn],
+    license: &CStr,
+    kernel_version: u32,
+    log: &mut VerifierLog,
+) -> SysResult {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    let u = unsafe { &mut attr.__bindgen_anon_3 };
+    u.prog_type = ty;
+    u.expected_attach_type = 0;
+    u.insns = insns.as_ptr() as u64;
+    u.insn_cnt = insns.len() as u32;
+    u.license = license.as_ptr() as u64;
+    u.kern_version = kernel_version;
+    let log_buf = log.buf();
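+    // log_level is a bit mask: 1 enables the basic verifier log, 2 verbose
+    // output, 4 verifier stats; 7 turns on all three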
+    if log_buf.capacity() > 0 {
+        u.log_level = 7;
+        u.log_buf = log_buf.as_mut_ptr() as u64;
+        u.log_size = log_buf.capacity() as u32;
+    }
+
+    sys_bpf(bpf_cmd::BPF_PROG_LOAD, &attr)
+}
+
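+// shared implementation of the lookup style commands: ENOENT from the kernel
+// (key not found) is mapped to Ok(None) instead of an error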
+fn lookup<K, V: crate::Pod>(
+    fd: RawFd,
+    key: &K,
+    flags: u64,
+    cmd: bpf_cmd::Type,
+) -> Result<Option<V>, (c_long, io::Error)> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let mut value = MaybeUninit::uninit();
+
+    let u = unsafe { &mut attr.__bindgen_anon_2 };
+    u.map_fd = fd as u32;
+    u.key = key as *const _ as u64;
+    u.__bindgen_anon_1.value = &mut value as *mut _ as u64;
+    u.flags = flags;
+
+    match sys_bpf(cmd, &attr) {
+        Ok(_) => Ok(Some(unsafe { value.assume_init() })),
+        Err((_, io_error)) if io_error.raw_os_error() == Some(ENOENT) => Ok(None),
+        Err(e) => Err(e),
+    }
+}
+
+pub(crate) fn bpf_map_lookup_elem<K, V: crate::Pod>(
+    fd: RawFd,
+    key: &K,
+    flags: u64,
+) -> Result<Option<V>, (c_long, io::Error)> {
+    lookup(fd, key, flags, bpf_cmd::BPF_MAP_LOOKUP_ELEM)
+}
+
+pub(crate) fn bpf_map_lookup_and_delete_elem<K, V: crate::Pod>(
+    fd: RawFd,
+    key: &K,
+) -> Result<Option<V>, (c_long, io::Error)> {
+    lookup(fd, key, 0, bpf_cmd::BPF_MAP_LOOKUP_AND_DELETE_ELEM)
+}
+
+pub(crate) fn bpf_map_update_elem<K, V>(fd: RawFd, key: &K, value: &V, flags: u64) -> SysResult {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    let u = unsafe { &mut attr.__bindgen_anon_2 };
+    u.map_fd = fd as u32;
+    u.key = key as *const _ as u64;
+    u.__bindgen_anon_1.value = value as *const _ as u64;
+    u.flags = flags;
+
+    sys_bpf(bpf_cmd::BPF_MAP_UPDATE_ELEM, &attr)
+}
+
+pub(crate) fn bpf_map_update_elem_ptr<K, V>(
+    fd: RawFd,
+    key: *const K,
+    value: *const V,
+    flags: u64,
+) -> SysResult {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    let u = unsafe { &mut attr.__bindgen_anon_2 };
+    u.map_fd = fd as u32;
+    u.key = key as u64;
+    u.__bindgen_anon_1.value = value as u64;
+    u.flags = flags;
+
+    sys_bpf(bpf_cmd::BPF_MAP_UPDATE_ELEM, &attr)
+}
+
+pub(crate) fn bpf_map_delete_elem<K>(fd: RawFd, key: &K) -> SysResult {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    let u = unsafe { &mut attr.__bindgen_anon_2 };
+    u.map_fd = fd as u32;
+    u.key = key as *const _ as u64;
+
+    sys_bpf(bpf_cmd::BPF_MAP_DELETE_ELEM, &attr)
+}
+
+pub(crate) fn bpf_map_get_next_key<K>(
+    fd: RawFd,
+    key: Option<&K>,
+) -> Result<Option<K>, (c_long, io::Error)> {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    let mut next_key = MaybeUninit::uninit();
+
+    let u = unsafe { &mut attr.__bindgen_anon_2 };
+    u.map_fd = fd as u32;
+    if let Some(key) = key {
+        u.key = key as *const _ as u64;
+    }
+    u.__bindgen_anon_1.next_key = &mut next_key as *mut _ as u64;
+
+    match sys_bpf(bpf_cmd::BPF_MAP_GET_NEXT_KEY, &attr) {
+        Ok(_) => Ok(Some(unsafe { next_key.assume_init() })),
+        Err((_, io_error)) if io_error.raw_os_error() == Some(ENOENT) => Ok(None),
+        Err(e) => Err(e),
+    }
+}
+
+pub(crate) fn bpf_link_create(
+    prog_fd: RawFd,
+    target_fd: RawFd,
+    attach_type: bpf_attach_type::Type,
+    flags: u32,
+) -> SysResult {
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+
+    attr.link_create.prog_fd = prog_fd as u32;
+    attr.link_create.__bindgen_anon_1.target_fd = target_fd as u32;
+    attr.link_create.attach_type = attach_type;
+    attr.link_create.flags = flags;
+
+    sys_bpf(bpf_cmd::BPF_LINK_CREATE, &attr)
+}
+
+fn sys_bpf<'a>(cmd: bpf_cmd::Type, attr: &'a bpf_attr) -> SysResult {
+    syscall(Syscall::Bpf { cmd, attr })
+}

+ 22 - 0
src/syscalls/fake.rs

@@ -0,0 +1,22 @@
+use std::{cell::RefCell, io, ptr};
+
+use libc::c_void;
+
+use super::{SysResult, Syscall};
+
+type SyscallFn = unsafe fn(Syscall) -> SysResult;
+
+#[cfg(test)]
+thread_local! {
+    pub(crate) static TEST_SYSCALL: RefCell<SyscallFn> = RefCell::new(test_syscall);
+    pub(crate) static TEST_MMAP_RET: RefCell<*mut c_void> = RefCell::new(ptr::null_mut());
+}
+
+#[cfg(test)]
+unsafe fn test_syscall(_call: Syscall) -> SysResult {
+    Err((-1, io::Error::from_raw_os_error(libc::EINVAL)))
+}
+
+pub(crate) fn override_syscall(call: SyscallFn) {
+    TEST_SYSCALL.with(|test_impl| *test_impl.borrow_mut() = call);
+}

+ 71 - 0
src/syscalls/mod.rs

@@ -0,0 +1,71 @@
+mod bpf;
+mod perf_event;
+
+#[cfg(test)]
+mod fake;
+
+use std::io;
+
+use libc::{c_int, c_long, c_ulong, pid_t};
+
+pub(crate) use bpf::*;
+#[cfg(test)]
+pub(crate) use fake::*;
+pub(crate) use perf_event::*;
+
+use crate::generated::{bpf_attr, bpf_cmd, perf_event_attr};
+
+pub(crate) type SysResult = Result<c_long, (c_long, io::Error)>;
+
+#[cfg_attr(test, allow(dead_code))]
+pub(crate) enum Syscall<'a> {
+    Bpf {
+        cmd: bpf_cmd::Type,
+        attr: &'a bpf_attr,
+    },
+    PerfEventOpen {
+        attr: perf_event_attr,
+        pid: pid_t,
+        cpu: i32,
+        group: i32,
+        flags: u32,
+    },
+    PerfEventIoctl {
+        fd: c_int,
+        request: c_ulong,
+        arg: c_int,
+    },
+}
+
+fn syscall(call: Syscall) -> SysResult {
+    #[cfg(not(test))]
+    return unsafe { syscall_impl(call) };
+
+    #[cfg(test)]
+    return TEST_SYSCALL.with(|test_impl| unsafe { test_impl.borrow()(call) });
+}
+
+#[cfg(not(test))]
+unsafe fn syscall_impl(call: Syscall) -> SysResult {
+    use libc::{SYS_bpf, SYS_perf_event_open};
+    use std::mem;
+
+    use Syscall::*;
+    let ret = match call {
+        Bpf { cmd, attr } => libc::syscall(SYS_bpf, cmd, attr, mem::size_of::<bpf_attr>()),
+        PerfEventOpen {
+            attr,
+            pid,
+            cpu,
+            group,
+            flags,
+        } => libc::syscall(SYS_perf_event_open, &attr, pid, cpu, group, flags),
+        PerfEventIoctl { fd, request, arg } => libc::ioctl(fd, request, arg) as i64,
+    };
+
+    if ret < 0 {
+        return Err((ret, io::Error::last_os_error()));
+    }
+
+    Ok(ret)
+}

+ 89 - 0
src/syscalls/perf_event.rs

@@ -0,0 +1,89 @@
+use std::{ffi::CString, mem};
+
+use libc::{c_int, c_ulong, pid_t};
+
+use crate::generated::{
+    perf_event_attr,
+    perf_event_sample_format::PERF_SAMPLE_RAW,
+    perf_sw_ids::PERF_COUNT_SW_BPF_OUTPUT,
+    perf_type_id::{PERF_TYPE_SOFTWARE, PERF_TYPE_TRACEPOINT},
+    PERF_FLAG_FD_CLOEXEC,
+};
+
+use super::{syscall, SysResult, Syscall};
+
+pub(crate) fn perf_event_open(cpu: c_int) -> SysResult {
+    let mut attr = unsafe { mem::zeroed::<perf_event_attr>() };
+
+    attr.config = PERF_COUNT_SW_BPF_OUTPUT as u64;
+    attr.size = mem::size_of::<perf_event_attr>() as u32;
+    attr.type_ = PERF_TYPE_SOFTWARE;
+    attr.sample_type = PERF_SAMPLE_RAW as u64;
+    attr.__bindgen_anon_1.sample_period = 1;
+    attr.__bindgen_anon_2.wakeup_events = 1;
+
+    syscall(Syscall::PerfEventOpen {
+        attr,
+        pid: -1,
+        cpu,
+        group: -1,
+        flags: PERF_FLAG_FD_CLOEXEC,
+    })
+}
+
+pub(crate) fn perf_event_open_probe(
+    ty: u32,
+    ret_bit: Option<u32>,
+    name: &str,
+    offset: u64,
+    pid: Option<pid_t>,
+) -> SysResult {
+    let mut attr = unsafe { mem::zeroed::<perf_event_attr>() };
+
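+    // with the kprobe/uprobe PMUs, setting the retprobe bit in config turns
+    // the event into a kretprobe/uretprobe; config1 points to the probed
+    // symbol or path and config2 holds the offset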
+    if let Some(ret_bit) = ret_bit {
+        attr.config = 1 << ret_bit;
+    }
+
+    let c_name = CString::new(name).unwrap();
+
+    attr.size = mem::size_of::<perf_event_attr>() as u32;
+    attr.type_ = ty;
+    attr.__bindgen_anon_3.config1 = c_name.as_ptr() as u64;
+    attr.__bindgen_anon_4.config2 = offset;
+
+    let cpu = if pid.is_some() { -1 } else { 0 };
+    let pid = pid.unwrap_or(-1);
+
+    syscall(Syscall::PerfEventOpen {
+        attr,
+        pid,
+        cpu,
+        group: -1,
+        flags: PERF_FLAG_FD_CLOEXEC,
+    })
+}
+
+pub(crate) fn perf_event_open_trace_point(id: u32) -> SysResult {
+    let mut attr = unsafe { mem::zeroed::<perf_event_attr>() };
+
+    attr.size = mem::size_of::<perf_event_attr>() as u32;
+    attr.type_ = PERF_TYPE_TRACEPOINT;
+    attr.config = id as u64;
+
+    syscall(Syscall::PerfEventOpen {
+        attr,
+        pid: -1,
+        cpu: 0,
+        group: -1,
+        flags: PERF_FLAG_FD_CLOEXEC,
+    })
+}
+
+pub(crate) fn perf_event_ioctl(fd: c_int, request: c_ulong, arg: c_int) -> SysResult {
+    let call = Syscall::PerfEventIoctl { fd, request, arg };
+    #[cfg(not(test))]
+    return syscall(call);
+
+    #[cfg(test)]
+    return crate::syscalls::TEST_SYSCALL.with(|test_impl| unsafe { test_impl.borrow()(call) });
+}