perf_event_byte_array.rs

use core::{cell::UnsafeCell, mem};

use crate::{
    bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_F_CURRENT_CPU},
    helpers::bpf_perf_event_output,
    maps::PinningType,
    BpfContext,
};
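
/// A map of type `BPF_MAP_TYPE_PERF_EVENT_ARRAY`, used to stream raw bytes from
/// an eBPF program to user space through per-CPU perf event ring buffers.
///
/// A minimal usage sketch, assuming this crate is consumed as `aya_bpf` together
/// with the `map`/`kprobe` attribute macros and `ProbeContext` it re-exports; the
/// map and program names are illustrative:
///
/// ```ignore
/// use aya_bpf::{macros::{kprobe, map}, maps::PerfEventByteArray, programs::ProbeContext};
///
/// #[map]
/// static EVENTS: PerfEventByteArray = PerfEventByteArray::new(0);
///
/// #[kprobe]
/// pub fn example_probe(ctx: ProbeContext) -> u32 {
///     // Emit a byte slice to the ring buffer of the CPU the program runs on.
///     EVENTS.output(&ctx, b"hello from eBPF", 0);
///     0
/// }
/// ```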
#[repr(transparent)]
pub struct PerfEventByteArray {
    def: UnsafeCell<bpf_map_def>,
}

unsafe impl Sync for PerfEventByteArray {}
impl PerfEventByteArray {
    /// Creates a perf event array with `max_entries` set to 0, letting the
    /// loader size the map (typically one ring buffer per possible CPU).
    pub const fn new(flags: u32) -> PerfEventByteArray {
        PerfEventByteArray::with_max_entries(0, flags)
    }

    /// Creates a perf event array with an explicit number of entries.
    pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerfEventByteArray {
        PerfEventByteArray {
            def: UnsafeCell::new(bpf_map_def {
                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                key_size: mem::size_of::<u32>() as u32,
                value_size: mem::size_of::<u32>() as u32,
                max_entries,
                map_flags: flags,
                id: 0,
                pinning: PinningType::None as u32,
            }),
        }
    }

    /// Like `with_max_entries`, but the map is pinned by name in the BPF file
    /// system so it can outlive the program that created it.
    pub const fn pinned(max_entries: u32, flags: u32) -> PerfEventByteArray {
        PerfEventByteArray {
            def: UnsafeCell::new(bpf_map_def {
                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                key_size: mem::size_of::<u32>() as u32,
                value_size: mem::size_of::<u32>() as u32,
                max_entries,
                map_flags: flags,
                id: 0,
                pinning: PinningType::ByName as u32,
            }),
        }
    }

    /// Writes `data` to the ring buffer of the CPU the program is currently
    /// running on (`BPF_F_CURRENT_CPU`).
    pub fn output<C: BpfContext>(&self, ctx: &C, data: &[u8], flags: u32) {
        self.output_at_index(ctx, BPF_F_CURRENT_CPU as u32, data, flags)
    }
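
    /// Writes `data` to the ring buffer at `index` in the array. The user
    /// `flags` are packed into the upper 32 bits of the helper's flags argument
    /// and the target `index` into the lower 32 bits, as `bpf_perf_event_output`
    /// expects.
    ///
    /// A hedged sketch of targeting an explicit ring; the `EVENTS` map, the
    /// index, and the payload are illustrative:
    ///
    /// ```ignore
    /// // Send the payload to the ring buffer backing index 0 of the array.
    /// EVENTS.output_at_index(&ctx, 0, b"payload", 0);
    /// ```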
    pub fn output_at_index<C: BpfContext>(&self, ctx: &C, index: u32, data: &[u8], flags: u32) {
        // Upper 32 bits: user flags; lower 32 bits: target index (or BPF_F_CURRENT_CPU).
        let flags = u64::from(flags) << 32 | u64::from(index);
        unsafe {
            bpf_perf_event_output(
                ctx.as_ptr(),
                self.def.get() as *mut _,
                flags,
                data.as_ptr() as *mut _,
                data.len() as u64,
            );
        }
    }
}