//! sock_hash.rs — `SockHash`, an eBPF `BPF_MAP_TYPE_SOCKHASH` map wrapper.
  1. use core::{borrow::Borrow, cell::UnsafeCell, marker::PhantomData, mem};
  2. use aya_ebpf_cty::c_void;
  3. use crate::{
  4. bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_SOCKHASH, bpf_sock_ops},
  5. helpers::{
  6. bpf_map_lookup_elem, bpf_msg_redirect_hash, bpf_sk_assign, bpf_sk_redirect_hash,
  7. bpf_sk_release, bpf_sock_hash_update,
  8. },
  9. maps::PinningType,
  10. programs::{SkBuffContext, SkLookupContext, SkMsgContext},
  11. BpfContext,
  12. };
/// A hash map of sockets (`BPF_MAP_TYPE_SOCKHASH`), keyed by `K`.
///
/// Sockets stored here can be the target of `bpf_msg_redirect_hash`,
/// `bpf_sk_redirect_hash` and `bpf_sk_assign` (see the methods on the
/// `impl` block below).
#[repr(transparent)]
pub struct SockHash<K> {
    // Map definition read by the loader. `UnsafeCell` because the loader /
    // BPF machinery mutates map metadata behind shared references.
    def: UnsafeCell<bpf_map_def>,
    // Zero-sized marker tying the key type `K` to the map.
    _k: PhantomData<K>,
}
// SAFETY: all mutation of `def` goes through the kernel-mediated BPF helper
// calls; the eBPF execution model serializes access, so sharing across
// "threads" (programs) is sound when `K: Sync`.
unsafe impl<K: Sync> Sync for SockHash<K> {}
  19. impl<K> SockHash<K> {
  20. pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockHash<K> {
  21. SockHash {
  22. def: UnsafeCell::new(bpf_map_def {
  23. type_: BPF_MAP_TYPE_SOCKHASH,
  24. key_size: mem::size_of::<K>() as u32,
  25. value_size: mem::size_of::<u32>() as u32,
  26. max_entries,
  27. map_flags: flags,
  28. id: 0,
  29. pinning: PinningType::None as u32,
  30. }),
  31. _k: PhantomData,
  32. }
  33. }
  34. pub const fn pinned(max_entries: u32, flags: u32) -> SockHash<K> {
  35. SockHash {
  36. def: UnsafeCell::new(bpf_map_def {
  37. type_: BPF_MAP_TYPE_SOCKHASH,
  38. key_size: mem::size_of::<K>() as u32,
  39. value_size: mem::size_of::<u32>() as u32,
  40. max_entries,
  41. map_flags: flags,
  42. id: 0,
  43. pinning: PinningType::ByName as u32,
  44. }),
  45. _k: PhantomData,
  46. }
  47. }
  48. pub fn update(&self, key: &mut K, sk_ops: &mut bpf_sock_ops, flags: u64) -> Result<(), i64> {
  49. let ret = unsafe {
  50. bpf_sock_hash_update(
  51. sk_ops as *mut _,
  52. self.def.get() as *mut _,
  53. key as *mut _ as *mut c_void,
  54. flags,
  55. )
  56. };
  57. (ret == 0).then_some(()).ok_or(ret)
  58. }
  59. pub fn redirect_msg(&self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
  60. unsafe {
  61. bpf_msg_redirect_hash(
  62. ctx.as_ptr() as *mut _,
  63. self.def.get() as *mut _,
  64. key as *mut _ as *mut _,
  65. flags,
  66. )
  67. }
  68. }
  69. pub fn redirect_skb(&self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
  70. unsafe {
  71. bpf_sk_redirect_hash(
  72. ctx.as_ptr() as *mut _,
  73. self.def.get() as *mut _,
  74. key as *mut _ as *mut _,
  75. flags,
  76. )
  77. }
  78. }
  79. pub fn redirect_sk_lookup(
  80. &mut self,
  81. ctx: &SkLookupContext,
  82. key: impl Borrow<K>,
  83. flags: u64,
  84. ) -> Result<(), u32> {
  85. unsafe {
  86. let sk = bpf_map_lookup_elem(
  87. &mut self.def as *mut _ as *mut _,
  88. &key as *const _ as *const c_void,
  89. );
  90. if sk.is_null() {
  91. return Err(1);
  92. }
  93. let ret = bpf_sk_assign(ctx.as_ptr() as *mut _, sk, flags);
  94. bpf_sk_release(sk);
  95. (ret == 0).then_some(()).ok_or(1)
  96. }
  97. }
  98. }