sock_map.rs

use core::{cell::UnsafeCell, mem};

use aya_bpf_cty::c_void;

use crate::{
    bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_SOCKMAP, bpf_sock_ops},
    helpers::{
        bpf_map_lookup_elem, bpf_msg_redirect_map, bpf_sk_assign, bpf_sk_redirect_map,
        bpf_sk_release, bpf_sock_map_update,
    },
    maps::PinningType,
    programs::{SkBuffContext, SkLookupContext, SkMsgContext},
    BpfContext,
};

/// A `BPF_MAP_TYPE_SOCKMAP` map: an array of socket references indexed by `u32`,
/// used by sock_ops, sk_skb, sk_msg and sk_lookup programs to store sockets and
/// redirect traffic between them.
#[repr(transparent)]
pub struct SockMap {
    def: UnsafeCell<bpf_map_def>,
}

unsafe impl Sync for SockMap {}

impl SockMap {
    /// Creates a `SockMap` with the given maximum number of entries and map flags.
    pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockMap {
        SockMap {
            def: UnsafeCell::new(bpf_map_def {
                type_: BPF_MAP_TYPE_SOCKMAP,
                key_size: mem::size_of::<u32>() as u32,
                value_size: mem::size_of::<u32>() as u32,
                max_entries,
                map_flags: flags,
                id: 0,
                pinning: PinningType::None as u32,
            }),
        }
    }

    /// Creates a `SockMap` that is pinned by name on the BPF filesystem.
    pub const fn pinned(max_entries: u32, flags: u32) -> SockMap {
        SockMap {
            def: UnsafeCell::new(bpf_map_def {
                type_: BPF_MAP_TYPE_SOCKMAP,
                key_size: mem::size_of::<u32>() as u32,
                value_size: mem::size_of::<u32>() as u32,
                max_entries,
                map_flags: flags,
                id: 0,
                pinning: PinningType::ByName as u32,
            }),
        }
    }

    /// Inserts or updates the socket referenced by `sk_ops` at `index` using the
    /// `bpf_sock_map_update` helper. Returns the helper's error code on failure.
    pub unsafe fn update(
        &self,
        mut index: u32,
        sk_ops: *mut bpf_sock_ops,
        flags: u64,
    ) -> Result<(), i64> {
        let ret = bpf_sock_map_update(
            sk_ops,
            self.def.get() as *mut _,
            &mut index as *mut _ as *mut c_void,
            flags,
        );
        if ret == 0 {
            Ok(())
        } else {
            Err(ret)
        }
    }

    /// Redirects the message in `ctx` to the socket stored at `index` via the
    /// `bpf_msg_redirect_map` helper and returns the helper's verdict.
    pub unsafe fn redirect_msg(&self, ctx: &SkMsgContext, index: u32, flags: u64) -> i64 {
        bpf_msg_redirect_map(
            ctx.as_ptr() as *mut _,
            self.def.get() as *mut _,
            index,
            flags,
        )
    }

    /// Redirects the skb in `ctx` to the socket stored at `index` via the
    /// `bpf_sk_redirect_map` helper and returns the helper's verdict.
    pub unsafe fn redirect_skb(&self, ctx: &SkBuffContext, index: u32, flags: u64) -> i64 {
        bpf_sk_redirect_map(
            ctx.as_ptr() as *mut _,
            self.def.get() as *mut _,
            index,
            flags,
        )
    }

    /// Looks up the socket stored at `index` and assigns it to the sk_lookup
    /// context with `bpf_sk_assign`, releasing the looked-up reference afterwards.
    /// Returns `Err(1)` if the lookup or the assignment fails.
    pub fn redirect_sk_lookup(
        &mut self,
        ctx: &SkLookupContext,
        index: u32,
        flags: u64,
    ) -> Result<(), u32> {
        unsafe {
            let sk = bpf_map_lookup_elem(
                &mut self.def as *mut _ as *mut _,
                &index as *const _ as *const c_void,
            );
            if sk.is_null() {
                return Err(1);
            }
            let ret = bpf_sk_assign(ctx.as_ptr() as *mut _, sk, flags);
            bpf_sk_release(sk);
            (ret == 0).then_some(()).ok_or(1)
        }
    }
}
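
// Example (not part of this file): a minimal sketch of how this map might be used
// from an sk_msg program, assuming the `map` and `sk_msg` attribute macros from
// aya-bpf. The map name `SOCK_MAP`, the entry count (64) and the target index (0)
// are illustrative assumptions, not part of the original source; exact attribute
// syntax may differ between aya-bpf versions.
//
// use aya_bpf::{
//     macros::{map, sk_msg},
//     maps::SockMap,
//     programs::SkMsgContext,
// };
//
// #[map]
// static SOCK_MAP: SockMap = SockMap::with_max_entries(64, 0);
//
// #[sk_msg]
// pub fn redirect_msgs(ctx: SkMsgContext) -> u32 {
//     // Redirect every message to the socket stored at index 0; the helper's
//     // return value is the verdict (pass/drop) for this message.
//     unsafe { SOCK_MAP.redirect_msg(&ctx, 0, 0) as u32 }
// }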