sk_buff.rs

use core::{
    ffi::c_void,
    mem::{self, MaybeUninit},
};

use aya_bpf_bindings::helpers::{
    bpf_clone_redirect, bpf_get_socket_uid, bpf_l3_csum_replace, bpf_l4_csum_replace,
    bpf_skb_adjust_room, bpf_skb_change_type, bpf_skb_load_bytes, bpf_skb_store_bytes,
};
use aya_bpf_cty::c_long;

use crate::{bindings::__sk_buff, BpfContext};

/// A program context wrapping the raw `__sk_buff` pointer handed to the
/// program by the kernel.
pub struct SkBuffContext {
    pub skb: *mut __sk_buff,
}

impl SkBuffContext {
    pub fn new(skb: *mut __sk_buff) -> SkBuffContext {
        SkBuffContext { skb }
    }

    /// Returns the total length of the packet, in bytes.
    #[allow(clippy::len_without_is_empty)]
    #[inline]
    pub fn len(&self) -> u32 {
        unsafe { *self.skb }.len
    }

    /// Sets the packet mark (`skb->mark`).
    #[inline]
    pub fn set_mark(&mut self, mark: u32) {
        // Assign through the pointer so the write lands on the real `__sk_buff`
        // rather than on a temporary copy of it.
        unsafe { (*self.skb).mark = mark }
    }

    /// Returns the skb's `cb` scratch buffer.
    #[inline]
    pub fn cb(&self) -> &[u32] {
        unsafe { &(*self.skb).cb }
    }

    /// Returns the skb's `cb` scratch buffer, mutably.
    #[inline]
    pub fn cb_mut(&mut self) -> &mut [u32] {
        unsafe { &mut (*self.skb).cb }
    }

    /// Returns the owner UID of the socket associated to the SKB context.
    #[inline]
    pub fn get_socket_uid(&self) -> u32 {
        unsafe { bpf_get_socket_uid(self.skb) }
    }

    /// Reads a `T` from the packet at the given byte offset using
    /// `bpf_skb_load_bytes`, returning the helper's error code on failure.
    #[inline]
    pub fn load<T>(&self, offset: usize) -> Result<T, c_long> {
        unsafe {
            let mut data = MaybeUninit::<T>::uninit();
            let ret = bpf_skb_load_bytes(
                self.skb as *const _,
                offset as u32,
                &mut data as *mut _ as *mut _,
                mem::size_of::<T>() as u32,
            );
            if ret == 0 {
                Ok(data.assume_init())
            } else {
                Err(ret)
            }
        }
    }

    /// Writes a `T` into the packet at the given byte offset using
    /// `bpf_skb_store_bytes`. `flags` are forwarded to the helper
    /// (e.g. `BPF_F_RECOMPUTE_CSUM`).
    #[inline]
    pub fn store<T>(&mut self, offset: usize, v: &T, flags: u64) -> Result<(), c_long> {
        unsafe {
            let ret = bpf_skb_store_bytes(
                self.skb as *mut _,
                offset as u32,
                v as *const _ as *const _,
                mem::size_of::<T>() as u32,
                flags,
            );
            if ret == 0 {
                Ok(())
            } else {
                Err(ret)
            }
        }
    }

    /// Incrementally updates the L3 (e.g. IPv4 header) checksum stored at
    /// `offset` after the checksummed data changed from `from` to `to`;
    /// `size` is the width of the changed field (2 or 4 bytes).
    #[inline]
    pub fn l3_csum_replace(
        &self,
        offset: usize,
        from: u64,
        to: u64,
        size: u64,
    ) -> Result<(), c_long> {
        unsafe {
            let ret = bpf_l3_csum_replace(self.skb as *mut _, offset as u32, from, to, size);
            if ret == 0 {
                Ok(())
            } else {
                Err(ret)
            }
        }
    }

    /// Incrementally updates the L4 (e.g. TCP/UDP) checksum stored at
    /// `offset` after the checksummed data changed from `from` to `to`.
    /// The low four bits of `flags` carry the width of the changed field;
    /// `BPF_F_PSEUDO_HDR` marks changes that affect the pseudo-header.
    #[inline]
    pub fn l4_csum_replace(
        &self,
        offset: usize,
        from: u64,
        to: u64,
        flags: u64,
    ) -> Result<(), c_long> {
        unsafe {
            let ret = bpf_l4_csum_replace(self.skb as *mut _, offset as u32, from, to, flags);
            if ret == 0 {
                Ok(())
            } else {
                Err(ret)
            }
        }
    }

    /// Grows or shrinks the room for data in the packet by `len_diff` bytes,
    /// at the layer selected by `mode`.
    #[inline]
    pub fn adjust_room(&self, len_diff: i32, mode: u32, flags: u64) -> Result<(), c_long> {
        let ret = unsafe { bpf_skb_adjust_room(self.as_ptr() as *mut _, len_diff, mode, flags) };
        if ret == 0 {
            Ok(())
        } else {
            Err(ret)
        }
    }

    /// Clones the packet and redirects the clone to the interface with index
    /// `if_index`; `BPF_F_INGRESS` in `flags` selects the ingress path.
    #[inline]
    pub fn clone_redirect(&self, if_index: u32, flags: u64) -> Result<(), c_long> {
        let ret = unsafe { bpf_clone_redirect(self.as_ptr() as *mut _, if_index, flags) };
        if ret == 0 {
            Ok(())
        } else {
            Err(ret)
        }
    }

    /// Changes the packet type (`skb->pkt_type`).
    #[inline]
    pub fn change_type(&self, ty: u32) -> Result<(), c_long> {
        let ret = unsafe { bpf_skb_change_type(self.as_ptr() as *mut _, ty) };
        if ret == 0 {
            Ok(())
        } else {
            Err(ret)
        }
    }
}

impl BpfContext for SkBuffContext {
    fn as_ptr(&self) -> *mut c_void {
        self.skb as *mut _
    }
}
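
// Usage sketch added for illustration; it is not part of the upstream module.
// It shows how `load`, `store`, `l3_csum_replace` and `l4_csum_replace` are
// typically combined in a TC program: rewrite the IPv4 destination address and
// patch both the IPv4 header checksum and the TCP checksum (whose pseudo-header
// covers the addresses). The offsets assume an Ethernet frame carrying an IPv4
// packet with a 20-byte header and TCP on top; the function name, the 10.0.0.2
// address and the local `PSEUDO_HDR` constant (mirroring the kernel's
// `BPF_F_PSEUDO_HDR` value) exist only for this sketch.
#[allow(dead_code)]
fn rewrite_ipv4_daddr_sketch(ctx: &mut SkBuffContext) -> Result<(), c_long> {
    const ETH_HLEN: usize = 14; // Ethernet header length
    const IP_CSUM_OFF: usize = ETH_HLEN + 10; // IPv4 header checksum offset
    const IP_DST_OFF: usize = ETH_HLEN + 16; // IPv4 destination address offset
    const TCP_CSUM_OFF: usize = ETH_HLEN + 20 + 16; // TCP checksum offset (20-byte IPv4 header)
    const PSEUDO_HDR: u64 = 0x10; // same value as the kernel's BPF_F_PSEUDO_HDR

    // 10.0.0.2 kept as raw network-order bytes so it matches the packet layout.
    let new_daddr = u32::from_ne_bytes([10, 0, 0, 2]);

    let old_daddr: u32 = ctx.load(IP_DST_OFF)?;
    if old_daddr == new_daddr {
        return Ok(());
    }

    // Fold the 4-byte change into the IPv4 header checksum and, because the
    // addresses are part of the TCP pseudo-header, into the TCP checksum too.
    ctx.l3_csum_replace(IP_CSUM_OFF, old_daddr as u64, new_daddr as u64, 4)?;
    ctx.l4_csum_replace(TCP_CSUM_OFF, old_daddr as u64, new_daddr as u64, PSEUDO_HDR | 4)?;
    ctx.store(IP_DST_OFF, &new_daddr, 0)?;

    Ok(())
}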