4
0

args.rs 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575
  1. #[cfg(any(
  2. bpf_target_arch = "x86_64",
  3. bpf_target_arch = "arm",
  4. bpf_target_arch = "powerpc64",
  5. bpf_target_arch = "mips",
  6. ))]
  7. use crate::bindings::pt_regs;
  8. // aarch64 uses user_pt_regs instead of pt_regs
  9. #[cfg(any(bpf_target_arch = "aarch64", bpf_target_arch = "s390x"))]
  10. use crate::bindings::user_pt_regs as pt_regs;
  11. // riscv64 uses user_regs_struct instead of pt_regs
  12. #[cfg(bpf_target_arch = "riscv64")]
  13. use crate::bindings::user_regs_struct as pt_regs;
  14. use crate::{bindings::bpf_raw_tracepoint_args, cty::c_void, helpers::bpf_probe_read};
/// A trait that indicates a valid type for an argument which can be coerced from a BTF
/// context.
///
/// A BTF context is laid out as a flat array of word-sized slots, one per
/// function argument (see the `*const T` implementation below).
///
/// Users should not implement this trait.
///
/// SAFETY: This trait is _only_ safe to implement on primitive types that can fit into
/// a `u64`. For example, integers and raw pointers may be coerced from a BTF context.
pub unsafe trait FromBtfArgument: Sized {
    /// Coerces a `T` from the `n`th argument from a BTF context where `n` starts
    /// at 0 and increases by 1 for each successive argument.
    ///
    /// SAFETY: This function is deeply unsafe, as we are reading raw pointers into kernel
    /// memory. In particular, the value of `n` must not exceed the number of function
    /// arguments. Moreover, `ctx` must be a valid pointer to a BTF context, and `T` must
    /// be the right type for the given argument.
    unsafe fn from_argument(ctx: *const c_void, n: usize) -> Self;
}
  32. unsafe impl<T> FromBtfArgument for *const T {
  33. unsafe fn from_argument(ctx: *const c_void, n: usize) -> *const T {
  34. // BTF arguments are exposed as an array of `usize` where `usize` can
  35. // either be treated as a pointer or a primitive type
  36. *(ctx as *const usize).add(n) as _
  37. }
  38. }
  39. /// Helper macro to implement [`FromBtfArgument`] for a primitive type.
  40. macro_rules! unsafe_impl_from_btf_argument {
  41. ($type:ident) => {
  42. unsafe impl FromBtfArgument for $type {
  43. unsafe fn from_argument(ctx: *const c_void, n: usize) -> Self {
  44. // BTF arguments are exposed as an array of `usize` where `usize` can
  45. // either be treated as a pointer or a primitive type
  46. *(ctx as *const usize).add(n) as _
  47. }
  48. }
  49. };
  50. }
// Implement `FromBtfArgument` for every primitive integer type that fits in a
// single word-sized BTF argument slot.
unsafe_impl_from_btf_argument!(u8);
unsafe_impl_from_btf_argument!(u16);
unsafe_impl_from_btf_argument!(u32);
unsafe_impl_from_btf_argument!(u64);
unsafe_impl_from_btf_argument!(i8);
unsafe_impl_from_btf_argument!(i16);
unsafe_impl_from_btf_argument!(i32);
unsafe_impl_from_btf_argument!(i64);
unsafe_impl_from_btf_argument!(usize);
unsafe_impl_from_btf_argument!(isize);
  61. pub struct PtRegs {
  62. regs: *mut pt_regs,
  63. }
  64. /// A portable wrapper around pt_regs, user_pt_regs and user_regs_struct.
  65. impl PtRegs {
  66. pub fn new(regs: *mut pt_regs) -> Self {
  67. PtRegs { regs }
  68. }
  69. /// Returns the value of the register used to pass arg `n`.
  70. pub fn arg<T: FromPtRegs>(&self, n: usize) -> Option<T> {
  71. T::from_argument(unsafe { &*self.regs }, n)
  72. }
  73. /// Returns the value of the register used to pass the return value.
  74. pub fn ret<T: FromPtRegs>(&self) -> Option<T> {
  75. T::from_retval(unsafe { &*self.regs })
  76. }
  77. /// Returns a pointer to the wrapped value.
  78. pub fn as_ptr(&self) -> *mut pt_regs {
  79. self.regs
  80. }
  81. }
/// A trait that indicates a valid type for an argument which can be coerced from
/// a pt_regs context.
///
/// Any implementation of this trait is strictly architecture-specific and depends on the
/// layout of the underlying pt_regs struct and the target processor's calling
/// conventions. Users should not implement this trait.
pub trait FromPtRegs: Sized {
    /// Coerces a `T` from the `n`th argument of a pt_regs context where `n` starts
    /// at 0 and increases by 1 for each successive argument.
    ///
    /// Returns `None` when `n` exceeds the number of argument registers on the
    /// target architecture.
    fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self>;

    /// Coerces a `T` from the return value of a pt_regs context.
    fn from_retval(ctx: &pt_regs) -> Option<Self>;
}
  95. #[cfg(bpf_target_arch = "x86_64")]
  96. impl<T> FromPtRegs for *const T {
  97. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  98. match n {
  99. 0 => unsafe { bpf_probe_read(&ctx.rdi).map(|v| v as *const _).ok() },
  100. 1 => unsafe { bpf_probe_read(&ctx.rsi).map(|v| v as *const _).ok() },
  101. 2 => unsafe { bpf_probe_read(&ctx.rdx).map(|v| v as *const _).ok() },
  102. 3 => unsafe { bpf_probe_read(&ctx.rcx).map(|v| v as *const _).ok() },
  103. 4 => unsafe { bpf_probe_read(&ctx.r8).map(|v| v as *const _).ok() },
  104. 5 => unsafe { bpf_probe_read(&ctx.r9).map(|v| v as *const _).ok() },
  105. _ => None,
  106. }
  107. }
  108. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  109. unsafe { bpf_probe_read(&ctx.rax).map(|v| v as *const _).ok() }
  110. }
  111. }
  112. #[cfg(bpf_target_arch = "arm")]
  113. impl<T> FromPtRegs for *const T {
  114. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  115. if n <= 6 {
  116. unsafe { bpf_probe_read(&ctx.uregs[n]).map(|v| v as *const _).ok() }
  117. } else {
  118. None
  119. }
  120. }
  121. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  122. unsafe { bpf_probe_read(&ctx.uregs[0]).map(|v| v as *const _).ok() }
  123. }
  124. }
  125. #[cfg(bpf_target_arch = "aarch64")]
  126. impl<T> FromPtRegs for *const T {
  127. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  128. if n <= 7 {
  129. unsafe { bpf_probe_read(&ctx.regs[n]).map(|v| v as *const _).ok() }
  130. } else {
  131. None
  132. }
  133. }
  134. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  135. unsafe { bpf_probe_read(&ctx.regs[0]).map(|v| v as *const _).ok() }
  136. }
  137. }
  138. #[cfg(bpf_target_arch = "riscv64")]
  139. impl<T> FromPtRegs for *const T {
  140. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  141. match n {
  142. 0 => unsafe { bpf_probe_read(&ctx.a0).map(|v| v as *const _).ok() },
  143. 1 => unsafe { bpf_probe_read(&ctx.a1).map(|v| v as *const _).ok() },
  144. 2 => unsafe { bpf_probe_read(&ctx.a2).map(|v| v as *const _).ok() },
  145. 3 => unsafe { bpf_probe_read(&ctx.a3).map(|v| v as *const _).ok() },
  146. 4 => unsafe { bpf_probe_read(&ctx.a4).map(|v| v as *const _).ok() },
  147. 5 => unsafe { bpf_probe_read(&ctx.a5).map(|v| v as *const _).ok() },
  148. 6 => unsafe { bpf_probe_read(&ctx.a6).map(|v| v as *const _).ok() },
  149. 7 => unsafe { bpf_probe_read(&ctx.a7).map(|v| v as *const _).ok() },
  150. _ => None,
  151. }
  152. }
  153. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  154. unsafe { bpf_probe_read(&ctx.ra).map(|v| v as *const _).ok() }
  155. }
  156. }
  157. #[cfg(bpf_target_arch = "powerpc64")]
  158. impl<T> FromPtRegs for *const T {
  159. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  160. if n <= 7 {
  161. unsafe { bpf_probe_read(&ctx.gpr[3 + n]).map(|v| v as *const _).ok() }
  162. } else {
  163. None
  164. }
  165. }
  166. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  167. unsafe { bpf_probe_read(&ctx.gpr[3]).map(|v| v as *const _).ok() }
  168. }
  169. }
  170. #[cfg(bpf_target_arch = "s390x")]
  171. impl<T> FromPtRegs for *const T {
  172. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  173. if n <= 4 {
  174. unsafe { bpf_probe_read(&ctx.gprs[2 + n]).map(|v| v as *const _).ok() }
  175. } else {
  176. None
  177. }
  178. }
  179. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  180. unsafe { bpf_probe_read(&ctx.gprs[2]).map(|v| v as *const _).ok() }
  181. }
  182. }
  183. #[cfg(bpf_target_arch = "mips")]
  184. impl<T> FromPtRegs for *const T {
  185. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  186. // Assume N64 ABI like libbpf does.
  187. if n <= 7 {
  188. unsafe { bpf_probe_read(&ctx.regs[n + 4]).map(|v| v as *const _).ok() }
  189. } else {
  190. None
  191. }
  192. }
  193. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  194. unsafe { bpf_probe_read(&ctx.regs[31]).map(|v| v as *const _).ok() }
  195. }
  196. }
  197. #[cfg(bpf_target_arch = "x86_64")]
  198. impl<T> FromPtRegs for *mut T {
  199. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  200. match n {
  201. 0 => unsafe { bpf_probe_read(&ctx.rdi).map(|v| v as *mut _).ok() },
  202. 1 => unsafe { bpf_probe_read(&ctx.rsi).map(|v| v as *mut _).ok() },
  203. 2 => unsafe { bpf_probe_read(&ctx.rdx).map(|v| v as *mut _).ok() },
  204. 3 => unsafe { bpf_probe_read(&ctx.rcx).map(|v| v as *mut _).ok() },
  205. 4 => unsafe { bpf_probe_read(&ctx.r8).map(|v| v as *mut _).ok() },
  206. 5 => unsafe { bpf_probe_read(&ctx.r9).map(|v| v as *mut _).ok() },
  207. _ => None,
  208. }
  209. }
  210. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  211. unsafe { bpf_probe_read(&ctx.rax).map(|v| v as *mut _).ok() }
  212. }
  213. }
  214. #[cfg(bpf_target_arch = "arm")]
  215. impl<T> FromPtRegs for *mut T {
  216. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  217. if n <= 6 {
  218. unsafe { bpf_probe_read(&ctx.uregs[n]).map(|v| v as *mut _).ok() }
  219. } else {
  220. None
  221. }
  222. }
  223. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  224. unsafe { bpf_probe_read(&ctx.uregs[0]).map(|v| v as *mut _).ok() }
  225. }
  226. }
  227. #[cfg(bpf_target_arch = "aarch64")]
  228. impl<T> FromPtRegs for *mut T {
  229. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  230. if n <= 7 {
  231. unsafe { bpf_probe_read(&ctx.regs[n]).map(|v| v as *mut _).ok() }
  232. } else {
  233. None
  234. }
  235. }
  236. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  237. unsafe { bpf_probe_read(&ctx.regs[0]).map(|v| v as *mut _).ok() }
  238. }
  239. }
  240. #[cfg(bpf_target_arch = "riscv64")]
  241. impl<T> FromPtRegs for *mut T {
  242. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  243. match n {
  244. 0 => unsafe { bpf_probe_read(&ctx.a0).map(|v| v as *mut _).ok() },
  245. 1 => unsafe { bpf_probe_read(&ctx.a1).map(|v| v as *mut _).ok() },
  246. 2 => unsafe { bpf_probe_read(&ctx.a2).map(|v| v as *mut _).ok() },
  247. 3 => unsafe { bpf_probe_read(&ctx.a3).map(|v| v as *mut _).ok() },
  248. 4 => unsafe { bpf_probe_read(&ctx.a4).map(|v| v as *mut _).ok() },
  249. 5 => unsafe { bpf_probe_read(&ctx.a5).map(|v| v as *mut _).ok() },
  250. 6 => unsafe { bpf_probe_read(&ctx.a6).map(|v| v as *mut _).ok() },
  251. 7 => unsafe { bpf_probe_read(&ctx.a7).map(|v| v as *mut _).ok() },
  252. _ => None,
  253. }
  254. }
  255. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  256. unsafe { bpf_probe_read(&ctx.ra).map(|v| v as *mut _).ok() }
  257. }
  258. }
  259. #[cfg(bpf_target_arch = "powerpc64")]
  260. impl<T> FromPtRegs for *mut T {
  261. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  262. if n <= 7 {
  263. unsafe { bpf_probe_read(&ctx.gpr[3 + n]).map(|v| v as *mut _).ok() }
  264. } else {
  265. None
  266. }
  267. }
  268. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  269. unsafe { bpf_probe_read(&ctx.gpr[3]).map(|v| v as *mut _).ok() }
  270. }
  271. }
  272. #[cfg(bpf_target_arch = "s390x")]
  273. impl<T> FromPtRegs for *mut T {
  274. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  275. if n <= 4 {
  276. unsafe { bpf_probe_read(&ctx.gprs[2 + n]).map(|v| v as *mut _).ok() }
  277. } else {
  278. None
  279. }
  280. }
  281. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  282. unsafe { bpf_probe_read(&ctx.gprs[2]).map(|v| v as *mut _).ok() }
  283. }
  284. }
  285. #[cfg(bpf_target_arch = "mips")]
  286. impl<T> FromPtRegs for *mut T {
  287. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  288. // Assume N64 ABI like libbpf does.
  289. if n <= 7 {
  290. unsafe { bpf_probe_read(&ctx.regs[n + 4]).map(|v| v as *mut _).ok() }
  291. } else {
  292. None
  293. }
  294. }
  295. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  296. unsafe { bpf_probe_read(&ctx.regs[31]).map(|v| v as *mut _).ok() }
  297. }
  298. }
  299. /// Helper macro to implement [`FromPtRegs`] for a primitive type.
  300. macro_rules! impl_from_pt_regs {
  301. ($type:ident) => {
  302. #[cfg(bpf_target_arch = "x86_64")]
  303. impl FromPtRegs for $type {
  304. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  305. match n {
  306. 0 => Some(ctx.rdi as *const $type as _),
  307. 1 => Some(ctx.rsi as *const $type as _),
  308. 2 => Some(ctx.rdx as *const $type as _),
  309. 3 => Some(ctx.rcx as *const $type as _),
  310. 4 => Some(ctx.r8 as *const $type as _),
  311. 5 => Some(ctx.r9 as *const $type as _),
  312. _ => None,
  313. }
  314. }
  315. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  316. Some(ctx.rax as *const $type as _)
  317. }
  318. }
  319. #[cfg(bpf_target_arch = "arm")]
  320. impl FromPtRegs for $type {
  321. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  322. if n <= 6 {
  323. Some(ctx.uregs[n] as *const $type as _)
  324. } else {
  325. None
  326. }
  327. }
  328. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  329. Some(ctx.uregs[0] as *const $type as _)
  330. }
  331. }
  332. #[cfg(bpf_target_arch = "aarch64")]
  333. impl FromPtRegs for $type {
  334. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  335. if n <= 7 {
  336. Some(ctx.regs[n] as *const $type as _)
  337. } else {
  338. None
  339. }
  340. }
  341. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  342. Some(ctx.regs[0] as *const $type as _)
  343. }
  344. }
  345. #[cfg(bpf_target_arch = "riscv64")]
  346. impl FromPtRegs for $type {
  347. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  348. match n {
  349. 0 => Some(ctx.a0 as *const $type as _),
  350. 1 => Some(ctx.a1 as *const $type as _),
  351. 2 => Some(ctx.a2 as *const $type as _),
  352. 3 => Some(ctx.a3 as *const $type as _),
  353. 4 => Some(ctx.a4 as *const $type as _),
  354. 5 => Some(ctx.a5 as *const $type as _),
  355. 6 => Some(ctx.a6 as *const $type as _),
  356. 7 => Some(ctx.a7 as *const $type as _),
  357. _ => None,
  358. }
  359. }
  360. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  361. Some(ctx.ra as *const $type as _)
  362. }
  363. }
  364. #[cfg(bpf_target_arch = "powerpc64")]
  365. impl FromPtRegs for $type {
  366. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  367. if n <= 7 {
  368. Some(ctx.gpr[3 + n] as *const $type as _)
  369. } else {
  370. None
  371. }
  372. }
  373. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  374. Some(ctx.gpr[3] as *const $type as _)
  375. }
  376. }
  377. #[cfg(bpf_target_arch = "s390x")]
  378. impl FromPtRegs for $type {
  379. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  380. if n <= 4 {
  381. Some(ctx.gprs[2 + n] as *const $type as _)
  382. } else {
  383. None
  384. }
  385. }
  386. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  387. Some(ctx.gprs[2] as *const $type as _)
  388. }
  389. }
  390. #[cfg(bpf_target_arch = "mips")]
  391. impl FromPtRegs for $type {
  392. fn from_argument(ctx: &pt_regs, n: usize) -> Option<Self> {
  393. if n <= 7 {
  394. Some(ctx.regs[n + 4] as *const $type as _)
  395. } else {
  396. None
  397. }
  398. }
  399. fn from_retval(ctx: &pt_regs) -> Option<Self> {
  400. Some(ctx.regs[31] as *const $type as _)
  401. }
  402. }
  403. };
  404. }
// Implement `FromPtRegs` for every primitive integer type that fits in a
// general-purpose register.
impl_from_pt_regs!(u8);
impl_from_pt_regs!(u16);
impl_from_pt_regs!(u32);
impl_from_pt_regs!(u64);
impl_from_pt_regs!(i8);
impl_from_pt_regs!(i16);
impl_from_pt_regs!(i32);
impl_from_pt_regs!(i64);
impl_from_pt_regs!(usize);
impl_from_pt_regs!(isize);
/// A Rust wrapper on `bpf_raw_tracepoint_args`.
pub struct RawTracepointArgs {
    // Raw pointer to the kernel-provided argument block; not owned.
    args: *mut bpf_raw_tracepoint_args,
}
  419. impl RawTracepointArgs {
  420. /// Creates a new instance of `RawTracepointArgs` from the given
  421. /// `bpf_raw_tracepoint_args` raw pointer to allow easier access
  422. /// to raw tracepoint argumetns.
  423. pub fn new(args: *mut bpf_raw_tracepoint_args) -> Self {
  424. RawTracepointArgs { args }
  425. }
  426. /// Returns the n-th argument of the raw tracepoint.
  427. ///
  428. /// ## Safety
  429. ///
  430. /// This method is unsafe because it performs raw pointer conversion and makes assumptions
  431. /// about the structure of the `bpf_raw_tracepoint_args` type. The tracepoint arguments are
  432. /// represented as an array of `__u64` values. To be precise, the wrapped
  433. /// `bpf_raw_tracepoint_args` binding defines it as `__IncompleteArrayField<__u64>` and the
  434. /// original C type as `__u64 args[0]`. This method provides a way to access these arguments
  435. /// conveniently in Rust using `__IncompleteArrayField<T>::as_slice` to represent that array
  436. /// as a slice of length n and then retrieve the n-th element of it.
  437. ///
  438. /// However, the method does not check the total number of available arguments for a given
  439. /// tracepoint and assumes that the slice has at least `n` elements, leading to undefined
  440. /// behavior if this condition is not met. Such check is impossible to do, because the
  441. /// tracepoint context doesn't contain any information about number of arguments.
  442. ///
  443. /// This method also cannot guarantee that the requested type matches the actual value type.
  444. /// Wrong assumptions about types can lead to undefined behavior. The tracepoint context
  445. /// doesn't provide any type information.
  446. ///
  447. /// The caller is responsible for ensuring they have accurate knowledge of the arguments
  448. /// and their respective types for the accessed tracepoint context.
  449. pub unsafe fn arg<T: FromRawTracepointArgs>(&self, n: usize) -> *const T {
  450. &T::from_argument(&*self.args, n)
  451. }
  452. }
/// A trait for types that can be read out of a raw tracepoint's `__u64`
/// argument array.
pub unsafe trait FromRawTracepointArgs: Sized {
    /// Returns the n-th argument of the raw tracepoint.
    ///
    /// ## Safety
    ///
    /// This method is unsafe because it performs raw pointer conversion and makes assumptions
    /// about the structure of the `bpf_raw_tracepoint_args` type. The tracepoint arguments are
    /// represented as an array of `__u64` values. To be precise, the wrapped
    /// `bpf_raw_tracepoint_args` binding defines it as `__IncompleteArrayField<__u64>` and the
    /// original C type as `__u64 args[0]`. This method provides a way to access these arguments
    /// conveniently in Rust using `__IncompleteArrayField<T>::as_slice` to represent that array
    /// as a slice of length `n + 1` and then retrieve the `n`-th element of it.
    ///
    /// However, the method does not check the total number of available arguments for a given
    /// tracepoint and assumes that the slice has at least `n + 1` elements, leading to undefined
    /// behavior if this condition is not met. Such check is impossible to do, because the
    /// tracepoint context doesn't contain any information about number of arguments.
    ///
    /// This method also cannot guarantee that the requested type matches the actual value type.
    /// Wrong assumptions about types can lead to undefined behavior. The tracepoint context
    /// doesn't provide any type information.
    ///
    /// The caller is responsible for ensuring they have accurate knowledge of the arguments
    /// and their respective types for the accessed tracepoint context.
    unsafe fn from_argument(ctx: &bpf_raw_tracepoint_args, n: usize) -> Self;
}
  479. unsafe impl<T> FromRawTracepointArgs for *const T {
  480. unsafe fn from_argument(ctx: &bpf_raw_tracepoint_args, n: usize) -> *const T {
  481. // Raw tracepoint arguments are exposed as `__u64 args[0]`.
  482. // https://elixir.bootlin.com/linux/v6.5.5/source/include/uapi/linux/bpf.h#L6829
  483. // They are represented as `__IncompleteArrayField<T>` in the Rust
  484. // wraapper.
  485. //
  486. // The most convenient way of accessing such type in Rust is to use
  487. // `__IncompleteArrayField<T>::as_slice` to represent that array as a
  488. // slice of length n and then retrieve the n-th element of it.
  489. //
  490. // We don't know how many arguments are there for the given tracepoint,
  491. // so we just assume that the slice has at least n elements. The whole
  492. // assumntion and implementation is unsafe.
  493. ctx.args.as_slice(n + 1)[n] as *const _
  494. }
  495. }
  496. macro_rules! unsafe_impl_from_raw_tracepoint_args {
  497. ($type:ident) => {
  498. unsafe impl FromRawTracepointArgs for $type {
  499. unsafe fn from_argument(ctx: &bpf_raw_tracepoint_args, n: usize) -> Self {
  500. ctx.args.as_slice(n + 1)[n] as _
  501. }
  502. }
  503. };
  504. }
// Implement `FromRawTracepointArgs` for every primitive integer type that
// fits in a `__u64` argument slot.
unsafe_impl_from_raw_tracepoint_args!(u8);
unsafe_impl_from_raw_tracepoint_args!(u16);
unsafe_impl_from_raw_tracepoint_args!(u32);
unsafe_impl_from_raw_tracepoint_args!(u64);
unsafe_impl_from_raw_tracepoint_args!(i8);
unsafe_impl_from_raw_tracepoint_args!(i16);
unsafe_impl_from_raw_tracepoint_args!(i32);
unsafe_impl_from_raw_tracepoint_args!(i64);
unsafe_impl_from_raw_tracepoint_args!(usize);
unsafe_impl_from_raw_tracepoint_args!(isize);