hash_map.rs 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358
  1. use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
  2. use aya_ebpf_bindings::bindings::bpf_map_type::{
  3. BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH,
  4. };
  5. use aya_ebpf_cty::{c_long, c_void};
  6. use crate::{
  7. bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_HASH},
  8. helpers::{bpf_map_delete_elem, bpf_map_lookup_elem, bpf_map_update_elem},
  9. maps::PinningType,
  10. };
/// An eBPF hash map (`BPF_MAP_TYPE_HASH`) with keys of type `K` and values
/// of type `V`.
#[repr(transparent)]
pub struct HashMap<K, V> {
    // Map definition consumed by the eBPF loader. Wrapped in `UnsafeCell`
    // because the BPF helper functions take a `*mut` pointer to it even
    // though the map is only ever accessed through `&self`.
    def: UnsafeCell<bpf_map_def>,
    // Zero-sized markers tying the key and value types to this map.
    _k: PhantomData<K>,
    _v: PhantomData<V>,
}

// SAFETY: NOTE(review): sharing the map across threads only exposes the
// `bpf_map_def` pointer to kernel helpers; the `K: Sync, V: Sync` bounds
// mirror shared access to keys/values. The per-CPU map types below omit
// these bounds — confirm which convention is intended.
unsafe impl<K: Sync, V: Sync> Sync for HashMap<K, V> {}
impl<K, V> HashMap<K, V> {
    /// Creates a `HashMap` holding at most `max_entries` entries, created
    /// with the given map `flags` and no pinning.
    pub const fn with_max_entries(max_entries: u32, flags: u32) -> HashMap<K, V> {
        HashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_HASH,
                max_entries,
                flags,
                PinningType::None,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    /// Like [`HashMap::with_max_entries`], but the map is pinned by name
    /// (`PinningType::ByName`).
    pub const fn pinned(max_entries: u32, flags: u32) -> HashMap<K, V> {
        HashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_HASH,
                max_entries,
                flags,
                PinningType::ByName,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    /// Retrieves the value associated with `key` from the map.
    ///
    /// # Safety
    ///
    /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
    /// make guarantees on the atomicity of `insert` or `remove`, and any
    /// element removed from the map might get aliased by another element in
    /// the map, causing garbage to be read, or corruption in case of writes.
    #[inline]
    pub unsafe fn get(&self, key: &K) -> Option<&V> {
        get(self.def.get(), key)
    }

    /// Retrieves a raw const pointer to the value associated with `key`.
    ///
    /// The same caveat as `get` applies, but this returns a raw pointer and
    /// it's up to the caller to decide whether it's safe to dereference the
    /// pointer or not.
    #[inline]
    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.def.get(), key)
    }

    /// Retrieves a raw mutable pointer to the value associated with `key`.
    ///
    /// The same caveat as `get` applies, and additionally care should be
    /// taken to avoid concurrent writes; it's up to the caller to decide
    /// whether it's safe to dereference the pointer or not.
    #[inline]
    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.def.get(), key)
    }

    /// Inserts or updates the value associated with `key`; `flags` is passed
    /// through to `bpf_map_update_elem`. On failure, returns the helper's
    /// raw (non-zero) return code.
    #[inline]
    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.def.get(), key, value, flags)
    }

    /// Removes the entry associated with `key`. On failure, returns the raw
    /// (non-zero) return code of `bpf_map_delete_elem`.
    #[inline]
    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.def.get(), key)
    }
}
/// An eBPF LRU hash map (`BPF_MAP_TYPE_LRU_HASH`) with keys of type `K` and
/// values of type `V`.
#[repr(transparent)]
pub struct LruHashMap<K, V> {
    // Map definition consumed by the eBPF loader; `UnsafeCell` because the
    // BPF helpers take a `*mut` pointer to it through `&self`.
    def: UnsafeCell<bpf_map_def>,
    // Zero-sized markers tying the key and value types to this map.
    _k: PhantomData<K>,
    _v: PhantomData<V>,
}

// SAFETY: NOTE(review): same reasoning as `HashMap` — only the `bpf_map_def`
// pointer is handed to kernel helpers; bounds mirror shared key/value access.
unsafe impl<K: Sync, V: Sync> Sync for LruHashMap<K, V> {}
impl<K, V> LruHashMap<K, V> {
    /// Creates an `LruHashMap` holding at most `max_entries` entries, created
    /// with the given map `flags` and no pinning.
    pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
        LruHashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_LRU_HASH,
                max_entries,
                flags,
                PinningType::None,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    /// Like [`LruHashMap::with_max_entries`], but the map is pinned by name
    /// (`PinningType::ByName`).
    pub const fn pinned(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
        LruHashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_LRU_HASH,
                max_entries,
                flags,
                PinningType::ByName,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    /// Retrieves the value associated with `key` from the map.
    ///
    /// # Safety
    ///
    /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
    /// make guarantees on the atomicity of `insert` or `remove`, and any
    /// element removed from the map might get aliased by another element in
    /// the map, causing garbage to be read, or corruption in case of writes.
    #[inline]
    pub unsafe fn get(&self, key: &K) -> Option<&V> {
        get(self.def.get(), key)
    }

    /// Retrieves a raw const pointer to the value associated with `key`.
    ///
    /// The same caveat as `get` applies, but this returns a raw pointer and
    /// it's up to the caller to decide whether it's safe to dereference the
    /// pointer or not.
    #[inline]
    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.def.get(), key)
    }

    /// Retrieves a raw mutable pointer to the value associated with `key`.
    ///
    /// The same caveat as `get` applies, and additionally care should be
    /// taken to avoid concurrent writes; it's up to the caller to decide
    /// whether it's safe to dereference the pointer or not.
    #[inline]
    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.def.get(), key)
    }

    /// Inserts or updates the value associated with `key`; `flags` is passed
    /// through to `bpf_map_update_elem`. On failure, returns the helper's
    /// raw (non-zero) return code.
    #[inline]
    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.def.get(), key, value, flags)
    }

    /// Removes the entry associated with `key`. On failure, returns the raw
    /// (non-zero) return code of `bpf_map_delete_elem`.
    #[inline]
    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.def.get(), key)
    }
}
/// An eBPF per-CPU hash map (`BPF_MAP_TYPE_PERCPU_HASH`) with keys of type
/// `K` and values of type `V`.
#[repr(transparent)]
pub struct PerCpuHashMap<K, V> {
    // Map definition consumed by the eBPF loader; `UnsafeCell` because the
    // BPF helpers take a `*mut` pointer to it through `&self`.
    def: UnsafeCell<bpf_map_def>,
    // Zero-sized markers tying the key and value types to this map.
    _k: PhantomData<K>,
    _v: PhantomData<V>,
}

// NOTE(review): unlike `HashMap`/`LruHashMap`, this impl has no
// `K: Sync, V: Sync` bounds — presumably intentional for per-CPU storage,
// but confirm the asymmetry is deliberate.
unsafe impl<K, V> Sync for PerCpuHashMap<K, V> {}
impl<K, V> PerCpuHashMap<K, V> {
    /// Creates a `PerCpuHashMap` holding at most `max_entries` entries,
    /// created with the given map `flags` and no pinning.
    pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
        PerCpuHashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_PERCPU_HASH,
                max_entries,
                flags,
                PinningType::None,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    /// Like [`PerCpuHashMap::with_max_entries`], but the map is pinned by
    /// name (`PinningType::ByName`).
    pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
        PerCpuHashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_PERCPU_HASH,
                max_entries,
                flags,
                PinningType::ByName,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    /// Retrieves the value associated with `key` from the map.
    ///
    /// # Safety
    ///
    /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
    /// make guarantees on the atomicity of `insert` or `remove`, and any
    /// element removed from the map might get aliased by another element in
    /// the map, causing garbage to be read, or corruption in case of writes.
    #[inline]
    pub unsafe fn get(&self, key: &K) -> Option<&V> {
        get(self.def.get(), key)
    }

    /// Retrieves a raw const pointer to the value associated with `key`.
    ///
    /// The same caveat as `get` applies, but this returns a raw pointer and
    /// it's up to the caller to decide whether it's safe to dereference the
    /// pointer or not.
    #[inline]
    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.def.get(), key)
    }

    /// Retrieves a raw mutable pointer to the value associated with `key`.
    ///
    /// The same caveat as `get` applies, and additionally care should be
    /// taken to avoid concurrent writes; it's up to the caller to decide
    /// whether it's safe to dereference the pointer or not.
    #[inline]
    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.def.get(), key)
    }

    /// Inserts or updates the value associated with `key`; `flags` is passed
    /// through to `bpf_map_update_elem`. On failure, returns the helper's
    /// raw (non-zero) return code.
    #[inline]
    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.def.get(), key, value, flags)
    }

    /// Removes the entry associated with `key`. On failure, returns the raw
    /// (non-zero) return code of `bpf_map_delete_elem`.
    #[inline]
    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.def.get(), key)
    }
}
/// An eBPF per-CPU LRU hash map (`BPF_MAP_TYPE_LRU_PERCPU_HASH`) with keys
/// of type `K` and values of type `V`.
#[repr(transparent)]
pub struct LruPerCpuHashMap<K, V> {
    // Map definition consumed by the eBPF loader; `UnsafeCell` because the
    // BPF helpers take a `*mut` pointer to it through `&self`.
    def: UnsafeCell<bpf_map_def>,
    // Zero-sized markers tying the key and value types to this map.
    _k: PhantomData<K>,
    _v: PhantomData<V>,
}

// NOTE(review): like `PerCpuHashMap`, this impl omits the `K: Sync, V: Sync`
// bounds that `HashMap`/`LruHashMap` carry — confirm the asymmetry is
// deliberate.
unsafe impl<K, V> Sync for LruPerCpuHashMap<K, V> {}
impl<K, V> LruPerCpuHashMap<K, V> {
    /// Creates an `LruPerCpuHashMap` holding at most `max_entries` entries,
    /// created with the given map `flags` and no pinning.
    pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
        LruPerCpuHashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_LRU_PERCPU_HASH,
                max_entries,
                flags,
                PinningType::None,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    /// Like [`LruPerCpuHashMap::with_max_entries`], but the map is pinned by
    /// name (`PinningType::ByName`).
    pub const fn pinned(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
        LruPerCpuHashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_LRU_PERCPU_HASH,
                max_entries,
                flags,
                PinningType::ByName,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    /// Retrieves the value associated with `key` from the map.
    ///
    /// # Safety
    ///
    /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
    /// make guarantees on the atomicity of `insert` or `remove`, and any
    /// element removed from the map might get aliased by another element in
    /// the map, causing garbage to be read, or corruption in case of writes.
    #[inline]
    pub unsafe fn get(&self, key: &K) -> Option<&V> {
        get(self.def.get(), key)
    }

    /// Retrieves a raw const pointer to the value associated with `key`.
    ///
    /// The same caveat as `get` applies, but this returns a raw pointer and
    /// it's up to the caller to decide whether it's safe to dereference the
    /// pointer or not.
    #[inline]
    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.def.get(), key)
    }

    /// Retrieves a raw mutable pointer to the value associated with `key`.
    ///
    /// The same caveat as `get` applies, and additionally care should be
    /// taken to avoid concurrent writes; it's up to the caller to decide
    /// whether it's safe to dereference the pointer or not.
    #[inline]
    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.def.get(), key)
    }

    /// Inserts or updates the value associated with `key`; `flags` is passed
    /// through to `bpf_map_update_elem`. On failure, returns the helper's
    /// raw (non-zero) return code.
    #[inline]
    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.def.get(), key, value, flags)
    }

    /// Removes the entry associated with `key`. On failure, returns the raw
    /// (non-zero) return code of `bpf_map_delete_elem`.
    #[inline]
    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.def.get(), key)
    }
}
  271. const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType) -> bpf_map_def {
  272. bpf_map_def {
  273. type_: ty,
  274. key_size: mem::size_of::<K>() as u32,
  275. value_size: mem::size_of::<V>() as u32,
  276. max_entries,
  277. map_flags: flags,
  278. id: 0,
  279. pinning: pin as u32,
  280. }
  281. }
  282. #[inline]
  283. fn get_ptr_mut<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*mut V> {
  284. unsafe {
  285. let value = bpf_map_lookup_elem(def as *mut _, key as *const _ as *const c_void);
  286. // FIXME: alignment
  287. NonNull::new(value as *mut V).map(|p| p.as_ptr())
  288. }
  289. }
  290. #[inline]
  291. fn get_ptr<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*const V> {
  292. get_ptr_mut(def, key).map(|p| p as *const V)
  293. }
  294. #[inline]
  295. unsafe fn get<'a, K, V>(def: *mut bpf_map_def, key: &K) -> Option<&'a V> {
  296. get_ptr(def, key).map(|p| &*p)
  297. }
  298. #[inline]
  299. fn insert<K, V>(def: *mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
  300. let ret = unsafe {
  301. bpf_map_update_elem(
  302. def as *mut _,
  303. key as *const _ as *const _,
  304. value as *const _ as *const _,
  305. flags,
  306. )
  307. };
  308. (ret == 0).then_some(()).ok_or(ret)
  309. }
  310. #[inline]
  311. fn remove<K>(def: *mut bpf_map_def, key: &K) -> Result<(), c_long> {
  312. let ret = unsafe { bpf_map_delete_elem(def as *mut _, key as *const _ as *const c_void) };
  313. (ret == 0).then_some(()).ok_or(ret)
  314. }