//! eBPF data structures used to exchange data with eBPF programs.
//!
//! The eBPF platform provides data structures - maps in eBPF speak - that can be used by eBPF
//! programs and user-space to exchange data. When you call
//! [`Bpf::load_file`](crate::Bpf::load_file) or [`Bpf::load`](crate::Bpf::load), all the maps
//! defined in the code get initialized and can then be accessed using
//! [`Bpf::map`](crate::Bpf::map) and [`Bpf::map_mut`](crate::Bpf::map_mut).
//!
//! # Concrete map types
//!
//! The eBPF platform provides many map types, each supporting different operations.
//! [`Bpf::map`](crate::Bpf::map) and [`Bpf::map_mut`](crate::Bpf::map_mut) always return the
//! opaque [`MapRef`] and [`MapRefMut`] types respectively. Those two types can be converted to
//! *concrete map types* using the [`TryFrom`](std::convert::TryFrom) trait. For example:
//!
//! ```no_run
//! # let bpf = aya::Bpf::load(&[], None)?;
//! use aya::maps::HashMap;
//! use std::convert::TryFrom;
//!
//! const CONFIG_KEY_NUM_RETRIES: u8 = 1;
//!
//! // HashMap::try_from() converts MapRefMut to HashMap. It will fail if CONFIG is not an eBPF
//! // hash map.
//! let mut hm = HashMap::try_from(bpf.map_mut("CONFIG")?)?;
//! hm.insert(CONFIG_KEY_NUM_RETRIES, 3, 0 /* flags */);
//! # Ok::<(), aya::BpfError>(())
//! ```
//!
//! The code above uses `HashMap`, but all the concrete map types implement the
//! `TryFrom` trait.

use std::{convert::TryFrom, ffi::CString, io, mem, ops::Deref, os::unix::io::RawFd, ptr};

use thiserror::Error;

use crate::{
    generated::bpf_map_type,
    obj,
    sys::{bpf_create_map, bpf_map_get_next_key},
    util::nr_cpus,
    Pod,
};

pub mod array;
pub mod hash_map;
mod map_lock;
pub mod perf;

pub use array::{Array, ProgramArray};
pub use hash_map::{HashMap, PerCpuHashMap};
pub use map_lock::*;
pub use perf::PerfEventArray;

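/// Errors raised when creating or interacting with maps.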
#[derive(Error, Debug)]
pub enum MapError {
    #[error("map `{name}` not found")]
    MapNotFound { name: String },

    #[error("invalid map type {map_type}")]
    InvalidMapType { map_type: u32 },

    #[error("invalid map name `{name}`")]
    InvalidName { name: String },

    #[error("the map `{name}` has not been created")]
    NotCreated { name: String },

    #[error("the map `{name}` has already been created")]
    AlreadyCreated { name: String },

    #[error("failed to create map `{name}`: {code}")]
    CreateError {
        name: String,
        code: i64,
        io_error: io::Error,
    },

    #[error("invalid key size {size}, expected {expected}")]
    InvalidKeySize { size: usize, expected: usize },

    #[error("invalid value size {size}, expected {expected}")]
    InvalidValueSize { size: usize, expected: usize },

    #[error("the index is {index} but `max_entries` is {max_entries}")]
    OutOfBounds { index: u32, max_entries: u32 },

    #[error("key not found")]
    KeyNotFound,

    #[error("the program is not loaded")]
    ProgramNotLoaded,

    #[error("the `{call}` syscall failed with code {code} io_error {io_error}")]
    SyscallError {
        call: String,
        code: i64,
        io_error: io::Error,
    },

    #[error("map `{name}` is borrowed mutably")]
    BorrowError { name: String },

    #[error("map `{name}` is already borrowed")]
    BorrowMutError { name: String },
}

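/// A generic handle to a map defined in the loaded eBPF object.
///
/// The `obj` field is populated when the map is parsed from the object file; the kernel-side
/// map is created with [`Map::create`], which fills in the `fd` field.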
#[derive(Debug)]
pub struct Map {
    pub(crate) obj: obj::Map,
    pub(crate) fd: Option<RawFd>,
}

impl Map {
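    /// Creates the map in the kernel, returning its file descriptor.
    ///
    /// Fails with [`MapError::AlreadyCreated`] if the map has already been created, and with
    /// [`MapError::CreateError`] if the underlying `bpf_create_map` call fails.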
    pub fn create(&mut self) -> Result<RawFd, MapError> {
        let name = self.obj.name.clone();
        if self.fd.is_some() {
            return Err(MapError::AlreadyCreated { name: name.clone() });
        }

        let c_name =
            CString::new(name.clone()).map_err(|_| MapError::InvalidName { name: name.clone() })?;

        let fd = bpf_create_map(&c_name, &self.obj.def).map_err(|(code, io_error)| {
            MapError::CreateError {
                name,
                code,
                io_error,
            }
        })? as RawFd;

        self.fd = Some(fd);

        Ok(fd)
    }

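    /// Returns the name of the map, as defined in the eBPF code.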
    pub fn name(&self) -> &str {
        &self.obj.name
    }

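    /// Returns the type of the map, or [`MapError::InvalidMapType`] if the type in the map
    /// definition is not a known [`bpf_map_type`].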
    pub fn map_type(&self) -> Result<bpf_map_type, MapError> {
        bpf_map_type::try_from(self.obj.def.map_type)
    }

    pub(crate) fn fd_or_err(&self) -> Result<RawFd, MapError> {
        self.fd.ok_or_else(|| MapError::NotCreated {
            name: self.obj.name.clone(),
        })
    }
}

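/// Trait implemented by the concrete map types that can be iterated with [`MapKeys`] and
/// [`MapIter`].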
pub(crate) trait IterableMap<K: Pod, V> {
    fn fd(&self) -> Result<RawFd, MapError>;
    unsafe fn get(&self, key: &K) -> Result<V, MapError>;
}

/// Iterator returned by `map.keys()`.
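///
/// # Example
///
/// A minimal sketch of iterating over keys from user-space; it assumes the loaded program
/// defines an eBPF hash map named `CONFIG` with `u8` keys and `u32` values.
///
/// ```no_run
/// # let bpf = aya::Bpf::load(&[], None)?;
/// use aya::maps::HashMap;
/// use std::convert::TryFrom;
///
/// let config = HashMap::<_, u8, u32>::try_from(bpf.map("CONFIG")?)?;
/// for key in config.keys() {
///     let key = key?;
///     println!("config key: {}", key);
/// }
/// # Ok::<(), aya::BpfError>(())
/// ```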
pub struct MapKeys<'coll, K: Pod, V> {
    map: &'coll dyn IterableMap<K, V>,
    err: bool,
    key: Option<K>,
}

impl<'coll, K: Pod, V> MapKeys<'coll, K, V> {
    fn new(map: &'coll dyn IterableMap<K, V>) -> MapKeys<'coll, K, V> {
        MapKeys {
            map,
            err: false,
            key: None,
        }
    }
}

impl<K: Pod, V> Iterator for MapKeys<'_, K, V> {
    type Item = Result<K, MapError>;

    fn next(&mut self) -> Option<Result<K, MapError>> {
        if self.err {
            return None;
        }

        let fd = match self.map.fd() {
            Ok(fd) => fd,
            Err(e) => {
                self.err = true;
                return Some(Err(e));
            }
        };

        match bpf_map_get_next_key(fd, self.key.as_ref()) {
            Ok(Some(key)) => {
                self.key = Some(key);
                return Some(Ok(key));
            }
            Ok(None) => {
                self.key = None;
                return None;
            }
            Err((code, io_error)) => {
                self.err = true;
                return Some(Err(MapError::SyscallError {
                    call: "bpf_map_get_next_key".to_owned(),
                    code,
                    io_error,
                }));
            }
        }
    }
}

/// Iterator returned by `map.iter()`.
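///
/// # Example
///
/// A minimal sketch of iterating over key/value pairs; like the example above, it assumes an
/// eBPF hash map named `CONFIG` with `u8` keys and `u32` values.
///
/// ```no_run
/// # let bpf = aya::Bpf::load(&[], None)?;
/// use aya::maps::HashMap;
/// use std::convert::TryFrom;
///
/// let config = HashMap::<_, u8, u32>::try_from(bpf.map("CONFIG")?)?;
/// for entry in config.iter() {
///     let (key, value) = entry?;
///     println!("{}: {}", key, value);
/// }
/// # Ok::<(), aya::BpfError>(())
/// ```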
pub struct MapIter<'coll, K: Pod, V> {
    inner: MapKeys<'coll, K, V>,
}

impl<'coll, K: Pod, V> MapIter<'coll, K, V> {
    fn new(map: &'coll dyn IterableMap<K, V>) -> MapIter<'coll, K, V> {
        MapIter {
            inner: MapKeys::new(map),
        }
    }
}

impl<K: Pod, V> Iterator for MapIter<'_, K, V> {
    type Item = Result<(K, V), MapError>;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            match self.inner.next() {
                Some(Ok(key)) => {
                    let value = unsafe { self.inner.map.get(&key) };
                    match value {
                        Ok(value) => return Some(Ok((key, value))),
                        Err(MapError::KeyNotFound) => continue,
                        Err(e) => return Some(Err(e)),
                    }
                }
                Some(Err(e)) => return Some(Err(e)),
                None => return None,
            }
        }
    }
}

impl TryFrom<u32> for bpf_map_type {
    type Error = MapError;

    fn try_from(map_type: u32) -> Result<Self, Self::Error> {
        use bpf_map_type::*;
        Ok(match map_type {
            x if x == BPF_MAP_TYPE_UNSPEC as u32 => BPF_MAP_TYPE_UNSPEC,
            x if x == BPF_MAP_TYPE_HASH as u32 => BPF_MAP_TYPE_HASH,
            x if x == BPF_MAP_TYPE_ARRAY as u32 => BPF_MAP_TYPE_ARRAY,
            x if x == BPF_MAP_TYPE_PROG_ARRAY as u32 => BPF_MAP_TYPE_PROG_ARRAY,
            x if x == BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32 => BPF_MAP_TYPE_PERF_EVENT_ARRAY,
            x if x == BPF_MAP_TYPE_PERCPU_HASH as u32 => BPF_MAP_TYPE_PERCPU_HASH,
            x if x == BPF_MAP_TYPE_PERCPU_ARRAY as u32 => BPF_MAP_TYPE_PERCPU_ARRAY,
            x if x == BPF_MAP_TYPE_STACK_TRACE as u32 => BPF_MAP_TYPE_STACK_TRACE,
            x if x == BPF_MAP_TYPE_CGROUP_ARRAY as u32 => BPF_MAP_TYPE_CGROUP_ARRAY,
            x if x == BPF_MAP_TYPE_LRU_HASH as u32 => BPF_MAP_TYPE_LRU_HASH,
            x if x == BPF_MAP_TYPE_LRU_PERCPU_HASH as u32 => BPF_MAP_TYPE_LRU_PERCPU_HASH,
            x if x == BPF_MAP_TYPE_LPM_TRIE as u32 => BPF_MAP_TYPE_LPM_TRIE,
            x if x == BPF_MAP_TYPE_ARRAY_OF_MAPS as u32 => BPF_MAP_TYPE_ARRAY_OF_MAPS,
            x if x == BPF_MAP_TYPE_HASH_OF_MAPS as u32 => BPF_MAP_TYPE_HASH_OF_MAPS,
            x if x == BPF_MAP_TYPE_DEVMAP as u32 => BPF_MAP_TYPE_DEVMAP,
            x if x == BPF_MAP_TYPE_SOCKMAP as u32 => BPF_MAP_TYPE_SOCKMAP,
            x if x == BPF_MAP_TYPE_CPUMAP as u32 => BPF_MAP_TYPE_CPUMAP,
            x if x == BPF_MAP_TYPE_XSKMAP as u32 => BPF_MAP_TYPE_XSKMAP,
            x if x == BPF_MAP_TYPE_SOCKHASH as u32 => BPF_MAP_TYPE_SOCKHASH,
            x if x == BPF_MAP_TYPE_CGROUP_STORAGE as u32 => BPF_MAP_TYPE_CGROUP_STORAGE,
            x if x == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY as u32 => BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
            x if x == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE as u32 => {
                BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
            }
            x if x == BPF_MAP_TYPE_QUEUE as u32 => BPF_MAP_TYPE_QUEUE,
            x if x == BPF_MAP_TYPE_STACK as u32 => BPF_MAP_TYPE_STACK,
            x if x == BPF_MAP_TYPE_SK_STORAGE as u32 => BPF_MAP_TYPE_SK_STORAGE,
            x if x == BPF_MAP_TYPE_DEVMAP_HASH as u32 => BPF_MAP_TYPE_DEVMAP_HASH,
            x if x == BPF_MAP_TYPE_STRUCT_OPS as u32 => BPF_MAP_TYPE_STRUCT_OPS,
            x if x == BPF_MAP_TYPE_RINGBUF as u32 => BPF_MAP_TYPE_RINGBUF,
            x if x == BPF_MAP_TYPE_INODE_STORAGE as u32 => BPF_MAP_TYPE_INODE_STORAGE,
            x if x == BPF_MAP_TYPE_TASK_STORAGE as u32 => BPF_MAP_TYPE_TASK_STORAGE,
            _ => return Err(MapError::InvalidMapType { map_type }),
        })
    }
}

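/// A chunk of memory used to exchange per-CPU values with the kernel.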
pub struct PerCpuKernelMem {
    bytes: Vec<u8>,
}

impl PerCpuKernelMem {
    pub(crate) fn as_ptr(&self) -> *const u8 {
        self.bytes.as_ptr()
    }

    pub(crate) fn as_mut_ptr(&mut self) -> *mut u8 {
        self.bytes.as_mut_ptr()
    }
}

/// A slice of per-CPU values.
///
/// Used by maps that implement per-CPU storage like [`PerCpuHashMap`].
///
/// # Example
///
/// ```no_run
/// # #[derive(thiserror::Error, Debug)]
/// # enum Error {
/// #     #[error(transparent)]
/// #     IO(#[from] std::io::Error),
/// #     #[error(transparent)]
/// #     Map(#[from] aya::maps::MapError),
/// #     #[error(transparent)]
/// #     Bpf(#[from] aya::BpfError)
/// # }
/// # let bpf = aya::Bpf::load(&[], None)?;
/// use aya::maps::PerCpuValues;
/// use aya::util::nr_cpus;
/// use std::convert::TryFrom;
///
/// let values = PerCpuValues::try_from(vec![42u32; nr_cpus()?])?;
/// # Ok::<(), Error>(())
/// ```
#[derive(Debug)]
pub struct PerCpuValues<T: Pod> {
    values: Box<[T]>,
}

impl<T: Pod> TryFrom<Vec<T>> for PerCpuValues<T> {
    type Error = io::Error;

    fn try_from(values: Vec<T>) -> Result<Self, Self::Error> {
        let nr_cpus = nr_cpus()?;
        if values.len() != nr_cpus {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                format!(
                    "invalid number of values ({}), expected nr_cpus: {}",
                    values.len(),
                    nr_cpus
                ),
            ));
        }
        Ok(PerCpuValues {
            values: values.into_boxed_slice(),
        })
    }
}

impl<T: Pod> PerCpuValues<T> {
    pub(crate) fn alloc_kernel_mem() -> Result<PerCpuKernelMem, io::Error> {
        // per-CPU values are laid out in 8-byte aligned slots, so round the value size up to
        // the next multiple of 8
        let value_size = (mem::size_of::<T>() + 7) & !7;
        Ok(PerCpuKernelMem {
            bytes: vec![0u8; nr_cpus()? * value_size],
        })
    }

    pub(crate) unsafe fn from_kernel_mem(mem: PerCpuKernelMem) -> PerCpuValues<T> {
        let mem_ptr = mem.bytes.as_ptr() as usize;
        let value_size = (mem::size_of::<T>() + 7) & !7;
        let mut values = Vec::new();
        let mut offset = 0;
        // read one (possibly unaligned) value per CPU slot
        while offset < mem.bytes.len() {
            values.push(ptr::read_unaligned((mem_ptr + offset) as *const _));
            offset += value_size;
        }

        PerCpuValues {
            values: values.into_boxed_slice(),
        }
    }

    pub(crate) fn into_kernel_mem(&self) -> Result<PerCpuKernelMem, io::Error> {
        let mut mem = PerCpuValues::<T>::alloc_kernel_mem()?;
        let mem_ptr = mem.as_mut_ptr() as usize;
        let value_size = (mem::size_of::<T>() + 7) & !7;
        // write each value into its 8-byte aligned slot
        for i in 0..self.values.len() {
            unsafe { ptr::write_unaligned((mem_ptr + i * value_size) as *mut _, self.values[i]) };
        }

        Ok(mem)
    }
}

impl<T: Pod> Deref for PerCpuValues<T> {
    type Target = Box<[T]>;

    fn deref(&self) -> &Self::Target {
        &self.values
    }
}

#[cfg(test)]
mod tests {
    use libc::EFAULT;

    use crate::{
        bpf_map_def,
        generated::{bpf_cmd, bpf_map_type::BPF_MAP_TYPE_HASH},
        sys::{override_syscall, Syscall},
    };

    use super::*;

    fn new_obj_map(name: &str) -> obj::Map {
        obj::Map {
            name: name.to_string(),
            def: bpf_map_def {
                map_type: BPF_MAP_TYPE_HASH as u32,
                key_size: 4,
                value_size: 4,
                max_entries: 1024,
                map_flags: 0,
            },
            section_index: 0,
            data: Vec::new(),
        }
    }

    fn new_map(name: &str) -> Map {
        Map {
            obj: new_obj_map(name),
            fd: None,
        }
    }

    #[test]
    fn test_create() {
        override_syscall(|call| match call {
            Syscall::Bpf {
                cmd: bpf_cmd::BPF_MAP_CREATE,
                ..
            } => Ok(42),
            _ => Err((-1, io::Error::from_raw_os_error(EFAULT))),
        });

        let mut map = new_map("foo");
        assert!(matches!(map.create(), Ok(42)));
        assert_eq!(map.fd, Some(42));
        assert!(matches!(map.create(), Err(MapError::AlreadyCreated { .. })));
    }

    #[test]
    fn test_create_failed() {
        override_syscall(|_| {
            return Err((-42, io::Error::from_raw_os_error(EFAULT)));
        });

        let mut map = new_map("foo");
        let ret = map.create();
        assert!(matches!(ret, Err(MapError::CreateError { .. })));
        if let Err(MapError::CreateError {
            name,
            code,
            io_error,
        }) = ret
        {
            assert_eq!(name, "foo");
            assert_eq!(code, -42);
            assert_eq!(io_error.raw_os_error(), Some(EFAULT));
        }
        assert_eq!(map.fd, None);
    }
}