4
0

lib.rs 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995
  1. // Derived from uBPF <https://github.com/iovisor/ubpf>
  2. // Copyright 2015 Big Switch Networks, Inc
  3. // (uBPF: VM architecture, parts of the interpreter, originally in C)
  4. // Copyright 2016 Quentin Monnet <quentin.monnet@6wind.com>
  5. // (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for helpers)
  6. //
  7. // Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
  8. // the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
  9. // copied, modified, or distributed except according to those terms.
  10. //! Virtual machine and JIT compiler for eBPF programs.
  11. #![doc(html_logo_url = "https://raw.githubusercontent.com/qmonnet/rbpf/master/rbpf.png",
  12. html_favicon_url = "https://raw.githubusercontent.com/qmonnet/rbpf/master/rbpf.ico")]
  13. // One day we'll uncomment this!
  14. // #![warn(missing_docs)]
  15. use std::u32;
  16. use std::collections::HashMap;
  17. extern crate libc;
  18. pub mod ebpf;
  19. pub mod helpers;
  20. mod verifier;
  21. mod jit;
// A metadata buffer with two offset indications. It can be used in one kind of eBPF VM to simulate
// the use of a metadata buffer each time the program is executed, without the user having to
// actually handle it. The offsets are used to tell the VM where in the buffer the pointers to
// packet data start and end should be stored each time the program is run on a new packet.
struct MetaBuff {
    // Offset (in bytes) in `buffer` where the pointer to the start of packet data is written
    // before each run.
    data_offset: usize,
    // Offset (in bytes) in `buffer` where the pointer to the end of packet data is written
    // before each run.
    data_end_offset: usize,
    // Backing storage for the simulated metadata buffer, handed to the program as its context.
    buffer: std::vec::Vec<u8>,
}
/// A virtual machine to run eBPF program. This kind of VM is used for programs expecting to work
/// on a metadata buffer containing pointers to packet data.
///
/// # Examples
///
/// ```
/// let prog = vec![
///     0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff at offset 8 into R1.
///     0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldhx r1[2], r0
///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
/// ];
/// let mut mem = vec![
///     0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
/// ];
///
/// // Just for the example we create our metadata buffer from scratch, and we store the pointers
/// // to packet data start and end in it.
/// let mut mbuff = vec![0u8; 32];
/// unsafe {
///     let mut data = mbuff.as_ptr().offset(8) as *mut u64;
///     let mut data_end = mbuff.as_ptr().offset(24) as *mut u64;
///     *data = mem.as_ptr() as u64;
///     *data_end = mem.as_ptr() as u64 + mem.len() as u64;
/// }
///
/// // Instantiate a VM.
/// let mut vm = rbpf::EbpfVmMbuff::new(&prog);
///
/// // Provide both a reference to the packet data, and to the metadata buffer.
/// let res = vm.prog_exec(&mut mem, &mut mbuff);
/// assert_eq!(res, 0x2211);
/// ```
pub struct EbpfVmMbuff<'a> {
    // The loaded (and verified) eBPF bytecode; borrowed from the caller for the VM's lifetime.
    prog: &'a std::vec::Vec<u8>,
    // JIT-compiled entry point. Until `jit_compile()` is called this holds a stub that panics
    // (see `no_jit` in `new()`).
    jit: (fn (*mut u8, usize, *mut u8, usize, usize, usize) -> u64),
    // Helper functions callable from the program via the CALL instruction, keyed by immediate.
    helpers: HashMap<u32, fn (u64, u64, u64, u64, u64) -> u64>,
}
  68. // Runs on packet data, with a metadata buffer
// Runs on packet data, with a metadata buffer
impl<'a> EbpfVmMbuff<'a> {

    /// Create a new virtual machine instance, and load an eBPF program into that instance.
    /// When attempting to load the program, it passes through a simple verifier.
    ///
    /// # Panics
    ///
    /// The simple verifier may panic if it finds errors in the eBPF program at load time.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = vec![
    ///     0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into R1.
    ///     0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldhx r1[2], r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// // Instantiate a VM.
    /// let mut vm = rbpf::EbpfVmMbuff::new(&prog);
    /// ```
    pub fn new(prog: &'a std::vec::Vec<u8>) -> EbpfVmMbuff<'a> {
        // The verifier panics on structural errors; passing it does not make the program
        // memory-safe at runtime (runtime accesses are checked in `check_mem`).
        verifier::check(prog);

        // Stub stored in `self.jit` until `jit_compile()` runs, so that calling
        // `prog_exec_jit()` on a non-compiled program fails with a clear message.
        #[allow(unused_variables)]
        fn no_jit(foo: *mut u8, foo_len: usize, bar: *mut u8, bar_len: usize,
                  nodata_offset: usize, nodata_end_offset: usize) -> u64 {
            panic!("Error: program has not been JIT-compiled");
        }

        EbpfVmMbuff {
            prog: prog,
            jit: no_jit,
            helpers: HashMap::new(),
        }
    }

    /// Load a new eBPF program into the virtual machine instance.
    ///
    /// # Panics
    ///
    /// The simple verifier may panic if it finds errors in the eBPF program at load time.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog1 = vec![
    ///     0xb7, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    /// let prog2 = vec![
    ///     0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into R1.
    ///     0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldhx r1[2], r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// // Instantiate a VM.
    /// let mut vm = rbpf::EbpfVmMbuff::new(&prog1);
    /// vm.set_prog(&prog2);
    /// ```
    pub fn set_prog(&mut self, prog: &'a std::vec::Vec<u8>) {
        // NOTE(review): `self.jit` is not reset here, so a previously JIT-compiled function
        // for the *old* program remains callable via `prog_exec_jit()` — confirm intended.
        verifier::check(prog);
        self.prog = prog;
    }

    /// Register a built-in or user-defined helper function in order to use it later from within
    /// the eBPF program. The helper is registered into a hashmap, so the `key` can be any `u32`.
    ///
    /// If using JIT-compiled eBPF programs, be sure to register all helpers before compiling the
    /// program. You should be able to change registered helpers after compiling, but not to add
    /// new ones (i.e. with new keys).
    ///
    /// # Examples
    ///
    /// ```
    /// use rbpf::helpers;
    ///
    /// // This program was compiled with clang, from a C program containing the following single
    /// // instruction: `return bpf_trace_printk("foo %c %c %c\n", 10, 1, 2, 3);`
    /// let prog = vec![
    ///     0x18, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load 0 as u64 into r1 (That would be
    ///     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // replaced by tc by the address of
    ///                                                     // the format string, in the .map
    ///                                                     // section of the ELF file).
    ///     0xb7, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, // mov r2, 10
    ///     0xb7, 0x03, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // mov r3, 1
    ///     0xb7, 0x04, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // mov r4, 2
    ///     0xb7, 0x05, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, // mov r5, 3
    ///     0x85, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, // call helper with key 6
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// // Instantiate a VM.
    /// let mut vm = rbpf::EbpfVmMbuff::new(&prog);
    ///
    /// // Register a helper.
    /// // On running the program this helper will print the content of registers r3, r4 and r5 to
    /// // standard output.
    /// vm.register_helper(6, helpers::bpf_trace_printf);
    /// ```
    pub fn register_helper(&mut self, key: u32, function: fn (u64, u64, u64, u64, u64) -> u64) {
        self.helpers.insert(key, function);
    }

    /// Execute the program loaded, with the given packet data and metadata buffer.
    ///
    /// If the program is made to be compatible with Linux kernel, it is expected to load the
    /// address of the beginning and of the end of the memory area used for packet data from the
    /// metadata buffer, at some appointed offsets. It is up to the user to ensure that these
    /// pointers are correctly stored in the buffer.
    ///
    /// # Panics
    ///
    /// This function is currently expected to panic if it encounters any error during the program
    /// execution, such as out of bounds accesses or division by zero attempts. This may be changed
    /// in the future (we could raise errors instead).
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = vec![
    ///     0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into R1.
    ///     0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldhx r1[2], r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    /// let mut mem = vec![
    ///     0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
    /// ];
    ///
    /// // Just for the example we create our metadata buffer from scratch, and we store the
    /// // pointers to packet data start and end in it.
    /// let mut mbuff = vec![0u8; 32];
    /// unsafe {
    ///     let mut data = mbuff.as_ptr().offset(8) as *mut u64;
    ///     let mut data_end = mbuff.as_ptr().offset(24) as *mut u64;
    ///     *data = mem.as_ptr() as u64;
    ///     *data_end = mem.as_ptr() as u64 + mem.len() as u64;
    /// }
    ///
    /// // Instantiate a VM.
    /// let mut vm = rbpf::EbpfVmMbuff::new(&prog);
    ///
    /// // Provide both a reference to the packet data, and to the metadata buffer.
    /// let res = vm.prog_exec(&mut mem, &mut mbuff);
    /// assert_eq!(res, 0x2211);
    /// ```
    pub fn prog_exec(&self, mem: &mut std::vec::Vec<u8>, mbuff: &'a mut std::vec::Vec<u8>) -> u64 {
        const U32MAX: u64 = u32::MAX as u64;

        // Per-run eBPF stack; R10 (read-only frame pointer) points one past its last byte.
        let stack = vec![0u8;ebpf::STACK_SIZE];

        // R1 points to beginning of memory area, R10 to stack
        let mut reg: [u64;11] = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, stack.as_ptr() as u64 + stack.len() as u64
        ];
        // Context argument (R1): the metadata buffer if one is provided, else raw packet data.
        if mbuff.len() > 0 {
            reg[1] = mbuff.as_ptr() as u64;
        }
        else if mem.len() > 0 {
            reg[1] = mem.as_ptr() as u64;
        }

        // Bounds-check closures: every load/store must fall entirely inside the mbuff, the
        // packet data, or the stack, otherwise `check_mem` panics.
        let check_mem_load = | addr: u64, len: usize, insn_ptr: usize | {
            EbpfVmMbuff::check_mem(addr, len, "load", insn_ptr, &mbuff, &mem, &stack);
        };
        let check_mem_store = | addr: u64, len: usize, insn_ptr: usize | {
            EbpfVmMbuff::check_mem(addr, len, "store", insn_ptr, &mbuff, &mem, &stack);
        };

        // Loop on instructions
        let mut insn_ptr:usize = 0;
        while insn_ptr * ebpf::INSN_SIZE < self.prog.len() {
            let insn = ebpf::get_insn(self.prog, insn_ptr);
            // Pre-increment: jump offsets below are relative to the *next* instruction,
            // matching eBPF semantics.
            insn_ptr += 1;
            let _dst = insn.dst as usize;
            let _src = insn.src as usize;

            match insn.opc {

                // BPF_LD class
                ebpf::LD_ABS_B   => unimplemented!(),
                ebpf::LD_ABS_H   => unimplemented!(),
                ebpf::LD_ABS_W   => unimplemented!(),
                ebpf::LD_ABS_DW  => unimplemented!(),
                ebpf::LD_IND_B   => unimplemented!(),
                ebpf::LD_IND_H   => unimplemented!(),
                ebpf::LD_IND_W   => unimplemented!(),
                ebpf::LD_IND_DW  => unimplemented!(),

                // BPF_LDX class
                // 16-byte instruction: the second half carries the upper 32 bits of the imm.
                ebpf::LD_DW_IMM  => {
                    let next_insn = ebpf::get_insn(self.prog, insn_ptr);
                    insn_ptr += 1;
                    // NOTE(review): `next_insn.imm as u64` sign-extends a negative imm before
                    // the shift-and-add, which can corrupt the low 32 bits — confirm whether
                    // `(next_insn.imm as u32) as u64` was intended.
                    reg[_dst] = ((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32);
                },
                ebpf::LD_B_REG   => reg[_dst] = unsafe {
                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u8;
                    check_mem_load(x as u64, 1, insn_ptr);
                    *x as u64
                },
                ebpf::LD_H_REG   => reg[_dst] = unsafe {
                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u16;
                    check_mem_load(x as u64, 2, insn_ptr);
                    *x as u64
                },
                ebpf::LD_W_REG   => reg[_dst] = unsafe {
                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u32;
                    check_mem_load(x as u64, 4, insn_ptr);
                    *x as u64
                },
                ebpf::LD_DW_REG  => reg[_dst] = unsafe {
                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u64;
                    check_mem_load(x as u64, 8, insn_ptr);
                    *x as u64
                },

                // BPF_ST class
                ebpf::ST_B_IMM   => unsafe {
                    let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
                    check_mem_store(x as u64, 1, insn_ptr);
                    *x = insn.imm as u8;
                },
                ebpf::ST_H_IMM   => unsafe {
                    let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
                    check_mem_store(x as u64, 2, insn_ptr);
                    *x = insn.imm as u16;
                },
                ebpf::ST_W_IMM   => unsafe {
                    let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
                    check_mem_store(x as u64, 4, insn_ptr);
                    *x = insn.imm as u32;
                },
                ebpf::ST_DW_IMM  => unsafe {
                    let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
                    check_mem_store(x as u64, 8, insn_ptr);
                    *x = insn.imm as u64;
                },

                // BPF_STX class
                ebpf::ST_B_REG   => unsafe {
                    let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
                    check_mem_store(x as u64, 1, insn_ptr);
                    *x = reg[_src] as u8;
                },
                ebpf::ST_H_REG   => unsafe {
                    let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
                    check_mem_store(x as u64, 2, insn_ptr);
                    *x = reg[_src] as u16;
                },
                ebpf::ST_W_REG   => unsafe {
                    let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
                    check_mem_store(x as u64, 4, insn_ptr);
                    *x = reg[_src] as u32;
                },
                ebpf::ST_DW_REG  => unsafe {
                    let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
                    check_mem_store(x as u64, 8, insn_ptr);
                    *x = reg[_src] as u64;
                },
                ebpf::ST_W_XADD  => unimplemented!(),
                ebpf::ST_DW_XADD => unimplemented!(),

                // BPF_ALU class
                // 32-bit ALU: operate on the low 32 bits, zero-extend the result into the
                // 64-bit register (the `as u32 ... as u64` round trips below).
                // TODO Check how overflow works in kernel. Should we &= U32MAX all src register value
                // before we do the operation?
                // Cf ((0x11 << 32) - (0x1 << 32)) as u32 VS ((0x11 << 32) as u32 - (0x1 << 32) as u32
                ebpf::ADD32_IMM  => reg[_dst] = (reg[_dst] as i32).wrapping_add(insn.imm)         as u64, //((reg[_dst] & U32MAX) + insn.imm  as u64) & U32MAX,
                ebpf::ADD32_REG  => reg[_dst] = (reg[_dst] as i32).wrapping_add(reg[_src] as i32) as u64, //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
                ebpf::SUB32_IMM  => reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm)         as u64,
                ebpf::SUB32_REG  => reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
                ebpf::MUL32_IMM  => reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm)         as u64,
                ebpf::MUL32_REG  => reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
                // NOTE(review): unlike DIV32_REG below, a zero immediate here panics with
                // Rust's built-in divide-by-zero message, not "Error: division by 0" — confirm
                // whether an explicit check was intended for consistency.
                ebpf::DIV32_IMM  => reg[_dst] = (reg[_dst] as u32 / insn.imm as u32)      as u64,
                ebpf::DIV32_REG  => {
                    if reg[_src] == 0 {
                        panic!("Error: division by 0");
                    }
                    reg[_dst] = (reg[_dst] as u32 / reg[_src] as u32) as u64;
                },
                ebpf::OR32_IMM   => reg[_dst] = (reg[_dst] as u32             | insn.imm  as u32) as u64,
                ebpf::OR32_REG   => reg[_dst] = (reg[_dst] as u32             | reg[_src] as u32) as u64,
                ebpf::AND32_IMM  => reg[_dst] = (reg[_dst] as u32             & insn.imm  as u32) as u64,
                ebpf::AND32_REG  => reg[_dst] = (reg[_dst] as u32             & reg[_src] as u32) as u64,
                ebpf::LSH32_IMM  => reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm  as u32) as u64,
                ebpf::LSH32_REG  => reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
                ebpf::RSH32_IMM  => reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm  as u32) as u64,
                ebpf::RSH32_REG  => reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
                // The extra `&= U32MAX` discards the sign-extension introduced by the i32 cast.
                ebpf::NEG32      => { reg[_dst] = (reg[_dst] as i32).wrapping_neg()       as u64; reg[_dst] &= U32MAX; },
                ebpf::MOD32_IMM  => reg[_dst] = (reg[_dst] as u32 % insn.imm  as u32)     as u64,
                ebpf::MOD32_REG  => {
                    if reg[_src] == 0 {
                        panic!("Error: division by 0");
                    }
                    reg[_dst] = (reg[_dst] as u32 % reg[_src] as u32) as u64;
                },
                ebpf::XOR32_IMM  => reg[_dst] = (reg[_dst] as u32             ^ insn.imm  as u32) as u64,
                ebpf::XOR32_REG  => reg[_dst] = (reg[_dst] as u32             ^ reg[_src] as u32) as u64,
                ebpf::MOV32_IMM  => reg[_dst] = insn.imm  as u64,
                ebpf::MOV32_REG  => reg[_dst] = (reg[_src] as u32) as u64,
                ebpf::ARSH32_IMM => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm  as u32) as u64; reg[_dst] &= U32MAX; },
                ebpf::ARSH32_REG => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
                // Byte-order conversions; imm selects the operand width (16/32/64 bits).
                ebpf::LE         => {
                    reg[_dst] = match insn.imm {
                        16 => (reg[_dst] as u16).to_le() as u64,
                        32 => (reg[_dst] as u32).to_le() as u64,
                        64 =>  reg[_dst].to_le(),
                        _  => unreachable!(),
                    };
                },
                ebpf::BE         => {
                    reg[_dst] = match insn.imm {
                        16 => (reg[_dst] as u16).to_be() as u64,
                        32 => (reg[_dst] as u32).to_be() as u64,
                        64 =>  reg[_dst].to_be(),
                        _  => unreachable!(),
                    };
                },

                // BPF_ALU64 class
                ebpf::ADD64_IMM  => reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
                ebpf::ADD64_REG  => reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
                ebpf::SUB64_IMM  => reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
                ebpf::SUB64_REG  => reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
                ebpf::MUL64_IMM  => reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
                ebpf::MUL64_REG  => reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
                // NOTE(review): no explicit zero check here either (cf. DIV32_IMM note above).
                ebpf::DIV64_IMM  => reg[_dst] /= insn.imm as u64,
                ebpf::DIV64_REG  => {
                    if reg[_src] == 0 {
                        panic!("Error: division by 0");
                    }
                    reg[_dst] /= reg[_src];
                },
                ebpf::OR64_IMM   => reg[_dst] |=  insn.imm as u64,
                ebpf::OR64_REG   => reg[_dst] |=  reg[_src],
                ebpf::AND64_IMM  => reg[_dst] &=  insn.imm as u64,
                ebpf::AND64_REG  => reg[_dst] &=  reg[_src],
                ebpf::LSH64_IMM  => reg[_dst] <<= insn.imm as u64,
                ebpf::LSH64_REG  => reg[_dst] <<= reg[_src],
                ebpf::RSH64_IMM  => reg[_dst] >>= insn.imm as u64,
                ebpf::RSH64_REG  => reg[_dst] >>= reg[_src],
                ebpf::NEG64      => reg[_dst] = -(reg[_dst] as i64) as u64,
                ebpf::MOD64_IMM  => reg[_dst] %= insn.imm as u64,
                ebpf::MOD64_REG  => {
                    if reg[_src] == 0 {
                        panic!("Error: division by 0");
                    }
                    reg[_dst] %= reg[_src];
                },
                ebpf::XOR64_IMM  => reg[_dst] ^= insn.imm as u64,
                ebpf::XOR64_REG  => reg[_dst] ^= reg[_src],
                ebpf::MOV64_IMM  => reg[_dst] =  insn.imm as u64,
                ebpf::MOV64_REG  => reg[_dst] =  reg[_src],
                ebpf::ARSH64_IMM => reg[_dst] = (reg[_dst] as i64 >> insn.imm)  as u64,
                ebpf::ARSH64_REG => reg[_dst] = (reg[_dst] as i64 >> reg[_src]) as u64,

                // BPF_JMP class
                // Offsets are signed and relative to the already-incremented insn_ptr.
                // NOTE(review): `insn_ptr as i16` truncates for programs longer than 32767
                // instructions — confirm whether a wider cast is needed.
                // TODO: check this actually works as expected for signed / unsigned ops
                ebpf::JA         =>                                            insn_ptr = (insn_ptr as i16 + insn.off) as usize,
                ebpf::JEQ_IMM    => if  reg[_dst] == insn.imm as u64         { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JEQ_REG    => if  reg[_dst] == reg[_src]               { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JGT_IMM    => if  reg[_dst] >  insn.imm as u64         { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JGT_REG    => if  reg[_dst] >  reg[_src]               { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JGE_IMM    => if  reg[_dst] >= insn.imm as u64         { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JGE_REG    => if  reg[_dst] >= reg[_src]               { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JSET_IMM   => if  reg[_dst] &  insn.imm as u64 != 0    { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JSET_REG   => if  reg[_dst] &  reg[_src]       != 0    { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JNE_IMM    => if  reg[_dst] != insn.imm as u64         { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JNE_REG    => if  reg[_dst] != reg[_src]               { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JSGT_IMM   => if  reg[_dst] as i64 >  insn.imm  as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JSGT_REG   => if  reg[_dst] as i64 >  reg[_src] as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JSGE_IMM   => if  reg[_dst] as i64 >= insn.imm  as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
                ebpf::JSGE_REG   => if  reg[_dst] as i64 >= reg[_src] as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },

                // Do not delegate the check to the verifier, since registered functions can be
                // changed after the program has been verified.
                ebpf::CALL       => if let Some(function) = self.helpers.get(&(insn.imm as u32)) {
                    reg[0] = function(reg[1], reg[2], reg[3], reg[4], reg[5]);
                } else {
                    panic!("Error: unknown helper function (id: {:#x})", insn.imm as u32);
                },
                ebpf::TAIL_CALL  => unimplemented!(),
                ebpf::EXIT       => return reg[0],

                _                => unreachable!()
            }
        }

        // Fell off the end of the program without hitting an EXIT instruction.
        return 0;
    }

    // Panics unless [addr, addr + len) lies entirely within one of the three regions the
    // program may touch: the metadata buffer, the packet data, or the VM stack.
    fn check_mem(addr: u64, len: usize, access_type: &str, insn_ptr: usize,
                 mbuff: &std::vec::Vec<u8>, mem: &std::vec::Vec<u8>, stack: &std::vec::Vec<u8>) {
        // WARNING: untested
        if mbuff.as_ptr() as u64 <= addr && addr + len as u64 <= mbuff.as_ptr() as u64 + mbuff.len() as u64 {
            return
        }
        if mem.as_ptr() as u64 <= addr && addr + len as u64 <= mem.as_ptr() as u64 + mem.len() as u64 {
            return
        }
        if stack.as_ptr() as u64 <= addr && addr + len as u64 <= stack.as_ptr() as u64 + stack.len() as u64 {
            return
        }
        panic!(
            "Error: out of bounds memory {} (insn #{:?}), addr {:#x}, size {:?}\nmbuff: {:#x}/{:#x}, mem: {:#x}/{:#x}, stack: {:#x}/{:#x}",
            access_type, insn_ptr, addr, len,
            mbuff.as_ptr() as u64, mbuff.len(),
            mem.as_ptr() as u64, mem.len(),
            stack.as_ptr() as u64, stack.len()
        );
    }

    /// JIT-compile the loaded program. No argument required for this.
    ///
    /// If using helper functions, be sure to register them into the VM before calling this
    /// function.
    ///
    /// # Panics
    ///
    /// This function panics if an error occurs during JIT-compiling, such as the occurrence of an
    /// unknown eBPF operation code.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = vec![
    ///     0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into R1.
    ///     0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldhx r1[2], r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// // Instantiate a VM.
    /// let mut vm = rbpf::EbpfVmMbuff::new(&prog);
    ///
    /// vm.jit_compile();
    /// ```
    pub fn jit_compile(&mut self) {
        self.jit = jit::compile(&self.prog, &self.helpers, true, false);
    }

    /// Execute the previously JIT-compiled program, with the given packet data and metadata
    /// buffer, in a manner very similar to `prog_exec()`.
    ///
    /// If the program is made to be compatible with Linux kernel, it is expected to load the
    /// address of the beginning and of the end of the memory area used for packet data from the
    /// metadata buffer, at some appointed offsets. It is up to the user to ensure that these
    /// pointers are correctly stored in the buffer.
    ///
    /// # Panics
    ///
    /// This function panics if an error occurs during the execution of the program.
    ///
    /// **WARNING:** JIT-compiled assembly code is not safe, in particular there is no runtime
    /// check for memory access; so if the eBPF program attempts erroneous accesses, this may end
    /// very bad (program may segfault). It may be wise to check that the program works with the
    /// interpreter before running the JIT-compiled version of it.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = vec![
    ///     0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into r1.
    ///     0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldhx r1[2], r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    /// let mut mem = vec![
    ///     0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
    /// ];
    ///
    /// // Just for the example we create our metadata buffer from scratch, and we store the
    /// // pointers to packet data start and end in it.
    /// let mut mbuff = vec![0u8; 32];
    /// unsafe {
    ///     let mut data = mbuff.as_ptr().offset(8) as *mut u64;
    ///     let mut data_end = mbuff.as_ptr().offset(24) as *mut u64;
    ///     *data = mem.as_ptr() as u64;
    ///     *data_end = mem.as_ptr() as u64 + mem.len() as u64;
    /// }
    ///
    /// // Instantiate a VM.
    /// let mut vm = rbpf::EbpfVmMbuff::new(&prog);
    ///
    /// vm.jit_compile();
    ///
    /// // Provide both a reference to the packet data, and to the metadata buffer.
    /// let res = vm.prog_exec_jit(&mut mem, &mut mbuff);
    /// assert_eq!(res, 0x2211);
    /// ```
    pub fn prog_exec_jit(&self, mem: &mut std::vec::Vec<u8>, mbuff: &'a mut std::vec::Vec<u8>) -> u64 {
        // If packet data is empty, do not send the address of an empty vector; send a null
        // pointer (zero value) as first argument instead, as this is uBPF's behavior (empty
        // packet should not happen in the kernel; anyway the verifier would prevent the use of
        // uninitialized registers). See `mul_loop` test.
        let mem_ptr = match mem.len() {
            0 => 0 as *mut u8,
            _ => mem.as_ptr() as *mut u8
        };
        // The last two arguments are not used in this function. They would be used if there was a
        // need to indicate to the JIT at which offset in the mbuff mem_ptr and mem_ptr + mem.len()
        // should be stored; this is what happens with struct EbpfVmFixedMbuff.
        (self.jit)(mbuff.as_ptr() as *mut u8, mbuff.len(), mem_ptr, mem.len(), 0, 0)
    }
}
  547. /// A virtual machine to run eBPF program. This kind of VM is used for programs expecting to work
  548. /// on a metadata buffer containing pointers to packet data, but it internally handles the buffer
  549. /// so as to save the effort to manually handle the metadata buffer for the user.
  550. ///
  551. /// This struct implements a static internal buffer that is passed to the program. The user has to
  552. /// indicate the offset values at which the eBPF program expects to find the start and the end of
  553. /// packet data in the buffer. On calling the `prog_exec()` or `prog_exec_jit()` functions, the
  554. /// struct automatically updates the addresses in this static buffer, at the appointed offsets, for
  555. /// the start and the end of the packet data the program is called upon.
  556. ///
  557. /// # Examples
  558. ///
  559. /// This was compiled with clang from the following program, in C:
  560. ///
  561. /// ```c
  562. /// #include <linux/bpf.h>
  563. /// #include "path/to/linux/samples/bpf/bpf_helpers.h"
  564. ///
  565. /// SEC(".classifier")
  566. /// int classifier(struct __sk_buff *skb)
  567. /// {
  568. /// void *data = (void *)(long)skb->data;
  569. /// void *data_end = (void *)(long)skb->data_end;
  570. ///
  571. /// // Check program is long enough.
  572. /// if (data + 5 > data_end)
  573. /// return 0;
  574. ///
  575. /// return *((char *)data + 5);
  576. /// }
  577. /// ```
  578. ///
  579. /// Some small modifications have been brought to have it work, see comments.
  580. ///
  581. /// ```
  582. /// let prog = vec![
  583. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  584. /// // Here opcode 0x61 had to be replace by 0x79 so as to load a 8-bytes long address.
  585. /// // Also, offset 0x4c had to be replace with e.g. 0x40 so as to prevent the two pointers
  586. /// // from overlapping in the buffer.
  587. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load pointer to mem from r1[0x40] to r2
  588. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  589. /// // Here opcode 0x61 had to be replace by 0x79 so as to load a 8-bytes long address.
  590. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load ptr to mem_end from r1[0x50] to r1
  591. /// 0x2d, 0x12, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 3 instructions
  592. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  593. /// 0x67, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, // r0 >>= 56
  594. /// 0xc7, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, // r0 <<= 56 (arsh) extend byte sign to u64
  595. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  596. /// ];
  597. /// let mut mem1 = vec![
  598. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  599. /// ];
  600. /// let mut mem2 = vec![
  601. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27
  602. /// ];
  603. ///
  604. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  605. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(&prog, 0x40, 0x50);
  606. ///
  607. /// // Provide only a reference to the packet data. We do not manage the metadata buffer.
  608. /// let res = vm.prog_exec(&mut mem1);
  609. /// assert_eq!(res, 0xffffffffffffffdd);
  610. ///
  611. /// let res = vm.prog_exec(&mut mem2);
  612. /// assert_eq!(res, 0x27);
  613. /// ```
pub struct EbpfVmFixedMbuff<'a> {
    // Underlying generic metadata-buffer VM; this wrapper only adds internal mbuff handling.
    parent: EbpfVmMbuff<'a>,
    // Internally-managed metadata buffer in which the data / data_end pointers are written
    // at the fixed offsets provided at VM creation.
    mbuff: MetaBuff,
}
  618. impl<'a> EbpfVmFixedMbuff<'a> {
  619. /// Create a new virtual machine instance, and load an eBPF program into that instance.
  620. /// When attempting to load the program, it passes through a simple verifier.
  621. ///
  622. /// # Panics
  623. ///
  624. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  625. ///
  626. /// # Examples
  627. ///
  628. /// ```
  629. /// let prog = vec![
  630. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  631. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  632. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  633. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  634. /// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 3 instructions
  635. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  636. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  637. /// ];
  638. ///
  639. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  640. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(&prog, 0x40, 0x50);
  641. /// ```
  642. pub fn new(prog: &'a std::vec::Vec<u8>, data_offset: usize, data_end_offset: usize) -> EbpfVmFixedMbuff<'a> {
  643. let parent = EbpfVmMbuff::new(prog);
  644. let get_buff_len = | x: usize, y: usize | if x >= y { x + 8 } else { y + 8 };
  645. let buffer = vec![0u8; get_buff_len(data_offset, data_end_offset)];
  646. let mbuff = MetaBuff {
  647. data_offset: data_offset,
  648. data_end_offset: data_end_offset,
  649. buffer: buffer,
  650. };
  651. EbpfVmFixedMbuff {
  652. parent: parent,
  653. mbuff: mbuff,
  654. }
  655. }
  656. /// Load a new eBPF program into the virtual machine instance.
  657. ///
  658. /// # Panics
  659. ///
  660. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  661. ///
  662. /// # Examples
  663. ///
  664. /// ```
  665. /// let prog1 = vec![
  666. /// 0xb7, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  667. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  668. /// ];
  669. /// let prog2 = vec![
  670. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  671. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  672. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  673. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  674. /// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 3 instructions
  675. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  676. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  677. /// ];
  678. ///
  679. /// let mut mem = vec![
  680. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27,
  681. /// ];
  682. ///
  683. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(&prog1, 0, 0);
  684. /// vm.set_prog(&prog2, 0x40, 0x50);
  685. ///
  686. /// let res = vm.prog_exec(&mut mem);
  687. /// assert_eq!(res, 0x27);
  688. /// ```
  689. pub fn set_prog(&mut self, prog: &'a std::vec::Vec<u8>, data_offset: usize, data_end_offset: usize) {
  690. let get_buff_len = | x: usize, y: usize | if x >= y { x + 8 } else { y + 8 };
  691. let buffer = vec![0u8; get_buff_len(data_offset, data_end_offset)];
  692. self.mbuff.buffer = buffer;
  693. self.mbuff.data_offset = data_offset;
  694. self.mbuff.data_end_offset = data_end_offset;
  695. self.parent.set_prog(prog)
  696. }
  697. /// Register a built-in or user-defined helper function in order to use it later from within
  698. /// the eBPF program. The helper is registered into a hashmap, so the `key` can be any `u32`.
  699. ///
  700. /// If using JIT-compiled eBPF programs, be sure to register all helpers before compiling the
  701. /// program. You should be able to change registered helpers after compiling, but not to add
  702. /// new ones (i.e. with new keys).
  703. ///
  704. /// # Examples
  705. ///
  706. /// ```
  707. /// use rbpf::helpers;
  708. ///
  709. /// // This program was compiled with clang, from a C program containing the following single
  710. /// // instruction: `return bpf_trace_printk("foo %c %c %c\n", 10, 1, 2, 3);`
  711. /// let prog = vec![
  712. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  713. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  714. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  715. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  716. /// 0x2d, 0x12, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 6 instructions
  717. /// 0x71, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r1
  718. /// 0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r2, 0
  719. /// 0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r3, 0
  720. /// 0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r4, 0
  721. /// 0xb7, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r5, 0
  722. /// 0x85, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // call helper with key 1
  723. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  724. /// ];
  725. ///
  726. /// let mut mem = vec![
  727. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x09,
  728. /// ];
  729. ///
  730. /// // Instantiate a VM.
  731. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(&prog, 0x40, 0x50);
  732. ///
  733. /// // Register a helper.
  734. /// // On running the program this helper will print the content of registers r3, r4 and r5 to
  735. /// // standard output.
  736. /// vm.register_helper(1, helpers::sqrti);
  737. ///
  738. /// let res = vm.prog_exec(&mut mem);
  739. /// assert_eq!(res, 3);
  740. /// ```
  741. pub fn register_helper(&mut self, key: u32, function: fn (u64, u64, u64, u64, u64) -> u64) {
  742. self.parent.register_helper(key, function);
  743. }
  744. /// Execute the program loaded, with the given packet data.
  745. ///
  746. /// If the program is made to be compatible with Linux kernel, it is expected to load the
  747. /// address of the beginning and of the end of the memory area used for packet data from some
  748. /// metadata buffer, which in the case of this VM is handled internally. The offsets at which
  749. /// the addresses should be placed should have be set at the creation of the VM.
  750. ///
  751. /// # Panics
  752. ///
  753. /// This function is currently expected to panic if it encounters any error during the program
  754. /// execution, such as out of bounds accesses or division by zero attempts. This may be changed
  755. /// in the future (we could raise errors instead).
  756. ///
  757. /// # Examples
  758. ///
  759. /// ```
  760. /// let prog = vec![
  761. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  762. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  763. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  764. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  765. /// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 3 instructions
  766. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  767. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  768. /// ];
  769. /// let mut mem = vec![
  770. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  771. /// ];
  772. ///
  773. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  774. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(&prog, 0x40, 0x50);
  775. ///
  776. /// // Provide only a reference to the packet data. We do not manage the metadata buffer.
  777. /// let res = vm.prog_exec(&mut mem);
  778. /// assert_eq!(res, 0xdd);
  779. /// ```
  780. pub fn prog_exec(&mut self, mem: &'a mut std::vec::Vec<u8>) -> u64 {
  781. let l = self.mbuff.buffer.len();
  782. // Can this happen? Yes, since MetaBuff is public.
  783. if self.mbuff.data_offset + 8 > l || self.mbuff.data_end_offset + 8 > l {
  784. panic!("Error: buffer too small ({:?}), cannot use data_offset {:?} and data_end_offset {:?}",
  785. l, self.mbuff.data_offset, self.mbuff.data_end_offset);
  786. }
  787. unsafe {
  788. let mut data = self.mbuff.buffer.as_ptr().offset(self.mbuff.data_offset as isize) as *mut u64;
  789. let mut data_end = self.mbuff.buffer.as_ptr().offset(self.mbuff.data_end_offset as isize) as *mut u64;
  790. *data = mem.as_ptr() as u64;
  791. *data_end = mem.as_ptr() as u64 + mem.len() as u64;
  792. }
  793. self.parent.prog_exec(mem, &mut self.mbuff.buffer)
  794. }
  795. /// JIT-compile the loaded program. No argument required for this.
  796. ///
  797. /// If using helper functions, be sure to register them into the VM before calling this
  798. /// function.
  799. ///
  800. /// # Panics
  801. ///
  802. /// This function panics if an error occurs during JIT-compiling, such as the occurrence of an
  803. /// unknown eBPF operation code.
  804. ///
  805. /// # Examples
  806. ///
  807. /// ```
  808. /// let prog = vec![
  809. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  810. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  811. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  812. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  813. /// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 3 instructions
  814. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  815. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  816. /// ];
  817. ///
  818. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  819. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(&prog, 0x40, 0x50);
  820. ///
  821. /// vm.jit_compile();
  822. /// ```
  823. pub fn jit_compile(&mut self) {
  824. self.parent.jit = jit::compile(&self.parent.prog, &self.parent.helpers, true, true);
  825. }
  826. /// Execute the previously JIT-compiled program, with the given packet data, in a manner very
  827. /// similar to `prog_exec()`.
  828. ///
  829. /// If the program is made to be compatible with Linux kernel, it is expected to load the
  830. /// address of the beginning and of the end of the memory area used for packet data from some
  831. /// metadata buffer, which in the case of this VM is handled internally. The offsets at which
  832. /// the addresses should be placed should have be set at the creation of the VM.
  833. ///
  834. /// # Panics
  835. ///
  836. /// This function panics if an error occurs during the execution of the program.
  837. ///
  838. /// **WARNING:** JIT-compiled assembly code is not safe, in particular there is no runtime
  839. /// check for memory access; so if the eBPF program attempts erroneous accesses, this may end
  840. /// very bad (program may segfault). It may be wise to check that the program works with the
  841. /// interpreter before running the JIT-compiled version of it.
  842. ///
  843. /// # Examples
  844. ///
  845. /// ```
  846. /// let prog = vec![
  847. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  848. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  849. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  850. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  851. /// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 3 instructions
  852. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  853. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  854. /// ];
  855. /// let mut mem = vec![
  856. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  857. /// ];
  858. ///
  859. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  860. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(&prog, 0x40, 0x50);
  861. ///
  862. /// vm.jit_compile();
  863. ///
  864. /// // Provide only a reference to the packet data. We do not manage the metadata buffer.
  865. /// let res = vm.prog_exec_jit(&mut mem);
  866. /// assert_eq!(res, 0xdd);
  867. /// ```
  868. // This struct redefines the `prog_exec_jit()` function, in order to pass the offsets
  869. // associated with the fixed mbuff.
  870. pub fn prog_exec_jit(&mut self, mem: &'a mut std::vec::Vec<u8>) -> u64 {
  871. // If packet data is empty, do not send the address of an empty vector; send a null
  872. // pointer (zero value) as first argument instead, as this is uBPF's behavior (empty
  873. // packet should not happen in the kernel; anyway the verifier would prevent the use of
  874. // uninitialized registers). See `mul_loop` test.
  875. let mem_ptr = match mem.len() {
  876. 0 => 0 as *mut u8,
  877. _ => mem.as_ptr() as *mut u8
  878. };
  879. (self.parent.jit)(self.mbuff.buffer.as_ptr() as *mut u8, self.mbuff.buffer.len(),
  880. mem_ptr, mem.len(), self.mbuff.data_offset, self.mbuff.data_end_offset)
  881. }
  882. }
/// A virtual machine that runs an eBPF program directly on a packet: no metadata buffer is
/// made available to the program.
pub struct EbpfVmRaw<'a> {
    // Underlying generic VM; it is always invoked with an empty metadata buffer.
    parent: EbpfVmMbuff<'a>,
}
  887. impl<'a> EbpfVmRaw<'a> {
  888. pub fn new(prog: &'a std::vec::Vec<u8>) -> EbpfVmRaw<'a> {
  889. let parent = EbpfVmMbuff::new(prog);
  890. EbpfVmRaw {
  891. parent: parent,
  892. }
  893. }
  894. pub fn set_prog(&mut self, prog: &'a std::vec::Vec<u8>) {
  895. self.parent.set_prog(prog)
  896. }
  897. pub fn register_helper(&mut self, key: u32, function: fn (u64, u64, u64, u64, u64) -> u64) {
  898. self.parent.register_helper(key, function);
  899. }
  900. pub fn prog_exec(&self, mem: &'a mut std::vec::Vec<u8>) -> u64 {
  901. let mut mbuff = vec![];
  902. self.parent.prog_exec(mem, &mut mbuff)
  903. }
  904. pub fn jit_compile(&mut self) {
  905. self.parent.jit = jit::compile(&self.parent.prog, &self.parent.helpers, false, false);
  906. }
  907. pub fn prog_exec_jit(&self, mem: &'a mut std::vec::Vec<u8>) -> u64 {
  908. let mut mbuff = vec![];
  909. self.parent.prog_exec_jit(mem, &mut mbuff)
  910. }
  911. }
/// A virtual machine that runs an eBPF program with no data at all: no packet, and no
/// metadata buffer.
pub struct EbpfVmNoData<'a> {
    // Underlying packet-only VM; it is always invoked with an empty packet.
    parent: EbpfVmRaw<'a>,
}
  916. impl<'a> EbpfVmNoData<'a> {
  917. pub fn new(prog: &'a std::vec::Vec<u8>) -> EbpfVmNoData<'a> {
  918. let parent = EbpfVmRaw::new(prog);
  919. EbpfVmNoData {
  920. parent: parent,
  921. }
  922. }
  923. pub fn set_prog(&mut self, prog: &'a std::vec::Vec<u8>) {
  924. self.parent.set_prog(prog)
  925. }
  926. pub fn register_helper(&mut self, key: u32, function: fn (u64, u64, u64, u64, u64) -> u64) {
  927. self.parent.register_helper(key, function);
  928. }
  929. pub fn jit_compile(&mut self) {
  930. self.parent.jit_compile();
  931. }
  932. pub fn prog_exec(&self) -> u64 {
  933. self.parent.prog_exec(&mut vec![])
  934. }
  935. pub fn prog_exec_jit(&self) -> u64 {
  936. self.parent.prog_exec_jit(&mut vec![])
  937. }
  938. }