
// Derived from uBPF <https://github.com/iovisor/ubpf>
// Copyright 2015 Big Switch Networks, Inc
//      (uBPF: VM architecture, parts of the interpreter, originally in C)
// Copyright 2016 6WIND S.A. <quentin.monnet@6wind.com>
//      (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for helpers)
//
// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

//! Virtual machine and JIT compiler for eBPF programs.
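//!
//! Below is a minimal usage sketch, adapted from the `EbpfVmRaw` documentation further down in
//! this file; see the individual VM structs for more complete examples.
//!
//! ```
//! // This eBPF program loads the byte at offset 4 of the packet, adds 0x2200 to it and returns it.
//! let prog = &[
//!     0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1[0x04], r1
//!     0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x2200
//!     0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
//!     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
//! ];
//! let mem = &mut [
//!     0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
//! ];
//!
//! // Instantiate a VM working directly on packet data, and run the program on `mem`.
//! let vm = rbpf::EbpfVmRaw::new(prog);
//! let res = vm.prog_exec(mem);
//! assert_eq!(res, 0x22cc);
//! ```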
#![doc(html_logo_url = "https://raw.githubusercontent.com/qmonnet/rbpf/master/misc/rbpf.png",
       html_favicon_url = "https://raw.githubusercontent.com/qmonnet/rbpf/master/misc/rbpf.ico")]

#![warn(missing_docs)]

// There are unused mut warnings due to unsafe code.
#![allow(unused_mut)]
#![cfg_attr(feature = "cargo-clippy", allow(cast_lossless, doc_markdown, match_same_arms, unreadable_literal))]

extern crate byteorder;
extern crate combine;
extern crate time;

use std::u32;
use std::collections::HashMap;

use byteorder::{ByteOrder, LittleEndian};

pub mod assembler;
pub mod disassembler;
pub mod ebpf;
pub mod helpers;
pub mod insn_builder;
mod asm_parser;

#[cfg(not(windows))]
mod jit;

mod verifier;
// A metadata buffer with two offset indications. It can be used in one kind of eBPF VM to simulate
// the use of a metadata buffer each time the program is executed, without the user having to
// actually handle it. The offsets are used to tell the VM where in the buffer the pointers to
// packet data start and end should be stored each time the program is run on a new packet.
struct MetaBuff {
    data_offset:     usize,
    data_end_offset: usize,
    buffer:          Vec<u8>,
}
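// For illustration only (assuming the offsets used in the `EbpfVmFixedMbuff` examples below,
// i.e. `data_offset = 0x40` and `data_end_offset = 0x50`), the buffer is laid out as:
//
//     buffer[0x40..0x48]: little-endian u64 holding the address where packet data starts
//     buffer[0x50..0x58]: little-endian u64 holding the address where packet data ends
//
// `EbpfVmFixedMbuff::prog_exec()` rewrites these two words before each run of the program.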
/// A virtual machine to run eBPF programs. This kind of VM is used for programs expecting to work
  42. /// on a metadata buffer containing pointers to packet data.
  43. ///
  44. /// # Examples
  45. ///
  46. /// ```
  47. /// let prog = &[
  48. /// 0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff at offset 8 into R1.
/// 0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxh r1[2], r0
  50. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  51. /// ];
  52. /// let mem = &mut [
  53. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  54. /// ];
  55. ///
  56. /// // Just for the example we create our metadata buffer from scratch, and we store the pointers
  57. /// // to packet data start and end in it.
  58. /// let mut mbuff = [0u8; 32];
  59. /// unsafe {
  60. /// let mut data = mbuff.as_ptr().offset(8) as *mut u64;
  61. /// let mut data_end = mbuff.as_ptr().offset(24) as *mut u64;
  62. /// *data = mem.as_ptr() as u64;
  63. /// *data_end = mem.as_ptr() as u64 + mem.len() as u64;
  64. /// }
  65. ///
  66. /// // Instantiate a VM.
  67. /// let mut vm = rbpf::EbpfVmMbuff::new(prog);
  68. ///
  69. /// // Provide both a reference to the packet data, and to the metadata buffer.
  70. /// let res = vm.prog_exec(mem, &mut mbuff);
  71. /// assert_eq!(res, 0x2211);
  72. /// ```
  73. pub struct EbpfVmMbuff<'a> {
  74. prog: &'a [u8],
  75. jit: (unsafe fn (*mut u8, usize, *mut u8, usize, usize, usize) -> u64),
  76. helpers: HashMap<u32, ebpf::Helper>,
  77. }
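// Note on the `jit` field above: the compiled function takes, in order, a pointer to the metadata
// buffer and its length, a pointer to the packet data and its length, and the two offsets at which
// the addresses of the packet data start and end should be written into the metadata buffer
// (these offsets are only used by `EbpfVmFixedMbuff`; see `prog_exec_jit()` below).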
  78. impl<'a> EbpfVmMbuff<'a> {
  79. /// Create a new virtual machine instance, and load an eBPF program into that instance.
  80. /// When attempting to load the program, it passes through a simple verifier.
  81. ///
  82. /// # Panics
  83. ///
  84. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  85. ///
  86. /// # Examples
  87. ///
  88. /// ```
  89. /// let prog = &[
  90. /// 0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into R1.
/// 0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxh r1[2], r0
  92. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  93. /// ];
  94. ///
  95. /// // Instantiate a VM.
  96. /// let mut vm = rbpf::EbpfVmMbuff::new(prog);
  97. /// ```
  98. pub fn new(prog: &'a [u8]) -> EbpfVmMbuff<'a> {
  99. verifier::check(prog);
  100. fn no_jit(_mbuff: *mut u8, _len: usize, _mem: *mut u8, _mem_len: usize,
  101. _nodata_offset: usize, _nodata_end_offset: usize) -> u64 {
  102. panic!("Error: program has not been JIT-compiled");
  103. }
  104. EbpfVmMbuff {
  105. prog: prog,
  106. jit: no_jit,
  107. helpers: HashMap::new(),
  108. }
  109. }
  110. /// Load a new eBPF program into the virtual machine instance.
  111. ///
  112. /// # Panics
  113. ///
  114. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  115. ///
  116. /// # Examples
  117. ///
  118. /// ```
  119. /// let prog1 = &[
  120. /// 0xb7, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  121. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  122. /// ];
  123. /// let prog2 = &[
  124. /// 0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into R1.
/// 0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxh r1[2], r0
  126. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  127. /// ];
  128. ///
  129. /// // Instantiate a VM.
  130. /// let mut vm = rbpf::EbpfVmMbuff::new(prog1);
  131. /// vm.set_prog(prog2);
  132. /// ```
  133. pub fn set_prog(&mut self, prog: &'a [u8]) {
  134. verifier::check(prog);
  135. self.prog = prog;
  136. }
  137. /// Register a built-in or user-defined helper function in order to use it later from within
  138. /// the eBPF program. The helper is registered into a hashmap, so the `key` can be any `u32`.
  139. ///
  140. /// If using JIT-compiled eBPF programs, be sure to register all helpers before compiling the
  141. /// program. You should be able to change registered helpers after compiling, but not to add
  142. /// new ones (i.e. with new keys).
  143. ///
  144. /// # Examples
  145. ///
  146. /// ```
  147. /// use rbpf::helpers;
  148. ///
  149. /// // This program was compiled with clang, from a C program containing the following single
  150. /// // instruction: `return bpf_trace_printk("foo %c %c %c\n", 10, 1, 2, 3);`
  151. /// let prog = &[
  152. /// 0x18, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load 0 as u64 into r1 (That would be
  153. /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // replaced by tc by the address of
  154. /// // the format string, in the .map
  155. /// // section of the ELF file).
  156. /// 0xb7, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, // mov r2, 10
  157. /// 0xb7, 0x03, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // mov r3, 1
  158. /// 0xb7, 0x04, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // mov r4, 2
  159. /// 0xb7, 0x05, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, // mov r5, 3
  160. /// 0x85, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, // call helper with key 6
  161. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  162. /// ];
  163. ///
  164. /// // Instantiate a VM.
  165. /// let mut vm = rbpf::EbpfVmMbuff::new(prog);
  166. ///
  167. /// // Register a helper.
  168. /// // On running the program this helper will print the content of registers r3, r4 and r5 to
  169. /// // standard output.
  170. /// vm.register_helper(6, helpers::bpf_trace_printf);
  171. /// ```
  172. pub fn register_helper(&mut self, key: u32, function: fn (u64, u64, u64, u64, u64) -> u64) {
  173. self.helpers.insert(key, function);
  174. }
  175. /// Execute the program loaded, with the given packet data and metadata buffer.
  176. ///
/// If the program is intended to be compatible with the Linux kernel, it is expected to load the
/// addresses of the beginning and of the end of the memory area used for packet data from the
/// metadata buffer, at some appointed offsets. It is up to the user to ensure that these
/// pointers are correctly stored in the buffer.
  181. ///
  182. /// # Panics
  183. ///
  184. /// This function is currently expected to panic if it encounters any error during the program
  185. /// execution, such as out of bounds accesses or division by zero attempts. This may be changed
  186. /// in the future (we could raise errors instead).
  187. ///
  188. /// # Examples
  189. ///
  190. /// ```
  191. /// let prog = &[
  192. /// 0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into R1.
/// 0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxh r1[2], r0
  194. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  195. /// ];
  196. /// let mem = &mut [
  197. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  198. /// ];
  199. ///
  200. /// // Just for the example we create our metadata buffer from scratch, and we store the
  201. /// // pointers to packet data start and end in it.
  202. /// let mut mbuff = [0u8; 32];
  203. /// unsafe {
  204. /// let mut data = mbuff.as_ptr().offset(8) as *mut u64;
  205. /// let mut data_end = mbuff.as_ptr().offset(24) as *mut u64;
  206. /// *data = mem.as_ptr() as u64;
  207. /// *data_end = mem.as_ptr() as u64 + mem.len() as u64;
  208. /// }
  209. ///
  210. /// // Instantiate a VM.
  211. /// let mut vm = rbpf::EbpfVmMbuff::new(prog);
  212. ///
  213. /// // Provide both a reference to the packet data, and to the metadata buffer.
  214. /// let res = vm.prog_exec(mem, &mut mbuff);
  215. /// assert_eq!(res, 0x2211);
  216. /// ```
  217. #[allow(unknown_lints)]
  218. #[allow(cyclomatic_complexity)]
  219. pub fn prog_exec(&self, mem: &[u8], mbuff: &[u8]) -> u64 {
  220. const U32MAX: u64 = u32::MAX as u64;
  221. let stack = vec![0u8;ebpf::STACK_SIZE];
  222. // R1 points to beginning of memory area, R10 to stack
  223. let mut reg: [u64;11] = [
  224. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, stack.as_ptr() as u64 + stack.len() as u64
  225. ];
  226. if !mbuff.is_empty() {
  227. reg[1] = mbuff.as_ptr() as u64;
  228. }
  229. else if !mem.is_empty() {
  230. reg[1] = mem.as_ptr() as u64;
  231. }
  232. let check_mem_load = | addr: u64, len: usize, insn_ptr: usize | {
  233. EbpfVmMbuff::check_mem(addr, len, "load", insn_ptr, mbuff, mem, &stack);
  234. };
  235. let check_mem_store = | addr: u64, len: usize, insn_ptr: usize | {
  236. EbpfVmMbuff::check_mem(addr, len, "store", insn_ptr, mbuff, mem, &stack);
  237. };
  238. // Loop on instructions
  239. let mut insn_ptr:usize = 0;
  240. while insn_ptr * ebpf::INSN_SIZE < self.prog.len() {
  241. let insn = ebpf::get_insn(self.prog, insn_ptr);
  242. insn_ptr += 1;
  243. let _dst = insn.dst as usize;
  244. let _src = insn.src as usize;
  245. match insn.opc {
  246. // BPF_LD class
  247. // LD_ABS_* and LD_IND_* are supposed to load pointer to data from metadata buffer.
  248. // Since this pointer is constant, and since we already know it (mem), do not
  249. // bother re-fetching it, just use mem already.
  250. ebpf::LD_ABS_B => reg[0] = unsafe {
  251. let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u8;
  252. check_mem_load(x as u64, 8, insn_ptr);
  253. *x as u64
  254. },
  255. ebpf::LD_ABS_H => reg[0] = unsafe {
  256. let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u16;
  257. check_mem_load(x as u64, 8, insn_ptr);
  258. *x as u64
  259. },
  260. ebpf::LD_ABS_W => reg[0] = unsafe {
  261. let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u32;
  262. check_mem_load(x as u64, 8, insn_ptr);
  263. *x as u64
  264. },
  265. ebpf::LD_ABS_DW => reg[0] = unsafe {
  266. let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u64;
  267. check_mem_load(x as u64, 8, insn_ptr);
  268. *x as u64
  269. },
  270. ebpf::LD_IND_B => reg[0] = unsafe {
  271. let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u8;
  272. check_mem_load(x as u64, 8, insn_ptr);
  273. *x as u64
  274. },
  275. ebpf::LD_IND_H => reg[0] = unsafe {
  276. let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u16;
  277. check_mem_load(x as u64, 8, insn_ptr);
  278. *x as u64
  279. },
  280. ebpf::LD_IND_W => reg[0] = unsafe {
  281. let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u32;
  282. check_mem_load(x as u64, 8, insn_ptr);
  283. *x as u64
  284. },
  285. ebpf::LD_IND_DW => reg[0] = unsafe {
  286. let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u64;
  287. check_mem_load(x as u64, 8, insn_ptr);
  288. *x as u64
  289. },
  290. ebpf::LD_DW_IMM => {
  291. let next_insn = ebpf::get_insn(self.prog, insn_ptr);
  292. insn_ptr += 1;
  293. reg[_dst] = ((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32);
  294. },
  295. // BPF_LDX class
  296. ebpf::LD_B_REG => reg[_dst] = unsafe {
  297. let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u8;
  298. check_mem_load(x as u64, 1, insn_ptr);
  299. *x as u64
  300. },
  301. ebpf::LD_H_REG => reg[_dst] = unsafe {
  302. let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u16;
  303. check_mem_load(x as u64, 2, insn_ptr);
  304. *x as u64
  305. },
  306. ebpf::LD_W_REG => reg[_dst] = unsafe {
  307. let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u32;
  308. check_mem_load(x as u64, 4, insn_ptr);
  309. *x as u64
  310. },
  311. ebpf::LD_DW_REG => reg[_dst] = unsafe {
  312. let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u64;
  313. check_mem_load(x as u64, 8, insn_ptr);
  314. *x as u64
  315. },
  316. // BPF_ST class
  317. ebpf::ST_B_IMM => unsafe {
  318. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
  319. check_mem_store(x as u64, 1, insn_ptr);
  320. *x = insn.imm as u8;
  321. },
  322. ebpf::ST_H_IMM => unsafe {
  323. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
  324. check_mem_store(x as u64, 2, insn_ptr);
  325. *x = insn.imm as u16;
  326. },
  327. ebpf::ST_W_IMM => unsafe {
  328. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
  329. check_mem_store(x as u64, 4, insn_ptr);
  330. *x = insn.imm as u32;
  331. },
  332. ebpf::ST_DW_IMM => unsafe {
  333. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
  334. check_mem_store(x as u64, 8, insn_ptr);
  335. *x = insn.imm as u64;
  336. },
  337. // BPF_STX class
  338. ebpf::ST_B_REG => unsafe {
  339. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
  340. check_mem_store(x as u64, 1, insn_ptr);
  341. *x = reg[_src] as u8;
  342. },
  343. ebpf::ST_H_REG => unsafe {
  344. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
  345. check_mem_store(x as u64, 2, insn_ptr);
  346. *x = reg[_src] as u16;
  347. },
  348. ebpf::ST_W_REG => unsafe {
  349. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
  350. check_mem_store(x as u64, 4, insn_ptr);
  351. *x = reg[_src] as u32;
  352. },
  353. ebpf::ST_DW_REG => unsafe {
  354. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
  355. check_mem_store(x as u64, 8, insn_ptr);
  356. *x = reg[_src] as u64;
  357. },
  358. ebpf::ST_W_XADD => unimplemented!(),
  359. ebpf::ST_DW_XADD => unimplemented!(),
  360. // BPF_ALU class
// TODO: Check how overflow works in the kernel. Should we apply `&= U32MAX` to all source
// register values before performing the operation?
// Cf. `((0x11 << 32) - (0x1 << 32)) as u32` vs. `((0x11 << 32) as u32) - ((0x1 << 32) as u32)`.
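// For instance (illustrative figures only): with `reg[_dst] = 0x7fff_ffff` and `insn.imm = 1`,
// the current `(reg[_dst] as i32).wrapping_add(insn.imm) as u64` yields 0xffff_ffff_8000_0000,
// because the negative i32 result is sign-extended to 64 bits, whereas masking first, as in the
// commented-out alternative, would yield 0x0000_0000_8000_0000.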
  364. ebpf::ADD32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_add(insn.imm) as u64, //((reg[_dst] & U32MAX) + insn.imm as u64) & U32MAX,
  365. ebpf::ADD32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_add(reg[_src] as i32) as u64, //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
  366. ebpf::SUB32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm) as u64,
  367. ebpf::SUB32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
  368. ebpf::MUL32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm) as u64,
  369. ebpf::MUL32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
  370. ebpf::DIV32_IMM => reg[_dst] = (reg[_dst] as u32 / insn.imm as u32) as u64,
  371. ebpf::DIV32_REG => {
  372. if reg[_src] == 0 {
  373. panic!("Error: division by 0");
  374. }
  375. reg[_dst] = (reg[_dst] as u32 / reg[_src] as u32) as u64;
  376. },
  377. ebpf::OR32_IMM => reg[_dst] = (reg[_dst] as u32 | insn.imm as u32) as u64,
  378. ebpf::OR32_REG => reg[_dst] = (reg[_dst] as u32 | reg[_src] as u32) as u64,
  379. ebpf::AND32_IMM => reg[_dst] = (reg[_dst] as u32 & insn.imm as u32) as u64,
  380. ebpf::AND32_REG => reg[_dst] = (reg[_dst] as u32 & reg[_src] as u32) as u64,
  381. ebpf::LSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm as u32) as u64,
  382. ebpf::LSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
  383. ebpf::RSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm as u32) as u64,
  384. ebpf::RSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
  385. ebpf::NEG32 => { reg[_dst] = (reg[_dst] as i32).wrapping_neg() as u64; reg[_dst] &= U32MAX; },
  386. ebpf::MOD32_IMM => reg[_dst] = (reg[_dst] as u32 % insn.imm as u32) as u64,
  387. ebpf::MOD32_REG => {
  388. if reg[_src] == 0 {
  389. panic!("Error: division by 0");
  390. }
  391. reg[_dst] = (reg[_dst] as u32 % reg[_src] as u32) as u64;
  392. },
  393. ebpf::XOR32_IMM => reg[_dst] = (reg[_dst] as u32 ^ insn.imm as u32) as u64,
  394. ebpf::XOR32_REG => reg[_dst] = (reg[_dst] as u32 ^ reg[_src] as u32) as u64,
  395. ebpf::MOV32_IMM => reg[_dst] = insn.imm as u64,
  396. ebpf::MOV32_REG => reg[_dst] = (reg[_src] as u32) as u64,
  397. ebpf::ARSH32_IMM => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm as u32) as u64; reg[_dst] &= U32MAX; },
  398. ebpf::ARSH32_REG => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
  399. ebpf::LE => {
  400. reg[_dst] = match insn.imm {
  401. 16 => (reg[_dst] as u16).to_le() as u64,
  402. 32 => (reg[_dst] as u32).to_le() as u64,
  403. 64 => reg[_dst].to_le(),
  404. _ => unreachable!(),
  405. };
  406. },
  407. ebpf::BE => {
  408. reg[_dst] = match insn.imm {
  409. 16 => (reg[_dst] as u16).to_be() as u64,
  410. 32 => (reg[_dst] as u32).to_be() as u64,
  411. 64 => reg[_dst].to_be(),
  412. _ => unreachable!(),
  413. };
  414. },
  415. // BPF_ALU64 class
  416. ebpf::ADD64_IMM => reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
  417. ebpf::ADD64_REG => reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
  418. ebpf::SUB64_IMM => reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
  419. ebpf::SUB64_REG => reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
  420. ebpf::MUL64_IMM => reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
  421. ebpf::MUL64_REG => reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
  422. ebpf::DIV64_IMM => reg[_dst] /= insn.imm as u64,
  423. ebpf::DIV64_REG => {
  424. if reg[_src] == 0 {
  425. panic!("Error: division by 0");
  426. }
  427. reg[_dst] /= reg[_src];
  428. },
  429. ebpf::OR64_IMM => reg[_dst] |= insn.imm as u64,
  430. ebpf::OR64_REG => reg[_dst] |= reg[_src],
  431. ebpf::AND64_IMM => reg[_dst] &= insn.imm as u64,
  432. ebpf::AND64_REG => reg[_dst] &= reg[_src],
  433. ebpf::LSH64_IMM => reg[_dst] <<= insn.imm as u64,
  434. ebpf::LSH64_REG => reg[_dst] <<= reg[_src],
  435. ebpf::RSH64_IMM => reg[_dst] >>= insn.imm as u64,
  436. ebpf::RSH64_REG => reg[_dst] >>= reg[_src],
  437. ebpf::NEG64 => reg[_dst] = -(reg[_dst] as i64) as u64,
  438. ebpf::MOD64_IMM => reg[_dst] %= insn.imm as u64,
  439. ebpf::MOD64_REG => {
  440. if reg[_src] == 0 {
  441. panic!("Error: division by 0");
  442. }
  443. reg[_dst] %= reg[_src];
  444. },
  445. ebpf::XOR64_IMM => reg[_dst] ^= insn.imm as u64,
  446. ebpf::XOR64_REG => reg[_dst] ^= reg[_src],
  447. ebpf::MOV64_IMM => reg[_dst] = insn.imm as u64,
  448. ebpf::MOV64_REG => reg[_dst] = reg[_src],
  449. ebpf::ARSH64_IMM => reg[_dst] = (reg[_dst] as i64 >> insn.imm) as u64,
  450. ebpf::ARSH64_REG => reg[_dst] = (reg[_dst] as i64 >> reg[_src]) as u64,
  451. // BPF_JMP class
  452. // TODO: check this actually works as expected for signed / unsigned ops
  453. ebpf::JA => insn_ptr = (insn_ptr as i16 + insn.off) as usize,
  454. ebpf::JEQ_IMM => if reg[_dst] == insn.imm as u64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  455. ebpf::JEQ_REG => if reg[_dst] == reg[_src] { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  456. ebpf::JGT_IMM => if reg[_dst] > insn.imm as u64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  457. ebpf::JGT_REG => if reg[_dst] > reg[_src] { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  458. ebpf::JGE_IMM => if reg[_dst] >= insn.imm as u64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  459. ebpf::JGE_REG => if reg[_dst] >= reg[_src] { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  460. ebpf::JLT_IMM => if reg[_dst] < insn.imm as u64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  461. ebpf::JLT_REG => if reg[_dst] < reg[_src] { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  462. ebpf::JLE_IMM => if reg[_dst] <= insn.imm as u64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  463. ebpf::JLE_REG => if reg[_dst] <= reg[_src] { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  464. ebpf::JSET_IMM => if reg[_dst] & insn.imm as u64 != 0 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  465. ebpf::JSET_REG => if reg[_dst] & reg[_src] != 0 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  466. ebpf::JNE_IMM => if reg[_dst] != insn.imm as u64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  467. ebpf::JNE_REG => if reg[_dst] != reg[_src] { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  468. ebpf::JSGT_IMM => if reg[_dst] as i64 > insn.imm as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  469. ebpf::JSGT_REG => if reg[_dst] as i64 > reg[_src] as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  470. ebpf::JSGE_IMM => if reg[_dst] as i64 >= insn.imm as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  471. ebpf::JSGE_REG => if reg[_dst] as i64 >= reg[_src] as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  472. ebpf::JSLT_IMM => if (reg[_dst] as i64) < insn.imm as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  473. ebpf::JSLT_REG => if (reg[_dst] as i64) < reg[_src] as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  474. ebpf::JSLE_IMM => if (reg[_dst] as i64) <= insn.imm as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  475. ebpf::JSLE_REG => if (reg[_dst] as i64) <= reg[_src] as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  476. // Do not delegate the check to the verifier, since registered functions can be
  477. // changed after the program has been verified.
  478. ebpf::CALL => if let Some(function) = self.helpers.get(&(insn.imm as u32)) {
  479. reg[0] = function(reg[1], reg[2], reg[3], reg[4], reg[5]);
  480. } else {
  481. panic!("Error: unknown helper function (id: {:#x})", insn.imm as u32);
  482. },
  483. ebpf::TAIL_CALL => unimplemented!(),
  484. ebpf::EXIT => return reg[0],
  485. _ => unreachable!()
  486. }
  487. }
  488. unreachable!()
  489. }
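// Check that a load or store of `len` bytes at address `addr` stays entirely within one of the
// memory areas the program may legitimately access: the metadata buffer, the packet data, or
// the stack. Panics with a diagnostic message if the access is out of bounds.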
  490. fn check_mem(addr: u64, len: usize, access_type: &str, insn_ptr: usize,
  491. mbuff: &[u8], mem: &[u8], stack: &[u8]) {
  492. if mbuff.as_ptr() as u64 <= addr && addr + len as u64 <= mbuff.as_ptr() as u64 + mbuff.len() as u64 {
  493. return
  494. }
  495. if mem.as_ptr() as u64 <= addr && addr + len as u64 <= mem.as_ptr() as u64 + mem.len() as u64 {
  496. return
  497. }
  498. if stack.as_ptr() as u64 <= addr && addr + len as u64 <= stack.as_ptr() as u64 + stack.len() as u64 {
  499. return
  500. }
  501. panic!(
  502. "Error: out of bounds memory {} (insn #{:?}), addr {:#x}, size {:?}\nmbuff: {:#x}/{:#x}, mem: {:#x}/{:#x}, stack: {:#x}/{:#x}",
  503. access_type, insn_ptr, addr, len,
  504. mbuff.as_ptr() as u64, mbuff.len(),
  505. mem.as_ptr() as u64, mem.len(),
  506. stack.as_ptr() as u64, stack.len()
  507. );
  508. }
  509. /// JIT-compile the loaded program. No argument required for this.
  510. ///
  511. /// If using helper functions, be sure to register them into the VM before calling this
  512. /// function.
  513. ///
  514. /// # Panics
  515. ///
  516. /// This function panics if an error occurs during JIT-compiling, such as the occurrence of an
  517. /// unknown eBPF operation code.
  518. ///
  519. /// # Examples
  520. ///
  521. /// ```
  522. /// let prog = &[
  523. /// 0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into R1.
/// 0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxh r1[2], r0
  525. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  526. /// ];
  527. ///
  528. /// // Instantiate a VM.
  529. /// let mut vm = rbpf::EbpfVmMbuff::new(prog);
  530. ///
  531. /// vm.jit_compile();
  532. /// ```
  533. #[cfg(not(windows))]
  534. pub fn jit_compile(&mut self) {
  535. self.jit = jit::compile(self.prog, &self.helpers, true, false);
  536. }
  537. /// Execute the previously JIT-compiled program, with the given packet data and metadata
  538. /// buffer, in a manner very similar to `prog_exec()`.
  539. ///
/// If the program is intended to be compatible with the Linux kernel, it is expected to load the
/// addresses of the beginning and of the end of the memory area used for packet data from the
/// metadata buffer, at some appointed offsets. It is up to the user to ensure that these
/// pointers are correctly stored in the buffer.
  544. ///
  545. /// # Panics
  546. ///
  547. /// This function panics if an error occurs during the execution of the program.
  548. ///
  549. /// # Safety
  550. ///
/// **WARNING:** JIT-compiled assembly code is not safe. In particular, there is no runtime
/// check for memory accesses; so if the eBPF program attempts erroneous accesses, this may end
/// very badly (the program may segfault). It may be wise to check that the program works with
/// the interpreter before running the JIT-compiled version of it.
///
/// For this reason the function should be called from within an `unsafe` block.
  557. ///
  558. /// # Examples
  559. ///
  560. /// ```
  561. /// let prog = &[
  562. /// 0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into r1.
/// 0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxh r1[2], r0
  564. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  565. /// ];
  566. /// let mem = &mut [
  567. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  568. /// ];
  569. ///
  570. /// // Just for the example we create our metadata buffer from scratch, and we store the
  571. /// // pointers to packet data start and end in it.
  572. /// let mut mbuff = [0u8; 32];
  573. /// unsafe {
  574. /// let mut data = mbuff.as_ptr().offset(8) as *mut u64;
  575. /// let mut data_end = mbuff.as_ptr().offset(24) as *mut u64;
  576. /// *data = mem.as_ptr() as u64;
  577. /// *data_end = mem.as_ptr() as u64 + mem.len() as u64;
  578. /// }
  579. ///
  580. /// // Instantiate a VM.
  581. /// let mut vm = rbpf::EbpfVmMbuff::new(prog);
  582. ///
  583. /// # #[cfg(not(windows))]
  584. /// vm.jit_compile();
  585. ///
  586. /// // Provide both a reference to the packet data, and to the metadata buffer.
  587. /// # #[cfg(not(windows))]
  588. /// unsafe {
  589. /// let res = vm.prog_exec_jit(mem, &mut mbuff);
  590. /// assert_eq!(res, 0x2211);
  591. /// }
  592. /// ```
  593. pub unsafe fn prog_exec_jit(&self, mem: &mut [u8], mbuff: &'a mut [u8]) -> u64 {
  594. // If packet data is empty, do not send the address of an empty slice; send a null pointer
  595. // as first argument instead, as this is uBPF's behavior (empty packet should not happen
  596. // in the kernel; anyway the verifier would prevent the use of uninitialized registers).
  597. // See `mul_loop` test.
  598. let mem_ptr = match mem.len() {
  599. 0 => std::ptr::null_mut(),
  600. _ => mem.as_ptr() as *mut u8
  601. };
  602. // The last two arguments are not used in this function. They would be used if there was a
  603. // need to indicate to the JIT at which offset in the mbuff mem_ptr and mem_ptr + mem.len()
  604. // should be stored; this is what happens with struct EbpfVmFixedMbuff.
  605. (self.jit)(mbuff.as_ptr() as *mut u8, mbuff.len(), mem_ptr, mem.len(), 0, 0)
  606. }
  607. }
/// A virtual machine to run eBPF programs. This kind of VM is used for programs expecting to work
/// on a metadata buffer containing pointers to packet data, but it handles the buffer internally,
/// sparing the user the effort of managing the metadata buffer manually.
///
/// This struct keeps a fixed internal buffer that is passed to the program. The user has to
/// indicate the offset values at which the eBPF program expects to find the start and the end of
/// packet data in the buffer. On calling the `prog_exec()` or `prog_exec_jit()` functions, the
/// struct automatically updates the addresses in this internal buffer, at the appointed offsets,
/// for the start and the end of the packet data the program is called upon.
  617. ///
  618. /// # Examples
  619. ///
  620. /// This was compiled with clang from the following program, in C:
  621. ///
  622. /// ```c
  623. /// #include <linux/bpf.h>
  624. /// #include "path/to/linux/samples/bpf/bpf_helpers.h"
  625. ///
  626. /// SEC(".classifier")
  627. /// int classifier(struct __sk_buff *skb)
  628. /// {
  629. /// void *data = (void *)(long)skb->data;
  630. /// void *data_end = (void *)(long)skb->data_end;
  631. ///
/// // Check that the packet data is long enough.
  633. /// if (data + 5 > data_end)
  634. /// return 0;
  635. ///
  636. /// return *((char *)data + 5);
  637. /// }
  638. /// ```
  639. ///
/// Some small modifications were needed to make it work; see the comments.
  641. ///
  642. /// ```
  643. /// let prog = &[
  644. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
/// // Here opcode 0x61 had to be replaced with 0x79 so as to load an 8-byte-long address.
/// // Also, offset 0x4c had to be replaced with e.g. 0x40 so as to prevent the two pointers
/// // from overlapping in the buffer.
  648. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load pointer to mem from r1[0x40] to r2
  649. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
/// // Here opcode 0x61 had to be replaced with 0x79 so as to load an 8-byte-long address.
  651. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load ptr to mem_end from r1[0x50] to r1
  652. /// 0x2d, 0x12, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 3 instructions
  653. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
/// 0x67, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, // r0 <<= 56
/// 0xc7, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, // r0 >>= 56 (arsh), sign-extending the byte to u64
  656. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  657. /// ];
  658. /// let mem1 = &mut [
  659. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  660. /// ];
  661. /// let mem2 = &mut [
  662. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27
  663. /// ];
  664. ///
  665. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  666. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog, 0x40, 0x50);
  667. ///
  668. /// // Provide only a reference to the packet data. We do not manage the metadata buffer.
  669. /// let res = vm.prog_exec(mem1);
  670. /// assert_eq!(res, 0xffffffffffffffdd);
  671. ///
  672. /// let res = vm.prog_exec(mem2);
  673. /// assert_eq!(res, 0x27);
  674. /// ```
  675. pub struct EbpfVmFixedMbuff<'a> {
  676. parent: EbpfVmMbuff<'a>,
  677. mbuff: MetaBuff,
  678. }
  679. impl<'a> EbpfVmFixedMbuff<'a> {
  680. /// Create a new virtual machine instance, and load an eBPF program into that instance.
  681. /// When attempting to load the program, it passes through a simple verifier.
  682. ///
  683. /// # Panics
  684. ///
  685. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  686. ///
  687. /// # Examples
  688. ///
  689. /// ```
  690. /// let prog = &[
  691. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  692. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  693. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  694. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
/// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 1 instruction
  696. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  697. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  698. /// ];
  699. ///
  700. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  701. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog, 0x40, 0x50);
  702. /// ```
  703. pub fn new(prog: &'a [u8], data_offset: usize, data_end_offset: usize) -> EbpfVmFixedMbuff<'a> {
  704. let parent = EbpfVmMbuff::new(prog);
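// The internal buffer must be able to hold an 8-byte pointer written at the larger of the two
// offsets, hence max(data_offset, data_end_offset) + 8 below.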
  705. let get_buff_len = | x: usize, y: usize | if x >= y { x + 8 } else { y + 8 };
  706. let buffer = vec![0u8; get_buff_len(data_offset, data_end_offset)];
  707. let mbuff = MetaBuff {
  708. data_offset: data_offset,
  709. data_end_offset: data_end_offset,
  710. buffer: buffer,
  711. };
  712. EbpfVmFixedMbuff {
  713. parent: parent,
  714. mbuff: mbuff,
  715. }
  716. }
  717. /// Load a new eBPF program into the virtual machine instance.
  718. ///
  719. /// At the same time, load new offsets for storing pointers to start and end of packet data in
  720. /// the internal metadata buffer.
  721. ///
  722. /// # Panics
  723. ///
  724. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  725. ///
  726. /// # Examples
  727. ///
  728. /// ```
  729. /// let prog1 = &[
  730. /// 0xb7, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  731. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  732. /// ];
  733. /// let prog2 = &[
  734. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  735. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  736. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  737. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
/// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 1 instruction
  739. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  740. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  741. /// ];
  742. ///
  743. /// let mem = &mut [
  744. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27,
  745. /// ];
  746. ///
  747. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog1, 0, 0);
  748. /// vm.set_prog(prog2, 0x40, 0x50);
  749. ///
  750. /// let res = vm.prog_exec(mem);
  751. /// assert_eq!(res, 0x27);
  752. /// ```
  753. pub fn set_prog(&mut self, prog: &'a [u8], data_offset: usize, data_end_offset: usize) {
  754. let get_buff_len = | x: usize, y: usize | if x >= y { x + 8 } else { y + 8 };
  755. let buffer = vec![0u8; get_buff_len(data_offset, data_end_offset)];
  756. self.mbuff.buffer = buffer;
  757. self.mbuff.data_offset = data_offset;
  758. self.mbuff.data_end_offset = data_end_offset;
  759. self.parent.set_prog(prog)
  760. }
  761. /// Register a built-in or user-defined helper function in order to use it later from within
  762. /// the eBPF program. The helper is registered into a hashmap, so the `key` can be any `u32`.
  763. ///
  764. /// If using JIT-compiled eBPF programs, be sure to register all helpers before compiling the
  765. /// program. You should be able to change registered helpers after compiling, but not to add
  766. /// new ones (i.e. with new keys).
  767. ///
  768. /// # Examples
  769. ///
  770. /// ```
  771. /// use rbpf::helpers;
  772. ///
  773. /// // This program was compiled with clang, from a C program containing the following single
  774. /// // instruction: `return bpf_trace_printk("foo %c %c %c\n", 10, 1, 2, 3);`
  775. /// let prog = &[
  776. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  777. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  778. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  779. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  780. /// 0x2d, 0x12, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 6 instructions
  781. /// 0x71, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r1
  782. /// 0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r2, 0
  783. /// 0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r3, 0
  784. /// 0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r4, 0
  785. /// 0xb7, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r5, 0
  786. /// 0x85, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // call helper with key 1
  787. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  788. /// ];
  789. ///
  790. /// let mem = &mut [
  791. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x09,
  792. /// ];
  793. ///
  794. /// // Instantiate a VM.
  795. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog, 0x40, 0x50);
  796. ///
  797. /// // Register a helper. This helper will store the result of the square root of r1 into r0.
  798. /// vm.register_helper(1, helpers::sqrti);
  799. ///
  800. /// let res = vm.prog_exec(mem);
  801. /// assert_eq!(res, 3);
  802. /// ```
  803. pub fn register_helper(&mut self, key: u32, function: fn (u64, u64, u64, u64, u64) -> u64) {
  804. self.parent.register_helper(key, function);
  805. }
  806. /// Execute the program loaded, with the given packet data.
  807. ///
/// If the program is intended to be compatible with the Linux kernel, it is expected to load the
/// addresses of the beginning and of the end of the memory area used for packet data from some
/// metadata buffer, which in the case of this VM is handled internally. The offsets at which
/// the addresses should be placed should have been set when the VM was created.
  812. ///
  813. /// # Panics
  814. ///
  815. /// This function is currently expected to panic if it encounters any error during the program
  816. /// execution, such as out of bounds accesses or division by zero attempts. This may be changed
  817. /// in the future (we could raise errors instead).
  818. ///
  819. /// # Examples
  820. ///
  821. /// ```
  822. /// let prog = &[
  823. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  824. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  825. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  826. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
/// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 1 instruction
  828. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  829. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  830. /// ];
  831. /// let mem = &mut [
  832. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  833. /// ];
  834. ///
  835. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  836. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog, 0x40, 0x50);
  837. ///
  838. /// // Provide only a reference to the packet data. We do not manage the metadata buffer.
  839. /// let res = vm.prog_exec(mem);
  840. /// assert_eq!(res, 0xdd);
  841. /// ```
  842. pub fn prog_exec(&mut self, mem: &'a mut [u8]) -> u64 {
  843. let l = self.mbuff.buffer.len();
  844. // Can this ever happen? Probably not, should be ensured at mbuff creation.
  845. if self.mbuff.data_offset + 8 > l || self.mbuff.data_end_offset + 8 > l {
  846. panic!("Error: buffer too small ({:?}), cannot use data_offset {:?} and data_end_offset {:?}",
  847. l, self.mbuff.data_offset, self.mbuff.data_end_offset);
  848. }
  849. LittleEndian::write_u64(&mut self.mbuff.buffer[(self.mbuff.data_offset) .. ], mem.as_ptr() as u64);
  850. LittleEndian::write_u64(&mut self.mbuff.buffer[(self.mbuff.data_end_offset) .. ], mem.as_ptr() as u64 + mem.len() as u64);
  851. self.parent.prog_exec(mem, &self.mbuff.buffer)
  852. }
  853. /// JIT-compile the loaded program. No argument required for this.
  854. ///
  855. /// If using helper functions, be sure to register them into the VM before calling this
  856. /// function.
  857. ///
  858. /// # Panics
  859. ///
  860. /// This function panics if an error occurs during JIT-compiling, such as the occurrence of an
  861. /// unknown eBPF operation code.
  862. ///
  863. /// # Examples
  864. ///
  865. /// ```
  866. /// let prog = &[
  867. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  868. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  869. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  870. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
/// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 1 instruction
  872. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  873. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  874. /// ];
  875. ///
  876. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  877. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog, 0x40, 0x50);
  878. ///
  879. /// vm.jit_compile();
  880. /// ```
  881. #[cfg(not(windows))]
  882. pub fn jit_compile(&mut self) {
  883. self.parent.jit = jit::compile(self.parent.prog, &self.parent.helpers, true, true);
  884. }
  885. /// Execute the previously JIT-compiled program, with the given packet data, in a manner very
  886. /// similar to `prog_exec()`.
  887. ///
/// If the program is intended to be compatible with the Linux kernel, it is expected to load the
/// addresses of the beginning and of the end of the memory area used for packet data from some
/// metadata buffer, which in the case of this VM is handled internally. The offsets at which
/// the addresses should be placed should have been set when the VM was created.
  892. ///
  893. /// # Panics
  894. ///
  895. /// This function panics if an error occurs during the execution of the program.
  896. ///
  897. /// # Safety
  898. ///
/// **WARNING:** JIT-compiled assembly code is not safe. In particular, there is no runtime
/// check for memory accesses; so if the eBPF program attempts erroneous accesses, this may end
/// very badly (the program may segfault). It may be wise to check that the program works with
/// the interpreter before running the JIT-compiled version of it.
///
/// For this reason the function should be called from within an `unsafe` block.
  905. ///
  906. /// # Examples
  907. ///
  908. /// ```
  909. /// let prog = &[
  910. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  911. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  912. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  913. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
/// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 1 instruction
  915. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  916. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  917. /// ];
  918. /// let mem = &mut [
  919. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  920. /// ];
  921. ///
  922. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  923. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog, 0x40, 0x50);
  924. ///
  925. /// # #[cfg(not(windows))]
  926. /// vm.jit_compile();
  927. ///
  928. /// // Provide only a reference to the packet data. We do not manage the metadata buffer.
  929. /// # #[cfg(not(windows))]
  930. /// unsafe {
  931. /// let res = vm.prog_exec_jit(mem);
  932. /// assert_eq!(res, 0xdd);
  933. /// }
  934. /// ```
  935. // This struct redefines the `prog_exec_jit()` function, in order to pass the offsets
  936. // associated with the fixed mbuff.
  937. pub unsafe fn prog_exec_jit(&mut self, mem: &'a mut [u8]) -> u64 {
  938. // If packet data is empty, do not send the address of an empty slice; send a null pointer
  939. // as first argument instead, as this is uBPF's behavior (empty packet should not happen
  940. // in the kernel; anyway the verifier would prevent the use of uninitialized registers).
  941. // See `mul_loop` test.
  942. let mem_ptr = match mem.len() {
  943. 0 => std::ptr::null_mut(),
  944. _ => mem.as_ptr() as *mut u8
  945. };
  946. (self.parent.jit)(self.mbuff.buffer.as_ptr() as *mut u8, self.mbuff.buffer.len(),
  947. mem_ptr, mem.len(), self.mbuff.data_offset, self.mbuff.data_end_offset)
  948. }
  949. }
/// A virtual machine to run eBPF programs. This kind of VM is used for programs expecting to work
  951. /// directly on the memory area representing packet data.
  952. ///
  953. /// # Examples
  954. ///
  955. /// ```
  956. /// let prog = &[
  957. /// 0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1[0x04], r1
  958. /// 0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x22
  959. /// 0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
  960. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  961. /// ];
  962. /// let mem = &mut [
  963. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  964. /// ];
  965. ///
  966. /// // Instantiate a VM.
  967. /// let vm = rbpf::EbpfVmRaw::new(prog);
  968. ///
  969. /// // Provide only a reference to the packet data.
  970. /// let res = vm.prog_exec(mem);
  971. /// assert_eq!(res, 0x22cc);
  972. /// ```
  973. pub struct EbpfVmRaw<'a> {
  974. parent: EbpfVmMbuff<'a>,
  975. }
  976. impl<'a> EbpfVmRaw<'a> {
  977. /// Create a new virtual machine instance, and load an eBPF program into that instance.
  978. /// When attempting to load the program, it passes through a simple verifier.
  979. ///
  980. /// # Panics
  981. ///
  982. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  983. ///
  984. /// # Examples
  985. ///
  986. /// ```
  987. /// let prog = &[
  988. /// 0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1[0x04], r1
  989. /// 0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x22
  990. /// 0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
  991. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  992. /// ];
  993. ///
  994. /// // Instantiate a VM.
  995. /// let vm = rbpf::EbpfVmRaw::new(prog);
  996. /// ```
  997. pub fn new(prog: &'a [u8]) -> EbpfVmRaw<'a> {
  998. let parent = EbpfVmMbuff::new(prog);
  999. EbpfVmRaw {
  1000. parent: parent,
  1001. }
  1002. }
  1003. /// Load a new eBPF program into the virtual machine instance.
  1004. ///
  1005. /// # Panics
  1006. ///
  1007. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  1008. ///
  1009. /// # Examples
  1010. ///
  1011. /// ```
  1012. /// let prog1 = &[
  1013. /// 0xb7, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  1014. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  1015. /// ];
  1016. /// let prog2 = &[
  1017. /// 0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1[0x04], r1
  1018. /// 0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x22
  1019. /// 0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
  1020. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  1021. /// ];
  1022. ///
  1023. /// let mem = &mut [
  1024. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27,
  1025. /// ];
  1026. ///
  1027. /// let mut vm = rbpf::EbpfVmRaw::new(prog1);
  1028. /// vm.set_prog(prog2);
  1029. ///
  1030. /// let res = vm.prog_exec(mem);
  1031. /// assert_eq!(res, 0x22cc);
  1032. /// ```
  1033. pub fn set_prog(&mut self, prog: &'a [u8]) {
  1034. self.parent.set_prog(prog)
  1035. }
  1036. /// Register a built-in or user-defined helper function in order to use it later from within
  1037. /// the eBPF program. The helper is registered into a hashmap, so the `key` can be any `u32`.
  1038. ///
  1039. /// If using JIT-compiled eBPF programs, be sure to register all helpers before compiling the
  1040. /// program. You should be able to change registered helpers after compiling, but not to add
  1041. /// new ones (i.e. with new keys).
  1042. ///
  1043. /// # Examples
  1044. ///
  1045. /// ```
  1046. /// use rbpf::helpers;
  1047. ///
  1048. /// let prog = &[
  1049. /// 0x79, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxdw r1, r1[0x00]
  1050. /// 0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r2, 0
  1051. /// 0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r3, 0
  1052. /// 0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r4, 0
  1053. /// 0xb7, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r5, 0
  1054. /// 0x85, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // call helper with key 1
  1055. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  1056. /// ];
  1057. ///
  1058. /// let mem = &mut [
  1059. /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
  1060. /// ];
  1061. ///
  1062. /// // Instantiate a VM.
  1063. /// let mut vm = rbpf::EbpfVmRaw::new(prog);
  1064. ///
  1065. /// // Register a helper. This helper will store the result of the square root of r1 into r0.
  1066. /// vm.register_helper(1, helpers::sqrti);
  1067. ///
  1068. /// let res = vm.prog_exec(mem);
  1069. /// assert_eq!(res, 0x10000000);
  1070. /// ```
  1071. pub fn register_helper(&mut self, key: u32, function: fn (u64, u64, u64, u64, u64) -> u64) {
  1072. self.parent.register_helper(key, function);
  1073. }
  1074. /// Execute the program loaded, with the given packet data.
  1075. ///
  1076. /// # Panics
  1077. ///
  1078. /// This function is currently expected to panic if it encounters any error during the program
  1079. /// execution, such as out of bounds accesses or division by zero attempts. This may be changed
  1080. /// in the future (we could raise errors instead).
  1081. ///
  1082. /// # Examples
  1083. ///
  1084. /// ```
  1085. /// let prog = &[
  1086. /// 0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1[0x04], r1
  1087. /// 0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x22
  1088. /// 0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
  1089. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  1090. /// ];
  1091. ///
  1092. /// let mem = &mut [
  1093. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27
  1094. /// ];
  1095. ///
  1096. /// let mut vm = rbpf::EbpfVmRaw::new(prog);
  1097. ///
  1098. /// let res = vm.prog_exec(mem);
  1099. /// assert_eq!(res, 0x22cc);
  1100. /// ```
  1101. pub fn prog_exec(&self, mem: &'a mut [u8]) -> u64 {
  1102. self.parent.prog_exec(mem, &[])
  1103. }
    /// JIT-compile the loaded program. No argument required for this.
    ///
    /// If using helper functions, be sure to register them into the VM before calling this
    /// function.
    ///
    /// # Panics
    ///
    /// This function panics if an error occurs during JIT-compiling, such as the occurrence of an
    /// unknown eBPF operation code.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = &[
    ///     0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1, r1[0x04]
    ///     0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x2200
    ///     0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmRaw::new(prog);
    ///
    /// vm.jit_compile();
    /// ```
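    ///
    /// When helper functions are used, register them first and compile afterwards. A minimal
    /// sketch of that ordering, reusing the built-in `helpers::sqrti`:
    ///
    /// ```
    /// use rbpf::helpers;
    ///
    /// let prog = &[
    ///     0x79, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxdw r1, r1[0x00]
    ///     0x85, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // call helper with key 1
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmRaw::new(prog);
    ///
    /// // The helper must be known to the VM before the call instruction is JIT-compiled.
    /// vm.register_helper(1, helpers::sqrti);
    /// vm.jit_compile();
    /// ```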
    #[cfg(not(windows))]
    pub fn jit_compile(&mut self) {
        self.parent.jit = jit::compile(self.parent.prog, &self.parent.helpers, false, false);
    }

    /// Execute the previously JIT-compiled program, with the given packet data, in a manner very
    /// similar to `prog_exec()`.
    ///
    /// # Panics
    ///
    /// This function panics if an error occurs during the execution of the program.
    ///
    /// # Safety
    ///
    /// **WARNING:** JIT-compiled assembly code is not safe: in particular, there is no runtime
    /// check for memory accesses, so if the eBPF program performs erroneous accesses, this may
    /// end very badly (the program may segfault). It may be wise to check that the program works
    /// with the interpreter before running the JIT-compiled version of it.
    ///
    /// For this reason the function should be called from within an `unsafe` block.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = &[
    ///     0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1, r1[0x04]
    ///     0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x2200
    ///     0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mem = &mut [
    ///     0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmRaw::new(prog);
    ///
    /// # #[cfg(not(windows))]
    /// vm.jit_compile();
    ///
    /// # #[cfg(not(windows))]
    /// unsafe {
    ///     let res = vm.prog_exec_jit(mem);
    ///     assert_eq!(res, 0x22cc);
    /// }
    /// ```
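    ///
    /// One way to follow the advice from the Safety section is to run the program through the
    /// interpreter first and compare the two results, as in this sketch (two identical buffers
    /// are used so that each run gets its own packet data):
    ///
    /// ```
    /// let prog = &[
    ///     0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1, r1[0x04]
    ///     0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mem_int = &mut [ 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27 ];
    /// let mem_jit = &mut [ 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27 ];
    ///
    /// let mut vm = rbpf::EbpfVmRaw::new(prog);
    ///
    /// // Check the program with the interpreter before trusting the JIT-compiled version.
    /// let expected = vm.prog_exec(mem_int);
    ///
    /// # #[cfg(not(windows))]
    /// vm.jit_compile();
    ///
    /// # #[cfg(not(windows))]
    /// unsafe {
    ///     assert_eq!(vm.prog_exec_jit(mem_jit), expected);
    /// }
    /// ```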
    pub unsafe fn prog_exec_jit(&self, mem: &'a mut [u8]) -> u64 {
        let mut mbuff = vec![];
        self.parent.prog_exec_jit(mem, &mut mbuff)
    }
}

/// A virtual machine to run eBPF programs. This kind of VM is used for programs that do not work
/// with any memory area: no metadata buffer and no packet data.
///
/// # Examples
///
/// ```
/// let prog = &[
///     0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
///     0xb7, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // mov r1, 1
///     0xb7, 0x02, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // mov r2, 2
///     0xb7, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, // mov r3, 3
///     0xb7, 0x04, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, // mov r4, 4
///     0xb7, 0x05, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // mov r5, 5
///     0xb7, 0x06, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, // mov r6, 6
///     0xb7, 0x07, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, // mov r7, 7
///     0xb7, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, // mov r8, 8
///     0x4f, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // or r0, r5
///     0x47, 0x00, 0x00, 0x00, 0xa0, 0x00, 0x00, 0x00, // or r0, 0xa0
///     0x57, 0x00, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, // and r0, 0xa3
///     0xb7, 0x09, 0x00, 0x00, 0x91, 0x00, 0x00, 0x00, // mov r9, 0x91
///     0x5f, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // and r0, r9
///     0x67, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, // lsh r0, 32
///     0x67, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, // lsh r0, 22
///     0x6f, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // lsh r0, r8
///     0x77, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, // rsh r0, 32
///     0x77, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, // rsh r0, 19
///     0x7f, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // rsh r0, r7
///     0xa7, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, // xor r0, 0x03
///     0xaf, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // xor r0, r2
///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
/// ];
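///
/// // Hand-worked trace (reading aid only): the or/and steps leave r0 = 0x81, the shifts
/// // (left by 32 + 22 + r8 = 62 bits, then right by 32 + 19 + r7 = 58 bits) keep only the
/// // low bit of 0x81 and bring it down to 0x10, and the two xors (0x03, then r2 = 2) give 0x11.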
///
/// // Instantiate a VM.
/// let vm = rbpf::EbpfVmNoData::new(prog);
///
/// // This kind of VM needs no packet data: `prog_exec()` takes no argument.
/// let res = vm.prog_exec();
/// assert_eq!(res, 0x11);
/// ```
pub struct EbpfVmNoData<'a> {
    parent: EbpfVmRaw<'a>,
}

impl<'a> EbpfVmNoData<'a> {
    /// Create a new virtual machine instance, and load an eBPF program into that instance.
    /// When attempting to load the program, it passes through a simple verifier.
    ///
    /// # Panics
    ///
    /// The simple verifier may panic if it finds errors in the eBPF program at load time.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, // be16 r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// // Instantiate a VM.
    /// let vm = rbpf::EbpfVmNoData::new(prog);
    /// ```
    pub fn new(prog: &'a [u8]) -> EbpfVmNoData<'a> {
        let parent = EbpfVmRaw::new(prog);
        EbpfVmNoData {
            parent: parent,
        }
    }

    /// Load a new eBPF program into the virtual machine instance.
    ///
    /// # Panics
    ///
    /// The simple verifier may panic if it finds errors in the eBPF program at load time.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog1 = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    /// let prog2 = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, // be16 r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmNoData::new(prog1);
    ///
    /// let res = vm.prog_exec();
    /// assert_eq!(res, 0x2211);
    ///
    /// vm.set_prog(prog2);
    ///
    /// let res = vm.prog_exec();
    /// assert_eq!(res, 0x1122);
    /// ```
    pub fn set_prog(&mut self, prog: &'a [u8]) {
        self.parent.set_prog(prog)
    }

    /// Register a built-in or user-defined helper function in order to use it later from within
    /// the eBPF program. The helper is registered into a hashmap, so the `key` can be any `u32`.
    ///
    /// If using JIT-compiled eBPF programs, be sure to register all helpers before compiling the
    /// program. You should be able to change registered helpers after compiling, but not to add
    /// new ones (i.e. with new keys).
    ///
    /// # Examples
    ///
    /// ```
    /// use rbpf::helpers;
    ///
    /// let prog = &[
    ///     0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // mov r1, 0x01000000
    ///     0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r2, 0
    ///     0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r3, 0
    ///     0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r4, 0
    ///     0xb7, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r5, 0
    ///     0x85, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // call helper with key 1
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmNoData::new(prog);
    ///
    /// // Register a helper. This helper will store the result of the square root of r1 into r0.
    /// vm.register_helper(1, helpers::sqrti);
    ///
    /// let res = vm.prog_exec();
    /// assert_eq!(res, 0x1000);
    /// ```
    pub fn register_helper(&mut self, key: u32, function: fn (u64, u64, u64, u64, u64) -> u64) {
        self.parent.register_helper(key, function);
    }

    /// JIT-compile the loaded program. No argument required for this.
    ///
    /// If using helper functions, be sure to register them into the VM before calling this
    /// function.
    ///
    /// # Panics
    ///
    /// This function panics if an error occurs during JIT-compiling, such as the occurrence of an
    /// unknown eBPF operation code.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, // be16 r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmNoData::new(prog);
    ///
    /// vm.jit_compile();
    /// ```
    #[cfg(not(windows))]
    pub fn jit_compile(&mut self) {
        self.parent.jit_compile();
    }

    /// Execute the loaded program, without providing pointers to any memory area whatsoever.
    ///
    /// # Panics
    ///
    /// This function is currently expected to panic if it encounters any error during the program
    /// execution, such as memory accesses or attempted divisions by zero. This may be changed in
    /// the future (we could raise errors instead).
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, // be16 r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let vm = rbpf::EbpfVmNoData::new(prog);
    ///
    /// // For this kind of VM, the `prog_exec()` function needs no argument.
    /// let res = vm.prog_exec();
    /// assert_eq!(res, 0x1122);
    /// ```
    pub fn prog_exec(&self) -> u64 {
        self.parent.prog_exec(&mut [])
    }

    /// Execute the previously JIT-compiled program, without providing pointers to any memory area
    /// whatsoever, in a manner very similar to `prog_exec()`.
    ///
    /// # Panics
    ///
    /// This function panics if an error occurs during the execution of the program.
    ///
    /// # Safety
    ///
    /// **WARNING:** JIT-compiled assembly code is not safe: in particular, there is no runtime
    /// check for memory accesses, so if the eBPF program performs erroneous accesses, this may
    /// end very badly (the program may segfault). It may be wise to check that the program works
    /// with the interpreter before running the JIT-compiled version of it.
    ///
    /// For this reason the function should be called from within an `unsafe` block.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, // be16 r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmNoData::new(prog);
    ///
    /// # #[cfg(not(windows))]
    /// vm.jit_compile();
    ///
    /// # #[cfg(not(windows))]
    /// unsafe {
    ///     let res = vm.prog_exec_jit();
    ///     assert_eq!(res, 0x1122);
    /// }
    /// ```
    pub unsafe fn prog_exec_jit(&self) -> u64 {
        self.parent.prog_exec_jit(&mut [])
    }
}