  1. // Derived from uBPF <https://github.com/iovisor/ubpf>
  2. // Copyright 2015 Big Switch Networks, Inc
  3. // (uBPF: VM architecture, parts of the interpreter, originally in C)
  4. // Copyright 2016 6WIND S.A. <quentin.monnet@6wind.com>
  5. // (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for helpers)
  6. //
  7. // Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
  8. // the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
  9. // copied, modified, or distributed except according to those terms.
  10. //! Virtual machine and JIT compiler for eBPF programs.
  11. #![doc(html_logo_url = "https://raw.githubusercontent.com/qmonnet/rbpf/master/misc/rbpf.png",
  12. html_favicon_url = "https://raw.githubusercontent.com/qmonnet/rbpf/master/misc/rbpf.ico")]
  13. #![warn(missing_docs)]
  14. // There are unused mut warnings due to unsafe code.
  15. #![allow(unused_mut)]
  16. // Allows old-style clippy
  17. #![allow(renamed_and_removed_lints)]
  18. #![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names, single_match, cast_lossless, doc_markdown, match_same_arms, unreadable_literal))]
  19. extern crate byteorder;
  20. extern crate combine;
  21. extern crate time;
  22. use std::u32;
  23. use std::collections::HashMap;
  24. use byteorder::{ByteOrder, LittleEndian};
  25. pub mod assembler;
  26. pub mod disassembler;
  27. pub mod ebpf;
  28. pub mod helpers;
  29. pub mod insn_builder;
  30. mod asm_parser;
  31. #[cfg(not(windows))]
  32. mod jit;
  33. mod verifier;
  34. // A metadata buffer with two offset indications. It can be used in one kind of eBPF VM to simulate
  35. // the use of a metadata buffer each time the program is executed, without the user having to
  36. // actually handle it. The offsets are used to tell the VM where in the buffer the pointers to
  37. // packet data start and end should be stored each time the program is run on a new packet.
  38. struct MetaBuff {
  39. data_offset: usize,
  40. data_end_offset: usize,
  41. buffer: Vec<u8>,
  42. }
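// A minimal sketch of how such a buffer is filled, assuming data_offset = 0x00 and
// data_end_offset = 0x08 (this mirrors what EbpfVmFixedMbuff::prog_exec() does further below):
//
//     let mut mb = MetaBuff { data_offset: 0x00, data_end_offset: 0x08, buffer: vec![0u8; 16] };
//     let mem = [0u8; 64]; // stand-in for packet data
//     LittleEndian::write_u64(&mut mb.buffer[mb.data_offset..], mem.as_ptr() as u64);
//     LittleEndian::write_u64(&mut mb.buffer[mb.data_end_offset..], mem.as_ptr() as u64 + mem.len() as u64);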
  43. /// A virtual machine to run eBPF programs. This kind of VM is used for programs expecting to work
  44. /// on a metadata buffer containing pointers to packet data.
  45. ///
  46. /// # Examples
  47. ///
  48. /// ```
  49. /// let prog = &[
  50. /// 0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff at offset 8 into R1.
  51. /// 0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldhx r1[2], r0
  52. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  53. /// ];
  54. /// let mem = &mut [
  55. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  56. /// ];
  57. ///
  58. /// // Just for the example we create our metadata buffer from scratch, and we store the pointers
  59. /// // to packet data start and end in it.
  60. /// let mut mbuff = [0u8; 32];
  61. /// unsafe {
  62. /// let mut data = mbuff.as_ptr().offset(8) as *mut u64;
  63. /// let mut data_end = mbuff.as_ptr().offset(24) as *mut u64;
  64. /// *data = mem.as_ptr() as u64;
  65. /// *data_end = mem.as_ptr() as u64 + mem.len() as u64;
  66. /// }
  67. ///
  68. /// // Instantiate a VM.
  69. /// let mut vm = rbpf::EbpfVmMbuff::new(prog);
  70. ///
  71. /// // Provide both a reference to the packet data, and to the metadata buffer.
  72. /// let res = vm.prog_exec(mem, &mut mbuff);
  73. /// assert_eq!(res, 0x2211);
  74. /// ```
  75. pub struct EbpfVmMbuff<'a> {
  76. prog: &'a [u8],
  77. jit: (unsafe fn (*mut u8, usize, *mut u8, usize, usize, usize) -> u64),
  78. helpers: HashMap<u32, ebpf::Helper>,
  79. }
  80. impl<'a> EbpfVmMbuff<'a> {
  81. /// Create a new virtual machine instance, and load an eBPF program into that instance.
  82. /// When attempting to load the program, it passes through a simple verifier.
  83. ///
  84. /// # Panics
  85. ///
  86. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  87. ///
  88. /// # Examples
  89. ///
  90. /// ```
  91. /// let prog = &[
  92. /// 0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into R1.
  93. /// 0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldhx r1[2], r0
  94. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  95. /// ];
  96. ///
  97. /// // Instantiate a VM.
  98. /// let mut vm = rbpf::EbpfVmMbuff::new(prog);
  99. /// ```
  100. pub fn new(prog: &'a [u8]) -> EbpfVmMbuff<'a> {
  101. verifier::check(prog);
  102. fn no_jit(_mbuff: *mut u8, _len: usize, _mem: *mut u8, _mem_len: usize,
  103. _nodata_offset: usize, _nodata_end_offset: usize) -> u64 {
  104. panic!("Error: program has not been JIT-compiled");
  105. }
  106. EbpfVmMbuff {
  107. prog: prog,
  108. jit: no_jit,
  109. helpers: HashMap::new(),
  110. }
  111. }
  112. /// Load a new eBPF program into the virtual machine instance.
  113. ///
  114. /// # Panics
  115. ///
  116. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  117. ///
  118. /// # Examples
  119. ///
  120. /// ```
  121. /// let prog1 = &[
  122. /// 0xb7, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  123. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  124. /// ];
  125. /// let prog2 = &[
  126. /// 0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into R1.
  127. /// 0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldhx r1[2], r0
  128. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  129. /// ];
  130. ///
  131. /// // Instantiate a VM.
  132. /// let mut vm = rbpf::EbpfVmMbuff::new(prog1);
  133. /// vm.set_prog(prog2);
  134. /// ```
  135. pub fn set_prog(&mut self, prog: &'a [u8]) {
  136. verifier::check(prog);
  137. self.prog = prog;
  138. }
  139. /// Register a built-in or user-defined helper function in order to use it later from within
  140. /// the eBPF program. The helper is registered into a hashmap, so the `key` can be any `u32`.
  141. ///
  142. /// If using JIT-compiled eBPF programs, be sure to register all helpers before compiling the
  143. /// program. You should be able to change registered helpers after compiling, but not to add
  144. /// new ones (i.e. with new keys).
  145. ///
  146. /// # Examples
  147. ///
  148. /// ```
  149. /// use rbpf::helpers;
  150. ///
  151. /// // This program was compiled with clang, from a C program containing the following single
  152. /// // instruction: `return bpf_trace_printk("foo %c %c %c\n", 10, 1, 2, 3);`
  153. /// let prog = &[
  154. /// 0x18, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load 0 as u64 into r1 (That would be
  155. /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // replaced by tc by the address of
  156. /// // the format string, in the .map
  157. /// // section of the ELF file).
  158. /// 0xb7, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, // mov r2, 10
  159. /// 0xb7, 0x03, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // mov r3, 1
  160. /// 0xb7, 0x04, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // mov r4, 2
  161. /// 0xb7, 0x05, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, // mov r5, 3
  162. /// 0x85, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, // call helper with key 6
  163. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  164. /// ];
  165. ///
  166. /// // Instantiate a VM.
  167. /// let mut vm = rbpf::EbpfVmMbuff::new(prog);
  168. ///
  169. /// // Register a helper.
  170. /// // On running the program this helper will print the content of registers r3, r4 and r5 to
  171. /// // standard output.
  172. /// vm.register_helper(6, helpers::bpf_trace_printf);
  173. /// ```
  174. pub fn register_helper(&mut self, key: u32, function: fn (u64, u64, u64, u64, u64) -> u64) {
  175. self.helpers.insert(key, function);
  176. }
  177. /// Execute the program loaded, with the given packet data and metadata buffer.
  178. ///
  179. /// If the program is written to be compatible with the Linux kernel, it is expected to load the
  180. /// addresses of the beginning and of the end of the memory area used for packet data from the
  181. /// metadata buffer, at given offsets. It is up to the user to ensure that these
  182. /// pointers are correctly stored in the buffer.
  183. ///
  184. /// # Panics
  185. ///
  186. /// This function is currently expected to panic if it encounters any error during the program
  187. /// execution, such as out of bounds accesses or division by zero attempts. This may be changed
  188. /// in the future (we could raise errors instead).
  189. ///
  190. /// # Examples
  191. ///
  192. /// ```
  193. /// let prog = &[
  194. /// 0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into R1.
  195. /// 0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldhx r1[2], r0
  196. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  197. /// ];
  198. /// let mem = &mut [
  199. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  200. /// ];
  201. ///
  202. /// // Just for the example we create our metadata buffer from scratch, and we store the
  203. /// // pointers to packet data start and end in it.
  204. /// let mut mbuff = [0u8; 32];
  205. /// unsafe {
  206. /// let mut data = mbuff.as_ptr().offset(8) as *mut u64;
  207. /// let mut data_end = mbuff.as_ptr().offset(24) as *mut u64;
  208. /// *data = mem.as_ptr() as u64;
  209. /// *data_end = mem.as_ptr() as u64 + mem.len() as u64;
  210. /// }
  211. ///
  212. /// // Instantiate a VM.
  213. /// let mut vm = rbpf::EbpfVmMbuff::new(prog);
  214. ///
  215. /// // Provide both a reference to the packet data, and to the metadata buffer.
  216. /// let res = vm.prog_exec(mem, &mut mbuff);
  217. /// assert_eq!(res, 0x2211);
  218. /// ```
  219. #[allow(unknown_lints)]
  220. #[allow(cyclomatic_complexity)]
  221. pub fn prog_exec(&self, mem: &[u8], mbuff: &[u8]) -> u64 {
  222. const U32MAX: u64 = u32::MAX as u64;
  223. let stack = vec![0u8;ebpf::STACK_SIZE];
  224. // R1 points to beginning of memory area, R10 to stack
  225. let mut reg: [u64;11] = [
  226. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, stack.as_ptr() as u64 + stack.len() as u64
  227. ];
  228. if !mbuff.is_empty() {
  229. reg[1] = mbuff.as_ptr() as u64;
  230. }
  231. else if !mem.is_empty() {
  232. reg[1] = mem.as_ptr() as u64;
  233. }
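// The two closures below capture mbuff, mem and the stack, so that every load and store in the
// interpreter loop can be checked against the three memory areas the program may legally access.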
  234. let check_mem_load = | addr: u64, len: usize, insn_ptr: usize | {
  235. EbpfVmMbuff::check_mem(addr, len, "load", insn_ptr, mbuff, mem, &stack);
  236. };
  237. let check_mem_store = | addr: u64, len: usize, insn_ptr: usize | {
  238. EbpfVmMbuff::check_mem(addr, len, "store", insn_ptr, mbuff, mem, &stack);
  239. };
  240. // Loop on instructions
  241. let mut insn_ptr:usize = 0;
  242. while insn_ptr * ebpf::INSN_SIZE < self.prog.len() {
  243. let insn = ebpf::get_insn(self.prog, insn_ptr);
  244. insn_ptr += 1;
  245. let _dst = insn.dst as usize;
  246. let _src = insn.src as usize;
  247. match insn.opc {
  248. // BPF_LD class
  249. // LD_ABS_* and LD_IND_* are supposed to load a pointer to the packet data from the metadata
  250. // buffer. Since this pointer is constant, and since we already know it (mem), do not
  251. // bother re-fetching it; just use mem directly.
  252. ebpf::LD_ABS_B => reg[0] = unsafe {
  253. let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u8;
  254. check_mem_load(x as u64, 1, insn_ptr);
  255. *x as u64
  256. },
  257. ebpf::LD_ABS_H => reg[0] = unsafe {
  258. let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u16;
  259. check_mem_load(x as u64, 2, insn_ptr);
  260. *x as u64
  261. },
  262. ebpf::LD_ABS_W => reg[0] = unsafe {
  263. let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u32;
  264. check_mem_load(x as u64, 4, insn_ptr);
  265. *x as u64
  266. },
  267. ebpf::LD_ABS_DW => reg[0] = unsafe {
  268. let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u64;
  269. check_mem_load(x as u64, 8, insn_ptr);
  270. *x as u64
  271. },
  272. ebpf::LD_IND_B => reg[0] = unsafe {
  273. let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u8;
  274. check_mem_load(x as u64, 1, insn_ptr);
  275. *x as u64
  276. },
  277. ebpf::LD_IND_H => reg[0] = unsafe {
  278. let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u16;
  279. check_mem_load(x as u64, 2, insn_ptr);
  280. *x as u64
  281. },
  282. ebpf::LD_IND_W => reg[0] = unsafe {
  283. let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u32;
  284. check_mem_load(x as u64, 4, insn_ptr);
  285. *x as u64
  286. },
  287. ebpf::LD_IND_DW => reg[0] = unsafe {
  288. let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u64;
  289. check_mem_load(x as u64, 8, insn_ptr);
  290. *x as u64
  291. },
  292. ebpf::LD_DW_IMM => {
  293. let next_insn = ebpf::get_insn(self.prog, insn_ptr);
  294. insn_ptr += 1;
  295. reg[_dst] = ((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32);
  296. },
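// For illustration, `lddw r1, 0x1122334455667788` would be encoded as two consecutive 8-byte
// slots, and the code above reassembles the value from their (little-endian) imm fields:
//     0x18, 0x01, 0x00, 0x00, 0x88, 0x77, 0x66, 0x55, // first slot:  low  32 bits in imm
//     0x00, 0x00, 0x00, 0x00, 0x44, 0x33, 0x22, 0x11, // second slot: high 32 bits in imm
//     => reg[1] = 0x55667788 + (0x11223344 << 32) = 0x1122334455667788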
  297. // BPF_LDX class
  298. ebpf::LD_B_REG => reg[_dst] = unsafe {
  299. #[allow(cast_ptr_alignment)]
  300. let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u8;
  301. check_mem_load(x as u64, 1, insn_ptr);
  302. *x as u64
  303. },
  304. ebpf::LD_H_REG => reg[_dst] = unsafe {
  305. #[allow(cast_ptr_alignment)]
  306. let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u16;
  307. check_mem_load(x as u64, 2, insn_ptr);
  308. *x as u64
  309. },
  310. ebpf::LD_W_REG => reg[_dst] = unsafe {
  311. #[allow(cast_ptr_alignment)]
  312. let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u32;
  313. check_mem_load(x as u64, 4, insn_ptr);
  314. *x as u64
  315. },
  316. ebpf::LD_DW_REG => reg[_dst] = unsafe {
  317. #[allow(cast_ptr_alignment)]
  318. let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u64;
  319. check_mem_load(x as u64, 8, insn_ptr);
  320. *x as u64
  321. },
  322. // BPF_ST class
  323. ebpf::ST_B_IMM => unsafe {
  324. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
  325. check_mem_store(x as u64, 1, insn_ptr);
  326. *x = insn.imm as u8;
  327. },
  328. ebpf::ST_H_IMM => unsafe {
  329. #[allow(cast_ptr_alignment)]
  330. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
  331. check_mem_store(x as u64, 2, insn_ptr);
  332. *x = insn.imm as u16;
  333. },
  334. ebpf::ST_W_IMM => unsafe {
  335. #[allow(cast_ptr_alignment)]
  336. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
  337. check_mem_store(x as u64, 4, insn_ptr);
  338. *x = insn.imm as u32;
  339. },
  340. ebpf::ST_DW_IMM => unsafe {
  341. #[allow(cast_ptr_alignment)]
  342. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
  343. check_mem_store(x as u64, 8, insn_ptr);
  344. *x = insn.imm as u64;
  345. },
  346. // BPF_STX class
  347. ebpf::ST_B_REG => unsafe {
  348. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
  349. check_mem_store(x as u64, 1, insn_ptr);
  350. *x = reg[_src] as u8;
  351. },
  352. ebpf::ST_H_REG => unsafe {
  353. #[allow(cast_ptr_alignment)]
  354. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
  355. check_mem_store(x as u64, 2, insn_ptr);
  356. *x = reg[_src] as u16;
  357. },
  358. ebpf::ST_W_REG => unsafe {
  359. #[allow(cast_ptr_alignment)]
  360. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
  361. check_mem_store(x as u64, 4, insn_ptr);
  362. *x = reg[_src] as u32;
  363. },
  364. ebpf::ST_DW_REG => unsafe {
  365. #[allow(cast_ptr_alignment)]
  366. let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
  367. check_mem_store(x as u64, 8, insn_ptr);
  368. *x = reg[_src] as u64;
  369. },
  370. ebpf::ST_W_XADD => unimplemented!(),
  371. ebpf::ST_DW_XADD => unimplemented!(),
  372. // BPF_ALU class
  373. // TODO: Check how overflow works in the kernel. Should we apply `&= U32MAX` to all source
  374. // register values before we do the operation?
  375. // Cf. `((0x11 << 32) - (0x1 << 32)) as u32` vs `((0x11 << 32) as u32) - ((0x1 << 32) as u32)`
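// For instance, with reg[_dst] = 0x7fff_ffff and imm = 1, ADD32_IMM below yields
// (0x7fff_ffff_i32).wrapping_add(1) as u64 = 0xffff_ffff_8000_0000 (sign-extended), whereas
// the kernel's ALU32 operations, which zero the upper 32 bits, would yield 0x8000_0000.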
  376. ebpf::ADD32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_add(insn.imm) as u64, //((reg[_dst] & U32MAX) + insn.imm as u64) & U32MAX,
  377. ebpf::ADD32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_add(reg[_src] as i32) as u64, //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
  378. ebpf::SUB32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm) as u64,
  379. ebpf::SUB32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
  380. ebpf::MUL32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm) as u64,
  381. ebpf::MUL32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
  382. ebpf::DIV32_IMM => reg[_dst] = (reg[_dst] as u32 / insn.imm as u32) as u64,
  383. ebpf::DIV32_REG => {
  384. if reg[_src] == 0 {
  385. panic!("Error: division by 0");
  386. }
  387. reg[_dst] = (reg[_dst] as u32 / reg[_src] as u32) as u64;
  388. },
  389. ebpf::OR32_IMM => reg[_dst] = (reg[_dst] as u32 | insn.imm as u32) as u64,
  390. ebpf::OR32_REG => reg[_dst] = (reg[_dst] as u32 | reg[_src] as u32) as u64,
  391. ebpf::AND32_IMM => reg[_dst] = (reg[_dst] as u32 & insn.imm as u32) as u64,
  392. ebpf::AND32_REG => reg[_dst] = (reg[_dst] as u32 & reg[_src] as u32) as u64,
  393. ebpf::LSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm as u32) as u64,
  394. ebpf::LSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
  395. ebpf::RSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm as u32) as u64,
  396. ebpf::RSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
  397. ebpf::NEG32 => { reg[_dst] = (reg[_dst] as i32).wrapping_neg() as u64; reg[_dst] &= U32MAX; },
  398. ebpf::MOD32_IMM => reg[_dst] = (reg[_dst] as u32 % insn.imm as u32) as u64,
  399. ebpf::MOD32_REG => {
  400. if reg[_src] == 0 {
  401. panic!("Error: division by 0");
  402. }
  403. reg[_dst] = (reg[_dst] as u32 % reg[_src] as u32) as u64;
  404. },
  405. ebpf::XOR32_IMM => reg[_dst] = (reg[_dst] as u32 ^ insn.imm as u32) as u64,
  406. ebpf::XOR32_REG => reg[_dst] = (reg[_dst] as u32 ^ reg[_src] as u32) as u64,
  407. ebpf::MOV32_IMM => reg[_dst] = insn.imm as u64,
  408. ebpf::MOV32_REG => reg[_dst] = (reg[_src] as u32) as u64,
  409. ebpf::ARSH32_IMM => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm as u32) as u64; reg[_dst] &= U32MAX; },
  410. ebpf::ARSH32_REG => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
  411. ebpf::LE => {
  412. reg[_dst] = match insn.imm {
  413. 16 => (reg[_dst] as u16).to_le() as u64,
  414. 32 => (reg[_dst] as u32).to_le() as u64,
  415. 64 => reg[_dst].to_le(),
  416. _ => unreachable!(),
  417. };
  418. },
  419. ebpf::BE => {
  420. reg[_dst] = match insn.imm {
  421. 16 => (reg[_dst] as u16).to_be() as u64,
  422. 32 => (reg[_dst] as u32).to_be() as u64,
  423. 64 => reg[_dst].to_be(),
  424. _ => unreachable!(),
  425. };
  426. },
  427. // BPF_ALU64 class
  428. ebpf::ADD64_IMM => reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
  429. ebpf::ADD64_REG => reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
  430. ebpf::SUB64_IMM => reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
  431. ebpf::SUB64_REG => reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
  432. ebpf::MUL64_IMM => reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
  433. ebpf::MUL64_REG => reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
  434. ebpf::DIV64_IMM => reg[_dst] /= insn.imm as u64,
  435. ebpf::DIV64_REG => {
  436. if reg[_src] == 0 {
  437. panic!("Error: division by 0");
  438. }
  439. reg[_dst] /= reg[_src];
  440. },
  441. ebpf::OR64_IMM => reg[_dst] |= insn.imm as u64,
  442. ebpf::OR64_REG => reg[_dst] |= reg[_src],
  443. ebpf::AND64_IMM => reg[_dst] &= insn.imm as u64,
  444. ebpf::AND64_REG => reg[_dst] &= reg[_src],
  445. ebpf::LSH64_IMM => reg[_dst] <<= insn.imm as u64,
  446. ebpf::LSH64_REG => reg[_dst] <<= reg[_src],
  447. ebpf::RSH64_IMM => reg[_dst] >>= insn.imm as u64,
  448. ebpf::RSH64_REG => reg[_dst] >>= reg[_src],
  449. ebpf::NEG64 => reg[_dst] = -(reg[_dst] as i64) as u64,
  450. ebpf::MOD64_IMM => reg[_dst] %= insn.imm as u64,
  451. ebpf::MOD64_REG => {
  452. if reg[_src] == 0 {
  453. panic!("Error: division by 0");
  454. }
  455. reg[_dst] %= reg[_src];
  456. },
  457. ebpf::XOR64_IMM => reg[_dst] ^= insn.imm as u64,
  458. ebpf::XOR64_REG => reg[_dst] ^= reg[_src],
  459. ebpf::MOV64_IMM => reg[_dst] = insn.imm as u64,
  460. ebpf::MOV64_REG => reg[_dst] = reg[_src],
  461. ebpf::ARSH64_IMM => reg[_dst] = (reg[_dst] as i64 >> insn.imm) as u64,
  462. ebpf::ARSH64_REG => reg[_dst] = (reg[_dst] as i64 >> reg[_src]) as u64,
  463. // BPF_JMP class
  464. // TODO: check this actually works as expected for signed / unsigned ops
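// Note that insn_ptr has already been incremented past the current instruction, so the branch
// offsets below are relative to the instruction that follows the jump.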
  465. ebpf::JA => insn_ptr = (insn_ptr as i16 + insn.off) as usize,
  466. ebpf::JEQ_IMM => if reg[_dst] == insn.imm as u64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  467. ebpf::JEQ_REG => if reg[_dst] == reg[_src] { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  468. ebpf::JGT_IMM => if reg[_dst] > insn.imm as u64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  469. ebpf::JGT_REG => if reg[_dst] > reg[_src] { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  470. ebpf::JGE_IMM => if reg[_dst] >= insn.imm as u64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  471. ebpf::JGE_REG => if reg[_dst] >= reg[_src] { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  472. ebpf::JLT_IMM => if reg[_dst] < insn.imm as u64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  473. ebpf::JLT_REG => if reg[_dst] < reg[_src] { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  474. ebpf::JLE_IMM => if reg[_dst] <= insn.imm as u64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  475. ebpf::JLE_REG => if reg[_dst] <= reg[_src] { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  476. ebpf::JSET_IMM => if reg[_dst] & insn.imm as u64 != 0 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  477. ebpf::JSET_REG => if reg[_dst] & reg[_src] != 0 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  478. ebpf::JNE_IMM => if reg[_dst] != insn.imm as u64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  479. ebpf::JNE_REG => if reg[_dst] != reg[_src] { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  480. ebpf::JSGT_IMM => if reg[_dst] as i64 > insn.imm as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  481. ebpf::JSGT_REG => if reg[_dst] as i64 > reg[_src] as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  482. ebpf::JSGE_IMM => if reg[_dst] as i64 >= insn.imm as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  483. ebpf::JSGE_REG => if reg[_dst] as i64 >= reg[_src] as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  484. ebpf::JSLT_IMM => if (reg[_dst] as i64) < insn.imm as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  485. ebpf::JSLT_REG => if (reg[_dst] as i64) < reg[_src] as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  486. ebpf::JSLE_IMM => if (reg[_dst] as i64) <= insn.imm as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  487. ebpf::JSLE_REG => if (reg[_dst] as i64) <= reg[_src] as i64 { insn_ptr = (insn_ptr as i16 + insn.off) as usize; },
  488. // Do not delegate the check to the verifier, since registered functions can be
  489. // changed after the program has been verified.
  490. ebpf::CALL => if let Some(function) = self.helpers.get(&(insn.imm as u32)) {
  491. reg[0] = function(reg[1], reg[2], reg[3], reg[4], reg[5]);
  492. } else {
  493. panic!("Error: unknown helper function (id: {:#x})", insn.imm as u32);
  494. },
  495. ebpf::TAIL_CALL => unimplemented!(),
  496. ebpf::EXIT => return reg[0],
  497. _ => unreachable!()
  498. }
  499. }
  500. unreachable!()
  501. }
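// Bounds-check a memory access of `len` bytes at `addr`, performed by instruction `insn_ptr`:
// the access is allowed if it falls entirely within the metadata buffer, the packet data or the
// stack; otherwise the interpreter panics with a description of the three regions.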
  502. fn check_mem(addr: u64, len: usize, access_type: &str, insn_ptr: usize,
  503. mbuff: &[u8], mem: &[u8], stack: &[u8]) {
  504. if mbuff.as_ptr() as u64 <= addr && addr + len as u64 <= mbuff.as_ptr() as u64 + mbuff.len() as u64 {
  505. return
  506. }
  507. if mem.as_ptr() as u64 <= addr && addr + len as u64 <= mem.as_ptr() as u64 + mem.len() as u64 {
  508. return
  509. }
  510. if stack.as_ptr() as u64 <= addr && addr + len as u64 <= stack.as_ptr() as u64 + stack.len() as u64 {
  511. return
  512. }
  513. panic!(
  514. "Error: out of bounds memory {} (insn #{:?}), addr {:#x}, size {:?}\nmbuff: {:#x}/{:#x}, mem: {:#x}/{:#x}, stack: {:#x}/{:#x}",
  515. access_type, insn_ptr, addr, len,
  516. mbuff.as_ptr() as u64, mbuff.len(),
  517. mem.as_ptr() as u64, mem.len(),
  518. stack.as_ptr() as u64, stack.len()
  519. );
  520. }
  521. /// JIT-compile the loaded program. No argument required for this.
  522. ///
  523. /// If using helper functions, be sure to register them into the VM before calling this
  524. /// function.
  525. ///
  526. /// # Panics
  527. ///
  528. /// This function panics if an error occurs during JIT-compiling, such as the occurrence of an
  529. /// unknown eBPF operation code.
  530. ///
  531. /// # Examples
  532. ///
  533. /// ```
  534. /// let prog = &[
  535. /// 0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into R1.
  536. /// 0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldhx r1[2], r0
  537. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  538. /// ];
  539. ///
  540. /// // Instantiate a VM.
  541. /// let mut vm = rbpf::EbpfVmMbuff::new(prog);
  542. ///
  543. /// vm.jit_compile();
  544. /// ```
  545. #[cfg(not(windows))]
  546. pub fn jit_compile(&mut self) {
  547. self.jit = jit::compile(self.prog, &self.helpers, true, false);
  548. }
  549. /// Execute the previously JIT-compiled program, with the given packet data and metadata
  550. /// buffer, in a manner very similar to `prog_exec()`.
  551. ///
  552. /// If the program is written to be compatible with the Linux kernel, it is expected to load the
  553. /// addresses of the beginning and of the end of the memory area used for packet data from the
  554. /// metadata buffer, at given offsets. It is up to the user to ensure that these
  555. /// pointers are correctly stored in the buffer.
  556. ///
  557. /// # Panics
  558. ///
  559. /// This function panics if an error occurs during the execution of the program.
  560. ///
  561. /// # Safety
  562. ///
  563. /// **WARNING:** JIT-compiled assembly code is not safe. In particular, there is no runtime
  564. /// check for memory accesses; if the eBPF program attempts erroneous accesses, this may end
  565. /// very badly (the program may segfault). It may be wise to check that the program works with
  566. /// the interpreter before running the JIT-compiled version of it.
  567. ///
  568. /// For this reason the function should be called from within an `unsafe` block.
  569. ///
  570. /// # Examples
  571. ///
  572. /// ```
  573. /// let prog = &[
  574. /// 0x79, 0x11, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, // Load mem from mbuff into r1.
  575. /// 0x69, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // ldhx r1[2], r0
  576. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  577. /// ];
  578. /// let mem = &mut [
  579. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  580. /// ];
  581. ///
  582. /// // Just for the example we create our metadata buffer from scratch, and we store the
  583. /// // pointers to packet data start and end in it.
  584. /// let mut mbuff = [0u8; 32];
  585. /// unsafe {
  586. /// let mut data = mbuff.as_ptr().offset(8) as *mut u64;
  587. /// let mut data_end = mbuff.as_ptr().offset(24) as *mut u64;
  588. /// *data = mem.as_ptr() as u64;
  589. /// *data_end = mem.as_ptr() as u64 + mem.len() as u64;
  590. /// }
  591. ///
  592. /// // Instantiate a VM.
  593. /// let mut vm = rbpf::EbpfVmMbuff::new(prog);
  594. ///
  595. /// # #[cfg(not(windows))]
  596. /// vm.jit_compile();
  597. ///
  598. /// // Provide both a reference to the packet data, and to the metadata buffer.
  599. /// # #[cfg(not(windows))]
  600. /// unsafe {
  601. /// let res = vm.prog_exec_jit(mem, &mut mbuff);
  602. /// assert_eq!(res, 0x2211);
  603. /// }
  604. /// ```
  605. pub unsafe fn prog_exec_jit(&self, mem: &mut [u8], mbuff: &'a mut [u8]) -> u64 {
  606. // If packet data is empty, do not send the address of an empty slice; send a null pointer
  607. // as first argument instead, as this is uBPF's behavior (empty packet should not happen
  608. // in the kernel; anyway the verifier would prevent the use of uninitialized registers).
  609. // See `mul_loop` test.
  610. let mem_ptr = match mem.len() {
  611. 0 => std::ptr::null_mut(),
  612. _ => mem.as_ptr() as *mut u8
  613. };
  614. // The last two arguments are not used in this function. They would be used if there was a
  615. // need to indicate to the JIT at which offset in the mbuff mem_ptr and mem_ptr + mem.len()
  616. // should be stored; this is what happens with struct EbpfVmFixedMbuff.
  617. (self.jit)(mbuff.as_ptr() as *mut u8, mbuff.len(), mem_ptr, mem.len(), 0, 0)
  618. }
  619. }
  620. /// A virtual machine to run eBPF programs. This kind of VM is used for programs expecting to work
  621. /// on a metadata buffer containing pointers to packet data, but it handles the buffer internally,
  622. /// saving the user the effort of managing the metadata buffer manually.
  623. ///
  624. /// This struct implements a static internal buffer that is passed to the program. The user has to
  625. /// indicate the offset values at which the eBPF program expects to find the start and the end of
  626. /// packet data in the buffer. On calling the `prog_exec()` or `prog_exec_jit()` functions, the
  627. /// struct automatically updates the addresses in this static buffer, at the appointed offsets, for
  628. /// the start and the end of the packet data the program is called upon.
  629. ///
  630. /// # Examples
  631. ///
  632. /// This was compiled with clang from the following program, in C:
  633. ///
  634. /// ```c
  635. /// #include <linux/bpf.h>
  636. /// #include "path/to/linux/samples/bpf/bpf_helpers.h"
  637. ///
  638. /// SEC(".classifier")
  639. /// int classifier(struct __sk_buff *skb)
  640. /// {
  641. /// void *data = (void *)(long)skb->data;
  642. /// void *data_end = (void *)(long)skb->data_end;
  643. ///
  644. /// // Check that the packet data is long enough.
  645. /// if (data + 5 > data_end)
  646. /// return 0;
  647. ///
  648. /// return *((char *)data + 5);
  649. /// }
  650. /// ```
  651. ///
  652. /// Some small modifications were required to make it work; see the comments.
  653. ///
  654. /// ```
  655. /// let prog = &[
  656. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  657. /// // Here opcode 0x61 had to be replaced by 0x79 so as to load an 8-byte-long address.
  658. /// // Also, offset 0x4c had to be replaced with e.g. 0x40 so as to prevent the two pointers
  659. /// // from overlapping in the buffer.
  660. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load pointer to mem from r1[0x40] to r2
  661. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  662. /// // Here opcode 0x61 had to be replaced by 0x79 so as to load an 8-byte-long address.
  663. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load ptr to mem_end from r1[0x50] to r1
  664. /// 0x2d, 0x12, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 3 instructions
  665. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  666. /// 0x67, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, // r0 <<= 56
  667. /// 0xc7, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, // r0 >>= 56 (arsh), sign-extend the byte to u64
  668. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  669. /// ];
  670. /// let mem1 = &mut [
  671. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  672. /// ];
  673. /// let mem2 = &mut [
  674. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27
  675. /// ];
  676. ///
  677. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  678. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog, 0x40, 0x50);
  679. ///
  680. /// // Provide only a reference to the packet data. We do not manage the metadata buffer.
  681. /// let res = vm.prog_exec(mem1);
  682. /// assert_eq!(res, 0xffffffffffffffdd);
  683. ///
  684. /// let res = vm.prog_exec(mem2);
  685. /// assert_eq!(res, 0x27);
  686. /// ```
  687. pub struct EbpfVmFixedMbuff<'a> {
  688. parent: EbpfVmMbuff<'a>,
  689. mbuff: MetaBuff,
  690. }
  691. impl<'a> EbpfVmFixedMbuff<'a> {
  692. /// Create a new virtual machine instance, and load an eBPF program into that instance.
  693. /// When attempting to load the program, it passes through a simple verifier.
  694. ///
  695. /// # Panics
  696. ///
  697. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  698. ///
  699. /// # Examples
  700. ///
  701. /// ```
  702. /// let prog = &[
  703. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  704. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  705. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  706. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  707. /// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 1 instruction
  708. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  709. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  710. /// ];
  711. ///
  712. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  713. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog, 0x40, 0x50);
  714. /// ```
  715. pub fn new(prog: &'a [u8], data_offset: usize, data_end_offset: usize) -> EbpfVmFixedMbuff<'a> {
  716. let parent = EbpfVmMbuff::new(prog);
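// The buffer must be able to hold an 8-byte pointer at the larger of the two offsets, hence
// max(data_offset, data_end_offset) + 8 below.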
  717. let get_buff_len = | x: usize, y: usize | if x >= y { x + 8 } else { y + 8 };
  718. let buffer = vec![0u8; get_buff_len(data_offset, data_end_offset)];
  719. let mbuff = MetaBuff {
  720. data_offset: data_offset,
  721. data_end_offset: data_end_offset,
  722. buffer: buffer,
  723. };
  724. EbpfVmFixedMbuff {
  725. parent: parent,
  726. mbuff: mbuff,
  727. }
  728. }
  729. /// Load a new eBPF program into the virtual machine instance.
  730. ///
  731. /// At the same time, load new offsets for storing pointers to start and end of packet data in
  732. /// the internal metadata buffer.
  733. ///
  734. /// # Panics
  735. ///
  736. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  737. ///
  738. /// # Examples
  739. ///
  740. /// ```
  741. /// let prog1 = &[
  742. /// 0xb7, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  743. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  744. /// ];
  745. /// let prog2 = &[
  746. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  747. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  748. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  749. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  750. /// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 1 instruction
  751. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  752. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  753. /// ];
  754. ///
  755. /// let mem = &mut [
  756. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27,
  757. /// ];
  758. ///
  759. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog1, 0, 0);
  760. /// vm.set_prog(prog2, 0x40, 0x50);
  761. ///
  762. /// let res = vm.prog_exec(mem);
  763. /// assert_eq!(res, 0x27);
  764. /// ```
  765. pub fn set_prog(&mut self, prog: &'a [u8], data_offset: usize, data_end_offset: usize) {
  766. let get_buff_len = | x: usize, y: usize | if x >= y { x + 8 } else { y + 8 };
  767. let buffer = vec![0u8; get_buff_len(data_offset, data_end_offset)];
  768. self.mbuff.buffer = buffer;
  769. self.mbuff.data_offset = data_offset;
  770. self.mbuff.data_end_offset = data_end_offset;
  771. self.parent.set_prog(prog)
  772. }
  773. /// Register a built-in or user-defined helper function in order to use it later from within
  774. /// the eBPF program. The helper is registered into a hashmap, so the `key` can be any `u32`.
  775. ///
  776. /// If using JIT-compiled eBPF programs, be sure to register all helpers before compiling the
  777. /// program. You should be able to change registered helpers after compiling, but not to add
  778. /// new ones (i.e. with new keys).
  779. ///
  780. /// # Examples
  781. ///
  782. /// ```
  783. /// use rbpf::helpers;
  784. ///
  785. /// // This program loads the sixth byte of the packet data into r1 and calls the helper
  786. /// // registered with key 1 (in this example, a square root function).
  787. /// let prog = &[
  788. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  789. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  790. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  791. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  792. /// 0x2d, 0x12, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 6 instructions
  793. /// 0x71, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r1
  794. /// 0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r2, 0
  795. /// 0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r3, 0
  796. /// 0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r4, 0
  797. /// 0xb7, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r5, 0
  798. /// 0x85, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // call helper with key 1
  799. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  800. /// ];
  801. ///
  802. /// let mem = &mut [
  803. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x09,
  804. /// ];
  805. ///
  806. /// // Instantiate a VM.
  807. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog, 0x40, 0x50);
  808. ///
  809. /// // Register a helper. This helper will store the result of the square root of r1 into r0.
  810. /// vm.register_helper(1, helpers::sqrti);
  811. ///
  812. /// let res = vm.prog_exec(mem);
  813. /// assert_eq!(res, 3);
  814. /// ```
  815. pub fn register_helper(&mut self, key: u32, function: fn (u64, u64, u64, u64, u64) -> u64) {
  816. self.parent.register_helper(key, function);
  817. }
  818. /// Execute the program loaded, with the given packet data.
  819. ///
  820. /// If the program is written to be compatible with the Linux kernel, it is expected to load the
  821. /// addresses of the beginning and of the end of the memory area used for packet data from a
  822. /// metadata buffer, which this VM handles internally. The offsets at which these addresses
  823. /// are written should have been set at the creation of the VM.
  824. ///
  825. /// # Panics
  826. ///
  827. /// This function is currently expected to panic if it encounters any error during the program
  828. /// execution, such as out of bounds accesses or division by zero attempts. This may be changed
  829. /// in the future (we could raise errors instead).
  830. ///
  831. /// # Examples
  832. ///
  833. /// ```
  834. /// let prog = &[
  835. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  836. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  837. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  838. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  839. /// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 1 instruction
  840. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  841. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  842. /// ];
  843. /// let mem = &mut [
  844. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  845. /// ];
  846. ///
  847. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  848. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog, 0x40, 0x50);
  849. ///
  850. /// // Provide only a reference to the packet data. We do not manage the metadata buffer.
  851. /// let res = vm.prog_exec(mem);
  852. /// assert_eq!(res, 0xdd);
  853. /// ```
  854. pub fn prog_exec(&mut self, mem: &'a mut [u8]) -> u64 {
  855. let l = self.mbuff.buffer.len();
  856. // Can this ever happen? Probably not, should be ensured at mbuff creation.
  857. if self.mbuff.data_offset + 8 > l || self.mbuff.data_end_offset + 8 > l {
  858. panic!("Error: buffer too small ({:?}), cannot use data_offset {:?} and data_end_offset {:?}",
  859. l, self.mbuff.data_offset, self.mbuff.data_end_offset);
  860. }
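// Store the addresses of the start and the end of the packet data at the configured offsets
// of the internal buffer before handing both buffers to the parent interpreter.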
  861. LittleEndian::write_u64(&mut self.mbuff.buffer[(self.mbuff.data_offset) .. ], mem.as_ptr() as u64);
  862. LittleEndian::write_u64(&mut self.mbuff.buffer[(self.mbuff.data_end_offset) .. ], mem.as_ptr() as u64 + mem.len() as u64);
  863. self.parent.prog_exec(mem, &self.mbuff.buffer)
  864. }
  865. /// JIT-compile the loaded program. No argument required for this.
  866. ///
  867. /// If using helper functions, be sure to register them into the VM before calling this
  868. /// function.
  869. ///
  870. /// # Panics
  871. ///
  872. /// This function panics if an error occurs during JIT-compiling, such as the occurrence of an
  873. /// unknown eBPF operation code.
  874. ///
  875. /// # Examples
  876. ///
  877. /// ```
  878. /// let prog = &[
  879. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  880. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  881. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  882. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  883. /// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 1 instruction
  884. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  885. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  886. /// ];
  887. ///
  888. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  889. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog, 0x40, 0x50);
  890. ///
  891. /// vm.jit_compile();
  892. /// ```
  893. #[cfg(not(windows))]
  894. pub fn jit_compile(&mut self) {
  895. self.parent.jit = jit::compile(self.parent.prog, &self.parent.helpers, true, true);
  896. }
  897. /// Execute the previously JIT-compiled program, with the given packet data, in a manner very
  898. /// similar to `prog_exec()`.
  899. ///
  900. /// If the program is written to be compatible with the Linux kernel, it is expected to load the
  901. /// addresses of the beginning and of the end of the memory area used for packet data from a
  902. /// metadata buffer, which this VM handles internally. The offsets at which these addresses
  903. /// are written should have been set at the creation of the VM.
  904. ///
  905. /// # Panics
  906. ///
  907. /// This function panics if an error occurs during the execution of the program.
  908. ///
  909. /// # Safety
  910. ///
  911. /// **WARNING:** JIT-compiled assembly code is not safe. In particular, there is no runtime
  912. /// check for memory accesses; if the eBPF program attempts erroneous accesses, this may end
  913. /// very badly (the program may segfault). It may be wise to check that the program works with
  914. /// the interpreter before running the JIT-compiled version of it.
  915. ///
  916. /// For this reason the function should be called from within an `unsafe` block.
  917. ///
  918. /// # Examples
  919. ///
  920. /// ```
  921. /// let prog = &[
  922. /// 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  923. /// 0x79, 0x12, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem from r1[0x40] to r2
  924. /// 0x07, 0x02, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // add r2, 5
  925. /// 0x79, 0x11, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, // load mem_end from r1[0x50] to r1
  926. /// 0x2d, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // if r2 > r1 skip 1 instruction
  927. /// 0x71, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // load r2 (= *(mem + 5)) into r0
  928. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  929. /// ];
  930. /// let mem = &mut [
  931. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  932. /// ];
  933. ///
  934. /// // Instantiate a VM. Note that we provide the start and end offsets for mem pointers.
  935. /// let mut vm = rbpf::EbpfVmFixedMbuff::new(prog, 0x40, 0x50);
  936. ///
  937. /// # #[cfg(not(windows))]
  938. /// vm.jit_compile();
  939. ///
  940. /// // Provide only a reference to the packet data. We do not manage the metadata buffer.
  941. /// # #[cfg(not(windows))]
  942. /// unsafe {
  943. /// let res = vm.prog_exec_jit(mem);
  944. /// assert_eq!(res, 0xdd);
  945. /// }
  946. /// ```
  947. // This struct redefines the `prog_exec_jit()` function, in order to pass the offsets
  948. // associated with the fixed mbuff.
  949. pub unsafe fn prog_exec_jit(&mut self, mem: &'a mut [u8]) -> u64 {
  950. // If packet data is empty, do not send the address of an empty slice; send a null pointer
  951. // as first argument instead, as this is uBPF's behavior (empty packet should not happen
  952. // in the kernel; anyway the verifier would prevent the use of uninitialized registers).
  953. // See `mul_loop` test.
  954. let mem_ptr = match mem.len() {
  955. 0 => std::ptr::null_mut(),
  956. _ => mem.as_ptr() as *mut u8
  957. };
  958. (self.parent.jit)(self.mbuff.buffer.as_ptr() as *mut u8, self.mbuff.buffer.len(),
  959. mem_ptr, mem.len(), self.mbuff.data_offset, self.mbuff.data_end_offset)
  960. }
  961. }
  962. /// A virtual machine to run eBPF programs. This kind of VM is used for programs expecting to work
  963. /// directly on the memory area representing packet data.
  964. ///
  965. /// # Examples
  966. ///
  967. /// ```
  968. /// let prog = &[
  969. /// 0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1[0x04], r1
  970. /// 0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x2200
  971. /// 0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
  972. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  973. /// ];
  974. /// let mem = &mut [
  975. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
  976. /// ];
  977. ///
  978. /// // Instantiate a VM.
  979. /// let vm = rbpf::EbpfVmRaw::new(prog);
  980. ///
  981. /// // Provide only a reference to the packet data.
  982. /// let res = vm.prog_exec(mem);
  983. /// assert_eq!(res, 0x22cc);
  984. /// ```
  985. pub struct EbpfVmRaw<'a> {
  986. parent: EbpfVmMbuff<'a>,
  987. }
  988. impl<'a> EbpfVmRaw<'a> {
  989. /// Create a new virtual machine instance, and load an eBPF program into that instance.
  990. /// When attempting to load the program, it passes through a simple verifier.
  991. ///
  992. /// # Panics
  993. ///
  994. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  995. ///
  996. /// # Examples
  997. ///
  998. /// ```
  999. /// let prog = &[
  1000. /// 0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1[0x04], r1
  1001. /// 0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x2200
  1002. /// 0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
  1003. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  1004. /// ];
  1005. ///
  1006. /// // Instantiate a VM.
  1007. /// let vm = rbpf::EbpfVmRaw::new(prog);
  1008. /// ```
  1009. pub fn new(prog: &'a [u8]) -> EbpfVmRaw<'a> {
  1010. let parent = EbpfVmMbuff::new(prog);
  1011. EbpfVmRaw {
  1012. parent: parent,
  1013. }
  1014. }
  1015. /// Load a new eBPF program into the virtual machine instance.
  1016. ///
  1017. /// # Panics
  1018. ///
  1019. /// The simple verifier may panic if it finds errors in the eBPF program at load time.
  1020. ///
  1021. /// # Examples
  1022. ///
  1023. /// ```
  1024. /// let prog1 = &[
  1025. /// 0xb7, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
  1026. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  1027. /// ];
  1028. /// let prog2 = &[
  1029. /// 0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1[0x04], r1
  1030. /// 0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x2200
  1031. /// 0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
  1032. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  1033. /// ];
  1034. ///
  1035. /// let mem = &mut [
  1036. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27,
  1037. /// ];
  1038. ///
  1039. /// let mut vm = rbpf::EbpfVmRaw::new(prog1);
  1040. /// vm.set_prog(prog2);
  1041. ///
  1042. /// let res = vm.prog_exec(mem);
  1043. /// assert_eq!(res, 0x22cc);
  1044. /// ```
  1045. pub fn set_prog(&mut self, prog: &'a [u8]) {
  1046. self.parent.set_prog(prog)
  1047. }
  1048. /// Register a built-in or user-defined helper function in order to use it later from within
  1049. /// the eBPF program. The helper is registered into a hashmap, so the `key` can be any `u32`.
  1050. ///
  1051. /// If using JIT-compiled eBPF programs, be sure to register all helpers before compiling the
  1052. /// program. You should be able to change registered helpers after compiling, but not to add
  1053. /// new ones (i.e. with new keys).
  1054. ///
  1055. /// # Examples
  1056. ///
  1057. /// ```
  1058. /// use rbpf::helpers;
  1059. ///
  1060. /// let prog = &[
  1061. /// 0x79, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxdw r1, r1[0x00]
  1062. /// 0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r2, 0
  1063. /// 0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r3, 0
  1064. /// 0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r4, 0
  1065. /// 0xb7, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r5, 0
  1066. /// 0x85, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // call helper with key 1
  1067. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  1068. /// ];
  1069. ///
  1070. /// let mem = &mut [
  1071. /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
  1072. /// ];
  1073. ///
  1074. /// // Instantiate a VM.
  1075. /// let mut vm = rbpf::EbpfVmRaw::new(prog);
  1076. ///
  1077. /// // Register a helper. This helper will store the result of the square root of r1 into r0.
  1078. /// vm.register_helper(1, helpers::sqrti);
  1079. ///
  1080. /// let res = vm.prog_exec(mem);
  1081. /// assert_eq!(res, 0x10000000);
  1082. /// ```
  1083. pub fn register_helper(&mut self, key: u32, function: fn (u64, u64, u64, u64, u64) -> u64) {
  1084. self.parent.register_helper(key, function);
  1085. }
  1086. /// Execute the program loaded, with the given packet data.
  1087. ///
  1088. /// # Panics
  1089. ///
  1090. /// This function is currently expected to panic if it encounters any error during the program
  1091. /// execution, such as out of bounds accesses or division by zero attempts. This may be changed
  1092. /// in the future (we could raise errors instead).
  1093. ///
  1094. /// # Examples
  1095. ///
  1096. /// ```
  1097. /// let prog = &[
  1098. /// 0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1[0x04], r1
  1099. /// 0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x2200
  1100. /// 0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
  1101. /// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
  1102. /// ];
  1103. ///
  1104. /// let mem = &mut [
  1105. /// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27
  1106. /// ];
  1107. ///
  1108. /// let mut vm = rbpf::EbpfVmRaw::new(prog);
  1109. ///
  1110. /// let res = vm.prog_exec(mem);
  1111. /// assert_eq!(res, 0x22cc);
  1112. /// ```
    pub fn prog_exec(&self, mem: &'a mut [u8]) -> u64 {
        self.parent.prog_exec(mem, &[])
    }

    /// JIT-compile the loaded program. No argument is required for this.
    ///
    /// If using helper functions, be sure to register them into the VM before calling this
    /// function.
    ///
    /// # Panics
    ///
    /// This function panics if an error occurs during JIT-compiling, such as the occurrence of an
    /// unknown eBPF operation code.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = &[
    ///     0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1, r1[0x04]
    ///     0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x2200
    ///     0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmRaw::new(prog);
    ///
    /// vm.jit_compile();
    /// ```
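    ///
    /// When the program relies on helper functions, a reasonable pattern (sketched below, not
    /// taken from the original examples; it simply reuses `helpers::sqrti` under key 1) is to
    /// register every helper first and only then JIT-compile:
    ///
    /// ```
    /// use rbpf::helpers;
    ///
    /// let prog = &[
    ///     0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, // mov r1, 0x10000
    ///     0x85, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // call helper with key 1
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmRaw::new(prog);
    ///
    /// // Register the helper before compiling, so the call can be resolved during compilation.
    /// vm.register_helper(1, helpers::sqrti);
    /// vm.jit_compile();
    /// ```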
    #[cfg(not(windows))]
    pub fn jit_compile(&mut self) {
        self.parent.jit = jit::compile(self.parent.prog, &self.parent.helpers, false, false);
    }

    /// Execute the previously JIT-compiled program, with the given packet data, in a manner very
    /// similar to `prog_exec()`.
    ///
    /// # Panics
    ///
    /// This function panics if an error occurs during the execution of the program.
    ///
    /// # Safety
    ///
    /// **WARNING:** JIT-compiled assembly code is not safe. In particular, there is no runtime
    /// check for memory accesses, so if the eBPF program attempts erroneous accesses this may end
    /// very badly (the program may segfault). It may be wise to check that the program works with
    /// the interpreter before running the JIT-compiled version of it.
    ///
    /// For this reason the function should be called from within an `unsafe` block.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = &[
    ///     0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1, r1[0x04]
    ///     0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x2200
    ///     0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mem = &mut [
    ///     0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmRaw::new(prog);
    ///
    /// # #[cfg(not(windows))]
    /// vm.jit_compile();
    ///
    /// # #[cfg(not(windows))]
    /// unsafe {
    ///     let res = vm.prog_exec_jit(mem);
    ///     assert_eq!(res, 0x22cc);
    /// }
    /// ```
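    ///
    /// As suggested above, a cautious pattern is to run the interpreter first and to check that
    /// the JIT-compiled version returns the same result. The following is only a sketch, not one
    /// of the original examples:
    ///
    /// ```
    /// let prog = &[
    ///     0x71, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r1, r1[0x04]
    ///     0x07, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, // add r1, 0x2200
    ///     0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, r1
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// // Two identical buffers, one for each run.
    /// let mem_int = &mut [ 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27 ];
    /// let mem_jit = &mut [ 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0x27 ];
    ///
    /// let mut vm = rbpf::EbpfVmRaw::new(prog);
    ///
    /// // Check the program with the interpreter first...
    /// let res_int = vm.prog_exec(mem_int);
    /// assert_eq!(res_int, 0x22cc);
    ///
    /// // ...then make sure the JIT-compiled version agrees.
    /// # #[cfg(not(windows))]
    /// vm.jit_compile();
    /// # #[cfg(not(windows))]
    /// unsafe {
    ///     assert_eq!(vm.prog_exec_jit(mem_jit), res_int);
    /// }
    /// ```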
    pub unsafe fn prog_exec_jit(&self, mem: &'a mut [u8]) -> u64 {
        let mut mbuff = vec![];
        self.parent.prog_exec_jit(mem, &mut mbuff)
    }
}

/// A virtual machine to run eBPF programs. This kind of VM is used for programs that do not work
/// with any memory area: there is no metadata buffer and no packet data either.
///
/// # Examples
///
/// ```
/// let prog = &[
///     0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
///     0xb7, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // mov r1, 1
///     0xb7, 0x02, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // mov r2, 2
///     0xb7, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, // mov r3, 3
///     0xb7, 0x04, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, // mov r4, 4
///     0xb7, 0x05, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // mov r5, 5
///     0xb7, 0x06, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, // mov r6, 6
///     0xb7, 0x07, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, // mov r7, 7
///     0xb7, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, // mov r8, 8
///     0x4f, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // or r0, r5
///     0x47, 0x00, 0x00, 0x00, 0xa0, 0x00, 0x00, 0x00, // or r0, 0xa0
///     0x57, 0x00, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, // and r0, 0xa3
///     0xb7, 0x09, 0x00, 0x00, 0x91, 0x00, 0x00, 0x00, // mov r9, 0x91
///     0x5f, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // and r0, r9
///     0x67, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, // lsh r0, 32
///     0x67, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, // lsh r0, 22
///     0x6f, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // lsh r0, r8
///     0x77, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, // rsh r0, 32
///     0x77, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, // rsh r0, 19
///     0x7f, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // rsh r0, r7
///     0xa7, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, // xor r0, 0x03
///     0xaf, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // xor r0, r2
///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
/// ];
///
/// // Instantiate a VM.
/// let vm = rbpf::EbpfVmNoData::new(prog);
///
/// // Run the program; this kind of VM needs no packet data.
/// let res = vm.prog_exec();
/// assert_eq!(res, 0x11);
/// ```
pub struct EbpfVmNoData<'a> {
    parent: EbpfVmRaw<'a>,
}

impl<'a> EbpfVmNoData<'a> {
    /// Create a new virtual machine instance, and load an eBPF program into that instance.
    /// When attempting to load the program, it passes through a simple verifier.
    ///
    /// # Panics
    ///
    /// The simple verifier may panic if it finds errors in the eBPF program at load time.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, // be16 r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// // Instantiate a VM.
    /// let vm = rbpf::EbpfVmNoData::new(prog);
    /// ```
    pub fn new(prog: &'a [u8]) -> EbpfVmNoData<'a> {
        let parent = EbpfVmRaw::new(prog);
        EbpfVmNoData {
            parent: parent,
        }
    }

    /// Load a new eBPF program into the virtual machine instance.
    ///
    /// # Panics
    ///
    /// The simple verifier may panic if it finds errors in the eBPF program at load time.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog1 = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    /// let prog2 = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, // be16 r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmNoData::new(prog1);
    ///
    /// let res = vm.prog_exec();
    /// assert_eq!(res, 0x2211);
    ///
    /// vm.set_prog(prog2);
    ///
    /// let res = vm.prog_exec();
    /// assert_eq!(res, 0x1122);
    /// ```
    pub fn set_prog(&mut self, prog: &'a [u8]) {
        self.parent.set_prog(prog)
    }

    /// Register a built-in or user-defined helper function in order to use it later from within
    /// the eBPF program. The helper is registered into a hashmap, so the `key` can be any `u32`.
    ///
    /// If using JIT-compiled eBPF programs, be sure to register all helpers before compiling the
    /// program. You should be able to change registered helpers after compiling, but not to add
    /// new ones (i.e. with new keys).
    ///
    /// # Examples
    ///
    /// ```
    /// use rbpf::helpers;
    ///
    /// let prog = &[
    ///     0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // mov r1, 0x01000000
    ///     0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r2, 0
    ///     0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r3, 0
    ///     0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r4, 0
    ///     0xb7, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r5, 0
    ///     0x85, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // call helper with key 1
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmNoData::new(prog);
    ///
    /// // Register a helper. This helper will store the result of the square root of r1 into r0.
    /// vm.register_helper(1, helpers::sqrti);
    ///
    /// let res = vm.prog_exec();
    /// assert_eq!(res, 0x1000);
    /// ```
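    ///
    /// A user-defined function with the expected signature can be registered in the same way.
    /// The sketch below is not part of the original examples; the helper name `sum_first_two`
    /// and the key `2` are arbitrary choices made for illustration.
    ///
    /// ```
    /// // Hypothetical helper: return the sum of its first two arguments.
    /// fn sum_first_two(a: u64, b: u64, _c: u64, _d: u64, _e: u64) -> u64 {
    ///     a + b
    /// }
    ///
    /// let prog = &[
    ///     0xb7, 0x01, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, // mov r1, 5
    ///     0xb7, 0x02, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, // mov r2, 6
    ///     0x85, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // call helper with key 2
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmNoData::new(prog);
    /// vm.register_helper(2, sum_first_two);
    ///
    /// let res = vm.prog_exec();
    /// assert_eq!(res, 11);
    /// ```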
    pub fn register_helper(&mut self, key: u32, function: fn (u64, u64, u64, u64, u64) -> u64) {
        self.parent.register_helper(key, function);
    }

    /// JIT-compile the loaded program. No argument is required for this.
    ///
    /// If using helper functions, be sure to register them into the VM before calling this
    /// function.
    ///
    /// # Panics
    ///
    /// This function panics if an error occurs during JIT-compiling, such as the occurrence of an
    /// unknown eBPF operation code.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, // be16 r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmNoData::new(prog);
    ///
    /// vm.jit_compile();
    /// ```
    #[cfg(not(windows))]
    pub fn jit_compile(&mut self) {
        self.parent.jit_compile();
    }

    /// Execute the loaded program, without providing pointers to any memory area whatsoever.
    ///
    /// # Panics
    ///
    /// This function is currently expected to panic if it encounters any error during the program
    /// execution, such as attempted memory accesses or divisions by zero. This may be changed in
    /// the future (we could return errors instead).
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, // be16 r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let vm = rbpf::EbpfVmNoData::new(prog);
    ///
    /// // For this kind of VM, the `prog_exec()` function needs no argument.
    /// let res = vm.prog_exec();
    /// assert_eq!(res, 0x1122);
    /// ```
    pub fn prog_exec(&self) -> u64 {
        self.parent.prog_exec(&mut [])
    }

    /// Execute the previously JIT-compiled program, without providing pointers to any memory area
    /// whatsoever, in a manner very similar to `prog_exec()`.
    ///
    /// # Panics
    ///
    /// This function panics if an error occurs during the execution of the program.
    ///
    /// # Safety
    ///
    /// **WARNING:** JIT-compiled assembly code is not safe. In particular, there is no runtime
    /// check for memory accesses, so if the eBPF program attempts erroneous accesses this may end
    /// very badly (the program may segfault). It may be wise to check that the program works with
    /// the interpreter before running the JIT-compiled version of it.
    ///
    /// For this reason the function should be called from within an `unsafe` block.
    ///
    /// # Examples
    ///
    /// ```
    /// let prog = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, // be16 r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmNoData::new(prog);
    ///
    /// # #[cfg(not(windows))]
    /// vm.jit_compile();
    ///
    /// # #[cfg(not(windows))]
    /// unsafe {
    ///     let res = vm.prog_exec_jit();
    ///     assert_eq!(res, 0x1122);
    /// }
    /// ```
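    ///
    /// As with the other VM types, a cautious pattern is to run the interpreter first and to
    /// check that the JIT-compiled version returns the same result. The following is only a
    /// sketch, not one of the original examples:
    ///
    /// ```
    /// let prog = &[
    ///     0xb7, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00, 0x00, // mov r0, 0x2211
    ///     0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, // be16 r0
    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
    /// ];
    ///
    /// let mut vm = rbpf::EbpfVmNoData::new(prog);
    ///
    /// // Check the program with the interpreter first...
    /// let res_int = vm.prog_exec();
    /// assert_eq!(res_int, 0x1122);
    ///
    /// // ...then make sure the JIT-compiled version agrees.
    /// # #[cfg(not(windows))]
    /// vm.jit_compile();
    /// # #[cfg(not(windows))]
    /// unsafe {
    ///     assert_eq!(vm.prog_exec_jit(), res_int);
    /// }
    /// ```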
    pub unsafe fn prog_exec_jit(&self) -> u64 {
        self.parent.prog_exec_jit(&mut [])
    }
}