// interpreter.rs
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
// Derived from uBPF <https://github.com/iovisor/ubpf>
// Copyright 2015 Big Switch Networks, Inc
// (uBPF: VM architecture, parts of the interpreter, originally in C)
// Copyright 2016 6WIND S.A. <quentin.monnet@6wind.com>
// (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for helpers)

use crate::ebpf;
use crate::ebpf::MAX_CALL_DEPTH;
use crate::lib::*;
use crate::stack::{StackFrame, StackUsage};
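
/// Bounds-checks a `len`-byte memory access at `addr`, issued by the
/// instruction at index `insn_ptr`. The access is allowed if it lies entirely
/// within the metadata buffer (`mbuff`), the packet data (`mem`) or the VM
/// stack, or if `addr` itself is listed in `allowed_memory`; otherwise an
/// out-of-bounds error is returned.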
#[allow(clippy::too_many_arguments)]
fn check_mem(
    addr: u64,
    len: usize,
    access_type: &str,
    insn_ptr: usize,
    mbuff: &[u8],
    mem: &[u8],
    stack: &[u8],
    allowed_memory: &HashSet<u64>,
) -> Result<(), Error> {
    if let Some(addr_end) = addr.checked_add(len as u64) {
        if mbuff.as_ptr() as u64 <= addr && addr_end <= mbuff.as_ptr() as u64 + mbuff.len() as u64 {
            return Ok(());
        }
        if mem.as_ptr() as u64 <= addr && addr_end <= mem.as_ptr() as u64 + mem.len() as u64 {
            return Ok(());
        }
        if stack.as_ptr() as u64 <= addr && addr_end <= stack.as_ptr() as u64 + stack.len() as u64 {
            return Ok(());
        }
        if allowed_memory.contains(&addr) {
            return Ok(());
        }
    }

    Err(Error::new(ErrorKind::Other, format!(
        "Error: out of bounds memory {} (insn #{:?}), addr {:#x}, size {:?}\nmbuff: {:#x}/{:#x}, mem: {:#x}/{:#x}, stack: {:#x}/{:#x}",
        access_type, insn_ptr, addr, len,
        mbuff.as_ptr() as u64, mbuff.len(),
        mem.as_ptr() as u64, mem.len(),
        stack.as_ptr() as u64, stack.len()
    )))
}
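
/// Interprets the eBPF program `prog_`, with `mem` as packet data, `mbuff` as
/// the metadata buffer, `helpers` as the registered helper functions and
/// `allowed_memory` as extra addresses the program may access. On success,
/// returns the value left in register r0 when the program exits.
///
/// A minimal usage sketch (illustrative only and not compiled: how a
/// `StackUsage` is obtained for a program is elided here, and the exact
/// surrounding setup may differ in this crate):
///
/// ```ignore
/// // "mov32 r0, 0; exit" -- a program that simply returns 0.
/// let prog = &[
///     0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/// ];
/// let stack_usage = /* StackUsage computed for `prog` (elided) */;
/// let helpers = HashMap::new();
/// let allowed_memory = HashSet::new();
/// let ret = execute_program(Some(prog), Some(&stack_usage), &[], &[], &helpers, &allowed_memory).unwrap();
/// assert_eq!(ret, 0);
/// ```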
pub fn execute_program(
    prog_: Option<&[u8]>,
    stack_usage: Option<&StackUsage>,
    mem: &[u8],
    mbuff: &[u8],
    helpers: &HashMap<u32, ebpf::Helper>,
    allowed_memory: &HashSet<u64>,
) -> Result<u64, Error> {
    const U32MAX: u64 = u32::MAX as u64;
    const SHIFT_MASK_64: u64 = 0x3f;

    let (prog, stack_usage) = match prog_ {
        Some(prog) => (prog, stack_usage.unwrap()),
        None => Err(Error::new(
            ErrorKind::Other,
            "Error: No program set, call prog_set() to load one",
        ))?,
    };
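
    // The VM stack, plus one saved frame (r6-r9, return address, stack usage)
    // per nested eBPF-to-eBPF call, up to MAX_CALL_DEPTH.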
    let stack = vec![0u8; ebpf::STACK_SIZE];
    let mut stacks = [StackFrame::new(); MAX_CALL_DEPTH];
    let mut stack_frame_idx = 0;

    // R1 points to the beginning of the memory area, R10 to the top of the
    // stack (the stack grows downwards).
    let mut reg: [u64; 11] = [
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        stack.as_ptr() as u64 + stack.len() as u64,
    ];
    if !mbuff.is_empty() {
        reg[1] = mbuff.as_ptr() as u64;
    } else if !mem.is_empty() {
        reg[1] = mem.as_ptr() as u64;
    }

    let check_mem_load = |addr: u64, len: usize, insn_ptr: usize| {
        check_mem(
            addr,
            len,
            "load",
            insn_ptr,
            mbuff,
            mem,
            &stack,
            allowed_memory,
        )
    };
    let check_mem_store = |addr: u64, len: usize, insn_ptr: usize| {
        check_mem(
            addr,
            len,
            "store",
            insn_ptr,
            mbuff,
            mem,
            &stack,
            allowed_memory,
        )
    };

    // Loop on instructions
    let mut insn_ptr: usize = 0;
    while insn_ptr * ebpf::INSN_SIZE < prog.len() {
        let insn = ebpf::get_insn(prog, insn_ptr);
        if stack_frame_idx < MAX_CALL_DEPTH {
            if let Some(usage) = stack_usage.stack_usage_for_local_func(insn_ptr) {
                stacks[stack_frame_idx].set_stack_usage(usage);
            }
        }
        insn_ptr += 1;
        let _dst = insn.dst as usize;
        let _src = insn.src as usize;
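
        // Jump offsets are relative to the instruction following the jump;
        // `insn_ptr` has already been advanced past the current instruction,
        // so a plain addition is enough here.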
        let mut do_jump = || {
            insn_ptr = (insn_ptr as i16 + insn.off) as usize;
        };
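
        // Zero-extend the 32-bit immediate to 64 bits (used by the unsigned
        // comparisons below, where `imm` must not be sign-extended).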
        macro_rules! unsigned_u64 {
            ($imm:expr) => {
                ($imm as u32) as u64
            };
        }

        #[rustfmt::skip]
        #[allow(clippy::let_unit_value)] // assign, to avoid #[rustfmt::skip] on an expression
        let _ = match insn.opc {
            // BPF_LD class
            // LD_ABS_* and LD_IND_* are supposed to load a pointer to data from the
            // metadata buffer. Since this pointer is constant, and since we already
            // know it (mem), do not bother re-fetching it, just use mem directly.
            ebpf::LD_ABS_B => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u8;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_ABS_H => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u16;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_ABS_W => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u32;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_ABS_DW => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u64;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned()
            },
            ebpf::LD_IND_B => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u8;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_IND_H => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u16;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_IND_W => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u32;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_IND_DW => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u64;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned()
            },
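
            // LD_DW_IMM spans two instruction slots: the low 32 bits come from this
            // instruction's immediate, the high 32 bits from the next one's.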
            ebpf::LD_DW_IMM => {
                let next_insn = ebpf::get_insn(prog, insn_ptr);
                insn_ptr += 1;
                reg[_dst] = ((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32);
            },

            // BPF_LDX class
            ebpf::LD_B_REG => reg[_dst] = unsafe {
                let x = (reg[_src] as *const u8).wrapping_offset(insn.off as isize);
                check_mem_load(x as u64, 1, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_H_REG => reg[_dst] = unsafe {
                let x = (reg[_src] as *const u8).wrapping_offset(insn.off as isize) as *const u16;
                check_mem_load(x as u64, 2, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_W_REG => reg[_dst] = unsafe {
                let x = (reg[_src] as *const u8).wrapping_offset(insn.off as isize) as *const u32;
                check_mem_load(x as u64, 4, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_DW_REG => reg[_dst] = unsafe {
                let x = (reg[_src] as *const u8).wrapping_offset(insn.off as isize) as *const u64;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned()
            },

            // BPF_ST class
            ebpf::ST_B_IMM => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u8;
                check_mem_store(x as u64, 1, insn_ptr)?;
                x.write_unaligned(insn.imm as u8);
            },
            ebpf::ST_H_IMM => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u16;
                check_mem_store(x as u64, 2, insn_ptr)?;
                x.write_unaligned(insn.imm as u16);
            },
            ebpf::ST_W_IMM => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u32;
                check_mem_store(x as u64, 4, insn_ptr)?;
                x.write_unaligned(insn.imm as u32);
            },
            ebpf::ST_DW_IMM => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u64;
                check_mem_store(x as u64, 8, insn_ptr)?;
                x.write_unaligned(insn.imm as u64);
            },

            // BPF_STX class
            ebpf::ST_B_REG => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u8;
                check_mem_store(x as u64, 1, insn_ptr)?;
                x.write_unaligned(reg[_src] as u8);
            },
            ebpf::ST_H_REG => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u16;
                check_mem_store(x as u64, 2, insn_ptr)?;
                x.write_unaligned(reg[_src] as u16);
            },
            ebpf::ST_W_REG => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u32;
                check_mem_store(x as u64, 4, insn_ptr)?;
                x.write_unaligned(reg[_src] as u32);
            },
            ebpf::ST_DW_REG => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u64;
                check_mem_store(x as u64, 8, insn_ptr)?;
                x.write_unaligned(reg[_src]);
            },
            ebpf::ST_W_XADD => unimplemented!(),
            ebpf::ST_DW_XADD => unimplemented!(),

            // BPF_ALU class
            // TODO Check how overflow works in the kernel. Should we mask all src
            // register values with U32MAX before the operation?
            // Cf. ((0x11 << 32) - (0x1 << 32)) as u32 VS ((0x11 << 32) as u32 - (0x1 << 32) as u32)
            ebpf::ADD32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_add(insn.imm) as u64, //((reg[_dst] & U32MAX) + insn.imm as u64) & U32MAX,
            ebpf::ADD32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_add(reg[_src] as i32) as u64, //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
            ebpf::SUB32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm) as u64,
            ebpf::SUB32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
            ebpf::MUL32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm) as u64,
            ebpf::MUL32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
            ebpf::DIV32_IMM if insn.imm as u32 == 0 => reg[_dst] = 0,
            ebpf::DIV32_IMM => reg[_dst] = (reg[_dst] as u32 / insn.imm as u32) as u64,
            ebpf::DIV32_REG if reg[_src] as u32 == 0 => reg[_dst] = 0,
            ebpf::DIV32_REG => reg[_dst] = (reg[_dst] as u32 / reg[_src] as u32) as u64,
            ebpf::OR32_IMM => reg[_dst] = (reg[_dst] as u32 | insn.imm as u32) as u64,
            ebpf::OR32_REG => reg[_dst] = (reg[_dst] as u32 | reg[_src] as u32) as u64,
            ebpf::AND32_IMM => reg[_dst] = (reg[_dst] as u32 & insn.imm as u32) as u64,
            ebpf::AND32_REG => reg[_dst] = (reg[_dst] as u32 & reg[_src] as u32) as u64,
            // As for the 64-bit version, we should mask the number of bits to shift
            // with 0x1f, but .wrapping_shl()/.wrapping_shr() already take care of it for us.
            ebpf::LSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm as u32) as u64,
            ebpf::LSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
            ebpf::RSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm as u32) as u64,
            ebpf::RSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
            ebpf::NEG32 => { reg[_dst] = (reg[_dst] as i32).wrapping_neg() as u64; reg[_dst] &= U32MAX; },
            ebpf::MOD32_IMM if insn.imm as u32 == 0 => (),
            ebpf::MOD32_IMM => reg[_dst] = (reg[_dst] as u32 % insn.imm as u32) as u64,
            ebpf::MOD32_REG if reg[_src] as u32 == 0 => (),
            ebpf::MOD32_REG => reg[_dst] = (reg[_dst] as u32 % reg[_src] as u32) as u64,
            ebpf::XOR32_IMM => reg[_dst] = (reg[_dst] as u32 ^ insn.imm as u32) as u64,
            ebpf::XOR32_REG => reg[_dst] = (reg[_dst] as u32 ^ reg[_src] as u32) as u64,
            ebpf::MOV32_IMM => reg[_dst] = insn.imm as u32 as u64,
            ebpf::MOV32_REG => reg[_dst] = (reg[_src] as u32) as u64,
            // As for the 64-bit version, we should mask the number of bits to shift
            // with 0x1f, but .wrapping_shr() already takes care of it for us.
            ebpf::ARSH32_IMM => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm as u32) as u64; reg[_dst] &= U32MAX; },
            ebpf::ARSH32_REG => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
            ebpf::LE => {
                reg[_dst] = match insn.imm {
                    16 => (reg[_dst] as u16).to_le() as u64,
                    32 => (reg[_dst] as u32).to_le() as u64,
                    64 => reg[_dst].to_le(),
                    _ => unreachable!(),
                };
            },
            ebpf::BE => {
                reg[_dst] = match insn.imm {
                    16 => (reg[_dst] as u16).to_be() as u64,
                    32 => (reg[_dst] as u32).to_be() as u64,
                    64 => reg[_dst].to_be(),
                    _ => unreachable!(),
                };
            },

            // BPF_ALU64 class
            ebpf::ADD64_IMM => reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
            ebpf::ADD64_REG => reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
            ebpf::SUB64_IMM => reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
            ebpf::SUB64_REG => reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
            ebpf::MUL64_IMM => reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
            ebpf::MUL64_REG => reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
            ebpf::DIV64_IMM if insn.imm == 0 => reg[_dst] = 0,
            ebpf::DIV64_IMM => reg[_dst] /= insn.imm as u64,
            ebpf::DIV64_REG if reg[_src] == 0 => reg[_dst] = 0,
            ebpf::DIV64_REG => reg[_dst] /= reg[_src],
            ebpf::OR64_IMM => reg[_dst] |= insn.imm as u64,
            ebpf::OR64_REG => reg[_dst] |= reg[_src],
            ebpf::AND64_IMM => reg[_dst] &= insn.imm as u64,
            ebpf::AND64_REG => reg[_dst] &= reg[_src],
            ebpf::LSH64_IMM => reg[_dst] <<= insn.imm as u64 & SHIFT_MASK_64,
            ebpf::LSH64_REG => reg[_dst] <<= reg[_src] & SHIFT_MASK_64,
            ebpf::RSH64_IMM => reg[_dst] >>= insn.imm as u64 & SHIFT_MASK_64,
            ebpf::RSH64_REG => reg[_dst] >>= reg[_src] & SHIFT_MASK_64,
            ebpf::NEG64 => reg[_dst] = -(reg[_dst] as i64) as u64,
            ebpf::MOD64_IMM if insn.imm == 0 => (),
            ebpf::MOD64_IMM => reg[_dst] %= insn.imm as u64,
            ebpf::MOD64_REG if reg[_src] == 0 => (),
            ebpf::MOD64_REG => reg[_dst] %= reg[_src],
            ebpf::XOR64_IMM => reg[_dst] ^= insn.imm as u64,
            ebpf::XOR64_REG => reg[_dst] ^= reg[_src],
            ebpf::MOV64_IMM => reg[_dst] = insn.imm as u64,
            ebpf::MOV64_REG => reg[_dst] = reg[_src],
            ebpf::ARSH64_IMM => reg[_dst] = (reg[_dst] as i64 >> (insn.imm as u64 & SHIFT_MASK_64)) as u64,
            ebpf::ARSH64_REG => reg[_dst] = (reg[_dst] as i64 >> (reg[_src] as u64 & SHIFT_MASK_64)) as u64,

            // BPF_JMP class
            // TODO: check this actually works as expected for signed / unsigned ops
            // JEQ, JNE, JGT, JGE, JLT, JLE: unsigned
            // JSGT, JSGE, JSLT, JSLE: signed
            ebpf::JA => do_jump(),
            ebpf::JEQ_IMM => if reg[_dst] == unsigned_u64!(insn.imm) { do_jump(); },
            ebpf::JEQ_REG => if reg[_dst] == reg[_src] { do_jump(); },
            ebpf::JGT_IMM => if reg[_dst] > unsigned_u64!(insn.imm) { do_jump(); },
            ebpf::JGT_REG => if reg[_dst] > reg[_src] { do_jump(); },
            ebpf::JGE_IMM => if reg[_dst] >= unsigned_u64!(insn.imm) { do_jump(); },
            ebpf::JGE_REG => if reg[_dst] >= reg[_src] { do_jump(); },
            ebpf::JLT_IMM => if reg[_dst] < unsigned_u64!(insn.imm) { do_jump(); },
            ebpf::JLT_REG => if reg[_dst] < reg[_src] { do_jump(); },
            ebpf::JLE_IMM => if reg[_dst] <= unsigned_u64!(insn.imm) { do_jump(); },
            ebpf::JLE_REG => if reg[_dst] <= reg[_src] { do_jump(); },
            ebpf::JSET_IMM => if reg[_dst] & insn.imm as u64 != 0 { do_jump(); },
            ebpf::JSET_REG => if reg[_dst] & reg[_src] != 0 { do_jump(); },
            ebpf::JNE_IMM => if reg[_dst] != unsigned_u64!(insn.imm) { do_jump(); },
            ebpf::JNE_REG => if reg[_dst] != reg[_src] { do_jump(); },
            ebpf::JSGT_IMM => if reg[_dst] as i64 > insn.imm as i64 { do_jump(); },
            ebpf::JSGT_REG => if reg[_dst] as i64 > reg[_src] as i64 { do_jump(); },
            ebpf::JSGE_IMM => if reg[_dst] as i64 >= insn.imm as i64 { do_jump(); },
            ebpf::JSGE_REG => if reg[_dst] as i64 >= reg[_src] as i64 { do_jump(); },
            ebpf::JSLT_IMM => if (reg[_dst] as i64) < insn.imm as i64 { do_jump(); },
            ebpf::JSLT_REG => if (reg[_dst] as i64) < reg[_src] as i64 { do_jump(); },
            ebpf::JSLE_IMM => if reg[_dst] as i64 <= insn.imm as i64 { do_jump(); },
            ebpf::JSLE_REG => if reg[_dst] as i64 <= reg[_src] as i64 { do_jump(); },

            // BPF_JMP32 class (same comparisons, on the low 32 bits of the operands only)
            ebpf::JEQ_IMM32 => if reg[_dst] as u32 == insn.imm as u32 { do_jump(); },
            ebpf::JEQ_REG32 => if reg[_dst] as u32 == reg[_src] as u32 { do_jump(); },
            ebpf::JGT_IMM32 => if reg[_dst] as u32 > insn.imm as u32 { do_jump(); },
            ebpf::JGT_REG32 => if reg[_dst] as u32 > reg[_src] as u32 { do_jump(); },
            ebpf::JGE_IMM32 => if reg[_dst] as u32 >= insn.imm as u32 { do_jump(); },
            ebpf::JGE_REG32 => if reg[_dst] as u32 >= reg[_src] as u32 { do_jump(); },
            ebpf::JLT_IMM32 => if (reg[_dst] as u32) < insn.imm as u32 { do_jump(); },
            ebpf::JLT_REG32 => if (reg[_dst] as u32) < reg[_src] as u32 { do_jump(); },
            ebpf::JLE_IMM32 => if reg[_dst] as u32 <= insn.imm as u32 { do_jump(); },
            ebpf::JLE_REG32 => if reg[_dst] as u32 <= reg[_src] as u32 { do_jump(); },
            ebpf::JSET_IMM32 => if reg[_dst] as u32 & insn.imm as u32 != 0 { do_jump(); },
            ebpf::JSET_REG32 => if reg[_dst] as u32 & reg[_src] as u32 != 0 { do_jump(); },
            ebpf::JNE_IMM32 => if reg[_dst] as u32 != insn.imm as u32 { do_jump(); },
            ebpf::JNE_REG32 => if reg[_dst] as u32 != reg[_src] as u32 { do_jump(); },
            ebpf::JSGT_IMM32 => if reg[_dst] as i32 > insn.imm { do_jump(); },
            ebpf::JSGT_REG32 => if reg[_dst] as i32 > reg[_src] as i32 { do_jump(); },
            ebpf::JSGE_IMM32 => if reg[_dst] as i32 >= insn.imm { do_jump(); },
            ebpf::JSGE_REG32 => if reg[_dst] as i32 >= reg[_src] as i32 { do_jump(); },
            ebpf::JSLT_IMM32 => if (reg[_dst] as i32) < insn.imm { do_jump(); },
            ebpf::JSLT_REG32 => if (reg[_dst] as i32) < reg[_src] as i32 { do_jump(); },
            ebpf::JSLE_IMM32 => if reg[_dst] as i32 <= insn.imm { do_jump(); },
            ebpf::JSLE_REG32 => if reg[_dst] as i32 <= reg[_src] as i32 { do_jump(); },
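
            // CALL dispatches on the source register field: src == 0 is a call to a
            // registered helper (looked up by the id in `imm`), src == 1 is an
            // eBPF-to-eBPF call (`imm` is a relative instruction offset).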
            // Do not delegate the check to the verifier, since registered functions
            // can be changed after the program has been verified.
            ebpf::CALL => {
                match _src {
                    // Call helper function
                    0 => {
                        if let Some(function) = helpers.get(&(insn.imm as u32)) {
                            reg[0] = function(reg[1], reg[2], reg[3], reg[4], reg[5]);
                        } else {
                            Err(Error::new(
                                ErrorKind::Other,
                                format!(
                                    "Error: unknown helper function (id: {:#x})",
                                    insn.imm as u32
                                )
                            ))?;
                        }
                    }
                    // eBPF-to-eBPF call
                    1 => {
                        if stack_frame_idx >= MAX_CALL_DEPTH {
                            Err(Error::new(
                                ErrorKind::Other,
                                format!(
                                    "Error: too many nested calls (max: {MAX_CALL_DEPTH})"
                                )
                            ))?;
                        }
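
                        // r6-r9 are callee-saved in the eBPF calling convention; save them
                        // and the return address so that EXIT can restore the caller's
                        // state, then move r10 down by the callee's stack usage.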
                        stacks[stack_frame_idx].save_registers(&reg[6..=9]);
                        stacks[stack_frame_idx].save_return_address(insn_ptr);
                        // Why don't we need to check the stack usage here? Because when
                        // the stack is exhausted, any instruction in the called function
                        // that reads or writes the stack will make check_mem_load or
                        // check_mem_store return an error.
                        reg[10] -= stacks[stack_frame_idx].get_stack_usage().stack_usage() as u64;
                        stack_frame_idx += 1;
                        insn_ptr += insn.imm as usize;
                    }
                    _ => {
                        Err(Error::new(
                            ErrorKind::Other,
                            format!("Error: unsupported call type #{} (insn #{})",
                                _src,
                                insn_ptr - 1
                            )
                        ))?;
                    }
                }
            }
            ebpf::TAIL_CALL => unimplemented!(),
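
            // EXIT either returns from an eBPF-to-eBPF call (restoring the caller's
            // callee-saved registers, return address and stack pointer) or, at the
            // outermost frame, ends the program and returns r0.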
            ebpf::EXIT => {
                if stack_frame_idx > 0 {
                    stack_frame_idx -= 1;
                    reg[6..=9].copy_from_slice(&stacks[stack_frame_idx].get_registers());
                    insn_ptr = stacks[stack_frame_idx].get_return_address();
                    reg[10] += stacks[stack_frame_idx].get_stack_usage().stack_usage() as u64;
                } else {
                    return Ok(reg[0]);
                }
            }

            _ => unreachable!()
        };
    }

    unreachable!()
}