// SPDX-License-Identifier: (Apache-2.0 OR MIT)

use alloc::{collections::BTreeMap, format, vec, vec::Vec};
use core::{mem, mem::ManuallyDrop};
use std::io::ErrorKind;

use cranelift_codegen::{
    entity::EntityRef,
    ir::{
        condcodes::IntCC,
        types::{I16, I32, I64, I8},
        AbiParam, Block, Endianness, FuncRef, Function, InstBuilder, MemFlags, Signature,
        SourceLoc, StackSlotData, StackSlotKind, TrapCode, Type, UserFuncName, Value,
    },
    isa::OwnedTargetIsa,
    settings::{self, Configurable},
};
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
use cranelift_jit::{JITBuilder, JITModule};
use cranelift_module::{FuncId, Linkage, Module};

use super::{Error, HashMap, HashSet};
use crate::ebpf::{
    self, Insn, BPF_ALU_OP_MASK, BPF_IND, BPF_JEQ, BPF_JGE, BPF_JGT, BPF_JLE, BPF_JLT, BPF_JMP32,
    BPF_JNE, BPF_JSET, BPF_JSGE, BPF_JSGT, BPF_JSLE, BPF_JSLT, BPF_X, STACK_SIZE,
};
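
/// Signature of the native entrypoint produced by `CraneliftCompiler::compile_function`.
///
/// This is not the standard eBPF calling convention: the four parameters are the
/// packet memory pointer/length and the mbuff pointer/length, and the return value
/// is the contents of R0 when the program exits.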
pub type JittedFunction = extern "C" fn(
    *mut u8, // mem_ptr
    usize,   // mem_len
    *mut u8, // mbuff_ptr
    usize,   // mbuff_len
) -> u64;
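
/// JIT compiler that lowers an eBPF program into a single native function using Cranelift.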
pub(crate) struct CraneliftCompiler {
    isa: OwnedTargetIsa,
    module: JITModule,

    helpers: HashMap<u32, ebpf::Helper>,
    helper_func_refs: HashMap<u32, FuncRef>,

    /// List of blocks corresponding to each instruction.
    /// We only store the first instruction that observes a new block.
    insn_blocks: BTreeMap<u32, Block>,
    /// Map of block targets for each jump/branching instruction.
    insn_targets: BTreeMap<u32, (Block, Block)>,
    filled_blocks: HashSet<Block>,

    /// Map of register numbers to Cranelift variables.
    registers: [Variable; 11],

    /// Other useful variables used throughout the program.
    mem_start: Variable,
    mem_end: Variable,
    mbuf_start: Variable,
    mbuf_end: Variable,
    stack_start: Variable,
    stack_end: Variable,
}

impl CraneliftCompiler {
    pub(crate) fn new(helpers: HashMap<u32, ebpf::Helper>) -> Self {
        let mut flag_builder = settings::builder();

        flag_builder.set("opt_level", "speed").unwrap();

        // Enable stack probes
        flag_builder.enable("enable_probestack").unwrap();
        flag_builder.set("probestack_strategy", "inline").unwrap();

        let isa_builder = cranelift_native::builder().unwrap_or_else(|msg| {
            panic!("host machine is not supported: {}", msg);
        });
        let isa = isa_builder
            .finish(settings::Flags::new(flag_builder))
            .unwrap();
        let mut jit_builder =
            JITBuilder::with_isa(isa.clone(), cranelift_module::default_libcall_names());
        // Register all the helpers
        for (k, v) in helpers.iter() {
            let name = format!("helper_{}", k);
            jit_builder.symbol(name, (*v) as usize as *const u8);
        }
        let mut module = JITModule::new(jit_builder);

        let registers = (0..11)
            .map(|i| Variable::new(i))
            .collect::<Vec<_>>()
            .try_into()
            .unwrap();

        Self {
            isa,
            module,
            helpers,
            helper_func_refs: HashMap::new(),
            insn_blocks: BTreeMap::new(),
            insn_targets: BTreeMap::new(),
            filled_blocks: HashSet::new(),
            registers,
            mem_start: Variable::new(11),
            mem_end: Variable::new(12),
            mbuf_start: Variable::new(13),
            mbuf_end: Variable::new(14),
            stack_start: Variable::new(15),
            stack_end: Variable::new(16),
        }
    }
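
    /// Compile `prog` into native code and return the finished program.
    ///
    /// A minimal usage sketch (the `helpers` map, `prog` bytes, and `mem` buffer are
    /// assumed to be provided by the caller; passing a null mbuff pointer with length 0
    /// mirrors how the prelude falls back to the memory pointer):
    ///
    /// ```ignore
    /// let compiler = CraneliftCompiler::new(helpers);
    /// let program = compiler.compile_function(prog)?;
    /// let ret = program.execute(mem.as_mut_ptr(), mem.len(), core::ptr::null_mut(), 0);
    /// ```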
    pub(crate) fn compile_function(mut self, prog: &[u8]) -> Result<CraneliftProgram, Error> {
        let name = "main";
        // This is not a standard eBPF function! We use an informal ABI with just 4 parameters.
        // See [JittedFunction] which is the signature of this function.
        //
        // Since this function only serves as the entrypoint for the JITed program, it doesn't
        // really matter.
        let sig = Signature {
            params: vec![
                AbiParam::new(I64),
                AbiParam::new(I64),
                AbiParam::new(I64),
                AbiParam::new(I64),
            ],
            returns: vec![AbiParam::new(I64)],
            call_conv: self.isa.default_call_conv(),
        };

        let func_id = self
            .module
            .declare_function(name, Linkage::Local, &sig)
            .unwrap();

        let mut ctx = self.module.make_context();
        ctx.func = Function::with_name_signature(UserFuncName::testcase(name.as_bytes()), sig);
        let mut func_ctx = FunctionBuilderContext::new();

        {
            let mut builder: FunctionBuilder = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);

            let entry = builder.create_block();
            builder.append_block_params_for_function_params(entry);
            builder.switch_to_block(entry);

            self.build_cfg(&mut builder, prog)?;
            self.build_function_prelude(&mut builder, entry)?;
            self.translate_program(&mut builder, prog)?;

            builder.seal_all_blocks();
            builder.finalize();
        }

        self.module.define_function(func_id, &mut ctx).unwrap();
        self.module.finalize_definitions().unwrap();
        self.module.clear_context(&mut ctx);

        Ok(CraneliftProgram::new(self.module, func_id))
    }
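
    /// Emit the function prologue: declare the eBPF registers and bounds-check variables,
    /// import the registered helpers, carve out the stack slot, and seed R1/R2 (and R10)
    /// from the entry block parameters before jumping to the first program block.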
    fn build_function_prelude(
        &mut self,
        bcx: &mut FunctionBuilder,
        entry: Block,
    ) -> Result<(), Error> {
        // Register the VM registers as variables
        for var in self.registers.iter() {
            bcx.declare_var(*var, I64);
        }

        // Register the bounds check variables
        bcx.declare_var(self.mem_start, I64);
        bcx.declare_var(self.mem_end, I64);
        bcx.declare_var(self.mbuf_start, I64);
        bcx.declare_var(self.mbuf_end, I64);
        bcx.declare_var(self.stack_start, I64);
        bcx.declare_var(self.stack_end, I64);

        // Register the helpers
        for (k, _) in self.helpers.iter() {
            let name = format!("helper_{}", k);
            let sig = Signature {
                params: vec![
                    AbiParam::new(I64),
                    AbiParam::new(I64),
                    AbiParam::new(I64),
                    AbiParam::new(I64),
                    AbiParam::new(I64),
                ],
                returns: vec![AbiParam::new(I64)],
                call_conv: self.isa.default_call_conv(),
            };
            let func_id = self
                .module
                .declare_function(&name, Linkage::Import, &sig)
                .unwrap();
            let func_ref = self.module.declare_func_in_func(func_id, bcx.func);
            self.helper_func_refs.insert(*k, func_ref);
        }

        // Register the stack
        let ss = bcx.create_sized_stack_slot(StackSlotData {
            kind: StackSlotKind::ExplicitSlot,
            size: STACK_SIZE as u32,
        });
        let addr_ty = self.isa.pointer_type();
        let stack_addr = bcx.ins().stack_addr(addr_ty, ss, STACK_SIZE as i32);
        bcx.def_var(self.registers[10], stack_addr);

        // Initialize the bounds check variables
        let stack_start = bcx.ins().stack_addr(addr_ty, ss, 0);
        bcx.def_var(self.stack_start, stack_start);
        let stack_end = bcx.ins().stack_addr(addr_ty, ss, STACK_SIZE as i32);
        bcx.def_var(self.stack_end, stack_end);

        // This is our internal ABI where the first 2 params are the memory
        let mem_start = bcx.block_params(entry)[0];
        let mem_len = bcx.block_params(entry)[1];
        let mem_end = bcx.ins().iadd(mem_start, mem_len);
        bcx.def_var(self.mem_start, mem_start);
        bcx.def_var(self.mem_end, mem_end);

        // And the next 2 are the mbuf
        let mbuf_start = bcx.block_params(entry)[2];
        let mbuf_len = bcx.block_params(entry)[3];
        let mbuf_end = bcx.ins().iadd(mbuf_start, mbuf_len);
        bcx.def_var(self.mbuf_start, mbuf_start);
        bcx.def_var(self.mbuf_end, mbuf_end);

        // The eBPF ABI specifies that R1 must contain either the memory or the mbuf pointer.
        // If the mbuf length is non-zero we use that, otherwise we use the memory pointer.
        let mbuf_exists = bcx.ins().icmp_imm(IntCC::NotEqual, mbuf_len, 0);
        let mem_or_mbuf = bcx.ins().select(mbuf_exists, mbuf_start, mem_start);
        bcx.def_var(self.registers[1], mem_or_mbuf);

        // R2 should contain the length of the memory or the mbuf;
        // at least the ebpf-conformance tests expect this.
        let mem_or_mbuf_len = bcx.ins().select(mbuf_exists, mbuf_len, mem_len);
        bcx.def_var(self.registers[2], mem_or_mbuf_len);

        // Insert the *actual* initial block
        let program_entry = bcx.create_block();
        bcx.ins().jump(program_entry, &[]);
        self.filled_blocks.insert(bcx.current_block().unwrap());
        self.insn_blocks.insert(0, program_entry);

        Ok(())
    }
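
    /// Walk the program instruction by instruction, switching to the pre-created block
    /// for each new basic block and lowering every opcode to Cranelift IR.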
    fn translate_program(&mut self, bcx: &mut FunctionBuilder, prog: &[u8]) -> Result<(), Error> {
        let mut insn_ptr: usize = 0;
        while insn_ptr * ebpf::INSN_SIZE < prog.len() {
            let insn = ebpf::get_insn(prog, insn_ptr);

            // If this instruction is on a new block switch to it.
            if let Some(block) = self.insn_blocks.get(&(insn_ptr as u32)) {
                // Blocks must have a terminator instruction at the end before we switch away from them
                let current_block = bcx.current_block().unwrap();
                if !self.filled_blocks.contains(&current_block) {
                    bcx.ins().jump(*block, &[]);
                }
                bcx.switch_to_block(*block);
            }

            // Set the source location for the instruction
            bcx.set_srcloc(SourceLoc::new(insn_ptr as u32));

            match insn.opc {
                // BPF_LD class
                // LD_ABS_* and LD_IND_* are supposed to load a pointer to data from the metadata
                // buffer. Since this pointer is constant, and since we already know it (mem), do
                // not bother re-fetching it, just use mem directly.
                ebpf::LD_ABS_B
                | ebpf::LD_ABS_H
                | ebpf::LD_ABS_W
                | ebpf::LD_ABS_DW
                | ebpf::LD_IND_B
                | ebpf::LD_IND_H
                | ebpf::LD_IND_W
                | ebpf::LD_IND_DW => {
                    let ty = match insn.opc {
                        ebpf::LD_ABS_B | ebpf::LD_IND_B => I8,
                        ebpf::LD_ABS_H | ebpf::LD_IND_H => I16,
                        ebpf::LD_ABS_W | ebpf::LD_IND_W => I32,
                        ebpf::LD_ABS_DW | ebpf::LD_IND_DW => I64,
                        _ => unreachable!(),
                    };

                    // Both instructions add the imm part of the instruction to the pointer
                    let ptr = bcx.use_var(self.mem_start);
                    let offset = bcx
                        .ins()
                        .iconst(self.isa.pointer_type(), insn.imm as u32 as i64);
                    let addr = bcx.ins().iadd(ptr, offset);

                    // IND instructions additionally add the value of the source register
                    let is_ind = (insn.opc & BPF_IND) != 0;
                    let addr = if is_ind {
                        let src_reg = self.insn_src(bcx, &insn);
                        bcx.ins().iadd(addr, src_reg)
                    } else {
                        addr
                    };

                    // The offset here has already been added to the pointer, so we pass 0
                    let loaded = self.reg_load(bcx, ty, addr, 0);
                    let ext = if ty != I64 {
                        bcx.ins().uextend(I64, loaded)
                    } else {
                        loaded
                    };

                    self.set_dst(bcx, &insn, ext);
                }
                ebpf::LD_DW_IMM => {
                    insn_ptr += 1;
                    let next_insn = ebpf::get_insn(prog, insn_ptr);

                    let imm = (((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32)) as i64;
                    let iconst = bcx.ins().iconst(I64, imm);
                    self.set_dst(bcx, &insn, iconst);
                }

                // BPF_LDX class
                ebpf::LD_B_REG | ebpf::LD_H_REG | ebpf::LD_W_REG | ebpf::LD_DW_REG => {
                    let ty = match insn.opc {
                        ebpf::LD_B_REG => I8,
                        ebpf::LD_H_REG => I16,
                        ebpf::LD_W_REG => I32,
                        ebpf::LD_DW_REG => I64,
                        _ => unreachable!(),
                    };

                    let base = self.insn_src(bcx, &insn);
                    let loaded = self.reg_load(bcx, ty, base, insn.off);
                    let ext = if ty != I64 {
                        bcx.ins().uextend(I64, loaded)
                    } else {
                        loaded
                    };

                    self.set_dst(bcx, &insn, ext);
                }

                // BPF_ST and BPF_STX class
                ebpf::ST_B_IMM
                | ebpf::ST_H_IMM
                | ebpf::ST_W_IMM
                | ebpf::ST_DW_IMM
                | ebpf::ST_B_REG
                | ebpf::ST_H_REG
                | ebpf::ST_W_REG
                | ebpf::ST_DW_REG => {
                    let ty = match insn.opc {
                        ebpf::ST_B_IMM | ebpf::ST_B_REG => I8,
                        ebpf::ST_H_IMM | ebpf::ST_H_REG => I16,
                        ebpf::ST_W_IMM | ebpf::ST_W_REG => I32,
                        ebpf::ST_DW_IMM | ebpf::ST_DW_REG => I64,
                        _ => unreachable!(),
                    };
                    let is_imm = match insn.opc {
                        ebpf::ST_B_IMM | ebpf::ST_H_IMM | ebpf::ST_W_IMM | ebpf::ST_DW_IMM => true,
                        ebpf::ST_B_REG | ebpf::ST_H_REG | ebpf::ST_W_REG | ebpf::ST_DW_REG => false,
                        _ => unreachable!(),
                    };

                    let value = if is_imm {
                        self.insn_imm64(bcx, &insn)
                    } else {
                        self.insn_src(bcx, &insn)
                    };

                    let narrow = if ty != I64 {
                        bcx.ins().ireduce(ty, value)
                    } else {
                        value
                    };

                    let base = self.insn_dst(bcx, &insn);
                    self.reg_store(bcx, ty, base, insn.off, narrow);
                }
                ebpf::ST_W_XADD => unimplemented!(),
                ebpf::ST_DW_XADD => unimplemented!(),

                // BPF_ALU class
                // TODO Check how overflow works in the kernel. Should we `&= U32MAX` all src
                // register values before we do the operation?
                // Cf. ((0x11 << 32) - (0x1 << 32)) as u32 VS ((0x11 << 32) as u32 - (0x1 << 32) as u32)
                ebpf::ADD32_IMM => {
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().iadd(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::ADD32_REG => {
                    //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().iadd(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::SUB32_IMM => {
                    // reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm) as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().isub(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::SUB32_REG => {
                    // reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().isub(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::MUL32_IMM => {
                    // reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm) as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().imul(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::MUL32_REG => {
                    // reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().imul(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::DIV32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32 / insn.imm as u32) as u64,
                    let res = if insn.imm == 0 {
                        bcx.ins().iconst(I32, 0)
                    } else {
                        let imm = self.insn_imm32(bcx, &insn);
                        let src = self.insn_dst32(bcx, &insn);
                        bcx.ins().udiv(src, imm)
                    };
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::DIV32_REG => {
                    // reg[_dst] = (reg[_dst] as u32 / reg[_src] as u32) as u64,
                    let zero = bcx.ins().iconst(I32, 0);
                    let one = bcx.ins().iconst(I32, 1);

                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);

                    let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
                    let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
                    let div_res = bcx.ins().udiv(lhs, safe_rhs);

                    let res = bcx.ins().select(rhs_is_zero, zero, div_res);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::OR32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32 | insn.imm as u32) as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().bor(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::OR32_REG => {
                    // reg[_dst] = (reg[_dst] as u32 | reg[_src] as u32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().bor(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::AND32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32 & insn.imm as u32) as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().band(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::AND32_REG => {
                    // reg[_dst] = (reg[_dst] as u32 & reg[_src] as u32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().band(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::LSH32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm as u32) as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().ishl(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::LSH32_REG => {
                    // reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().ishl(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::RSH32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm as u32) as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().ushr(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::RSH32_REG => {
                    // reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().ushr(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::NEG32 => {
                    // { reg[_dst] = (reg[_dst] as i32).wrapping_neg() as u64; reg[_dst] &= U32MAX; },
                    let src = self.insn_dst32(bcx, &insn);
                    let res = bcx.ins().ineg(src);
                    // TODO: Do we need to mask the result?
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::MOD32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32 % insn.imm as u32) as u64,
                    if insn.imm != 0 {
                        let imm = self.insn_imm32(bcx, &insn);
                        let src = self.insn_dst32(bcx, &insn);
                        let res = bcx.ins().urem(src, imm);
                        self.set_dst32(bcx, &insn, res);
                    }
                }
                ebpf::MOD32_REG => {
                    // reg[_dst] = (reg[_dst] as u32 % reg[_src] as u32) as u64,
                    let zero = bcx.ins().iconst(I32, 0);
                    let one = bcx.ins().iconst(I32, 1);

                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);

                    let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
                    let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
                    let div_res = bcx.ins().urem(lhs, safe_rhs);

                    let res = bcx.ins().select(rhs_is_zero, lhs, div_res);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::XOR32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32 ^ insn.imm as u32) as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().bxor(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::XOR32_REG => {
                    // reg[_dst] = (reg[_dst] as u32 ^ reg[_src] as u32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().bxor(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::MOV32_IMM => {
                    let imm = self.insn_imm32(bcx, &insn);
                    self.set_dst32(bcx, &insn, imm);
                }
                ebpf::MOV32_REG => {
                    // reg[_dst] = (reg[_src] as u32) as u64,
                    let src = self.insn_src32(bcx, &insn);
                    self.set_dst32(bcx, &insn, src);
                }
                ebpf::ARSH32_IMM => {
                    // { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm as u32) as u64; reg[_dst] &= U32MAX; },
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().sshr(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::ARSH32_REG => {
                    // { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().sshr(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::BE | ebpf::LE => {
                    let should_swap = match insn.opc {
                        ebpf::BE => self.isa.endianness() == Endianness::Little,
                        ebpf::LE => self.isa.endianness() == Endianness::Big,
                        _ => unreachable!(),
                    };

                    let ty: Type = match insn.imm {
                        16 => I16,
                        32 => I32,
                        64 => I64,
                        _ => unreachable!(),
                    };

                    if should_swap {
                        let src = self.insn_dst(bcx, &insn);
                        let src_narrow = if ty != I64 {
                            bcx.ins().ireduce(ty, src)
                        } else {
                            src
                        };

                        let res = bcx.ins().bswap(src_narrow);
                        let res_wide = if ty != I64 {
                            bcx.ins().uextend(I64, res)
                        } else {
                            res
                        };

                        self.set_dst(bcx, &insn, res_wide);
                    }
                }
                // BPF_ALU64 class
                ebpf::ADD64_IMM => {
                    // reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().iadd(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::ADD64_REG => {
                    // reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().iadd(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::SUB64_IMM => {
                    // reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().isub(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::SUB64_REG => {
                    // reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().isub(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::MUL64_IMM => {
                    // reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().imul(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::MUL64_REG => {
                    // reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().imul(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::DIV64_IMM => {
                    // reg[_dst] /= insn.imm as u64,
                    let res = if insn.imm == 0 {
                        bcx.ins().iconst(I64, 0)
                    } else {
                        let imm = self.insn_imm64(bcx, &insn);
                        let src = self.insn_dst(bcx, &insn);
                        bcx.ins().udiv(src, imm)
                    };
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::DIV64_REG => {
                    // reg[_dst] /= reg[_src], if reg[_src] != 0
                    // reg[_dst] = 0, if reg[_src] == 0
                    let zero = bcx.ins().iconst(I64, 0);
                    let one = bcx.ins().iconst(I64, 1);

                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);

                    let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
                    let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
                    let div_res = bcx.ins().udiv(lhs, safe_rhs);

                    let res = bcx.ins().select(rhs_is_zero, zero, div_res);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::MOD64_IMM => {
                    // reg[_dst] %= insn.imm as u64,
                    if insn.imm != 0 {
                        let imm = self.insn_imm64(bcx, &insn);
                        let src = self.insn_dst(bcx, &insn);
                        let res = bcx.ins().urem(src, imm);
                        self.set_dst(bcx, &insn, res);
                    };
                }
                ebpf::MOD64_REG => {
                    // reg[_dst] %= reg[_src], if reg[_src] != 0
                    let zero = bcx.ins().iconst(I64, 0);
                    let one = bcx.ins().iconst(I64, 1);

                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);

                    let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
                    let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
                    let div_res = bcx.ins().urem(lhs, safe_rhs);

                    let res = bcx.ins().select(rhs_is_zero, lhs, div_res);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::OR64_IMM => {
                    // reg[_dst] |= insn.imm as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().bor(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::OR64_REG => {
                    // reg[_dst] |= reg[_src],
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().bor(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::AND64_IMM => {
                    // reg[_dst] &= insn.imm as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().band(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::AND64_REG => {
                    // reg[_dst] &= reg[_src],
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().band(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::LSH64_IMM => {
                    // reg[_dst] <<= insn.imm as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().ishl(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::LSH64_REG => {
                    // reg[_dst] <<= reg[_src],
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().ishl(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::RSH64_IMM => {
                    // reg[_dst] >>= insn.imm as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().ushr(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::RSH64_REG => {
                    // reg[_dst] >>= reg[_src],
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().ushr(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::NEG64 => {
                    // reg[_dst] = -(reg[_dst] as i64) as u64,
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().ineg(src);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::XOR64_IMM => {
                    // reg[_dst] ^= insn.imm as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().bxor(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::XOR64_REG => {
                    // reg[_dst] ^= reg[_src],
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().bxor(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::MOV64_IMM => {
                    // reg[_dst] = insn.imm as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    bcx.def_var(self.registers[insn.dst as usize], imm);
                }
                ebpf::MOV64_REG => {
                    // reg[_dst] = reg[_src],
                    let src = self.insn_src(bcx, &insn);
                    bcx.def_var(self.registers[insn.dst as usize], src);
                }
                ebpf::ARSH64_IMM => {
                    // reg[_dst] = (reg[_dst] as i64 >> insn.imm) as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().sshr(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::ARSH64_REG => {
                    // reg[_dst] = (reg[_dst] as i64 >> reg[_src]) as u64,
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().sshr(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                // BPF_JMP & BPF_JMP32 class
                ebpf::JA => {
                    let (_, target_block) = self.insn_targets[&(insn_ptr as u32)];
                    bcx.ins().jump(target_block, &[]);
                    self.filled_blocks.insert(bcx.current_block().unwrap());
                }
                ebpf::JEQ_IMM
                | ebpf::JEQ_REG
                | ebpf::JGT_IMM
                | ebpf::JGT_REG
                | ebpf::JGE_IMM
                | ebpf::JGE_REG
                | ebpf::JLT_IMM
                | ebpf::JLT_REG
                | ebpf::JLE_IMM
                | ebpf::JLE_REG
                | ebpf::JNE_IMM
                | ebpf::JNE_REG
                | ebpf::JSGT_IMM
                | ebpf::JSGT_REG
                | ebpf::JSGE_IMM
                | ebpf::JSGE_REG
                | ebpf::JSLT_IMM
                | ebpf::JSLT_REG
                | ebpf::JSLE_IMM
                | ebpf::JSLE_REG
                | ebpf::JSET_IMM
                | ebpf::JSET_REG
                | ebpf::JEQ_IMM32
                | ebpf::JEQ_REG32
                | ebpf::JGT_IMM32
                | ebpf::JGT_REG32
                | ebpf::JGE_IMM32
                | ebpf::JGE_REG32
                | ebpf::JLT_IMM32
                | ebpf::JLT_REG32
                | ebpf::JLE_IMM32
                | ebpf::JLE_REG32
                | ebpf::JNE_IMM32
                | ebpf::JNE_REG32
                | ebpf::JSGT_IMM32
                | ebpf::JSGT_REG32
                | ebpf::JSGE_IMM32
                | ebpf::JSGE_REG32
                | ebpf::JSLT_IMM32
                | ebpf::JSLT_REG32
                | ebpf::JSLE_IMM32
                | ebpf::JSLE_REG32
                | ebpf::JSET_IMM32
                | ebpf::JSET_REG32 => {
                    let (fallthrough, target) = self.insn_targets[&(insn_ptr as u32)];

                    let is_reg = (insn.opc & BPF_X) != 0;
                    let is_32 = (insn.opc & BPF_JMP32) != 0;
                    let intcc = match insn.opc {
                        c if (c & BPF_ALU_OP_MASK) == BPF_JEQ => IntCC::Equal,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JNE => IntCC::NotEqual,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JGT => IntCC::UnsignedGreaterThan,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JGE => IntCC::UnsignedGreaterThanOrEqual,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JLT => IntCC::UnsignedLessThan,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JLE => IntCC::UnsignedLessThanOrEqual,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JSGT => IntCC::SignedGreaterThan,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JSGE => IntCC::SignedGreaterThanOrEqual,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JSLT => IntCC::SignedLessThan,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JSLE => IntCC::SignedLessThanOrEqual,
                        // JSET is handled specially below
                        c if (c & BPF_ALU_OP_MASK) == BPF_JSET => IntCC::NotEqual,
                        _ => unreachable!(),
                    };

                    let lhs = if is_32 {
                        self.insn_dst32(bcx, &insn)
                    } else {
                        self.insn_dst(bcx, &insn)
                    };
                    let rhs = match (is_reg, is_32) {
                        (true, false) => self.insn_src(bcx, &insn),
                        (true, true) => self.insn_src32(bcx, &insn),
                        (false, false) => self.insn_imm64(bcx, &insn),
                        (false, true) => self.insn_imm32(bcx, &insn),
                    };

                    let cmp_res = if (insn.opc & BPF_ALU_OP_MASK) == BPF_JSET {
                        bcx.ins().band(lhs, rhs)
                    } else {
                        bcx.ins().icmp(intcc, lhs, rhs)
                    };
                    bcx.ins().brif(cmp_res, target, &[], fallthrough, &[]);
                    self.filled_blocks.insert(bcx.current_block().unwrap());
                }
                // Do not delegate the check to the verifier, since registered functions can be
                // changed after the program has been verified.
                ebpf::CALL => {
                    let func_ref = self
                        .helper_func_refs
                        .get(&(insn.imm as u32))
                        .copied()
                        .ok_or_else(|| {
                            Error::new(
                                ErrorKind::Other,
                                format!(
                                    "[CRANELIFT] Error: unknown helper function (id: {:#x})",
                                    insn.imm as u32
                                ),
                            )
                        })?;

                    let arg0 = bcx.use_var(self.registers[1]);
                    let arg1 = bcx.use_var(self.registers[2]);
                    let arg2 = bcx.use_var(self.registers[3]);
                    let arg3 = bcx.use_var(self.registers[4]);
                    let arg4 = bcx.use_var(self.registers[5]);

                    let call = bcx.ins().call(func_ref, &[arg0, arg1, arg2, arg3, arg4]);
                    let ret = bcx.inst_results(call)[0];
                    self.set_dst(bcx, &insn, ret);
                }
                ebpf::TAIL_CALL => unimplemented!(),
                ebpf::EXIT => {
                    let ret = bcx.use_var(self.registers[0]);
                    bcx.ins().return_(&[ret]);
                    self.filled_blocks.insert(bcx.current_block().unwrap());
                }
                _ => unimplemented!("inst: {:?}", insn),
            }

            insn_ptr += 1;
        }

        Ok(())
    }
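
    // Operand helpers: read and write the eBPF registers and immediates as Cranelift
    // values, either as full 64-bit values or as truncated/extended 32-bit values.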
    fn insn_imm64(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
        bcx.ins().iconst(I64, insn.imm as u64 as i64)
    }
    fn insn_imm32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
        bcx.ins().iconst(I32, insn.imm as u32 as u64 as i64)
    }

    fn insn_dst(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
        bcx.use_var(self.registers[insn.dst as usize])
    }
    fn insn_dst32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
        let dst = self.insn_dst(bcx, insn);
        bcx.ins().ireduce(I32, dst)
    }

    fn insn_src(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
        bcx.use_var(self.registers[insn.src as usize])
    }
    fn insn_src32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
        let src = self.insn_src(bcx, insn);
        bcx.ins().ireduce(I32, src)
    }

    fn set_dst(&mut self, bcx: &mut FunctionBuilder, insn: &Insn, val: Value) {
        bcx.def_var(self.registers[insn.dst as usize], val);
    }
    fn set_dst32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn, val: Value) {
        let val32 = bcx.ins().uextend(I64, val);
        self.set_dst(bcx, insn, val32);
    }
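
    /// Load a `ty`-sized value from `base + offset` as a little-endian, bounds-checked
    /// memory access.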
    fn reg_load(&mut self, bcx: &mut FunctionBuilder, ty: Type, base: Value, offset: i16) -> Value {
        self.insert_bounds_check(bcx, ty, base, offset);

        let mut flags = MemFlags::new();
        flags.set_endianness(Endianness::Little);

        bcx.ins().load(ty, flags, base, offset as i32)
    }
    fn reg_store(
        &mut self,
        bcx: &mut FunctionBuilder,
        ty: Type,
        base: Value,
        offset: i16,
        val: Value,
    ) {
        self.insert_bounds_check(bcx, ty, base, offset);

        let mut flags = MemFlags::new();
        flags.set_endianness(Endianness::Little);

        bcx.ins().store(flags, val, base, offset as i32);
    }

    /// Inserts a bounds check for a memory access
    ///
    /// This emits a conditional trap if the access does not fall within any of the known
    /// valid memory regions: the stack, the memory, and the mbuf.
    fn insert_bounds_check(
        &mut self,
        bcx: &mut FunctionBuilder,
        ty: Type,
        base: Value,
        offset: i16,
    ) {
        let access_size = bcx.ins().iconst(I64, ty.bytes() as i64);

        let offset = bcx.ins().iconst(I64, offset as i64);
        let start_addr = bcx.ins().iadd(base, offset);
        let end_addr = bcx.ins().iadd(start_addr, access_size);

        let does_not_overflow =
            bcx.ins()
                .icmp(IntCC::UnsignedGreaterThanOrEqual, end_addr, start_addr);

        // Check if it's a valid stack access
        let stack_start = bcx.use_var(self.stack_start);
        let stack_end = bcx.use_var(self.stack_end);
        let stack_start_valid =
            bcx.ins()
                .icmp(IntCC::UnsignedGreaterThanOrEqual, start_addr, stack_start);
        let stack_end_valid = bcx
            .ins()
            .icmp(IntCC::UnsignedLessThanOrEqual, end_addr, stack_end);
        let stack_valid = bcx.ins().band(stack_start_valid, stack_end_valid);

        // Check if it's a valid memory access
        let mem_start = bcx.use_var(self.mem_start);
        let mem_end = bcx.use_var(self.mem_end);
        let has_mem = bcx.ins().icmp_imm(IntCC::NotEqual, mem_start, 0);
        let mem_start_valid =
            bcx.ins()
                .icmp(IntCC::UnsignedGreaterThanOrEqual, start_addr, mem_start);
        let mem_end_valid = bcx
            .ins()
            .icmp(IntCC::UnsignedLessThanOrEqual, end_addr, mem_end);
        let mem_valid = bcx.ins().band(mem_start_valid, mem_end_valid);
        let mem_valid = bcx.ins().band(mem_valid, has_mem);

        // Check if it's a valid mbuf access
        let mbuf_start = bcx.use_var(self.mbuf_start);
        let mbuf_end = bcx.use_var(self.mbuf_end);
        let has_mbuf = bcx.ins().icmp_imm(IntCC::NotEqual, mbuf_start, 0);
        let mbuf_start_valid =
            bcx.ins()
                .icmp(IntCC::UnsignedGreaterThanOrEqual, start_addr, mbuf_start);
        let mbuf_end_valid = bcx
            .ins()
            .icmp(IntCC::UnsignedLessThanOrEqual, end_addr, mbuf_end);
        let mbuf_valid = bcx.ins().band(mbuf_start_valid, mbuf_end_valid);
        let mbuf_valid = bcx.ins().band(mbuf_valid, has_mbuf);

        // Join all of these checks together: the access must be valid for at least one
        // region of memory, ...
        let valid_region = bcx.ins().bor(stack_valid, mem_valid);
        let valid_region = bcx.ins().bor(valid_region, mbuf_valid);

        // ... and the address computation must not overflow.
        let valid = bcx.ins().band(does_not_overflow, valid_region);

        // TODO: We can potentially throw a custom trap code here to indicate
        // which check failed.
        bcx.ins().trapz(valid, TrapCode::HeapOutOfBounds);
    }

    /// Analyze the program and build the CFG
    ///
    /// We do this because cranelift does not allow us to switch back to a previously
    /// filled block and add instructions to it. So we can't split the program as we
    /// translate it.
    fn build_cfg(&mut self, bcx: &mut FunctionBuilder, prog: &[u8]) -> Result<(), Error> {
        let mut insn_ptr: usize = 0;
        while insn_ptr * ebpf::INSN_SIZE < prog.len() {
            let insn = ebpf::get_insn(prog, insn_ptr);

            match insn.opc {
                // This instruction consumes two opcodes
                ebpf::LD_DW_IMM => {
                    insn_ptr += 1;
                }

                ebpf::JA
                | ebpf::JEQ_IMM
                | ebpf::JEQ_REG
                | ebpf::JGT_IMM
                | ebpf::JGT_REG
                | ebpf::JGE_IMM
                | ebpf::JGE_REG
                | ebpf::JLT_IMM
                | ebpf::JLT_REG
                | ebpf::JLE_IMM
                | ebpf::JLE_REG
                | ebpf::JNE_IMM
                | ebpf::JNE_REG
                | ebpf::JSGT_IMM
                | ebpf::JSGT_REG
                | ebpf::JSGE_IMM
                | ebpf::JSGE_REG
                | ebpf::JSLT_IMM
                | ebpf::JSLT_REG
                | ebpf::JSLE_IMM
                | ebpf::JSLE_REG
                | ebpf::JSET_IMM
                | ebpf::JSET_REG
                | ebpf::JEQ_IMM32
                | ebpf::JEQ_REG32
                | ebpf::JGT_IMM32
                | ebpf::JGT_REG32
                | ebpf::JGE_IMM32
                | ebpf::JGE_REG32
                | ebpf::JLT_IMM32
                | ebpf::JLT_REG32
                | ebpf::JLE_IMM32
                | ebpf::JLE_REG32
                | ebpf::JNE_IMM32
                | ebpf::JNE_REG32
                | ebpf::JSGT_IMM32
                | ebpf::JSGT_REG32
                | ebpf::JSGE_IMM32
                | ebpf::JSGE_REG32
                | ebpf::JSLT_IMM32
                | ebpf::JSLT_REG32
                | ebpf::JSLE_IMM32
                | ebpf::JSLE_REG32
                | ebpf::JSET_IMM32
                | ebpf::JSET_REG32
                | ebpf::EXIT
                | ebpf::TAIL_CALL => {
                    self.prepare_jump_blocks(bcx, insn_ptr, &insn);
                }
                _ => {}
            }

            insn_ptr += 1;
        }

        Ok(())
    }
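
    /// Create (or reuse) the fallthrough and jump-target blocks for a branching
    /// instruction and record the pair in `insn_targets`.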
    fn prepare_jump_blocks(&mut self, bcx: &mut FunctionBuilder, insn_ptr: usize, insn: &Insn) {
        let insn_ptr = insn_ptr as u32;
        let next_pc: u32 = insn_ptr + 1;
        let target_pc: u32 = (insn_ptr as isize + insn.off as isize + 1)
            .try_into()
            .unwrap();

        // This is the fallthrough block
        let fallthrough_block = *self
            .insn_blocks
            .entry(next_pc)
            .or_insert_with(|| bcx.create_block());

        // Jump target
        let target_block = *self
            .insn_blocks
            .entry(target_pc)
            .or_insert_with(|| bcx.create_block());

        // Mark the blocks for this instruction
        self.insn_targets
            .insert(insn_ptr, (fallthrough_block, target_block));
    }
}

/// Contains the backing memory for a previously compiled function.
///
/// Currently this will always just contain code for a single function, but
/// in the future we might want to support multiple functions per module.
///
/// Ensures that the backing memory is freed when dropped.
pub struct CraneliftProgram {
    module: ManuallyDrop<JITModule>,

    main_id: FuncId,
}

impl CraneliftProgram {
    pub(crate) fn new(module: JITModule, main_id: FuncId) -> Self {
        Self {
            module: ManuallyDrop::new(module),
            main_id,
        }
    }

    /// We shouldn't allow this function pointer to be exposed outside of this
    /// module, since it's not guaranteed to be valid after the module is dropped.
    pub(crate) fn get_main_function(&self) -> JittedFunction {
        let function_ptr = self.module.get_finalized_function(self.main_id);
        unsafe { mem::transmute(function_ptr) }
    }

    /// Execute this module by calling the main function
    pub fn execute(
        &self,
        mem_ptr: *mut u8,
        mem_len: usize,
        mbuff_ptr: *mut u8,
        mbuff_len: usize,
    ) -> u64 {
        let main = self.get_main_function();
        main(mem_ptr, mem_len, mbuff_ptr, mbuff_len)
    }
}

impl Drop for CraneliftProgram {
    fn drop(&mut self) {
        // We need an owned `JITModule` to be able to free its memory;
        // use `ManuallyDrop` to take ownership of it here.
        //
        // We can no longer use `module` after this, but since we are in `Drop`
        // it should be safe.
        unsafe {
            let module = ManuallyDrop::take(&mut self.module);
            module.free_memory()
        };
    }
}