Переглянути джерело

cranelift: Add initial Cranelift JIT

Signed-off-by: Afonso Bordado <afonsobordado@az8.co>
Signed-off-by: Quentin Monnet <quentin@isovalent.com>
Afonso Bordado 2 роки тому
батько
коміт
1e0aae882a
4 змінених файлів з 883 додано та 0 видалено
  1. 17 0
      Cargo.toml
  2. 605 0
      src/cranelift.rs
  3. 63 0
      src/lib.rs
  4. 198 0
      tests/cranelift.rs

+ 17 - 0
Cargo.toml

@@ -29,8 +29,25 @@ libc = "0.2"
 time = "0.2"
 byteorder = "1.2"
 
+# Optional dependencies for the Cranelift JIT
+cranelift-codegen = { version = "0.99", optional = true }
+cranelift-frontend = { version = "0.99", optional = true }
+cranelift-jit = { version = "0.99", optional = true }
+cranelift-native = { version = "0.99", optional = true }
+cranelift-module = { version = "0.99", optional = true }
+
 [dev-dependencies]
 
 elf = "0.0.10"
 json = "0.11"
 hex = "0.4.3"
+
+[features]
+default = []
+cranelift = [
+    "dep:cranelift-codegen",
+    "dep:cranelift-frontend",
+    "dep:cranelift-jit",
+    "dep:cranelift-native",
+    "dep:cranelift-module",
+]

+ 605 - 0
src/cranelift.rs

@@ -0,0 +1,605 @@
+// SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+use std::convert::TryInto;
+
+use cranelift_codegen::{
+    entity::EntityRef,
+    ir::{
+        condcodes::IntCC,
+        types::{I32, I64},
+        AbiParam, Block, Function, InstBuilder, LibCall, Signature, UserFuncName, Value,
+    },
+    isa::{CallConv, OwnedTargetIsa},
+    settings::{self, Configurable},
+    Context,
+};
+use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
+use cranelift_jit::{JITBuilder, JITModule};
+use cranelift_module::{FuncId, Linkage, Module};
+
+use crate::ebpf::{self, Insn};
+
+use super::Error;
+
+fn libcall_names(libcall: LibCall) -> String {
+    match libcall {
+        _ => unimplemented!(),
+    }
+}
+
+pub type JittedFunction = extern "C" fn(
+    *mut u8, // mbuff.as_ptr() as *mut u8,
+    usize,   // mbuff.len(),
+    *mut u8, // mem_ptr,
+    usize,   // mem.len(),
+    usize,   // 0,
+    usize,   // 0,
+) -> u64;
+
+pub(crate) struct CraneliftCompiler {
+    isa: OwnedTargetIsa,
+    module: JITModule,
+
+    /// Map of register numbers to Cranelift variables.
+    registers: [Variable; 16],
+}
+
+impl CraneliftCompiler {
+    pub(crate) fn new() -> Self {
+        let mut flag_builder = settings::builder();
+
+        flag_builder.set("opt_level", "speed").unwrap();
+
+        let isa_builder = cranelift_native::builder().unwrap_or_else(|msg| {
+            panic!("host machine is not supported: {}", msg);
+        });
+        let isa = isa_builder
+            .finish(settings::Flags::new(flag_builder))
+            .unwrap();
+        let mut module = JITModule::new(JITBuilder::with_isa(isa.clone(), Box::new(libcall_names)));
+
+        let registers = (0..16)
+            .map(|i| Variable::new(i))
+            .collect::<Vec<_>>()
+            .try_into()
+            .unwrap();
+
+        Self {
+            isa,
+            module,
+            registers,
+        }
+    }
+
+    pub(crate) fn get_function(&mut self, id: FuncId) -> JittedFunction {
+        let function_ptr = self.module.get_finalized_function(id);
+
+        unsafe { std::mem::transmute(function_ptr) }
+    }
+
+    pub(crate) fn compile_function(&mut self, prog: &[u8]) -> Result<FuncId, Error> {
+        let name = "main";
+        let sig = Signature {
+            params: vec![
+                AbiParam::new(I64),
+                AbiParam::new(I64),
+                AbiParam::new(I64),
+                AbiParam::new(I64),
+                AbiParam::new(I64),
+                AbiParam::new(I64),
+            ],
+            returns: vec![AbiParam::new(I64)],
+            call_conv: CallConv::SystemV,
+        };
+
+        let func_id = self
+            .module
+            .declare_function(name, Linkage::Local, &sig)
+            .unwrap();
+
+        let mut ctx = Context::new();
+        ctx.func = Function::with_name_signature(UserFuncName::testcase(name.as_bytes()), sig);
+        let mut func_ctx = FunctionBuilderContext::new();
+
+        {
+            let mut builder: FunctionBuilder = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+            let entry = builder.create_block();
+            builder.append_block_params_for_function_params(entry);
+            builder.switch_to_block(entry);
+
+            self.build_function_prelude(&mut builder, entry)?;
+            self.translate_program(&mut builder, prog)?;
+
+            builder.seal_all_blocks();
+            builder.finalize();
+        }
+
+        ctx.verify(&*self.isa).unwrap();
+        ctx.optimize(&*self.isa).unwrap();
+
+        self.module.define_function(func_id, &mut ctx).unwrap();
+        self.module.finalize_definitions().unwrap();
+
+        Ok(func_id)
+    }
+
+    fn build_function_prelude(
+        &mut self,
+        bcx: &mut FunctionBuilder,
+        entry: Block,
+    ) -> Result<(), Error> {
+        // Register the VM registers as variables
+        for var in self.registers.iter() {
+            bcx.declare_var(*var, I64);
+        }
+
+        // Set the first 5 arguments to the registers
+        // The eBPF ABI specifies that the first 5 arguments are available in
+        // registers r1-r5
+        for i in 0..5 {
+            let arg = bcx.block_params(entry)[i];
+            let var = self.registers[i + 1];
+            bcx.def_var(var, arg);
+        }
+
+        Ok(())
+    }
+
+    fn translate_program(&mut self, bcx: &mut FunctionBuilder, prog: &[u8]) -> Result<(), Error> {
+        let mut insn_ptr: usize = 0;
+        while insn_ptr * ebpf::INSN_SIZE < prog.len() {
+            let insn = ebpf::get_insn(prog, insn_ptr);
+
+            match insn.opc {
+                ebpf::LD_DW_IMM => {
+                    insn_ptr += 1;
+                    let next_insn = ebpf::get_insn(prog, insn_ptr);
+
+                    let imm = (((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32)) as i64;
+                    let iconst = bcx.ins().iconst(I64, imm);
+                    self.set_dst(bcx, &insn, iconst);
+                }
+
+                // BPF_ALU class
+                // TODO Check how overflow works in kernel. Should we &= U32MAX all src register value
+                // before we do the operation?
+                // Cf ((0x11 << 32) - (0x1 << 32)) as u32 VS (((0x11 << 32) as u32) - ((0x1 << 32) as u32))
+                ebpf::ADD32_IMM => {
+                    let src = self.insn_dst32(bcx, &insn);
+                    let imm = self.insn_imm32(bcx, &insn);
+                    let res = bcx.ins().iadd(src, imm);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::ADD32_REG => {
+                    //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
+                    let lhs = self.insn_dst32(bcx, &insn);
+                    let rhs = self.insn_src32(bcx, &insn);
+                    let res = bcx.ins().iadd(lhs, rhs);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::SUB32_IMM => {
+                    // reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm)         as u64,
+                    let src = self.insn_dst32(bcx, &insn);
+                    let imm = self.insn_imm32(bcx, &insn);
+                    let res = bcx.ins().isub(src, imm);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::SUB32_REG => {
+                    // reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
+                    let lhs = self.insn_dst32(bcx, &insn);
+                    let rhs = self.insn_src32(bcx, &insn);
+                    let res = bcx.ins().isub(lhs, rhs);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::MUL32_IMM => {
+                    // reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm)         as u64,
+                    let src = self.insn_dst32(bcx, &insn);
+                    let imm = self.insn_imm32(bcx, &insn);
+                    let res = bcx.ins().imul(src, imm);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::MUL32_REG => {
+                    // reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
+                    let lhs = self.insn_dst32(bcx, &insn);
+                    let rhs = self.insn_src32(bcx, &insn);
+                    let res = bcx.ins().imul(lhs, rhs);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::DIV32_IMM => {
+                    // reg[_dst] = (reg[_dst] as u32 / insn.imm              as u32) as u64,
+                    let res = if insn.imm == 0 {
+                        bcx.ins().iconst(I32, 0)
+                    } else {
+                        let imm = self.insn_imm32(bcx, &insn);
+                        let src = self.insn_dst32(bcx, &insn);
+                        bcx.ins().udiv(src, imm)
+                    };
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::DIV32_REG => {
+                    // reg[_dst] = (reg[_dst] as u32 / reg[_src]             as u32) as u64,
+                    let zero = bcx.ins().iconst(I32, 0);
+                    let one = bcx.ins().iconst(I32, 1);
+
+                    let lhs = self.insn_dst32(bcx, &insn);
+                    let rhs = self.insn_src32(bcx, &insn);
+
+                    let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
+                    let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
+                    let div_res = bcx.ins().udiv(lhs, safe_rhs);
+
+                    let res = bcx.ins().select(rhs_is_zero, zero, div_res);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::OR32_IMM => {
+                    // reg[_dst] = (reg[_dst] as u32             | insn.imm  as u32) as u64,
+                    let src = self.insn_dst32(bcx, &insn);
+                    let imm = self.insn_imm32(bcx, &insn);
+                    let res = bcx.ins().bor(src, imm);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::OR32_REG => {
+                    // reg[_dst] = (reg[_dst] as u32             | reg[_src] as u32) as u64,
+                    let lhs = self.insn_dst32(bcx, &insn);
+                    let rhs = self.insn_src32(bcx, &insn);
+                    let res = bcx.ins().bor(lhs, rhs);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::AND32_IMM => {
+                    // reg[_dst] = (reg[_dst] as u32             & insn.imm  as u32) as u64,
+                    let src = self.insn_dst32(bcx, &insn);
+                    let imm = self.insn_imm32(bcx, &insn);
+                    let res = bcx.ins().band(src, imm);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::AND32_REG => {
+                    // reg[_dst] = (reg[_dst] as u32             & reg[_src] as u32) as u64,
+                    let lhs = self.insn_dst32(bcx, &insn);
+                    let rhs = self.insn_src32(bcx, &insn);
+                    let res = bcx.ins().band(lhs, rhs);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::LSH32_IMM => {
+                    // reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm  as u32) as u64,
+                    let src = self.insn_dst32(bcx, &insn);
+                    let imm = self.insn_imm32(bcx, &insn);
+                    let res = bcx.ins().ishl(src, imm);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::LSH32_REG => {
+                    // reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
+                    let lhs = self.insn_dst32(bcx, &insn);
+                    let rhs = self.insn_src32(bcx, &insn);
+                    let res = bcx.ins().ishl(lhs, rhs);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::RSH32_IMM => {
+                    // reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm  as u32) as u64,
+                    let src = self.insn_dst32(bcx, &insn);
+                    let imm = self.insn_imm32(bcx, &insn);
+                    let res = bcx.ins().ushr(src, imm);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::RSH32_REG => {
+                    // reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
+                    let lhs = self.insn_dst32(bcx, &insn);
+                    let rhs = self.insn_src32(bcx, &insn);
+                    let res = bcx.ins().ushr(lhs, rhs);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::NEG32 => {
+                    // { reg[_dst] = (reg[_dst] as i32).wrapping_neg()                 as u64; reg[_dst] &= U32MAX; },
+                    let src = self.insn_dst32(bcx, &insn);
+                    let res = bcx.ins().ineg(src);
+                    // TODO: Do we need to mask the result?
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::MOD32_IMM => {
+                    // reg[_dst] = (reg[_dst] as u32             % insn.imm  as u32) as u64,
+
+                    if insn.imm != 0 {
+                        let imm = self.insn_imm32(bcx, &insn);
+                        let src = self.insn_dst32(bcx, &insn);
+                        let res = bcx.ins().urem(src, imm);
+                        self.set_dst32(bcx, &insn, res);
+                    }
+                }
+                ebpf::MOD32_REG => {
+                    // reg[_dst] = (reg[_dst] as u32 % reg[_src]             as u32) as u64,
+                    let zero = bcx.ins().iconst(I32, 0);
+                    let one = bcx.ins().iconst(I32, 1);
+
+                    let lhs = self.insn_dst32(bcx, &insn);
+                    let rhs = self.insn_src32(bcx, &insn);
+
+                    let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
+                    let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
+                    let div_res = bcx.ins().urem(lhs, safe_rhs);
+
+                    let res = bcx.ins().select(rhs_is_zero, lhs, div_res);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::XOR32_IMM => {
+                    // reg[_dst] = (reg[_dst] as u32             ^ insn.imm  as u32) as u64,
+                    let src = self.insn_dst32(bcx, &insn);
+                    let imm = self.insn_imm32(bcx, &insn);
+                    let res = bcx.ins().bxor(src, imm);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::XOR32_REG => {
+                    // reg[_dst] = (reg[_dst] as u32             ^ reg[_src] as u32) as u64,
+                    let lhs = self.insn_dst32(bcx, &insn);
+                    let rhs = self.insn_src32(bcx, &insn);
+                    let res = bcx.ins().bxor(lhs, rhs);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::MOV32_IMM => {
+                    let imm = self.insn_imm32(bcx, &insn);
+                    self.set_dst32(bcx, &insn, imm);
+                }
+                ebpf::MOV32_REG => {
+                    // reg[_dst] = (reg[_src] as u32)                                as u64,
+                    let src = self.insn_src32(bcx, &insn);
+                    self.set_dst32(bcx, &insn, src);
+                }
+                ebpf::ARSH32_IMM => {
+                    // { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm  as u32) as u64; reg[_dst] &= U32MAX; },
+                    let src = self.insn_dst32(bcx, &insn);
+                    let imm = self.insn_imm32(bcx, &insn);
+                    let res = bcx.ins().sshr(src, imm);
+                    self.set_dst32(bcx, &insn, res);
+                }
+                ebpf::ARSH32_REG => {
+                    // { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
+                    let lhs = self.insn_dst32(bcx, &insn);
+                    let rhs = self.insn_src32(bcx, &insn);
+                    let res = bcx.ins().sshr(lhs, rhs);
+                    self.set_dst32(bcx, &insn, res);
+                }
+
+                // BPF_ALU64 class
+                ebpf::ADD64_IMM => {
+                    // reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
+                    let imm = self.insn_imm64(bcx, &insn);
+                    let src = self.insn_dst(bcx, &insn);
+                    let res = bcx.ins().iadd(src, imm);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::ADD64_REG => {
+                    // reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
+                    let lhs = self.insn_dst(bcx, &insn);
+                    let rhs = self.insn_src(bcx, &insn);
+                    let res = bcx.ins().iadd(lhs, rhs);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::SUB64_IMM => {
+                    // reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
+                    let imm = self.insn_imm64(bcx, &insn);
+                    let src = self.insn_dst(bcx, &insn);
+                    let res = bcx.ins().isub(src, imm);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::SUB64_REG => {
+                    // reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
+                    let lhs = self.insn_dst(bcx, &insn);
+                    let rhs = self.insn_src(bcx, &insn);
+                    let res = bcx.ins().isub(lhs, rhs);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::MUL64_IMM => {
+                    // reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
+                    let imm = self.insn_imm64(bcx, &insn);
+                    let src = self.insn_dst(bcx, &insn);
+                    let res = bcx.ins().imul(src, imm);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::MUL64_REG => {
+                    // reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
+                    let lhs = self.insn_dst(bcx, &insn);
+                    let rhs = self.insn_src(bcx, &insn);
+                    let res = bcx.ins().imul(lhs, rhs);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::DIV64_IMM => {
+                    // reg[_dst] /= insn.imm as u64,
+                    let res = if insn.imm == 0 {
+                        bcx.ins().iconst(I64, 0)
+                    } else {
+                        let imm = self.insn_imm64(bcx, &insn);
+                        let src = self.insn_dst(bcx, &insn);
+                        bcx.ins().udiv(src, imm)
+                    };
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::DIV64_REG => {
+                    // reg[_dst] /= reg[_src], if reg[_src] != 0
+                    // reg[_dst] = 0, if reg[_src] == 0
+                    let zero = bcx.ins().iconst(I64, 0);
+                    let one = bcx.ins().iconst(I64, 1);
+
+                    let lhs = self.insn_dst(bcx, &insn);
+                    let rhs = self.insn_src(bcx, &insn);
+
+                    let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
+                    let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
+                    let div_res = bcx.ins().udiv(lhs, safe_rhs);
+
+                    let res = bcx.ins().select(rhs_is_zero, zero, div_res);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::MOD64_IMM => {
+                    // reg[_dst] %= insn.imm as u64,
+
+                    if insn.imm != 0 {
+                        let imm = self.insn_imm64(bcx, &insn);
+                        let src = self.insn_dst(bcx, &insn);
+                        let res = bcx.ins().urem(src, imm);
+                        self.set_dst(bcx, &insn, res);
+                    };
+                }
+                ebpf::MOD64_REG => {
+                    // reg[_dst] %= reg[_src], if reg[_src] != 0
+
+                    let zero = bcx.ins().iconst(I64, 0);
+                    let one = bcx.ins().iconst(I64, 1);
+
+                    let lhs = self.insn_dst(bcx, &insn);
+                    let rhs = self.insn_src(bcx, &insn);
+
+                    let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
+                    let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
+                    let div_res = bcx.ins().urem(lhs, safe_rhs);
+
+                    let res = bcx.ins().select(rhs_is_zero, lhs, div_res);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::OR64_IMM => {
+                    // reg[_dst] |= insn.imm as u64,
+                    let imm = self.insn_imm64(bcx, &insn);
+                    let src = self.insn_dst(bcx, &insn);
+                    let res = bcx.ins().bor(src, imm);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::OR64_REG => {
+                    // reg[_dst] |= reg[_src],
+                    let lhs = self.insn_dst(bcx, &insn);
+                    let rhs = self.insn_src(bcx, &insn);
+                    let res = bcx.ins().bor(lhs, rhs);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::AND64_IMM => {
+                    // reg[_dst] &= insn.imm as u64,
+                    let imm = self.insn_imm64(bcx, &insn);
+                    let src = self.insn_dst(bcx, &insn);
+                    let res = bcx.ins().band(src, imm);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::AND64_REG => {
+                    // reg[_dst] &= reg[_src],
+                    let lhs = self.insn_dst(bcx, &insn);
+                    let rhs = self.insn_src(bcx, &insn);
+                    let res = bcx.ins().band(lhs, rhs);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::LSH64_IMM => {
+                    // reg[_dst] <<= insn.imm as u64,
+                    let imm = self.insn_imm64(bcx, &insn);
+                    let src = self.insn_dst(bcx, &insn);
+                    let res = bcx.ins().ishl(src, imm);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::LSH64_REG => {
+                    // reg[_dst] <<= reg[_src],
+                    let lhs = self.insn_dst(bcx, &insn);
+                    let rhs = self.insn_src(bcx, &insn);
+                    let res = bcx.ins().ishl(lhs, rhs);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::RSH64_IMM => {
+                    // reg[_dst] >>= insn.imm as u64,
+                    let imm = self.insn_imm64(bcx, &insn);
+                    let src = self.insn_dst(bcx, &insn);
+                    let res = bcx.ins().ushr(src, imm);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::RSH64_REG => {
+                    // reg[_dst] >>= reg[_src],
+                    let lhs = self.insn_dst(bcx, &insn);
+                    let rhs = self.insn_src(bcx, &insn);
+                    let res = bcx.ins().ushr(lhs, rhs);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::NEG64 => {
+                    // reg[_dst] = -(reg[_dst] as i64) as u64,
+                    let src = self.insn_dst(bcx, &insn);
+                    let res = bcx.ins().ineg(src);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::XOR64_IMM => {
+                    // reg[_dst] ^= insn.imm as u64,
+                    let imm = self.insn_imm64(bcx, &insn);
+                    let src = self.insn_dst(bcx, &insn);
+                    let res = bcx.ins().bxor(src, imm);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::XOR64_REG => {
+                    // reg[_dst] ^= reg[_src],
+                    let lhs = self.insn_dst(bcx, &insn);
+                    let rhs = self.insn_src(bcx, &insn);
+                    let res = bcx.ins().bxor(lhs, rhs);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::MOV64_IMM => {
+                    // reg[_dst] = insn.imm as u64,
+                    let imm = self.insn_imm64(bcx, &insn);
+                    bcx.def_var(self.registers[insn.dst as usize], imm);
+                }
+                ebpf::MOV64_REG => {
+                    // reg[_dst] = reg[_src],
+                    let src = self.insn_src(bcx, &insn);
+                    bcx.def_var(self.registers[insn.dst as usize], src);
+                }
+                ebpf::ARSH64_IMM => {
+                    // reg[_dst] = (reg[_dst] as i64 >> insn.imm) as u64,
+                    let imm = self.insn_imm64(bcx, &insn);
+                    let src = self.insn_dst(bcx, &insn);
+                    let res = bcx.ins().sshr(src, imm);
+                    self.set_dst(bcx, &insn, res);
+                }
+                ebpf::ARSH64_REG => {
+                    // reg[_dst] = (reg[_dst] as i64 >> reg[_src]) as u64,
+                    let lhs = self.insn_dst(bcx, &insn);
+                    let rhs = self.insn_src(bcx, &insn);
+                    let res = bcx.ins().sshr(lhs, rhs);
+                    self.set_dst(bcx, &insn, res);
+                }
+
+                // Do not delegate the check to the verifier, since registered functions can be
+                // changed after the program has been verified.
+                ebpf::CALL => unimplemented!(),
+                ebpf::TAIL_CALL => unimplemented!(),
+                ebpf::EXIT => {
+                    let ret = bcx.use_var(self.registers[0]);
+                    bcx.ins().return_(&[ret]);
+                }
+                _ => unimplemented!("inst: {:?}", insn),
+            }
+
+            insn_ptr += 1;
+        }
+
+        Ok(())
+    }
+
+    fn insn_imm64(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
+        bcx.ins().iconst(I64, insn.imm as u64 as i64)
+    }
+    fn insn_imm32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
+        bcx.ins().iconst(I32, insn.imm as u32 as u64 as i64)
+    }
+
+    fn insn_dst(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
+        bcx.use_var(self.registers[insn.dst as usize])
+    }
+    fn insn_dst32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
+        let dst = self.insn_dst(bcx, insn);
+        bcx.ins().ireduce(I32, dst)
+    }
+
+    fn insn_src(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
+        bcx.use_var(self.registers[insn.src as usize])
+    }
+    fn insn_src32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
+        let src = self.insn_src(bcx, insn);
+        bcx.ins().ireduce(I32, src)
+    }
+
+    fn set_dst(&mut self, bcx: &mut FunctionBuilder, insn: &Insn, val: Value) {
+        bcx.def_var(self.registers[insn.dst as usize], val);
+    }
+    fn set_dst32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn, val: Value) {
+        let val32 = bcx.ins().uextend(I64, val);
+        self.set_dst(bcx, insn, val32);
+    }
+}

+ 63 - 0
src/lib.rs

@@ -31,6 +31,17 @@ extern crate byteorder;
 extern crate combine;
 extern crate time;
 
+#[cfg(feature = "cranelift")]
+extern crate cranelift_codegen;
+#[cfg(feature = "cranelift")]
+extern crate cranelift_frontend;
+#[cfg(feature = "cranelift")]
+extern crate cranelift_jit;
+#[cfg(feature = "cranelift")]
+extern crate cranelift_module;
+#[cfg(feature = "cranelift")]
+extern crate cranelift_native;
+
 use byteorder::{ByteOrder, LittleEndian};
 use std::collections::HashMap;
 use std::io::{Error, ErrorKind};
@@ -38,6 +49,8 @@ use std::u32;
 
 mod asm_parser;
 pub mod assembler;
+#[cfg(feature = "cranelift")]
+mod cranelift;
 pub mod disassembler;
 pub mod ebpf;
 pub mod helpers;
@@ -399,6 +412,45 @@ impl<'a> EbpfVmMbuff<'a> {
             )),
         }
     }
+
+    /// Compiles and executes the program using the cranelift JIT.
+    #[cfg(feature = "cranelift")]
+    pub fn execute_cranelift(&self, mem: &mut [u8], mbuff: &'a mut [u8]) -> Result<u64, Error> {
+        use crate::cranelift::CraneliftCompiler;
+
+        let prog = match self.prog {
+            Some(prog) => prog,
+            None => Err(Error::new(
+                ErrorKind::Other,
+                "Error: No program set, call prog_set() to load one",
+            ))?,
+        };
+
+        // If packet data is empty, do not send the address of an empty slice; send a null pointer
+        //  as first argument instead, as this is uBPF's behavior (empty packet should not happen
+        //  in the kernel; anyway the verifier would prevent the use of uninitialized registers).
+        //  See `mul_loop` test.
+        let mem_ptr = match mem.len() {
+            0 => std::ptr::null_mut(),
+            _ => mem.as_ptr() as *mut u8,
+        };
+
+        let mut compiler = CraneliftCompiler::new();
+
+        let func = compiler.compile_function(prog)?;
+        let ptr = compiler.get_function(func);
+
+        let res = ptr(
+            mem_ptr,
+            mem.len(),
+            mbuff.as_ptr() as *mut u8,
+            mbuff.len(),
+            0,
+            0,
+        );
+
+        Ok(res)
+    }
 }
 
 /// A virtual machine to run eBPF program. This kind of VM is used for programs expecting to work
@@ -1061,6 +1113,12 @@ impl<'a> EbpfVmRaw<'a> {
         let mut mbuff = vec![];
         self.parent.execute_program_jit(mem, &mut mbuff)
     }
+
+    #[cfg(feature = "cranelift")]
+    pub fn execute_cranelift(&self, mem: &'a mut [u8]) -> Result<u64, Error> {
+        let mut mbuff = vec![];
+        self.parent.execute_cranelift(mem, &mut mbuff)
+    }
 }
 
 /// A virtual machine to run eBPF program. This kind of VM is used for programs that do not work
@@ -1310,4 +1368,9 @@ impl<'a> EbpfVmNoData<'a> {
     pub unsafe fn execute_program_jit(&self) -> Result<u64, Error> {
         self.parent.execute_program_jit(&mut [])
     }
+
+    #[cfg(feature = "cranelift")]
+    pub fn execute_cranelift(&self) -> Result<u64, Error> {
+        self.parent.execute_cranelift(&mut [])
+    }
 }

+ 198 - 0
tests/cranelift.rs

@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+#![cfg_attr(feature = "cargo-clippy", allow(clippy::unreadable_literal))]
+#![cfg(feature = "cranelift")]
+
+extern crate rbpf;
+mod common;
+
+use rbpf::assembler::assemble;
+
+macro_rules! test_cranelift {
+    ($name:ident, $prog:expr, $expected:expr) => {
+        #[test]
+        fn $name() {
+            let prog = assemble($prog).unwrap();
+            let vm = rbpf::EbpfVmNoData::new(Some(&prog)).unwrap();
+            assert_eq!(vm.execute_cranelift().unwrap(), $expected);
+        }
+    };
+    ($name:ident, $prog:expr, $mem:expr, $expected:expr) => {
+        #[test]
+        fn $name() {
+            let prog = assemble($prog).unwrap();
+            let mem = &mut $mem;
+            let vm = rbpf::EbpfVmRaw::new(Some(&prog)).unwrap();
+            assert_eq!(vm.execute_cranelift(mem).unwrap(), $expected);
+        }
+    };
+}
+
+test_cranelift!(
+    test_cranelift_add,
+    "
+    mov32 r0, 0
+    mov32 r1, 2
+    add32 r0, 1
+    add32 r0, r1
+    exit
+    ",
+    0x3
+);
+
+test_cranelift!(
+    test_cranelift_alu64_arith,
+    "
+    mov r0, 0
+    mov r1, 1
+    mov r2, 2
+    mov r3, 3
+    mov r4, 4
+    mov r5, 5
+    mov r6, 6
+    mov r7, 7
+    mov r8, 8
+    mov r9, 9
+    add r0, 23
+    add r0, r7
+    sub r0, 13
+    sub r0, r1
+    mul r0, 7
+    mul r0, r3
+    div r0, 2
+    div r0, r4
+    exit
+    ",
+    0x2a
+);
+
+test_cranelift!(
+    test_cranelift_alu64_bit,
+    "
+    mov r0, 0
+    mov r1, 1
+    mov r2, 2
+    mov r3, 3
+    mov r4, 4
+    mov r5, 5
+    mov r6, 6
+    mov r7, 7
+    mov r8, 8
+    or r0, r5
+    or r0, 0xa0
+    and r0, 0xa3
+    mov r9, 0x91
+    and r0, r9
+    lsh r0, 32
+    lsh r0, 22
+    lsh r0, r8
+    rsh r0, 32
+    rsh r0, 19
+    rsh r0, r7
+    xor r0, 0x03
+    xor r0, r2
+    exit
+    ",
+    0x11
+);
+
+test_cranelift!(
+    test_cranelift_alu_arith,
+    "
+    mov32 r0, 0
+    mov32 r1, 1
+    mov32 r2, 2
+    mov32 r3, 3
+    mov32 r4, 4
+    mov32 r5, 5
+    mov32 r6, 6
+    mov32 r7, 7
+    mov32 r8, 8
+    mov32 r9, 9
+    add32 r0, 23
+    add32 r0, r7
+    sub32 r0, 13
+    sub32 r0, r1
+    mul32 r0, 7
+    mul32 r0, r3
+    div32 r0, 2
+    div32 r0, r4
+    exit
+    ",
+    0x2a
+);
+
+test_cranelift!(
+    test_cranelift_alu_bit,
+    "
+    mov32 r0, 0
+    mov32 r1, 1
+    mov32 r2, 2
+    mov32 r3, 3
+    mov32 r4, 4
+    mov32 r5, 5
+    mov32 r6, 6
+    mov32 r7, 7
+    mov32 r8, 8
+    or32 r0, r5
+    or32 r0, 0xa0
+    and32 r0, 0xa3
+    mov32 r9, 0x91
+    and32 r0, r9
+    lsh32 r0, 22
+    lsh32 r0, r8
+    rsh32 r0, 19
+    rsh32 r0, r7
+    xor32 r0, 0x03
+    xor32 r0, r2
+    exit
+    ",
+    0x11
+);
+
+test_cranelift!(
+    test_cranelift_arsh32_high_shift,
+    "
+    mov r0, 8
+    lddw r1, 0x100000001
+    arsh32 r0, r1
+    exit
+    ",
+    0x4
+);
+
+test_cranelift!(
+    test_cranelift_arsh,
+    "
+    mov32 r0, 0xf8
+    lsh32 r0, 28
+    arsh32 r0, 16
+    exit
+    ",
+    0xffff8000
+);
+
+test_cranelift!(
+    test_cranelift_arsh64,
+    "
+    mov32 r0, 1
+    lsh r0, 63
+    arsh r0, 55
+    mov32 r1, 5
+    arsh r0, r1
+    exit
+    ",
+    0xfffffffffffffff8
+);
+
+test_cranelift!(
+    test_cranelift_arsh_reg,
+    "
+    mov32 r0, 0xf8
+    mov32 r1, 16
+    lsh32 r0, 28
+    arsh32 r0, r1
+    exit
+    ",
+    0xffff8000
+);