|
@@ -37,7 +37,6 @@ fn check_mem(addr: u64, len: usize, access_type: &str, insn_ptr: usize,
|
|
#[allow(cyclomatic_complexity)]
|
|
#[allow(cyclomatic_complexity)]
|
|
pub fn execute_program(prog_: Option<&[u8]>, mem: &[u8], mbuff: &[u8], helpers: &HashMap<u32, ebpf::Helper>) -> Result<u64, Error> {
|
|
pub fn execute_program(prog_: Option<&[u8]>, mem: &[u8], mbuff: &[u8], helpers: &HashMap<u32, ebpf::Helper>) -> Result<u64, Error> {
|
|
const U32MAX: u64 = u32::MAX as u64;
|
|
const U32MAX: u64 = u32::MAX as u64;
|
|
- const SHIFT_MASK_32: u32 = 0x1f;
|
|
|
|
const SHIFT_MASK_64: u64 = 0x3f;
|
|
const SHIFT_MASK_64: u64 = 0x3f;
|
|
|
|
|
|
let prog = match prog_ {
|
|
let prog = match prog_ {
|
|
@@ -226,10 +225,12 @@ pub fn execute_program(prog_: Option<&[u8]>, mem: &[u8], mbuff: &[u8], helpers:
|
|
ebpf::OR32_REG => reg[_dst] = (reg[_dst] as u32 | reg[_src] as u32) as u64,
|
|
ebpf::OR32_REG => reg[_dst] = (reg[_dst] as u32 | reg[_src] as u32) as u64,
|
|
ebpf::AND32_IMM => reg[_dst] = (reg[_dst] as u32 & insn.imm as u32) as u64,
|
|
ebpf::AND32_IMM => reg[_dst] = (reg[_dst] as u32 & insn.imm as u32) as u64,
|
|
ebpf::AND32_REG => reg[_dst] = (reg[_dst] as u32 & reg[_src] as u32) as u64,
|
|
ebpf::AND32_REG => reg[_dst] = (reg[_dst] as u32 & reg[_src] as u32) as u64,
|
|
- ebpf::LSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm as u32 & SHIFT_MASK_32) as u64,
|
|
|
|
- ebpf::LSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32 & SHIFT_MASK_32) as u64,
|
|
|
|
- ebpf::RSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm as u32 & SHIFT_MASK_32) as u64,
|
|
|
|
- ebpf::RSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32 & SHIFT_MASK_32) as u64,
|
|
|
|
|
|
+ // As for the 64-bit version, we should mask the number of bits to shift with
|
|
|
|
+ // 0x1f, but .wrapping_shl()/.wrapping_shr() already take care of it for us.
|
|
|
|
+ ebpf::LSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm as u32) as u64,
|
|
|
|
+ ebpf::LSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
|
|
|
|
+ ebpf::RSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm as u32) as u64,
|
|
|
|
+ ebpf::RSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
|
|
ebpf::NEG32 => { reg[_dst] = (reg[_dst] as i32).wrapping_neg() as u64; reg[_dst] &= U32MAX; },
|
|
ebpf::NEG32 => { reg[_dst] = (reg[_dst] as i32).wrapping_neg() as u64; reg[_dst] &= U32MAX; },
|
|
ebpf::MOD32_IMM if insn.imm as u32 == 0 => (),
|
|
ebpf::MOD32_IMM if insn.imm as u32 == 0 => (),
|
|
ebpf::MOD32_IMM => reg[_dst] = (reg[_dst] as u32 % insn.imm as u32) as u64,
|
|
ebpf::MOD32_IMM => reg[_dst] = (reg[_dst] as u32 % insn.imm as u32) as u64,
|
|
@@ -239,6 +240,8 @@ pub fn execute_program(prog_: Option<&[u8]>, mem: &[u8], mbuff: &[u8], helpers:
|
|
ebpf::XOR32_REG => reg[_dst] = (reg[_dst] as u32 ^ reg[_src] as u32) as u64,
|
|
ebpf::XOR32_REG => reg[_dst] = (reg[_dst] as u32 ^ reg[_src] as u32) as u64,
|
|
ebpf::MOV32_IMM => reg[_dst] = insn.imm as u32 as u64,
|
|
ebpf::MOV32_IMM => reg[_dst] = insn.imm as u32 as u64,
|
|
ebpf::MOV32_REG => reg[_dst] = (reg[_src] as u32) as u64,
|
|
ebpf::MOV32_REG => reg[_dst] = (reg[_src] as u32) as u64,
|
|
|
|
+ // As for the 64-bit version, we should mask the number of bits to shift with
|
|
|
|
+ // 0x1f, but .wrapping_shr() already takes care of it for us.
|
|
ebpf::ARSH32_IMM => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm as u32) as u64; reg[_dst] &= U32MAX; },
|
|
ebpf::ARSH32_IMM => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm as u32) as u64; reg[_dst] &= U32MAX; },
|
|
ebpf::ARSH32_REG => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
|
|
ebpf::ARSH32_REG => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
|
|
ebpf::LE => {
|
|
ebpf::LE => {
|