Browse Source

aya-bpf: generate bpf_probe_read getters for kernel types

Alessandro Decina 4 years ago
parent
commit
dc15c978f5

+ 985 - 0
bpf/aya-bpf/src/bpf/generated/getters.rs

@@ -0,0 +1,985 @@
+use crate::bpf::generated::bindings::*;
+// Generated getter for the anonymous `data` union of `sk_msg_md`.
+// Reads the field with `bpf_probe_read`; returns `None` on a failed read
+// or when the stored pointer is null.
+impl sk_msg_md__bindgen_ty_1 {
+    pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.data) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+}
+// Generated getters for the source-address union of `bpf_fib_lookup`.
+// Each getter copies its field out with `bpf_probe_read`, mapping a helper
+// error to `None` via `.ok()`.
+impl bpf_fib_lookup__bindgen_ty_2 {
+    pub fn ipv4_src(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv4_src) }.ok()
+    }
+    pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv6_src) }.ok()
+    }
+}
+// Generated getters for `sk_msg_md`. Scalar getters copy the field with
+// `bpf_probe_read` and return `None` on a failed read. The `data`,
+// `data_end` and `sk` getters go through the struct's anonymous unions
+// (`__bindgen_anon_1..3`) and additionally map a null pointer to `None`.
+impl sk_msg_md {
+    pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.data) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+    pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
+        let v =
+            unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.data_end) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+    pub fn family(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
+    }
+    pub fn remote_ip4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ip4) }.ok()
+    }
+    pub fn local_ip4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_ip4) }.ok()
+    }
+    pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ip6) }.ok()
+    }
+    pub fn local_ip6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_ip6) }.ok()
+    }
+    pub fn remote_port(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_port) }.ok()
+    }
+    pub fn local_port(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_port) }.ok()
+    }
+    pub fn size(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.size) }.ok()
+    }
+    pub fn sk(&self) -> Option<*mut bpf_sock> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_3.sk) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+}
+// No getters were generated for `seq_file`.
+impl seq_file {}
+// Generated getters for `bpf_sysctl`: each field is copied out with
+// `bpf_probe_read`, returning `None` if the helper fails.
+impl bpf_sysctl {
+    pub fn write(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.write) }.ok()
+    }
+    pub fn file_pos(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.file_pos) }.ok()
+    }
+}
+// No getters were generated for `task_struct`.
+impl task_struct {}
+// Generated getters for `bpf_sock_addr`. Scalar/array getters read their
+// field with `bpf_probe_read` and return `None` on failure; `sk` reads
+// through the anonymous union and also maps a null pointer to `None`.
+impl bpf_sock_addr {
+    pub fn user_family(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.user_family) }.ok()
+    }
+    pub fn user_ip4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.user_ip4) }.ok()
+    }
+    pub fn user_ip6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.user_ip6) }.ok()
+    }
+    pub fn user_port(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.user_port) }.ok()
+    }
+    pub fn family(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
+    }
+    // Field is `type_` because `type` is a Rust keyword (bindgen renaming).
+    pub fn type_(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.type_) }.ok()
+    }
+    pub fn protocol(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.protocol) }.ok()
+    }
+    pub fn msg_src_ip4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.msg_src_ip4) }.ok()
+    }
+    pub fn msg_src_ip6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.msg_src_ip6) }.ok()
+    }
+    pub fn sk(&self) -> Option<*mut bpf_sock> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.sk) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+}
+// Generated getters for the `bpf_sock_tuple` union: copy out the whole
+// IPv4 or IPv6 variant struct with `bpf_probe_read`, `None` on failure.
+impl bpf_sock_tuple__bindgen_ty_1 {
+    pub fn ipv4(&self) -> Option<bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv4) }.ok()
+    }
+    pub fn ipv6(&self) -> Option<bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv6) }.ok()
+    }
+}
+// No getters were generated for these types.
+impl sockaddr {}
+impl pt_regs {}
+// Generated getters for `bpf_tunnel_key`. `remote_ipv4`/`remote_ipv6`
+// read through the anonymous remote-address union; all getters return
+// `None` when `bpf_probe_read` fails.
+impl bpf_tunnel_key {
+    pub fn tunnel_id(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.tunnel_id) }.ok()
+    }
+    pub fn remote_ipv4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv4) }.ok()
+    }
+    pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv6) }.ok()
+    }
+    pub fn tunnel_tos(&self) -> Option<__u8> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.tunnel_tos) }.ok()
+    }
+    pub fn tunnel_ttl(&self) -> Option<__u8> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.tunnel_ttl) }.ok()
+    }
+    pub fn tunnel_ext(&self) -> Option<__u16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.tunnel_ext) }.ok()
+    }
+    pub fn tunnel_label(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.tunnel_label) }.ok()
+    }
+}
+// Generated getter for the `flow_keys` union of `__sk_buff`: reads the
+// pointer with `bpf_probe_read`, mapping read failure or null to `None`.
+impl __sk_buff__bindgen_ty_1 {
+    pub fn flow_keys(&self) -> Option<*mut bpf_flow_keys> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.flow_keys) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+}
+// Generated getters for `bpf_perf_event_value`: each counter is copied
+// out with `bpf_probe_read`, returning `None` on failure.
+impl bpf_perf_event_value {
+    pub fn counter(&self) -> Option<__u64> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.counter) }.ok()
+    }
+    pub fn enabled(&self) -> Option<__u64> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.enabled) }.ok()
+    }
+    pub fn running(&self) -> Option<__u64> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.running) }.ok()
+    }
+}
+// Generated getters for the IPv4 variant of the `bpf_sock_tuple` union:
+// addresses and ports are copied with `bpf_probe_read`, `None` on failure.
+impl bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1 {
+    pub fn saddr(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.saddr) }.ok()
+    }
+    pub fn daddr(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.daddr) }.ok()
+    }
+    pub fn sport(&self) -> Option<__be16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.sport) }.ok()
+    }
+    pub fn dport(&self) -> Option<__be16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.dport) }.ok()
+    }
+}
+// Generated getters for the args/reply union of `bpf_sock_ops`: each
+// variant is copied with `bpf_probe_read`, `None` on failure.
+impl bpf_sock_ops__bindgen_ty_1 {
+    pub fn args(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.args) }.ok()
+    }
+    pub fn reply(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.reply) }.ok()
+    }
+    pub fn replylong(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.replylong) }.ok()
+    }
+}
+// Generated getters for the tos/flowinfo/rt_metric union of
+// `bpf_fib_lookup`: each variant is copied with `bpf_probe_read`.
+impl bpf_fib_lookup__bindgen_ty_1 {
+    pub fn tos(&self) -> Option<__u8> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.tos) }.ok()
+    }
+    pub fn flowinfo(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.flowinfo) }.ok()
+    }
+    pub fn rt_metric(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.rt_metric) }.ok()
+    }
+}
+// No getters were generated for these types.
+impl bpf_redir_neigh {}
+impl socket {}
+impl file {}
+impl tcphdr {}
+// Generated getters for `bpf_pidns_info`: pid/tgid are copied with
+// `bpf_probe_read`, `None` on failure.
+impl bpf_pidns_info {
+    pub fn pid(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.pid) }.ok()
+    }
+    pub fn tgid(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.tgid) }.ok()
+    }
+}
+// Generated getters for `bpf_tcp_sock`: every field is copied out with
+// `bpf_probe_read`, with a helper error mapped to `None` via `.ok()`.
+impl bpf_tcp_sock {
+    pub fn snd_cwnd(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_cwnd) }.ok()
+    }
+    pub fn srtt_us(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.srtt_us) }.ok()
+    }
+    pub fn rtt_min(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.rtt_min) }.ok()
+    }
+    pub fn snd_ssthresh(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_ssthresh) }.ok()
+    }
+    pub fn rcv_nxt(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.rcv_nxt) }.ok()
+    }
+    pub fn snd_nxt(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_nxt) }.ok()
+    }
+    pub fn snd_una(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_una) }.ok()
+    }
+    pub fn mss_cache(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.mss_cache) }.ok()
+    }
+    pub fn ecn_flags(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ecn_flags) }.ok()
+    }
+    pub fn rate_delivered(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.rate_delivered) }.ok()
+    }
+    pub fn rate_interval_us(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.rate_interval_us) }.ok()
+    }
+    pub fn packets_out(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.packets_out) }.ok()
+    }
+    pub fn retrans_out(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.retrans_out) }.ok()
+    }
+    pub fn total_retrans(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.total_retrans) }.ok()
+    }
+    pub fn segs_in(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.segs_in) }.ok()
+    }
+    pub fn data_segs_in(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_segs_in) }.ok()
+    }
+    pub fn segs_out(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.segs_out) }.ok()
+    }
+    pub fn data_segs_out(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_segs_out) }.ok()
+    }
+    pub fn lost_out(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.lost_out) }.ok()
+    }
+    pub fn sacked_out(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.sacked_out) }.ok()
+    }
+    pub fn bytes_received(&self) -> Option<__u64> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.bytes_received) }.ok()
+    }
+    pub fn bytes_acked(&self) -> Option<__u64> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.bytes_acked) }.ok()
+    }
+    pub fn dsack_dups(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.dsack_dups) }.ok()
+    }
+    pub fn delivered(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.delivered) }.ok()
+    }
+    pub fn delivered_ce(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.delivered_ce) }.ok()
+    }
+    pub fn icsk_retransmits(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.icsk_retransmits) }.ok()
+    }
+}
+// Generated getters for the remote-address union of `bpf_tunnel_key`.
+impl bpf_tunnel_key__bindgen_ty_1 {
+    pub fn remote_ipv4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ipv4) }.ok()
+    }
+    pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ipv6) }.ok()
+    }
+}
+// Generated getter for `bpf_spin_lock`: copies out `val` with
+// `bpf_probe_read`. NOTE(review): this only reads the raw value; it does
+// not acquire the lock.
+impl bpf_spin_lock {
+    pub fn val(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.val) }.ok()
+    }
+}
+// Generated getters for `bpf_fib_lookup`. Getters for union members go
+// through the anonymous unions (`__bindgen_anon_1..3`); all return `None`
+// when `bpf_probe_read` fails.
+impl bpf_fib_lookup {
+    pub fn family(&self) -> Option<__u8> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
+    }
+    pub fn l4_protocol(&self) -> Option<__u8> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.l4_protocol) }.ok()
+    }
+    pub fn sport(&self) -> Option<__be16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.sport) }.ok()
+    }
+    pub fn dport(&self) -> Option<__be16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.dport) }.ok()
+    }
+    pub fn tot_len(&self) -> Option<__u16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.tot_len) }.ok()
+    }
+    pub fn ifindex(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ifindex) }.ok()
+    }
+    pub fn tos(&self) -> Option<__u8> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.tos) }.ok()
+    }
+    pub fn flowinfo(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.flowinfo) }.ok()
+    }
+    pub fn rt_metric(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.rt_metric) }.ok()
+    }
+    pub fn ipv4_src(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.ipv4_src) }.ok()
+    }
+    pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.ipv6_src) }.ok()
+    }
+    pub fn ipv4_dst(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_3.ipv4_dst) }.ok()
+    }
+    pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_3.ipv6_dst) }.ok()
+    }
+    pub fn h_vlan_proto(&self) -> Option<__be16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.h_vlan_proto) }.ok()
+    }
+    // Name keeps the kernel's original capitalization (h_vlan_TCI).
+    pub fn h_vlan_TCI(&self) -> Option<__be16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.h_vlan_TCI) }.ok()
+    }
+    pub fn smac(&self) -> Option<[__u8; 6usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.smac) }.ok()
+    }
+    pub fn dmac(&self) -> Option<[__u8; 6usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.dmac) }.ok()
+    }
+}
+// Generated getters for the remote-address union of `bpf_xfrm_state`.
+impl bpf_xfrm_state__bindgen_ty_1 {
+    pub fn remote_ipv4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ipv4) }.ok()
+    }
+    pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ipv6) }.ok()
+    }
+}
+// Generated getter for the `data_end` union of `sk_reuseport_md`: reads
+// the pointer with `bpf_probe_read`, mapping failure or null to `None`.
+impl sk_reuseport_md__bindgen_ty_2 {
+    pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_end) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+}
+// No getters were generated for `tcp_request_sock`.
+impl tcp_request_sock {}
+// Generated getters for `bpf_map_info`: every field is copied out with
+// `bpf_probe_read`, returning `None` on a failed read.
+impl bpf_map_info {
+    // Field is `type_` because `type` is a Rust keyword (bindgen renaming).
+    pub fn type_(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.type_) }.ok()
+    }
+    pub fn id(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.id) }.ok()
+    }
+    pub fn key_size(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.key_size) }.ok()
+    }
+    pub fn value_size(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.value_size) }.ok()
+    }
+    pub fn max_entries(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.max_entries) }.ok()
+    }
+    pub fn map_flags(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.map_flags) }.ok()
+    }
+    pub fn name(&self) -> Option<[::aya_bpf_cty::c_char; 16usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.name) }.ok()
+    }
+    pub fn ifindex(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ifindex) }.ok()
+    }
+    pub fn btf_vmlinux_value_type_id(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.btf_vmlinux_value_type_id) }.ok()
+    }
+    pub fn netns_dev(&self) -> Option<__u64> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.netns_dev) }.ok()
+    }
+    pub fn netns_ino(&self) -> Option<__u64> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.netns_ino) }.ok()
+    }
+    pub fn btf_id(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.btf_id) }.ok()
+    }
+    pub fn btf_key_type_id(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.btf_key_type_id) }.ok()
+    }
+    pub fn btf_value_type_id(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.btf_value_type_id) }.ok()
+    }
+}
+// No getters were generated for `linux_binprm`.
+impl linux_binprm {}
+// Generated getter for the `data_end` union of `sk_msg_md`: reads the
+// pointer with `bpf_probe_read`, mapping failure or null to `None`.
+impl sk_msg_md__bindgen_ty_2 {
+    pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_end) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+}
+// Generated getter for the `sk` union of `bpf_sock_ops`: reads the
+// pointer with `bpf_probe_read`, mapping failure or null to `None`.
+impl bpf_sock_ops__bindgen_ty_2 {
+    pub fn sk(&self) -> Option<*mut bpf_sock> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.sk) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+}
+// No getters were generated for `path`.
+impl path {}
+// Generated getters for `__sk_buff`. Scalar/array getters copy the field
+// with `bpf_probe_read` and return `None` on failure; `flow_keys` and
+// `sk` read through the anonymous unions (`__bindgen_anon_1/2`) and also
+// map a null pointer to `None`. Note `data`/`data_end` here are the raw
+// `__u32` context fields, not pointers.
+impl __sk_buff {
+    pub fn len(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.len) }.ok()
+    }
+    pub fn pkt_type(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.pkt_type) }.ok()
+    }
+    pub fn mark(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.mark) }.ok()
+    }
+    pub fn queue_mapping(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.queue_mapping) }.ok()
+    }
+    pub fn protocol(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.protocol) }.ok()
+    }
+    pub fn vlan_present(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.vlan_present) }.ok()
+    }
+    pub fn vlan_tci(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.vlan_tci) }.ok()
+    }
+    pub fn vlan_proto(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.vlan_proto) }.ok()
+    }
+    pub fn priority(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.priority) }.ok()
+    }
+    pub fn ingress_ifindex(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ingress_ifindex) }.ok()
+    }
+    pub fn ifindex(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ifindex) }.ok()
+    }
+    pub fn tc_index(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.tc_index) }.ok()
+    }
+    pub fn cb(&self) -> Option<[__u32; 5usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.cb) }.ok()
+    }
+    pub fn hash(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.hash) }.ok()
+    }
+    pub fn tc_classid(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.tc_classid) }.ok()
+    }
+    pub fn data(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.data) }.ok()
+    }
+    pub fn data_end(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_end) }.ok()
+    }
+    pub fn napi_id(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.napi_id) }.ok()
+    }
+    pub fn family(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
+    }
+    pub fn remote_ip4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ip4) }.ok()
+    }
+    pub fn local_ip4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_ip4) }.ok()
+    }
+    pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ip6) }.ok()
+    }
+    pub fn local_ip6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_ip6) }.ok()
+    }
+    pub fn remote_port(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_port) }.ok()
+    }
+    pub fn local_port(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_port) }.ok()
+    }
+    pub fn data_meta(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_meta) }.ok()
+    }
+    pub fn flow_keys(&self) -> Option<*mut bpf_flow_keys> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.flow_keys) }
+            .ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+    pub fn tstamp(&self) -> Option<__u64> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.tstamp) }.ok()
+    }
+    pub fn wire_len(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.wire_len) }.ok()
+    }
+    pub fn gso_segs(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.gso_segs) }.ok()
+    }
+    pub fn sk(&self) -> Option<*mut bpf_sock> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.sk) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+    pub fn gso_size(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.gso_size) }.ok()
+    }
+}
+// Generated getters for the IPv4 variant of the `bpf_flow_keys` union.
+impl bpf_flow_keys__bindgen_ty_1__bindgen_ty_1 {
+    pub fn ipv4_src(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv4_src) }.ok()
+    }
+    pub fn ipv4_dst(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv4_dst) }.ok()
+    }
+}
+// Generated getter for the `sk` union of `sk_msg_md`: reads the pointer
+// with `bpf_probe_read`, mapping failure or null to `None`.
+impl sk_msg_md__bindgen_ty_3 {
+    pub fn sk(&self) -> Option<*mut bpf_sock> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.sk) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+}
+// Generated getters for `bpf_sock`: every field is copied out with
+// `bpf_probe_read`, returning `None` on a failed read.
+impl bpf_sock {
+    pub fn bound_dev_if(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.bound_dev_if) }.ok()
+    }
+    pub fn family(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
+    }
+    // Field is `type_` because `type` is a Rust keyword (bindgen renaming).
+    pub fn type_(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.type_) }.ok()
+    }
+    pub fn protocol(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.protocol) }.ok()
+    }
+    pub fn mark(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.mark) }.ok()
+    }
+    pub fn priority(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.priority) }.ok()
+    }
+    pub fn src_ip4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.src_ip4) }.ok()
+    }
+    pub fn src_ip6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.src_ip6) }.ok()
+    }
+    pub fn src_port(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.src_port) }.ok()
+    }
+    pub fn dst_port(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.dst_port) }.ok()
+    }
+    pub fn dst_ip4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.dst_ip4) }.ok()
+    }
+    pub fn dst_ip6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.dst_ip6) }.ok()
+    }
+    pub fn state(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.state) }.ok()
+    }
+    pub fn rx_queue_mapping(&self) -> Option<__s32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.rx_queue_mapping) }.ok()
+    }
+}
+// Generated getters for `bpf_xfrm_state`. `remote_ipv4`/`remote_ipv6`
+// read through the anonymous remote-address union; all getters return
+// `None` when `bpf_probe_read` fails.
+impl bpf_xfrm_state {
+    pub fn reqid(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.reqid) }.ok()
+    }
+    pub fn spi(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.spi) }.ok()
+    }
+    pub fn family(&self) -> Option<__u16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
+    }
+    pub fn ext(&self) -> Option<__u16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ext) }.ok()
+    }
+    pub fn remote_ipv4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv4) }.ok()
+    }
+    pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv6) }.ok()
+    }
+}
+// Generated getters for the IPv6 variant of the `bpf_flow_keys` union.
+impl bpf_flow_keys__bindgen_ty_1__bindgen_ty_2 {
+    pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv6_src) }.ok()
+    }
+    pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv6_dst) }.ok()
+    }
+}
+// Generated getter for the `data` union of `sk_reuseport_md`: reads the
+// pointer with `bpf_probe_read`, mapping failure or null to `None`.
+impl sk_reuseport_md__bindgen_ty_1 {
+    pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.data) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+}
+// No getters were generated for bindgen's bitfield storage wrapper.
+impl<Storage> __BindgenBitfieldUnit<Storage> {}
+// Generated getters for `bpf_flow_keys`. The IPv4/IPv6 address getters
+// read through two levels of anonymous unions
+// (`__bindgen_anon_1.__bindgen_anon_1/2`); all getters return `None`
+// when `bpf_probe_read` fails.
+impl bpf_flow_keys {
+    pub fn nhoff(&self) -> Option<__u16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.nhoff) }.ok()
+    }
+    pub fn thoff(&self) -> Option<__u16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.thoff) }.ok()
+    }
+    pub fn addr_proto(&self) -> Option<__u16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.addr_proto) }.ok()
+    }
+    pub fn is_frag(&self) -> Option<__u8> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.is_frag) }.ok()
+    }
+    pub fn is_first_frag(&self) -> Option<__u8> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.is_first_frag) }.ok()
+    }
+    pub fn is_encap(&self) -> Option<__u8> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.is_encap) }.ok()
+    }
+    pub fn ip_proto(&self) -> Option<__u8> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ip_proto) }.ok()
+    }
+    pub fn n_proto(&self) -> Option<__be16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.n_proto) }.ok()
+    }
+    pub fn sport(&self) -> Option<__be16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.sport) }.ok()
+    }
+    pub fn dport(&self) -> Option<__be16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.dport) }.ok()
+    }
+    pub fn ipv4_src(&self) -> Option<__be32> {
+        unsafe {
+            crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_1.ipv4_src)
+        }
+        .ok()
+    }
+    pub fn ipv4_dst(&self) -> Option<__be32> {
+        unsafe {
+            crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_1.ipv4_dst)
+        }
+        .ok()
+    }
+    pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> {
+        unsafe {
+            crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_2.ipv6_src)
+        }
+        .ok()
+    }
+    pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> {
+        unsafe {
+            crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_2.ipv6_dst)
+        }
+        .ok()
+    }
+    pub fn flags(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.flags) }.ok()
+    }
+    pub fn flow_label(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.flow_label) }.ok()
+    }
+}
+// No getters were generated for these types.
+impl tcp6_sock {}
+impl inode {}
+impl tcp_timewait_sock {}
+impl bpf_sock_ops {
+    pub fn op(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.op) }.ok()
+    }
+    pub fn args(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.args) }.ok()
+    }
+    pub fn reply(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.reply) }.ok()
+    }
+    pub fn replylong(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.replylong) }.ok()
+    }
+    pub fn family(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
+    }
+    pub fn remote_ip4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ip4) }.ok()
+    }
+    pub fn local_ip4(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_ip4) }.ok()
+    }
+    pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ip6) }.ok()
+    }
+    pub fn local_ip6(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_ip6) }.ok()
+    }
+    pub fn remote_port(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_port) }.ok()
+    }
+    pub fn local_port(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_port) }.ok()
+    }
+    // TCP/socket state getters mirrored from the enclosing kernel context
+    // struct (presumably `bpf_sock_ops` — the impl header is above this
+    // hunk; confirm against the full generated file). Each is a fallible
+    // `bpf_probe_read` of the field, returning `None` on helper error.
+    pub fn is_fullsock(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.is_fullsock) }.ok()
+    }
+    pub fn snd_cwnd(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_cwnd) }.ok()
+    }
+    pub fn srtt_us(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.srtt_us) }.ok()
+    }
+    pub fn bpf_sock_ops_cb_flags(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.bpf_sock_ops_cb_flags) }.ok()
+    }
+    pub fn state(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.state) }.ok()
+    }
+    pub fn rtt_min(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.rtt_min) }.ok()
+    }
+    pub fn snd_ssthresh(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_ssthresh) }.ok()
+    }
+    pub fn rcv_nxt(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.rcv_nxt) }.ok()
+    }
+    pub fn snd_nxt(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_nxt) }.ok()
+    }
+    pub fn snd_una(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_una) }.ok()
+    }
+    pub fn mss_cache(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.mss_cache) }.ok()
+    }
+    pub fn ecn_flags(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ecn_flags) }.ok()
+    }
+    pub fn rate_delivered(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.rate_delivered) }.ok()
+    }
+    pub fn rate_interval_us(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.rate_interval_us) }.ok()
+    }
+    pub fn packets_out(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.packets_out) }.ok()
+    }
+    pub fn retrans_out(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.retrans_out) }.ok()
+    }
+    pub fn total_retrans(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.total_retrans) }.ok()
+    }
+    pub fn segs_in(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.segs_in) }.ok()
+    }
+    pub fn data_segs_in(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_segs_in) }.ok()
+    }
+    pub fn segs_out(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.segs_out) }.ok()
+    }
+    pub fn data_segs_out(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_segs_out) }.ok()
+    }
+    pub fn lost_out(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.lost_out) }.ok()
+    }
+    pub fn sacked_out(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.sacked_out) }.ok()
+    }
+    pub fn sk_txhash(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.sk_txhash) }.ok()
+    }
+    // 64-bit byte counters.
+    pub fn bytes_received(&self) -> Option<__u64> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.bytes_received) }.ok()
+    }
+    pub fn bytes_acked(&self) -> Option<__u64> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.bytes_acked) }.ok()
+    }
+    /// Socket pointer read from the anonymous union; both a failed probe
+    /// read and a NULL pointer map to `None`, so `Some` is never NULL.
+    pub fn sk(&self) -> Option<*mut bpf_sock> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.sk) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+}
+impl bpf_sock_addr__bindgen_ty_1 {
+    /// Reads the `sk` pointer with `bpf_probe_read`; a failed read and a
+    /// NULL pointer both become `None`.
+    pub fn sk(&self) -> Option<*mut bpf_sock> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.sk) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+}
+impl bpf_fib_lookup__bindgen_ty_3 {
+    // NOTE(review): `ipv4_dst` and `ipv6_dst` are variants of the same
+    // union storage; nothing here checks which address family is in use —
+    // the caller is responsible for reading the right one. Confirm.
+    pub fn ipv4_dst(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv4_dst) }.ok()
+    }
+    pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv6_dst) }.ok()
+    }
+}
+impl sk_reuseport_md {
+    // `data`/`data_end` live in bindgen anonymous unions; pointer getters
+    // turn both a failed `bpf_probe_read` and a NULL value into `None`.
+    pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.data) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+    pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
+        let v =
+            unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.data_end) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+    // Scalar getters: `None` only when the probe read itself fails.
+    pub fn len(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.len) }.ok()
+    }
+    pub fn eth_protocol(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.eth_protocol) }.ok()
+    }
+    pub fn ip_protocol(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ip_protocol) }.ok()
+    }
+    pub fn bind_inany(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.bind_inany) }.ok()
+    }
+    pub fn hash(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.hash) }.ok()
+    }
+}
+impl xdp_md {
+    // In `xdp_md` the data fields are `__u32` values, not pointers, so no
+    // NULL check is generated — `None` only means the probe read failed.
+    pub fn data(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.data) }.ok()
+    }
+    pub fn data_end(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_end) }.ok()
+    }
+    pub fn data_meta(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_meta) }.ok()
+    }
+    pub fn ingress_ifindex(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.ingress_ifindex) }.ok()
+    }
+    pub fn rx_queue_index(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.rx_queue_index) }.ok()
+    }
+    pub fn egress_ifindex(&self) -> Option<__u32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.egress_ifindex) }.ok()
+    }
+}
+// Empty impls: the generator found no eligible public named fields on these
+// types (presumably opaque or bitfield-only in the bindings — confirm
+// against bindings.rs), so no getters are emitted.
+impl bpf_perf_event_data {}
+impl tcp_sock {}
+impl __sk_buff__bindgen_ty_2 {
+    /// Reads the `sk` pointer with `bpf_probe_read`; a failed read and a
+    /// NULL pointer both become `None`.
+    pub fn sk(&self) -> Option<*mut bpf_sock> {
+        let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.sk) }.ok()?;
+        if v.is_null() {
+            None
+        } else {
+            Some(v)
+        }
+    }
+}
+impl bpf_sock_tuple {
+    // NOTE(review): `ipv4` and `ipv6` overlay the same anonymous union;
+    // nothing here records which variant is valid — callers must know the
+    // address family of the tuple they hold. Confirm.
+    pub fn ipv4(&self) -> Option<bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.ipv4) }.ok()
+    }
+    pub fn ipv6(&self) -> Option<bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.ipv6) }.ok()
+    }
+}
+impl btf_ptr {}
+// IPv6 variant of the socket tuple: network-byte-order addresses and ports.
+impl bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2 {
+    pub fn saddr(&self) -> Option<[__be32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.saddr) }.ok()
+    }
+    pub fn daddr(&self) -> Option<[__be32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.daddr) }.ok()
+    }
+    pub fn sport(&self) -> Option<__be16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.sport) }.ok()
+    }
+    pub fn dport(&self) -> Option<__be16> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.dport) }.ok()
+    }
+}
+impl bpf_map_def {
+    // Map-definition getters; `type_` keeps bindgen's trailing underscore
+    // because `type` is a Rust keyword.
+    pub fn type_(&self) -> Option<::aya_bpf_cty::c_uint> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.type_) }.ok()
+    }
+    pub fn key_size(&self) -> Option<::aya_bpf_cty::c_uint> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.key_size) }.ok()
+    }
+    pub fn value_size(&self) -> Option<::aya_bpf_cty::c_uint> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.value_size) }.ok()
+    }
+    pub fn max_entries(&self) -> Option<::aya_bpf_cty::c_uint> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.max_entries) }.ok()
+    }
+    pub fn map_flags(&self) -> Option<::aya_bpf_cty::c_uint> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.map_flags) }.ok()
+    }
+}
+impl bpf_flow_keys__bindgen_ty_1 {
+    // Getters reach through nested anonymous unions; the v4 and v6 address
+    // pairs overlay the same storage, so only the variant matching the
+    // flow's address family is meaningful — caller's responsibility.
+    pub fn ipv4_src(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.ipv4_src) }.ok()
+    }
+    pub fn ipv4_dst(&self) -> Option<__be32> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.ipv4_dst) }.ok()
+    }
+    pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.ipv6_src) }.ok()
+    }
+    pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> {
+        unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.ipv6_dst) }.ok()
+    }
+}
+impl udp6_sock {}

+ 1 - 0
bpf/aya-bpf/src/bpf/generated/mod.rs

@@ -1,4 +1,5 @@
 #![allow(dead_code, non_camel_case_types, non_snake_case)]
 
 pub(crate) mod bindings;
+pub(crate) mod getters;
 pub(crate) mod helpers;

+ 1 - 3
xtask/Cargo.toml

@@ -4,11 +4,9 @@ version = "0.1.0"
 authors = ["Alessandro Decina <[email protected]>"]
 edition = "2018"
 
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
 [dependencies]
 structopt = {version = "0.3", default-features = false }
 anyhow = "1"
-syn = {version = "1", features = ["visit-mut"] }
+syn = {version = "1", features = ["visit-mut", "extra-traits"] }
 quote = "1"
 proc-macro2 = "1"

+ 41 - 1
xtask/src/codegen/aya_bpf.rs

@@ -14,6 +14,8 @@ use syn::{
     Type,
 };
 
+use crate::codegen::getters::{generate_getters_for_items, Getter};
+
 #[derive(StructOpt)]
 pub struct CodegenOptions {
     #[structopt(long)]
@@ -57,15 +59,17 @@ pub fn codegen(opts: CodegenOptions) -> Result<(), anyhow::Error> {
 
     // delete the helpers, then rewrite them in helpers.rs
     let mut tree = parse_str::<syn::File>(bindings).unwrap();
+
     let mut tx = RewriteBpfHelpers {
         helpers: Vec::new(),
     };
     tx.visit_file_mut(&mut tree);
 
+    let bindings = tree.to_token_stream().to_string();
     let filename = generated.join("bindings.rs");
     {
         let mut file = File::create(&filename)?;
-        write!(file, "{}", tree.to_token_stream())?;
+        write!(file, "{}", bindings)?;
     }
     Command::new("rustfmt").arg(filename).status()?;
 
@@ -79,9 +83,45 @@ pub fn codegen(opts: CodegenOptions) -> Result<(), anyhow::Error> {
     }
     Command::new("rustfmt").arg(filename).status()?;
 
+    let getters = generate_getters_for_items(&tree.items, gen_probe_read_getter);
+    let filename = generated.join("getters.rs");
+    {
+        let mut file = File::create(&filename)?;
+        write!(file, "use crate::bpf::generated::bindings::*;")?;
+        write!(file, "{}", getters)?;
+    }
+    Command::new("rustfmt").arg(filename).status()?;
+
     Ok(())
 }
 
+/// Emits the body of one getter: a `bpf_probe_read` of the field reached via
+/// `getter.prefix` + `getter.ident`, wrapped in `Option<T>`.
+fn gen_probe_read_getter(getter: &Getter<'_>) -> TokenStream {
+    let ident = getter.ident;
+    let ty = getter.ty;
+    let prefix = &getter.prefix;
+    match ty {
+        Type::Ptr(_) => {
+            // Pointer fields: treat a NULL value like a failed read, so a
+            // `Some` result is never a NULL pointer.
+            quote! {
+                pub fn #ident(&self) -> Option<#ty> {
+                    let v = unsafe { crate::bpf::helpers::bpf_probe_read(&#(#prefix).*.#ident) }.ok()?;
+                    if v.is_null() {
+                        None
+                    } else {
+                        Some(v)
+                    }
+                }
+            }
+        }
+        _ => {
+            // Non-pointer fields: only the read result itself is fallible.
+            quote! {
+                pub fn #ident(&self) -> Option<#ty> {
+                    unsafe { crate::bpf::helpers::bpf_probe_read(&#(#prefix).*.#ident) }.ok()
+                }
+            }
+        }
+    }
+}
+
 struct RewriteBpfHelpers {
     helpers: Vec<String>,
 }

+ 126 - 0
xtask/src/codegen/getters.rs

@@ -0,0 +1,126 @@
+use std::collections::HashMap;
+
+use proc_macro2::{Span, TokenStream};
+use quote::{quote, TokenStreamExt};
+use syn::{
+    self, Fields, FieldsNamed, Generics, Ident, Item, ItemStruct, ItemUnion, Path, Type, TypePath,
+    Visibility,
+};
+
+/// Index of the struct/union items in a bindgen-generated file, used to
+/// compute the getters that should be emitted for each type.
+pub struct GetterList<'a> {
+    /// The `self` identifier, root segment of every field access path.
+    slf: Ident,
+    /// Item identifiers in source order. Iterating in this order keeps the
+    /// generated file deterministic across runs; walking `item_fields`
+    /// directly would follow the map's unspecified (randomized) iteration
+    /// order and reshuffle the output on every invocation.
+    names: Vec<Ident>,
+    /// Item identifier -> (item, its named fields), for O(1) lookup when
+    /// resolving bindgen's anonymous union/struct members.
+    item_fields: HashMap<Ident, (&'a Item, &'a FieldsNamed)>,
+}
+
+impl<'a> GetterList<'a> {
+    pub fn new(items: &'a [Item]) -> GetterList<'a> {
+        let mut names = Vec::new();
+        let mut item_fields = HashMap::new();
+        for item in items {
+            if let Some((ident, _generics, fields)) = unpack_item(item) {
+                names.push(ident.clone());
+                item_fields.insert(ident.clone(), (item, fields));
+            }
+        }
+        GetterList {
+            slf: Ident::new("self", Span::call_site()),
+            names,
+            item_fields,
+        }
+    }
+
+    /// Iterates the items in source order, yielding each item together with
+    /// the getters to generate for it.
+    pub fn iter(&self) -> impl Iterator<Item = (&'a Item, Vec<Getter<'_>>)> {
+        self.names.iter().map(move |name| {
+            let (item, fields) = self.item_fields[name];
+            (item, self.getters(&self.slf, fields))
+        })
+    }
+
+    /// Builds the getter list for `fields`, rooted at the path segment
+    /// `ident`. Recurses into bindgen's `__bindgen_anon_*` members so their
+    /// fields surface as getters on the containing type.
+    fn getters(&self, ident: &'a Ident, fields: &'a FieldsNamed) -> Vec<Getter<'a>> {
+        let mut getters = Vec::new();
+        for field in &fields.named {
+            // Private fields (bindgen padding and the like) get no getters.
+            if field.vis == Visibility::Inherited {
+                continue;
+            }
+
+            let field_ident = field.ident.as_ref().unwrap();
+            let field_s = field_ident.to_string();
+
+            // FIXME: bindgen generates fields named `_bitfield_N` for bitfields. If a type T has
+            // two or more unions with bitfields, the getters for the bitfields - generated in impl
+            // T - will clash. To avoid that we skip getters for bitfields altogether for now.
+            // See sk_reuseport_md for an example where the clash happens.
+            if field_s.starts_with("_bitfield") {
+                continue;
+            }
+
+            if field_s.starts_with("__bindgen_anon") {
+                // Resolve the anonymous member's type, recurse into its
+                // fields, and prefix each resulting getter with this path
+                // segment (e.g. `self.__bindgen_anon_1.field`).
+                let field_ty_ident = match &field.ty {
+                    Type::Path(TypePath {
+                        path: Path { segments, .. },
+                        ..
+                    }) => &segments.first().unwrap().ident,
+                    ty => panic!("unsupported anonymous field type: {:?}", ty),
+                };
+                let sub_fields = self
+                    .item_fields
+                    .get(field_ty_ident)
+                    .unwrap_or_else(|| {
+                        panic!("anonymous type not found for field {}", field_ident)
+                    })
+                    .1;
+                getters.extend(self.getters(field_ident, sub_fields).into_iter().map(
+                    |mut getter| {
+                        getter.prefix.insert(0, ident);
+                        getter
+                    },
+                ));
+            } else {
+                getters.push(Getter {
+                    ident: field_ident,
+                    prefix: vec![ident],
+                    ty: &field.ty,
+                });
+            }
+        }
+
+        getters
+    }
+}
+
+pub fn generate_getters_for_items(
+    items: &[Item],
+    gen_getter: fn(&Getter<'_>) -> TokenStream,
+) -> TokenStream {
+    let mut tokens = TokenStream::new();
+    tokens.append_all(GetterList::new(&items).iter().map(|(item, getters)| {
+        let getters = getters.iter().map(gen_getter);
+        let (ident, generics, _) = unpack_item(item).unwrap();
+        quote! {
+            impl#generics #ident#generics {
+                #(#getters)*
+            }
+        }
+    }));
+
+    tokens
+}
+
+/// A single field accessor to generate.
+#[derive(Debug)]
+pub struct Getter<'a> {
+    /// Name of the field being read (also the getter's method name).
+    pub ident: &'a Ident,
+    /// Path segments leading to the field, e.g. `[self]` or
+    /// `[self, __bindgen_anon_1]` for fields of anonymous unions.
+    pub prefix: Vec<&'a Ident>,
+    /// The field's type; the getter returns `Option` of this type.
+    pub ty: &'a Type,
+}
+
+/// Returns `(ident, generics, named fields)` for struct and union items;
+/// `None` for every other item kind — including tuple/unit structs, which
+/// have no named fields to build getters from.
+fn unpack_item(item: &Item) -> Option<(&Ident, &Generics, &FieldsNamed)> {
+    match item {
+        Item::Struct(ItemStruct {
+            ident,
+            generics,
+            fields: Fields::Named(fields),
+            ..
+        })
+        | Item::Union(ItemUnion {
+            ident,
+            generics,
+            fields,
+            ..
+        }) => Some((ident, generics, fields)),
+        _ => None,
+    }
+}

+ 1 - 0
xtask/src/codegen/mod.rs

@@ -1,4 +1,5 @@
 mod aya_bpf;
+pub mod getters;
 
 use structopt::StructOpt;