#define KBUILD_MODNAME "foo" #include #include #include #include #include #include #include #include #include "bpf_helpers.h" /* compiler workaround */ #define _htonl __builtin_bswap32 static inline void set_dst_mac(struct __sk_buff *skb, char *mac) { bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1); } #define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check)) #define TOS_OFF (ETH_HLEN + offsetof(struct iphdr, tos)) static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) { __u8 old_tos = load_byte(skb, TOS_OFF); bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2); bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0); } #define TCP_CSUM_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, check)) #define IP_SRC_OFF (ETH_HLEN + offsetof(struct iphdr, saddr)) #define IS_PSEUDO 0x10 static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) { __u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF)); bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip)); bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip)); bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0); } #define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest)) static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port) { __u16 old_port = htons(load_half(skb, TCP_DPORT_OFF)); bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port)); bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0); } SEC("classifier") int bpf_prog1(struct __sk_buff *skb) { __u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)); long *value; if (proto == IPPROTO_TCP) { set_ip_tos(skb, 8); set_tcp_ip_src(skb, 0xA010101); set_tcp_dest_port(skb, 5001); } return 0; } SEC("redirect_xmit") int _redirect_xmit(struct __sk_buff *skb) { return bpf_redirect(skb->ifindex + 1, 0); } SEC("redirect_recv") int _redirect_recv(struct __sk_buff 
*skb) { return bpf_redirect(skb->ifindex + 1, 1); } SEC("clone_redirect_xmit") int _clone_redirect_xmit(struct __sk_buff *skb) { bpf_clone_redirect(skb, skb->ifindex + 1, 0); return TC_ACT_SHOT; } SEC("clone_redirect_recv") int _clone_redirect_recv(struct __sk_buff *skb) { bpf_clone_redirect(skb, skb->ifindex + 1, 1); return TC_ACT_SHOT; } char _license[] SEC("license") = "GPL"; name='id' value='303223092081963513494b4377fa1ac9e362ed4b'/>
context:
space:
mode:
authorFlorian Westphal <fw@strlen.de>2017-01-23 18:21:58 +0100
committerPablo Neira Ayuso <pablo@netfilter.org>2017-02-02 14:31:55 +0100
commit303223092081963513494b4377fa1ac9e362ed4b (patch)
treee312cf241ada3f96d5844613770ebbeeb152585c
parentc74454fadd5ea6fc866ffe2c417a0dba56b2bf1c (diff)
netfilter: guarantee 8 byte minalign for template addresses
The next change will merge skb->nfct pointer and skb->nfctinfo status bits into a single skb->_nfct (unsigned long) area.

For this to work, nf_conn addresses must always be aligned on at least an 8 byte boundary, since we will need the lower 3 bits to store nfctinfo.

Conntrack templates are allocated via kmalloc. The kbuild test robot reported "BUILD_BUG_ON failed: NFCT_INFOMASK >= ARCH_KMALLOC_MINALIGN" on v1 of this patchset, so not all platforms meet this requirement.

Do manual alignment if needed; the alignment offset is stored in the nf_conn entry's protocol area. This works because templates are not handed off to L4 protocol trackers.

Reported-by: kbuild test robot <fengguang.wu@intel.com> Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--net/netfilter/nf_conntrack_core.c29
2 files changed, 26 insertions, 5 deletions
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h