/* XDP example of parsing TTL value of IP-header.
*
* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
*/
#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/if_vlan.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/in.h>
#include <uapi/linux/tcp.h>
#include "bpf_helpers.h"
struct vlan_hdr {
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
struct bpf_map_def SEC("maps") ttl_map = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32), /* array maps require 4-byte (u32) keys */
.value_size = sizeof(u64),
.max_entries = 256,
};
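/* Values in a PERCPU_ARRAY are kept per CPU, so a user-space reader (not
 * part of this file) has to sum the per-CPU slots.  A rough libbpf sketch,
 * with map_fd, MAX_CPUS and nr_cpus as illustrative names:
 *
 *	__u32 key = ttl;
 *	__u64 values[MAX_CPUS], sum = 0;
 *
 *	bpf_map_lookup_elem(map_fd, &key, values);
 *	for (int i = 0; i < nr_cpus; i++)
 *		sum += values[i];
 */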
//#define DEBUG 1
#ifdef DEBUG
/* Only use this for debug output. Notice output from bpf_trace_printk()
* end-up in /sys/kernel/debug/tracing/trace_pipe
*/
#define bpf_debug(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})
#else
#define bpf_debug(fmt, ...) do { } while (0)
#endif
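/* When DEBUG is defined, the trace output can be read with e.g.:
 *   sudo cat /sys/kernel/debug/tracing/trace_pipe
 */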
/* Parse Ethernet layer 2, extract network layer 3 offset and protocol
*
 * Returns false on error or for unsupported ether-types.
*/
static __always_inline
bool parse_eth(struct ethhdr *eth, void *data_end,
u16 *eth_proto, u64 *l3_offset)
{
u16 eth_type;
u64 offset;
offset = sizeof(*eth);
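	/* Bounds check: the eBPF verifier rejects the program unless every
	 * packet access is checked against data_end first.
	 */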
if ((void *)eth + offset > data_end)
return false;
eth_type = eth->h_proto;
bpf_debug("Debug: eth_type:0x%x\n", ntohs(eth_type));
/* Skip non 802.3 Ethertypes */
if (unlikely(ntohs(eth_type) < ETH_P_802_3_MIN))
return false;
/* Handle VLAN tagged packet */
if (eth_type == htons(ETH_P_8021Q) || eth_type == htons(ETH_P_8021AD)) {
struct vlan_hdr *vlan_hdr;
vlan_hdr = (void *)eth + offset;
offset += sizeof(*vlan_hdr);
if ((void *)eth + offset > data_end)
return false;
eth_type = vlan_hdr->h_vlan_encapsulated_proto;
}
	/* TODO: Handle double VLAN tagged packet, see sketch after this function */
*eth_proto = ntohs(eth_type);
*l3_offset = offset;
return true;
}
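/* Sketch for the TODO above: double (QinQ) tagged frames carry a second
 * VLAN header, so the single-tag check could become a small bounded loop
 * (untested; i is a new local variable):
 *
 *	#pragma unroll
 *	for (i = 0; i < 2; i++) {
 *		if (eth_type != htons(ETH_P_8021Q) &&
 *		    eth_type != htons(ETH_P_8021AD))
 *			break;
 *		vlan_hdr = (void *)eth + offset;
 *		offset += sizeof(*vlan_hdr);
 *		if ((void *)eth + offset > data_end)
 *			return false;
 *		eth_type = vlan_hdr->h_vlan_encapsulated_proto;
 *	}
 */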
static __always_inline
u32 parse_ipv4(struct xdp_md *ctx, u64 l3_offset)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct iphdr *iph = data + l3_offset;
u64 *counter;
u64 total = 0;
	u32 ttl; /* type must match the map key type */
if (iph + 1 > data_end) {
bpf_debug("Invalid IPv4 packet: L3off:%llu\n", l3_offset);
return XDP_ABORTED;
}
	/* Extract TTL */
ttl = iph->ttl;
bpf_debug("Valid IPv4 packet: TTL:%u\n", ttl);
counter = bpf_map_lookup_elem(&ttl_map, &ttl);
if (counter) {
		/* No need for __sync_fetch_and_add(); the map is per-CPU */
*counter += 1;
total = *counter;
}
if (total > 1000000) {
		/* Too many packets: could drop some here */
//if (total & 1)
// return XDP_DROP;
}
return XDP_PASS;
}
static __always_inline
u32 handle_eth_protocol(struct xdp_md *ctx, u16 eth_proto, u64 l3_offset)
{
switch (eth_proto) {
case ETH_P_IP:
return parse_ipv4(ctx, l3_offset);
break;
	case ETH_P_IPV6: /* No handler for IPv6 yet */
case ETH_P_ARP: /* Let OS handle ARP */
/* Fall-through */
default:
bpf_debug("Not handling eth_proto:0x%x\n", eth_proto);
return XDP_PASS;
}
return XDP_PASS;
}
SEC("xdp_ttl")
int xdp_ttl_program(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct ethhdr *eth = data;
u16 eth_proto = 0;
u64 l3_offset = 0;
u32 action;
if (!(parse_eth(eth, data_end, &eth_proto, &l3_offset))) {
bpf_debug("Cannot parse L2: L3off:%llu proto:0x%x\n",
l3_offset, eth_proto);
return XDP_PASS; /* Skip */
}
bpf_debug("Reached L3: L3off:%llu proto:0x%x\n", l3_offset, eth_proto);
action = handle_eth_protocol(ctx, eth_proto, l3_offset);
return action;
}
char _license[] SEC("license") = "GPL";
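/* One way to attach the program with iproute2, assuming the compiled object
 * is named xdp_ttl_kern.o (adjust object and interface names as needed):
 *
 *   ip link set dev eth0 xdp obj xdp_ttl_kern.o sec xdp_ttl
 *
 * and to detach it again:
 *
 *   ip link set dev eth0 xdp off
 */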