/*
* Linux Socket Filter Data Structures
*/
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__
#include <linux/compiler.h>
#include <linux/types.h>
#ifdef __KERNEL__
#include <asm/atomic.h>
#endif
/*
* Current version of the filter code architecture.
*/
#define BPF_MAJOR_VERSION 1
#define BPF_MINOR_VERSION 1
/*
 * Try to keep these values and structures similar to BSD, especially
 * the BPF code definitions, which need to match so that filters can be
 * shared.
 */
struct sock_filter      /* Filter block */
{
        __u16   code;   /* Actual filter code */
        __u8    jt;     /* Jump true */
        __u8    jf;     /* Jump false */
        __u32   k;      /* Generic multiuse field */
};
struct sock_fprog       /* Required for SO_ATTACH_FILTER. */
{
        unsigned short     len;        /* Number of filter blocks */
        struct sock_filter __user *filter;
};
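/*
 * Userspace usage sketch (illustrative; "fd", "insns" and "len" are the
 * caller's own names, error handling omitted): a filter program is
 * attached to a socket by passing a struct sock_fprog to setsockopt(),
 * where len counts sock_filter blocks and filter points at them:
 *
 *        struct sock_fprog prog = {
 *                .len    = len,
 *                .filter = insns,
 *        };
 *
 *        setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */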
#ifdef __KERNEL__
struct sk_filter
{
        atomic_t           refcnt;
        unsigned int       len;        /* Number of filter blocks */
        struct rcu_head    rcu;
        struct sock_filter insns[0];
};
static inline unsigned int sk_filter_len(struct sk_filter *fp)
{
        return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
}
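/*
 * Allocation sketch (an assumption about the caller, not mandated by this
 * header): the object above is allocated with its variable-length insns[]
 * array in one block, and sk_filter_len() gives the matching size when the
 * block is freed again, e.g.
 *
 *        fp = sock_kmalloc(sk, fprog->len * sizeof(struct sock_filter)
 *                              + sizeof(*fp), GFP_KERNEL);
 *        ...
 *        sock_kfree_s(sk, fp, sk_filter_len(fp));
 */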
#endif
/*
* Instruction classes
*/
#define BPF_CLASS(code) ((code) & 0x07)
#define BPF_LD 0x00
#define BPF_LDX 0x01
#define BPF_ST 0x02
#define BPF_STX 0x03
#define BPF_ALU 0x04
#define BPF_JMP 0x05
#define BPF_RET 0x06
#define BPF_MISC 0x07
/* ld/ldx fields */
#define BPF_SIZE(code) ((code) & 0x18)
#define BPF_W 0x00
#define BPF_H 0x08
#define BPF_B 0x10
#define BPF_MODE(code) ((code) & 0xe0)
#define BPF_IMM 0x00
#define BPF_ABS 0x20
#define BPF_IND 0x40
#define BPF_MEM 0x60
#define BPF_LEN 0x80
#define BPF_MSH 0xa0
/* alu/jmp fields */
#define BPF_OP(code) ((code) & 0xf0)
#define BPF_ADD 0x00
#define BPF_SUB 0x10
#define BPF_MUL 0x20
#define BPF_DIV 0x30
#define BPF_OR 0x40
#define BPF_AND 0x50
#define BPF_LSH 0x60
#define BPF_RSH 0x70
#define BPF_NEG 0x80
#define BPF_JA 0x00
#define BPF_JEQ 0x10
#define BPF_JGT 0x20
#define BPF_JGE 0x30
#define BPF_JSET 0x40
#define BPF_SRC(code) ((code) & 0x08)
#define BPF_K 0x00
#define BPF_X 0x08
/* ret - BPF_K and BPF_X also apply */
#define BPF_RVAL(code) ((code) & 0x18)
#define BPF_A 0x10
/* misc */
#define BPF_MISCOP(code) ((code) & 0xf8)
#define BPF_TAX 0x00
#define BPF_TXA 0x80
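/*
 * Example (derived from the definitions above): an opcode is built by
 * OR-ing one value from each relevant field group.  "Load the halfword at
 * absolute packet offset k into the accumulator" is encoded as
 *
 *        code = BPF_LD | BPF_H | BPF_ABS;
 *
 * so that BPF_CLASS(code) == BPF_LD, BPF_SIZE(code) == BPF_H and
 * BPF_MODE(code) == BPF_ABS.
 */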
#ifndef BPF_MAXINSNS
#define BPF_MAXINSNS 4096
#endif
/*
* Macros for filter block array initializers.
*/
#ifndef BPF_STMT
#define BPF_STMT(code, k) { (unsigned short)(code), 0, 0, k }
#endif
#ifndef BPF_JUMP
#define BPF_JUMP(code, k, jt, jf) { (unsigned short)(code), jt, jf, k }
#endif
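/*
 * Example (illustrative): a minimal filter that accepts IPv4 frames on an
 * Ethernet-framed socket and drops everything else.  Offset 12 is the
 * EtherType field, 0x0800 is ETH_P_IP, a return value of 0xffff accepts
 * up to 0xffff bytes and a return value of 0 drops the packet:
 *
 *        struct sock_filter insns[] = {
 *                BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
 *                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),
 *                BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *                BPF_STMT(BPF_RET | BPF_K, 0),
 *        };
 */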
/*
 * Number of scratch memory words for BPF_ST and BPF_STX
 */
#define BPF_MEMWORDS 16
/* RATIONALE.  Negative offsets are invalid in BPF.
   We use them to reference ancillary data.
   Unlike introducing new instructions, this does not break
   existing compilers/optimizers.
 */
#define SKF_AD_OFF (-0x1000)
#define SKF_AD_PROTOCOL 0
#define SKF_AD_PKTTYPE 4
#define SKF_AD_IFINDEX 8
#define SKF_AD_MAX 12
#define SKF_NET_OFF (-0x100000)
#define SKF_LL_OFF (-0x200000)
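/*
 * Example (illustrative): ancillary data is read with an ordinary absolute
 * load whose offset is based at SKF_AD_OFF, e.g. loading the index of the
 * receiving interface into the accumulator:
 *
 *        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX)
 *
 * SKF_NET_OFF and SKF_LL_OFF likewise base absolute loads at the network
 * and link layer headers respectively.
 */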
#ifdef __KERNEL__
struct sk_buff;
struct sock;
extern unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen);
extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
extern int sk_detach_filter(struct sock *sk);
extern int sk_chk_filter(struct sock_filter *filter, int flen);
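/*
 * In-kernel usage sketch (assumes a caller holding a filter of flen
 * blocks): the instruction array is validated once with sk_chk_filter()
 * (which returns 0 on success) and then run per packet; sk_run_filter()
 * returns the number of bytes to accept, or 0 to drop the packet:
 *
 *        if (sk_chk_filter(filter, flen) == 0)
 *                pkt_len = sk_run_filter(skb, filter, flen);
 */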
#endif /* __KERNEL__ */
#endif /* __LINUX_FILTER_H__ */