nft: Add black to red codepath
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 1287228..7931cea 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -17,17 +17,8 @@ struct flow_offload;
 enum flow_offload_tuple_dir;
 
 struct nft_bulk_cb {
-	/* This is non-zero if the packet cannot be merged with the new skb. */
-	u16	flush;
-
-	/* Used in ipv6_gro_receive() and foo-over-udp */
-	u16	proto;
-
-	/* This is non-zero if the packet may be of the same flow. */
-	u8	same_flow:1;
-
 	struct sk_buff *last;
 	struct flow_offload_tuple_rhash *tuplehash;
 };
 
 #define NFT_BULK_CB(skb) ((struct nft_bulk_cb *)(skb)->cb)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 1434912..0ef49bd 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -25,6 +25,7 @@
 #include <net/ip6_fib.h>
 #include <net/flow.h>
 #include <net/gro_cells.h>
+#include <net/netfilter/nf_flow_table.h>
 
 #include <linux/interrupt.h>
 
@@ -578,6 +579,7 @@ int xfrm_unregister_km(struct xfrm_mgr *km);
 
 struct xfrm_tunnel_skb_cb {
 	union {
+		struct nft_bulk_cb ncb;
 		struct inet_skb_parm h4;
 		struct inet6_skb_parm h6;
 	} header;
@@ -655,6 +657,7 @@ struct xfrm_spi_skb_cb {
 	unsigned int daddroff;
 	unsigned int family;
 	__be32 seq;
+	__be32 spi;
 };
 
 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
@@ -1567,6 +1570,7 @@ u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
 int xfrm_init_state(struct xfrm_state *x);
 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
+int xfrm_input_list(struct sk_buff **skb, int nexthdr, __be32 spi, int encap_type);
 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
 int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
 			 int (*finish)(struct net *, struct sock *,
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 80a2b30..b59396c 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -18,6 +18,7 @@
 #include <net/netfilter/nf_conntrack_acct.h>
 #include <net/xfrm.h>
 #include <net/pkt_sched.h>
+#include <net/esp.h>
 /* For layer 4 checksum field offset. */
 #include <linux/tcp.h>
 #include <linux/udp.h>
@@ -176,7 +177,7 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
 	struct iphdr *iph;
 
 	if (!pskb_may_pull(skb, sizeof(*iph) + offset))
-		return -1;
+		return 0;
 
 	iph = (struct iphdr *)(skb_network_header(skb) + offset);
 	thoff = (iph->ihl * 4);
@@ -194,15 +195,25 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
 	case IPPROTO_UDP:
 		*hdrsize = sizeof(struct udphdr);
 		break;
+	case IPPROTO_ESP:
+		*hdrsize = sizeof(struct ip_esp_hdr);
+		break;
 	default:
-		return -1;
+		return 0;
 	}
 
 	if (iph->ttl <= 1)
-		return -1;
+		return 0;
 
 	if (!pskb_may_pull(skb, thoff + *hdrsize))
-		return -1;
+		return 0;
+
+	if (iph->protocol == IPPROTO_ESP) {
+		skb_pull(skb, offset);
+		skb_reset_network_header(skb);
+		skb_set_transport_header(skb, thoff);
+		return 2;
+	}
 
 	iph = (struct iphdr *)(skb_network_header(skb) + offset);
 	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
@@ -216,7 +227,7 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
 	tuple->iifidx		= dev->ifindex;
 	nf_flow_tuple_encap(skb, tuple);
 
-	return 0;
+	return 1;
 }
 
 /* Based on ip_exceeds_mtu(). */
@@ -326,14 +337,88 @@ static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
 	return NF_STOLEN;
 }
 
+/*
 int nft_bulk_receive_list(struct sk_buff *p, struct sk_buff *skb)
 {
 	NFT_BULK_CB(p)->last->next = skb;
 	NFT_BULK_CB(p)->last = skb;
-	NFT_BULK_CB(skb)->same_flow = 1;
 
 	return 0;
 }
+*/
+
+static int nft_esp_bulk_receive(struct list_head *head, struct sk_buff *skb)
+{
+	const struct iphdr *iph;
+	struct sk_buff *p;
+	struct xfrm_state *x;
+	struct sec_path *sp;
+	__be32 daddr;
+	__be32 spi;
+
+	if (xfrm_offload(skb))
+		return -EINVAL;
+
+	iph = ip_hdr(skb);
+	daddr = iph->daddr;
+
+	BUG_ON(iph->protocol != IPPROTO_ESP);
+
+	spi = ip_esp_hdr(skb)->spi;
+
+	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+	XFRM_SPI_SKB_CB(skb)->seq = ip_esp_hdr(skb)->seq_no;
+	XFRM_SPI_SKB_CB(skb)->spi = spi;
+
+	list_for_each_entry(p, head, list) {
+
+		if (daddr != ip_hdr(p)->daddr) {
+			continue;
+		}
+
+		if (spi != ip_esp_hdr(p)->spi) {
+			continue;
+		}
+
+		goto found;
+	}
+
+	goto out;
+
+found:
+	if (NFT_BULK_CB(p)->last == p)
+		skb_shinfo(p)->frag_list = skb;
+	else
+		NFT_BULK_CB(p)->last->next = skb;
+
+	NFT_BULK_CB(p)->last = skb;
+	skb_pull(skb, sizeof(*iph));
+	/* XXX: Copy or alloc new one? */
+	__skb_ext_copy(skb, p);
+
+	return 0;
+out:
+	x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
+			(xfrm_address_t *)&daddr,
+			spi, IPPROTO_ESP, AF_INET);
+	if (!x)
+		return -ENOENT;
+
+	sp = secpath_set(skb);
+	if (!sp) {
+		xfrm_state_put(x);
+		return -ENOMEM;
+	}
+
+	sp->xvec[sp->len++] = x;
+	skb_pull(skb, sizeof(*iph));
+	/* First skb: only link it onto the bulk list once it cannot fail. */
+	NFT_BULK_CB(skb)->last = skb;
+	list_add_tail(&skb->list, head);
+	return 0;
+}
+
 
 static void nft_bulk_receive(struct list_head *head, struct sk_buff *skb)
 {
@@ -364,25 +449,19 @@ static void nft_bulk_receive(struct list_head *head, struct sk_buff *skb)
 		struct iphdr *iph2;
 		__be32 daddr2;
 
-		if (!NFT_BULK_CB(p)->same_flow)
-			continue;
-
 		dst2 = skb_dst(p);
 		rt2 = (struct rtable *)dst2;
 		if (dst->dev != dst2->dev) {
-			NFT_BULK_CB(p)->same_flow = 0;
 			continue;
 		}
 
 		iph2 = ip_hdr(p);
 		daddr2 = rt_nexthop(rt2, iph2->daddr);
 		if (daddr != daddr2) {
-			NFT_BULK_CB(p)->same_flow = 0;
 			continue;
 		}
 
 		if (x != dst_xfrm(dst2)) {
-			NFT_BULK_CB(p)->same_flow = 0;
 			continue;
 		}
 
@@ -398,13 +477,11 @@ static void nft_bulk_receive(struct list_head *head, struct sk_buff *skb)
 		NFT_BULK_CB(p)->last->next = skb;
 
 	NFT_BULK_CB(p)->last = skb;
-	NFT_BULK_CB(skb)->same_flow = 1;
 
 	return;
 out:
 	/* First skb */
 	NFT_BULK_CB(skb)->last = skb;
-	NFT_BULK_CB(skb)->same_flow = 1;
 	list_add_tail(&skb->list, head);
 
 	return;
@@ -423,6 +500,7 @@ __nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 	unsigned int thoff, mtu;
 	struct iphdr *iph;
 	struct dst_entry *dst;
+	int ret;
 
 	skb_reset_network_header(skb);
 	if (!skb_transport_header_was_set(skb))
@@ -433,8 +511,9 @@ __nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &offset))
 		return 0;
 
-	if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize, offset) < 0)
-		return 0;
+	ret = nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize, offset);
+	if (ret != 1)
+		return ret;
 
 	tuplehash = flow_offload_lookup(flow_table, &tuple);
 	if (tuplehash == NULL)
@@ -686,6 +765,7 @@ nf_flow_offload_ip_hook_list(void *priv, struct sk_buff *unused,
 	struct sk_buff *skb, *n;
 	struct list_head bulk_list;
 	struct list_head acc_list;
+	struct list_head esp_list;
 	struct list_head *bulk_head;
 	struct list_head *head = state->skb_list;
 	struct neighbour *neigh;
@@ -695,6 +775,7 @@ nf_flow_offload_ip_hook_list(void *priv, struct sk_buff *unused,
 
 	INIT_LIST_HEAD(&bulk_list);
 	INIT_LIST_HEAD(&acc_list);
+	INIT_LIST_HEAD(&esp_list);
 
 	bulk_head = per_cpu_ptr(flow_table->bulk_list, cpu);
 
@@ -706,12 +787,39 @@ nf_flow_offload_ip_hook_list(void *priv, struct sk_buff *unused,
 			list_add_tail(&skb->list, &acc_list);
 		else if (ret == 1)
 			list_add_tail(&skb->list, &bulk_list);
+		else if (ret == 2)
+			list_add_tail(&skb->list, &esp_list);
 		/* ret == -1: Packet dropped! */
 		else if (ret == -1)
 			kfree_skb(skb);
 
 	}
 
+	list_for_each_entry_safe(skb, n, &esp_list, list) {
+		skb_list_del_init(skb);
+		memset(skb->cb, 0, sizeof(struct nft_bulk_cb));
+		ret = nft_esp_bulk_receive(bulk_head, skb);
+		if (ret)
+			list_add_tail(&skb->list, &acc_list);
+	}
+
+	list_for_each_entry_safe(skb, n, bulk_head, list) {
+
+		list_del_init(&skb->list);
+
+		skb->next = skb_shinfo(skb)->frag_list;
+		skb_shinfo(skb)->frag_list = NULL;
+
+		ret = xfrm_input_list(&skb, IPPROTO_ESP, 0, -2);
+//		if (ret) {
+//			if (ret == 1)
+//				kfree_skb_list(skb);
+//			continue;
+//		}
+	}
+
+	/*XXX: fwd policy check */
+
 	list_splice_init(&acc_list, head);
 
 	list_for_each_entry_safe(skb, n, &bulk_list, list) {
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index a13796f..ac4dc3d 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -256,8 +256,9 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
 
 static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
 {
-	if (skb_sec_path(skb))
-		return true;
+	/* FIXME: */
+//	if (skb_sec_path(skb))
+//		return true;
 
 	if (family == NFPROTO_IPV4) {
 		const struct ip_options *opt;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 3df0861..ab69d7d 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -456,128 +456,52 @@ static int xfrm_inner_mode_input(struct xfrm_state *x,
 	return -EOPNOTSUPP;
 }
 
-int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+static int xfrm_input_loop(struct net *net, struct sk_buff *skb, struct xfrm_state *x, __be32 spi, int nexthdr, int encap_type)
 {
 	const struct xfrm_state_afinfo *afinfo;
-	struct net *net = dev_net(skb->dev);
 	const struct xfrm_mode *inner_mode;
-	int err;
+	xfrm_address_t *daddr;
+	unsigned int family;
+	struct sec_path *sp;
 	__be32 seq;
 	__be32 seq_hi;
-	struct xfrm_state *x = NULL;
-	xfrm_address_t *daddr;
-	u32 mark = skb->mark;
-	unsigned int family = AF_UNSPEC;
-	int decaps = 0;
+	int err = 0;
 	int async = 0;
 	bool xfrm_gro = false;
 	bool crypto_done = false;
 	struct xfrm_offload *xo = xfrm_offload(skb);
-	struct sec_path *sp;
 
-	if (encap_type < 0) {
-		x = xfrm_input_state(skb);
-
-		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
-			if (x->km.state == XFRM_STATE_ACQ)
-				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
-			else
-				XFRM_INC_STATS(net,
-					       LINUX_MIB_XFRMINSTATEINVALID);
-
-			if (encap_type == -1)
-				dev_put(skb->dev);
-			goto drop;
-		}
-
+	if (encap_type == -1) {
+		async = 1;
 		family = x->outer_mode.family;
+		seq = XFRM_SKB_CB(skb)->seq.input.low;
+		goto resume;
+	}
+
+	seq = XFRM_SPI_SKB_CB(skb)->seq;
 
-		/* An encap_type of -1 indicates async resumption. */
-		if (encap_type == -1) {
-			async = 1;
-			seq = XFRM_SKB_CB(skb)->seq.input.low;
-			goto resume;
-		}
-
-		/* encap_type < -1 indicates a GRO call. */
+	/* encap_type < -1 indicates a GRO call. */
+	if (encap_type < -1) {
 		encap_type = 0;
-		seq = XFRM_SPI_SKB_CB(skb)->seq;
-
-		if (xo && (xo->flags & CRYPTO_DONE)) {
-			crypto_done = true;
-			family = XFRM_SPI_SKB_CB(skb)->family;
-
-			if (!(xo->status & CRYPTO_SUCCESS)) {
-				if (xo->status &
-				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
-				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
-				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
-				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {
-
-					xfrm_audit_state_icvfail(x, skb,
-								 x->type->proto);
-					x->stats.integrity_failed++;
-					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
-					goto drop;
-				}
-
-				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
-					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
-					goto drop;
-				}
-
-				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
-				goto drop;
-			}
-
-			if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
-				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
-				goto drop;
-			}
-		}
-
+		family = x->outer_mode.family;
 		goto lock;
 	}
 
-	family = XFRM_SPI_SKB_CB(skb)->family;
-
-	/* if tunnel is present override skb->mark value with tunnel i_key */
-	switch (family) {
-	case AF_INET:
-		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
-			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
-		break;
-	case AF_INET6:
-		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
-			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
-		break;
-	}
-
-	sp = secpath_set(skb);
-	if (!sp) {
-		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
-		goto drop;
-	}
-
-	seq = 0;
-	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
-		secpath_reset(skb);
-		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
-		goto drop;
-	}
-
 	daddr = (xfrm_address_t *)(skb_network_header(skb) +
 				   XFRM_SPI_SKB_CB(skb)->daddroff);
+
+	family = XFRM_SPI_SKB_CB(skb)->family;
+
 	do {
 		sp = skb_sec_path(skb);
 
-		if (sp->len == XFRM_MAX_DEPTH) {
+		if (sp && sp->len == XFRM_MAX_DEPTH) {
 			secpath_reset(skb);
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
 			goto drop;
 		}
 
-		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
+		x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
 		if (x == NULL) {
 			secpath_reset(skb);
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
@@ -587,7 +511,8 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
 		skb->mark = xfrm_smark_get(skb->mark, x);
 
-		sp->xvec[sp->len++] = x;
+		if (sp)
+			sp->xvec[sp->len++] = x;
 
 		skb_dst_force(skb);
 		if (!skb_dst(skb)) {
@@ -689,10 +614,8 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 			goto drop;
 		}
 
-		if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
-			decaps = 1;
+		if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL)
 			break;
-		}
 
 		/*
 		 * We need the inner address.  However, we only get here for
@@ -715,12 +638,11 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
 	nf_reset_ct(skb);
 
-	if (decaps) {
+	if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
 		sp = skb_sec_path(skb);
 		if (sp)
 			sp->olen = 0;
 		skb_dst_drop(skb);
-		gro_cells_receive(&gro_cells, skb);
 		return 0;
 	} else {
 		xo = xfrm_offload(skb);
@@ -738,16 +660,249 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 			if (sp)
 				sp->olen = 0;
 			skb_dst_drop(skb);
-			gro_cells_receive(&gro_cells, skb);
 			return err;
 		}
-
-		return err;
 	}
 
+	return 0;
+
 drop_unlock:
 	spin_unlock(&x->lock);
 drop:
+	return -EINVAL;
+}
+int xfrm_input_list(struct sk_buff **skbp, int nexthdr, __be32 spi, int encap_type)
+{
+	struct sk_buff *skb2, *nskb, *pskb = NULL;
+	struct sk_buff *skb = *skbp;
+	struct net *net = dev_net(skb->dev);
+	int err;
+	__be32 seq;
+	struct xfrm_state *x = NULL;
+	unsigned int family = AF_UNSPEC;
+	bool crypto_done = false;
+	struct xfrm_offload *xo = xfrm_offload(skb);
+	struct list_head head;
+
+	x = xfrm_input_state(skb);
+
+	if (unlikely(x->km.state != XFRM_STATE_VALID)) {
+		if (x->km.state == XFRM_STATE_ACQ)
+			XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
+		else
+			XFRM_INC_STATS(net,
+				       LINUX_MIB_XFRMINSTATEINVALID);
+
+		if (encap_type == -1)
+			dev_put(skb->dev);
+		goto drop;
+	}
+
+	family = x->outer_mode.family;
+
+	/* An encap_type of -1 indicates async resumption. */
+	if (encap_type == -1) {
+		seq = XFRM_SKB_CB(skb)->seq.input.low;
+		goto loop;
+	}
+
+	seq = XFRM_SPI_SKB_CB(skb)->seq;
+
+	if (xo && (xo->flags & CRYPTO_DONE)) {
+		crypto_done = true;
+		family = XFRM_SPI_SKB_CB(skb)->family;
+
+		if (!(xo->status & CRYPTO_SUCCESS)) {
+			if (xo->status &
+			    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
+			     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
+			     CRYPTO_TUNNEL_AH_AUTH_FAILED |
+			     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {
+
+				xfrm_audit_state_icvfail(x, skb,
+							 x->type->proto);
+				x->stats.integrity_failed++;
+				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
+				goto drop;
+			}
+
+			if (xo->status & CRYPTO_INVALID_PROTOCOL) {
+				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
+				goto drop;
+			}
+
+			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
+			goto drop;
+		}
+
+		/* XXX: Do we need this here? */
+		if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
+			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
+			goto drop;
+		}
+	}
+
+	family = XFRM_SPI_SKB_CB(skb)->family;
+
+	/* HERE */
+loop:
+	INIT_LIST_HEAD(&head);
+	skb_list_walk_safe(skb, skb2, nskb) {
+
+
+		skb_mark_not_on_list(skb2);
+
+		err = xfrm_input_loop(net, skb2, x, spi, nexthdr, encap_type);
+//		if (!err) {
+//			skb2->next = nskb;
+//		} else if (err != -EINPROGRESS) {
+//			skb2->next = nskb;
+//			kfree_skb_list(skb2);
+//			return err;
+//		} else {
+//			if (skb == skb2)
+//				skb = nskb;
+//			else
+//				pskb->next = nskb;
+//
+//			continue;
+//		}
+//
+//		pskb = skb2;
+
+		if (err) {
+			if (err != -EINPROGRESS)
+				kfree_skb(skb2);
+			continue;
+		}
+
+		list_add_tail(&skb2->list, &head);
+	}
+
+	/* XXX: Recursive call! */
+	netif_receive_skb_list(&head);
+	return 0;
+
+drop:
+	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
+	kfree_skb_list(skb);
+	return 0;
+}
+EXPORT_SYMBOL(xfrm_input_list);
+
+int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+{
+	struct net *net = dev_net(skb->dev);
+	int err;
+	__be32 seq;
+	struct xfrm_state *x = NULL;
+	u32 mark = skb->mark;
+	unsigned int family = AF_UNSPEC;
+	int async = 0;
+	bool crypto_done = false;
+	struct xfrm_offload *xo = xfrm_offload(skb);
+	struct sec_path *sp;
+
+	if (encap_type < 0) {
+		x = xfrm_input_state(skb);
+
+		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
+			if (x->km.state == XFRM_STATE_ACQ)
+				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
+			else
+				XFRM_INC_STATS(net,
+					       LINUX_MIB_XFRMINSTATEINVALID);
+
+			if (encap_type == -1)
+				dev_put(skb->dev);
+			goto drop;
+		}
+
+		family = x->outer_mode.family;
+
+		/* An encap_type of -1 indicates async resumption. */
+		if (encap_type == -1) {
+			async = 1;
+			seq = XFRM_SKB_CB(skb)->seq.input.low;
+			goto loop;
+		}
+
+		seq = XFRM_SPI_SKB_CB(skb)->seq;
+
+		if (xo && (xo->flags & CRYPTO_DONE)) {
+			crypto_done = true;
+			family = XFRM_SPI_SKB_CB(skb)->family;
+
+			if (!(xo->status & CRYPTO_SUCCESS)) {
+				if (xo->status &
+				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
+				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
+				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
+				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {
+
+					xfrm_audit_state_icvfail(x, skb,
+								 x->type->proto);
+					x->stats.integrity_failed++;
+					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
+					goto drop;
+				}
+
+				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
+					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
+					goto drop;
+				}
+
+				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
+				goto drop;
+			}
+
+			if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
+				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
+				goto drop;
+			}
+		}
+
+		goto loop;
+	}
+
+	family = XFRM_SPI_SKB_CB(skb)->family;
+
+	/* if tunnel is present override skb->mark value with tunnel i_key */
+	switch (family) {
+	case AF_INET:
+		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
+			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
+		break;
+	case AF_INET6:
+		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
+			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
+		break;
+	}
+
+	sp = secpath_set(skb);
+	if (!sp) {
+		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
+		goto drop;
+	}
+
+	seq = 0;
+	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
+		secpath_reset(skb);
+		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
+		goto drop;
+	}
+
+	XFRM_SPI_SKB_CB(skb)->seq = seq;
+loop:
+	err = xfrm_input_loop(net, skb, x, spi, nexthdr, encap_type);
+	if (err)
+		goto drop;
+
+	gro_cells_receive(&gro_cells, skb);
+
+	return err;
+
+drop:
 	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
 	kfree_skb(skb);
 	return 0;