Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
index 41354f7..26efd52 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt
@@ -7,7 +7,10 @@
    - PCS registers
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
-- interrupts: Should contain the amd-xgbe interrupt
+- interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
+  listed is required and is the general device interrupt. If the optional
+  amd,per-channel-interrupt property is specified, then one additional
+  interrupt for each DMA channel supported by the device should be specified.
 - clocks:
    - DMA clock for the amd-xgbe device (used for calculating the
      correct Rx interrupt watchdog timer value on a DMA channel
@@ -23,6 +26,9 @@
 - mac-address: mac address to be assigned to the device. Can be overridden
   by UEFI.
 - dma-coherent: Present if dma operations are coherent
+- amd,per-channel-interrupt: Indicates that Rx and Tx complete events will
+  generate a unique interrupt for each DMA channel - this requires that an
+  additional interrupt be configured for each DMA channel
 
 Example:
 	xgbe@e0700000 {
@@ -30,7 +36,9 @@
 		reg = <0 0xe0700000 0 0x80000>,
 		      <0 0xe0780000 0 0x80000>;
 		interrupt-parent = <&gic>;
-		interrupts = <0 325 4>;
+		interrupts = <0 325 4>,
+			     <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>;
+		amd,per-channel-interrupt;
 		clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
 		clock-names = "dma_clk", "ptp_clk";
 		phy-handle = <&phy>;
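
As a rough sketch of how a driver could consume this binding (the
channel_count/channel_irq names and the error handling are illustrative
assumptions, not code from the xgbe driver): probe the optional flag,
then fetch one extra interrupt per DMA channel, with index 0 reserved
for the general device interrupt.

	struct device_node *np = pdev->dev.of_node;
	bool per_channel_irq;
	int i, irq;

	per_channel_irq = of_property_read_bool(np,
						"amd,per-channel-interrupt");
	irq = platform_get_irq(pdev, 0);	/* general device interrupt */
	if (irq < 0)
		return irq;
	if (per_channel_irq) {
		for (i = 0; i < channel_count; i++) {
			irq = platform_get_irq(pdev, i + 1);
			if (irq < 0)
				return irq;
			channel_irq[i] = irq;
		}
	}
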
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index a62c889..e124847 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -10,7 +10,7 @@
 - dsa,ethernet		: Should be a phandle to a valid Ethernet device node
 - dsa,mii-bus		: Should be a phandle to a valid MDIO bus device node
 
-Optionnal properties:
+Optional properties:
 - interrupts		: property with a value describing the switch
 			  interrupt number (not supported by the driver)
 
@@ -23,6 +23,13 @@
 - #address-cells	: Must be 1
 - #size-cells		: Must be 0
 
+A switch child node has the following optional property:
+
+- eeprom-length		: Set to the length of an EEPROM connected to the
+			  switch. Must be set if the switch cannot detect
+			  the presence and/or size of a connected EEPROM,
+			  otherwise optional.
+
 A switch may have multiple "port" children nodes
 
 Each port children node must have the following mandatory properties:
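
A sketch of how a switch driver could honor eeprom-length (hedged: the
np and ps names are assumptions for illustration; an absent property
simply means the driver must autodetect the EEPROM):

	u32 eeprom_len;

	if (!of_property_read_u32(np, "eeprom-length", &eeprom_len))
		ps->eeprom_len = eeprom_len;
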
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index eeb5b2e..83bf498 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -2230,11 +2230,8 @@
 
 	It is possible to adjust TCP/IP's congestion limits by
 	altering the net.ipv4.tcp_reordering sysctl parameter.  The
-	usual default value is 3, and the maximum useful value is 127.
-	For a four interface balance-rr bond, expect that a single
-	TCP/IP stream will utilize no more than approximately 2.3
-	interface's worth of throughput, even after adjusting
-	tcp_reordering.
+	usual default value is 3. Keep in mind that the TCP stack can
+	automatically increase this value when it detects reordering.
 
 	Note that the fraction of packets that will be delivered out of
 	order is highly variable, and is unlikely to be zero.  The level
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index a476b08..9bffdfc 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -383,9 +383,17 @@
 	may consume significant resources. Cf. tcp_max_orphans.
 
 tcp_reordering - INTEGER
-	Maximal reordering of packets in a TCP stream.
+	Initial reordering level of packets in a TCP stream.
+	The TCP stack can then dynamically adjust the flow's reordering
+	level between this initial value and tcp_max_reordering.
 	Default: 3
 
+tcp_max_reordering - INTEGER
+	Maximal reordering level of packets in a TCP stream.
+	300 is a fairly conservative value, but you might increase it
+	if paths are using per-packet load balancing (like bonding rr mode).
+	Default: 300
+
 tcp_retrans_collapse - BOOLEAN
 	Bug-to-bug compatibility with some broken printers.
 	On retransmit try to send bigger packets to work around bugs in
@@ -1466,6 +1474,19 @@
 	1 - (default) discard fragmented neighbor discovery packets
 	0 - allow fragmented neighbor discovery packets
 
+optimistic_dad - BOOLEAN
+	Whether to perform Optimistic Duplicate Address Detection (RFC 4429).
+		0: disabled (default)
+		1: enabled
+
+use_optimistic - BOOLEAN
+	If enabled, do not classify optimistic addresses as deprecated during
+	source address selection.  Preferred addresses will still be chosen
+	before optimistic addresses, subject to other ranking in the source
+	address selection algorithm.
+		0: disabled (default)
+		1: enabled
+
 icmp/*:
 ratelimit - INTEGER
 	Limit the maximal rates for sending ICMPv6 packets.
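
A minimal userspace sketch for raising the new ceiling at runtime,
equivalent to "sysctl -w net.ipv4.tcp_max_reordering=300" (the value is
just the default; tune it to the topology):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/ipv4/tcp_max_reordering", "w");

		if (!f)
			return 1;
		fputs("300", f);
		return fclose(f) ? 1 : 0;
	}
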
diff --git a/MAINTAINERS b/MAINTAINERS
index 1cfabdd..3a41fb0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5860,6 +5860,11 @@
 S:	Maintained
 F:	drivers/gpu/drm/armada/
 
+MARVELL 88E6352 DSA support
+M:	Guenter Roeck <linux@roeck-us.net>
+S:	Maintained
+F:	drivers/net/dsa/mv88e6352.c
+
 MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
 M:	Mirko Lindner <mlindner@marvell.com>
 M:	Stephen Hemminger <stephen@networkplumber.org>
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index d81b247..22762a1 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -158,6 +158,7 @@
 	.port_number = 0,
 	.phy_addr    = 0,
 	.speed       = 0, /* Autonegotiation */
+	.intf        = PHY_INTERFACE_MODE_RMII,
 	.init        = gplugd_eth_init,
 };
 
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 6f85362..1a52877 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -204,6 +204,7 @@
 #define PPC_INST_ERATSX_DOT		0x7c000127
 
 /* Misc instructions for BPF compiler */
+#define PPC_INST_LBZ			0x88000000
 #define PPC_INST_LD			0xe8000000
 #define PPC_INST_LHZ			0xa0000000
 #define PPC_INST_LHBRX			0x7c00062c
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 9aee27c..c406aa9 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -87,6 +87,9 @@
 #define PPC_STD(r, base, i)	EMIT(PPC_INST_STD | ___PPC_RS(r) |	      \
 				     ___PPC_RA(base) | ((i) & 0xfffc))
 
+
+#define PPC_LBZ(r, base, i)	EMIT(PPC_INST_LBZ | ___PPC_RT(r) |	      \
+				     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LD(r, base, i)	EMIT(PPC_INST_LD | ___PPC_RT(r) |	      \
 				     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LWZ(r, base, i)	EMIT(PPC_INST_LWZ | ___PPC_RT(r) |	      \
@@ -96,6 +99,10 @@
 #define PPC_LHBRX(r, base, b)	EMIT(PPC_INST_LHBRX | ___PPC_RT(r) |	      \
 				     ___PPC_RA(base) | ___PPC_RB(b))
 /* Convenience helpers for the above with 'far' offsets: */
+#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i);   \
+		else {	PPC_ADDIS(r, base, IMM_HA(i));			      \
+			PPC_LBZ(r, r, IMM_L(i)); } } while(0)
+
 #define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i);     \
 		else {	PPC_ADDIS(r, base, IMM_HA(i));			      \
 			PPC_LD(r, r, IMM_L(i)); } } while(0)
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index cbae2df..d110e28 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -407,6 +407,11 @@
 			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							  queue_mapping));
 			break;
+		case BPF_ANC | SKF_AD_PKTTYPE:
+			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
+			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
+			PPC_SRWI(r_A, r_A, 5);
+			break;
 		case BPF_ANC | SKF_AD_CPU:
 #ifdef CONFIG_SMP
 			/*
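
The new PKTTYPE path can be exercised from userspace with a classic BPF
filter; a hedged sketch (socket setup omitted, fd is assumed to be an
open packet socket):

	#include <linux/filter.h>
	#include <linux/if_packet.h>
	#include <sys/socket.h>

	/* accept only PACKET_HOST frames; loading SKF_AD_PKTTYPE is what
	 * now JITs to the LBZ/ANDI/SRWI sequence on powerpc
	 */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 SKF_AD_OFF + SKF_AD_PKTTYPE),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, PACKET_HOST, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
		BPF_STMT(BPF_RET | BPF_K, 0),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
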
diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h
index e8c58f8..a881d78 100644
--- a/arch/x86/include/asm/hash.h
+++ b/arch/x86/include/asm/hash.h
@@ -1,7 +1,48 @@
-#ifndef _ASM_X86_HASH_H
-#define _ASM_X86_HASH_H
+#ifndef __ASM_X86_HASH_H
+#define __ASM_X86_HASH_H
 
-struct fast_hash_ops;
-extern void setup_arch_fast_hash(struct fast_hash_ops *ops);
+#include <linux/cpufeature.h>
+#include <asm/alternative.h>
 
-#endif /* _ASM_X86_HASH_H */
+u32 __intel_crc4_2_hash(const void *data, u32 len, u32 seed);
+u32 __intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed);
+
+/*
+ * non-inline versions of jhash so gcc does not need to generate
+ * duplicate code in every object file
+ */
+u32 __jhash(const void *data, u32 len, u32 seed);
+u32 __jhash2(const u32 *data, u32 len, u32 seed);
+
+/*
+ * for documentation of these functions please look into
+ * <include/asm-generic/hash.h>
+ */
+
+static inline u32 arch_fast_hash(const void *data, u32 len, u32 seed)
+{
+	u32 hash;
+
+	alternative_call(__jhash, __intel_crc4_2_hash, X86_FEATURE_XMM4_2,
+#ifdef CONFIG_X86_64
+			 "=a" (hash), "D" (data), "S" (len), "d" (seed));
+#else
+			 "=a" (hash), "a" (data), "d" (len), "c" (seed));
+#endif
+	return hash;
+}
+
+static inline u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
+{
+	u32 hash;
+
+	alternative_call(__jhash2, __intel_crc4_2_hash2, X86_FEATURE_XMM4_2,
+#ifdef CONFIG_X86_64
+			 "=a" (hash), "D" (data), "S" (len), "d" (seed));
+#else
+			 "=a" (hash), "a" (data), "d" (len), "c" (seed));
+#endif
+	return hash;
+}
+
+#endif /* __ASM_X86_HASH_H */
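
Call sites are unchanged by this refactor; a minimal usage sketch (the
buffer is illustrative):

	static const char buf[] = "flow key";
	u32 h;

	/* patched at boot via alternative_call(): the SSE4.2 CRC32 variant
	 * when X86_FEATURE_XMM4_2 is present, the out-of-line jhash
	 * otherwise
	 */
	h = arch_fast_hash(buf, sizeof(buf), 0);
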
diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c
index ff4fa51..e143271 100644
--- a/arch/x86/lib/hash.c
+++ b/arch/x86/lib/hash.c
@@ -31,13 +31,13 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <linux/hash.h>
-#include <linux/init.h>
-
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 #include <asm/hash.h>
 
+#include <linux/hash.h>
+#include <linux/jhash.h>
+
 static inline u32 crc32_u32(u32 crc, u32 val)
 {
 #ifdef CONFIG_AS_CRC32
@@ -48,7 +48,7 @@
 	return crc;
 }
 
-static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
+u32 __intel_crc4_2_hash(const void *data, u32 len, u32 seed)
 {
 	const u32 *p32 = (const u32 *) data;
 	u32 i, tmp = 0;
@@ -71,22 +71,27 @@
 
 	return seed;
 }
+EXPORT_SYMBOL(__intel_crc4_2_hash);
 
-static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
+u32 __intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
 {
-	const u32 *p32 = (const u32 *) data;
 	u32 i;
 
 	for (i = 0; i < len; i++)
-		seed = crc32_u32(seed, *p32++);
+		seed = crc32_u32(seed, *data++);
 
 	return seed;
 }
+EXPORT_SYMBOL(__intel_crc4_2_hash2);
 
-void __init setup_arch_fast_hash(struct fast_hash_ops *ops)
+u32 __jhash(const void *data, u32 len, u32 seed)
 {
-	if (cpu_has_xmm4_2) {
-		ops->hash  = intel_crc4_2_hash;
-		ops->hash2 = intel_crc4_2_hash2;
-	}
+	return jhash(data, len, seed);
 }
+EXPORT_SYMBOL(__jhash);
+
+u32 __jhash2(const u32 *data, u32 len, u32 seed)
+{
+	return jhash2(data, len, seed);
+}
+EXPORT_SYMBOL(__jhash2);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 1be8228..dcbd858 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -163,7 +163,7 @@
 	memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb),
 	       MISDN_HEADER_LEN);
 
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 
 	mISDN_sock_cmsg(sk, msg, skb);
 
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index d2eadab..baa58e7 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1326,7 +1326,7 @@
 	}
 
 	/* no suitable interface, frame not sent */
-	dev_kfree_skb_any(skb);
+	bond_tx_drop(bond->dev, skb);
 out:
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c9ac06c..c752008 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3522,7 +3522,7 @@
 		}
 	}
 	/* no slave that can tx has been found */
-	dev_kfree_skb_any(skb);
+	bond_tx_drop(bond->dev, skb);
 }
 
 /**
@@ -3584,7 +3584,7 @@
 			slave_id = bond_rr_gen_slave_id(bond);
 			bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
 		} else {
-			dev_kfree_skb_any(skb);
+			bond_tx_drop(bond_dev, skb);
 		}
 	}
 
@@ -3603,7 +3603,7 @@
 	if (slave)
 		bond_dev_queue_xmit(bond, skb, slave->dev);
 	else
-		dev_kfree_skb_any(skb);
+		bond_tx_drop(bond_dev, skb);
 
 	return NETDEV_TX_OK;
 }
@@ -3747,8 +3747,7 @@
 		slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
 		bond_dev_queue_xmit(bond, skb, slave->dev);
 	} else {
-		dev_kfree_skb_any(skb);
-		atomic_long_inc(&dev->tx_dropped);
+		bond_tx_drop(dev, skb);
 	}
 
 	return NETDEV_TX_OK;
@@ -3778,7 +3777,7 @@
 	if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
 		bond_dev_queue_xmit(bond, skb, slave->dev);
 	else
-		dev_kfree_skb_any(skb);
+		bond_tx_drop(bond_dev, skb);
 
 	return NETDEV_TX_OK;
 }
@@ -3858,7 +3857,7 @@
 		/* Should never happen, mode already checked */
 		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
 		WARN_ON_ONCE(1);
-		dev_kfree_skb_any(skb);
+		bond_tx_drop(dev, skb);
 		return NETDEV_TX_OK;
 	}
 }
@@ -3878,7 +3877,7 @@
 	if (bond_has_slaves(bond))
 		ret = __bond_start_xmit(skb, dev);
 	else
-		dev_kfree_skb_any(skb);
+		bond_tx_drop(dev, skb);
 	rcu_read_unlock();
 
 	return ret;
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 10920f0..bfb0b51 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -645,4 +645,10 @@
 /* exported from bond_netlink.c */
 extern struct rtnl_link_ops bond_link_ops;
 
+static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
+{
+	atomic_long_inc(&dev->tx_dropped);
+	dev_kfree_skb_any(skb);
+}
+
 #endif /* _LINUX_BONDING_H */
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 9234d80..7cf8f4a 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -37,13 +37,22 @@
 	  ethernet switch chips.
 
 config NET_DSA_MV88E6171
-	tristate "Marvell 88E6171 ethernet switch chip support"
+	tristate "Marvell 88E6171/6172 ethernet switch chip support"
 	select NET_DSA
 	select NET_DSA_MV88E6XXX
 	select NET_DSA_TAG_EDSA
 	---help---
-	  This enables support for the Marvell 88E6171 ethernet switch
-	  chip.
+	  This enables support for the Marvell 88E6171/6172 ethernet switch
+	  chips.
+
+config NET_DSA_MV88E6352
+	tristate "Marvell 88E6176/88E6352 ethernet switch chip support"
+	select NET_DSA
+	select NET_DSA_MV88E6XXX
+	select NET_DSA_TAG_EDSA
+	---help---
+	  This enables support for the Marvell 88E6176 and 88E6352 ethernet
+	  switch chips.
 
 config NET_DSA_BCM_SF2
 	tristate "Broadcom Starfighter 2 Ethernet switch support"
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 23a90de..e2d51c4 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -7,6 +7,9 @@
 ifdef CONFIG_NET_DSA_MV88E6131
 mv88e6xxx_drv-y += mv88e6131.o
 endif
+ifdef CONFIG_NET_DSA_MV88E6352
+mv88e6xxx_drv-y += mv88e6352.o
+endif
 ifdef CONFIG_NET_DSA_MV88E6171
 mv88e6xxx_drv-y += mv88e6171.o
 endif
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 05b0ca3..c29aebe 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -69,8 +69,11 @@
 
 	ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
 	if (ret >= 0) {
-		ret &= 0xfff0;
 		if (ret == 0x0600)
+			return "Marvell 88E6060 (A0)";
+		if (ret == 0x0601 || ret == 0x0602)
+			return "Marvell 88E6060 (B0)";
+		if ((ret & 0xfff0) == 0x0600)
 			return "Marvell 88E6060";
 	}
 
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index a332c53..9a3f9e0 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -291,6 +291,54 @@
 	return 0;
 }
 
+#ifdef CONFIG_NET_DSA_HWMON
+
+static int mv88e6123_61_65_get_temp(struct dsa_switch *ds, int *temp)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+	int val;
+
+	*temp = 0;
+
+	mutex_lock(&ps->phy_mutex);
+
+	ret = mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+	if (ret < 0)
+		goto error;
+
+	/* Enable temperature sensor */
+	ret = mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+	if (ret < 0)
+		goto error;
+
+	ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+	if (ret < 0)
+		goto error;
+
+	/* Wait for temperature to stabilize */
+	usleep_range(10000, 12000);
+
+	val = mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+	if (val < 0) {
+		ret = val;
+		goto error;
+	}
+
+	/* Disable temperature sensor */
+	ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, val & ~(1 << 5));
+	if (ret < 0)
+		goto error;
+
+	*temp = ((val & 0x1f) - 5) * 5;
+
+error:
+	mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+	mutex_unlock(&ps->phy_mutex);
+	return ret;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
 static int mv88e6123_61_65_setup(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -299,6 +347,7 @@
 
 	mutex_init(&ps->smi_mutex);
 	mutex_init(&ps->stats_mutex);
+	mutex_init(&ps->phy_mutex);
 
 	ret = mv88e6123_61_65_switch_reset(ds);
 	if (ret < 0)
@@ -329,16 +378,28 @@
 static int
 mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum)
 {
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int addr = mv88e6123_61_65_port_to_phy_addr(port);
-	return mv88e6xxx_phy_read(ds, addr, regnum);
+	int ret;
+
+	mutex_lock(&ps->phy_mutex);
+	ret = mv88e6xxx_phy_read(ds, addr, regnum);
+	mutex_unlock(&ps->phy_mutex);
+	return ret;
 }
 
 static int
 mv88e6123_61_65_phy_write(struct dsa_switch *ds,
 			      int port, int regnum, u16 val)
 {
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int addr = mv88e6123_61_65_port_to_phy_addr(port);
-	return mv88e6xxx_phy_write(ds, addr, regnum, val);
+	int ret;
+
+	mutex_lock(&ps->phy_mutex);
+	ret = mv88e6xxx_phy_write(ds, addr, regnum, val);
+	mutex_unlock(&ps->phy_mutex);
+	return ret;
 }
 
 static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
@@ -372,6 +433,9 @@
 	{ "hist_256_511bytes", 4, 0x0b, },
 	{ "hist_512_1023bytes", 4, 0x0c, },
 	{ "hist_1024_max_bytes", 4, 0x0d, },
+	{ "sw_in_discards", 4, 0x110, },
+	{ "sw_in_filtered", 2, 0x112, },
+	{ "sw_out_filtered", 2, 0x113, },
 };
 
 static void
@@ -406,6 +470,11 @@
 	.get_strings		= mv88e6123_61_65_get_strings,
 	.get_ethtool_stats	= mv88e6123_61_65_get_ethtool_stats,
 	.get_sset_count		= mv88e6123_61_65_get_sset_count,
+#ifdef CONFIG_NET_DSA_HWMON
+	.get_temp		= mv88e6123_61_65_get_temp,
+#endif
+	.get_regs_len		= mv88e6xxx_get_regs_len,
+	.get_regs		= mv88e6xxx_get_regs,
 };
 
 MODULE_ALIAS("platform:mv88e6123");
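
The conversion in mv88e6123_61_65_get_temp() treats the low five bits of
the sensor register as a code in 5 degree steps with an offset; a worked
example with a hypothetical raw reading:

	int val = 0x12;				/* low 5 bits = 18 */
	int temp = ((val & 0x1f) - 5) * 5;	/* (18 - 5) * 5 = 65 C */
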
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 244c735..1230f52 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -21,6 +21,7 @@
 #define ID_6085		0x04a0
 #define ID_6095		0x0950
 #define ID_6131		0x1060
+#define ID_6131_B2	0x1066
 
 static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
 {
@@ -32,12 +33,15 @@
 
 	ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
 	if (ret >= 0) {
-		ret &= 0xfff0;
-		if (ret == ID_6085)
+		int ret_masked = ret & 0xfff0;
+
+		if (ret_masked == ID_6085)
 			return "Marvell 88E6085";
-		if (ret == ID_6095)
+		if (ret_masked == ID_6095)
 			return "Marvell 88E6095/88E6095F";
-		if (ret == ID_6131)
+		if (ret == ID_6131_B2)
+			return "Marvell 88E6131 (B2)";
+		if (ret_masked == ID_6131)
 			return "Marvell 88E6131";
 	}
 
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 78d8e87..537eeed 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -1,4 +1,4 @@
-/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support
+/* net/dsa/mv88e6171.c - Marvell 88e6171/88e6172 switch chip support
  * Copyright (c) 2008-2009 Marvell Semiconductor
  * Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
  *
@@ -29,6 +29,8 @@
 	if (ret >= 0) {
 		if ((ret & 0xfff0) == 0x1710)
 			return "Marvell 88E6171";
+		if ((ret & 0xfff0) == 0x1720)
+			return "Marvell 88E6172";
 	}
 
 	return NULL;
@@ -409,3 +411,4 @@
 };
 
 MODULE_ALIAS("platform:mv88e6171");
+MODULE_ALIAS("platform:mv88e6172");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
new file mode 100644
index 0000000..258d9ef
--- /dev/null
+++ b/drivers/net/dsa/mv88e6352.c
@@ -0,0 +1,788 @@
+/*
+ * net/dsa/mv88e6352.c - Marvell 88e6352 switch chip support
+ *
+ * Copyright (c) 2014 Guenter Roeck
+ *
+ * Derived from mv88e6123_61_65.c
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6xxx.h"
+
+static int mv88e6352_wait(struct dsa_switch *ds, int reg, u16 mask)
+{
+	unsigned long timeout = jiffies + HZ / 10;
+
+	while (time_before(jiffies, timeout)) {
+		int ret;
+
+		ret = REG_READ(REG_GLOBAL2, reg);
+		if (ret < 0)
+			return ret;
+
+		if (!(ret & mask))
+			return 0;
+
+		usleep_range(1000, 2000);
+	}
+	return -ETIMEDOUT;
+}
+
+static inline int mv88e6352_phy_wait(struct dsa_switch *ds)
+{
+	return mv88e6352_wait(ds, 0x18, 0x8000);
+}
+
+static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds)
+{
+	return mv88e6352_wait(ds, 0x14, 0x0800);
+}
+
+static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds)
+{
+	return mv88e6352_wait(ds, 0x14, 0x8000);
+}
+
+static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum)
+{
+	int ret;
+
+	REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum);
+
+	ret = mv88e6352_phy_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	return REG_READ(REG_GLOBAL2, 0x19);
+}
+
+static int __mv88e6352_phy_write(struct dsa_switch *ds, int addr, int regnum,
+				 u16 val)
+{
+	REG_WRITE(REG_GLOBAL2, 0x19, val);
+	REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum);
+
+	return mv88e6352_phy_wait(ds);
+}
+
+static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
+{
+	struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
+	int ret;
+
+	if (bus == NULL)
+		return NULL;
+
+	ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+	if (ret >= 0) {
+		if ((ret & 0xfff0) == 0x1760)
+			return "Marvell 88E6176";
+		if (ret == 0x3521)
+			return "Marvell 88E6352 (A0)";
+		if (ret == 0x3522)
+			return "Marvell 88E6352 (A1)";
+		if ((ret & 0xfff0) == 0x3520)
+			return "Marvell 88E6352";
+	}
+
+	return NULL;
+}
+
+static int mv88e6352_switch_reset(struct dsa_switch *ds)
+{
+	unsigned long timeout;
+	int ret;
+	int i;
+
+	/* Set all ports to the disabled state. */
+	for (i = 0; i < 7; i++) {
+		ret = REG_READ(REG_PORT(i), 0x04);
+		REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+	}
+
+	/* Wait for transmit queues to drain. */
+	usleep_range(2000, 4000);
+
+	/* Reset the switch. Keep PPU active (bit 14, undocumented).
+	 * The PPU needs to be active to support indirect phy register
+	 * accesses through global registers 0x18 and 0x19.
+	 */
+	REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
+
+	/* Wait up to one second for reset to complete. */
+	timeout = jiffies + 1 * HZ;
+	while (time_before(jiffies, timeout)) {
+		ret = REG_READ(REG_GLOBAL, 0x00);
+		if ((ret & 0x8800) == 0x8800)
+			break;
+		usleep_range(1000, 2000);
+	}
+	if (time_after(jiffies, timeout))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int mv88e6352_setup_global(struct dsa_switch *ds)
+{
+	int ret;
+	int i;
+
+	/* Discard packets with excessive collisions,
+	 * mask all interrupt sources, enable PPU (bit 14, undocumented).
+	 */
+	REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
+
+	/* Set the default address aging time to 5 minutes, and
+	 * enable address learn messages to be sent to all message
+	 * ports.
+	 */
+	REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
+
+	/* Configure the priority mapping registers. */
+	ret = mv88e6xxx_config_prio(ds);
+	if (ret < 0)
+		return ret;
+
+	/* Configure the upstream port, and configure the upstream
+	 * port as the port to which ingress and egress monitor frames
+	 * are to be sent.
+	 */
+	REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+
+	/* Disable remote management for now, and set the switch's
+	 * DSA device number.
+	 */
+	REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
+
+	/* Send all frames with destination addresses matching
+	 * 01:80:c2:00:00:2x to the CPU port.
+	 */
+	REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
+
+	/* Send all frames with destination addresses matching
+	 * 01:80:c2:00:00:0x to the CPU port.
+	 */
+	REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
+
+	/* Disable the loopback filter, disable flow control
+	 * messages, disable flood broadcast override, disable
+	 * removing of provider tags, disable ATU age violation
+	 * interrupts, disable tag flow control, force flow
+	 * control priority to the highest, and send all special
+	 * multicast frames to the CPU at the highest priority.
+	 */
+	REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
+
+	/* Program the DSA routing table. */
+	for (i = 0; i < 32; i++) {
+		int nexthop = 0x1f;
+
+		if (i != ds->index && i < ds->dst->pd->nr_chips)
+			nexthop = ds->pd->rtable[i] & 0x1f;
+
+		REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
+	}
+
+	/* Clear all trunk masks. */
+	for (i = 0; i < 8; i++)
+		REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7f);
+
+	/* Clear all trunk mappings. */
+	for (i = 0; i < 16; i++)
+		REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+
+	/* Disable ingress rate limiting by resetting all ingress
+	 * rate limit registers to their initial state.
+	 */
+	for (i = 0; i < 7; i++)
+		REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
+
+	/* Initialise cross-chip port VLAN table to reset defaults. */
+	REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
+
+	/* Clear the priority override table. */
+	for (i = 0; i < 16; i++)
+		REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
+
+	/* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+
+	return 0;
+}
+
+static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
+{
+	int addr = REG_PORT(p);
+	u16 val;
+
+	/* MAC Forcing register: don't force link, speed, duplex
+	 * or flow control state to any particular values on physical
+	 * ports, but force the CPU port and all DSA ports to 1000 Mb/s
+	 * full duplex.
+	 */
+	if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
+		REG_WRITE(addr, 0x01, 0x003e);
+	else
+		REG_WRITE(addr, 0x01, 0x0003);
+
+	/* Do not limit the period of time that this port can be
+	 * paused for by the remote end or the period of time that
+	 * this port can pause the remote end.
+	 */
+	REG_WRITE(addr, 0x02, 0x0000);
+
+	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+	 * tunneling, determine priority by looking at 802.1p and IP
+	 * priority fields (IP prio has precedence), and set STP state
+	 * to Forwarding.
+	 *
+	 * If this is the CPU link, use DSA or EDSA tagging depending
+	 * on which tagging mode was configured.
+	 *
+	 * If this is a link to another switch, use DSA tagging mode.
+	 *
+	 * If this is the upstream port for this switch, enable
+	 * forwarding of unknown unicasts and multicasts.
+	 */
+	val = 0x0433;
+	if (dsa_is_cpu_port(ds, p)) {
+		if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+			val |= 0x3300;
+		else
+			val |= 0x0100;
+	}
+	if (ds->dsa_port_mask & (1 << p))
+		val |= 0x0100;
+	if (p == dsa_upstream_port(ds))
+		val |= 0x000c;
+	REG_WRITE(addr, 0x04, val);
+
+	/* Port Control 1: disable trunking.  Also, if this is the
+	 * CPU port, enable learn messages to be sent to this port.
+	 */
+	REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
+
+	/* Port based VLAN map: give each port its own address
+	 * database, allow the CPU port to talk to each of the 'real'
+	 * ports, and allow each of the 'real' ports to only talk to
+	 * the upstream port.
+	 */
+	val = (p & 0xf) << 12;
+	if (dsa_is_cpu_port(ds, p))
+		val |= ds->phys_port_mask;
+	else
+		val |= 1 << dsa_upstream_port(ds);
+	REG_WRITE(addr, 0x06, val);
+
+	/* Default VLAN ID and priority: don't set a default VLAN
+	 * ID, and set the default packet priority to zero.
+	 */
+	REG_WRITE(addr, 0x07, 0x0000);
+
+	/* Port Control 2: don't force a good FCS, set the maximum
+	 * frame size to 10240 bytes, don't let the switch add or
+	 * strip 802.1q tags, don't discard tagged or untagged frames
+	 * on this port, do a destination address lookup on all
+	 * received packets as usual, disable ARP mirroring and don't
+	 * send a copy of all transmitted/received frames on this port
+	 * to the CPU.
+	 */
+	REG_WRITE(addr, 0x08, 0x2080);
+
+	/* Egress rate control: disable egress rate control. */
+	REG_WRITE(addr, 0x09, 0x0001);
+
+	/* Egress rate control 2: disable egress rate control. */
+	REG_WRITE(addr, 0x0a, 0x0000);
+
+	/* Port Association Vector: when learning source addresses
+	 * of packets, add the address to the address database using
+	 * a port bitmap that has only the bit for this port set and
+	 * the other bits clear.
+	 */
+	REG_WRITE(addr, 0x0b, 1 << p);
+
+	/* Port ATU control: disable limiting the number of address
+	 * database entries that this port is allowed to use.
+	 */
+	REG_WRITE(addr, 0x0c, 0x0000);
+
+	/* Priority Override: disable DA, SA and VTU priority override. */
+	REG_WRITE(addr, 0x0d, 0x0000);
+
+	/* Port Ethertype: use the Ethertype DSA Ethertype value. */
+	REG_WRITE(addr, 0x0f, ETH_P_EDSA);
+
+	/* Tag Remap: use an identity 802.1p prio -> switch prio
+	 * mapping.
+	 */
+	REG_WRITE(addr, 0x18, 0x3210);
+
+	/* Tag Remap 2: use an identity 802.1p prio -> switch prio
+	 * mapping.
+	 */
+	REG_WRITE(addr, 0x19, 0x7654);
+
+	return 0;
+}
+
+#ifdef CONFIG_NET_DSA_HWMON
+
+static int mv88e6352_phy_page_read(struct dsa_switch *ds,
+				   int port, int page, int reg)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+
+	mutex_lock(&ps->phy_mutex);
+	ret = __mv88e6352_phy_write(ds, port, 0x16, page);
+	if (ret < 0)
+		goto error;
+	ret = __mv88e6352_phy_read(ds, port, reg);
+error:
+	__mv88e6352_phy_write(ds, port, 0x16, 0x0);
+	mutex_unlock(&ps->phy_mutex);
+	return ret;
+}
+
+static int mv88e6352_phy_page_write(struct dsa_switch *ds,
+				    int port, int page, int reg, int val)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+
+	mutex_lock(&ps->phy_mutex);
+	ret = __mv88e6352_phy_write(ds, port, 0x16, page);
+	if (ret < 0)
+		goto error;
+
+	ret = __mv88e6352_phy_write(ds, port, reg, val);
+error:
+	__mv88e6352_phy_write(ds, port, 0x16, 0x0);
+	mutex_unlock(&ps->phy_mutex);
+	return ret;
+}
+
+static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
+{
+	int ret;
+
+	*temp = 0;
+
+	ret = mv88e6352_phy_page_read(ds, 0, 6, 27);
+	if (ret < 0)
+		return ret;
+
+	*temp = (ret & 0xff) - 25;
+
+	return 0;
+}
+
+static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
+{
+	int ret;
+
+	*temp = 0;
+
+	ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+	if (ret < 0)
+		return ret;
+
+	*temp = (((ret >> 8) & 0x1f) * 5) - 25;
+
+	return 0;
+}
+
+static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
+{
+	int ret;
+
+	ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+	if (ret < 0)
+		return ret;
+	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+	return mv88e6352_phy_page_write(ds, 0, 6, 26,
+					(ret & 0xe0ff) | (temp << 8));
+}
+
+static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+{
+	int ret;
+
+	*alarm = false;
+
+	ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+	if (ret < 0)
+		return ret;
+
+	*alarm = !!(ret & 0x40);
+
+	return 0;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
+static int mv88e6352_setup(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+	int i;
+
+	mutex_init(&ps->smi_mutex);
+	mutex_init(&ps->stats_mutex);
+	mutex_init(&ps->phy_mutex);
+	mutex_init(&ps->eeprom_mutex);
+
+	ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+
+	ret = mv88e6352_switch_reset(ds);
+	if (ret < 0)
+		return ret;
+
+	/* @@@ initialise vtu and atu */
+
+	ret = mv88e6352_setup_global(ds);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < 7; i++) {
+		ret = mv88e6352_setup_port(ds, i);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mv88e6352_port_to_phy_addr(int port)
+{
+	if (port >= 0 && port <= 4)
+		return port;
+	return -EINVAL;
+}
+
+static int
+mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int addr = mv88e6352_port_to_phy_addr(port);
+	int ret;
+
+	if (addr < 0)
+		return addr;
+
+	mutex_lock(&ps->phy_mutex);
+	ret = __mv88e6352_phy_read(ds, addr, regnum);
+	mutex_unlock(&ps->phy_mutex);
+
+	return ret;
+}
+
+static int
+mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int addr = mv88e6352_port_to_phy_addr(port);
+	int ret;
+
+	if (addr < 0)
+		return addr;
+
+	mutex_lock(&ps->phy_mutex);
+	ret = __mv88e6352_phy_write(ds, addr, regnum, val);
+	mutex_unlock(&ps->phy_mutex);
+
+	return ret;
+}
+
+static struct mv88e6xxx_hw_stat mv88e6352_hw_stats[] = {
+	{ "in_good_octets", 8, 0x00, },
+	{ "in_bad_octets", 4, 0x02, },
+	{ "in_unicast", 4, 0x04, },
+	{ "in_broadcasts", 4, 0x06, },
+	{ "in_multicasts", 4, 0x07, },
+	{ "in_pause", 4, 0x16, },
+	{ "in_undersize", 4, 0x18, },
+	{ "in_fragments", 4, 0x19, },
+	{ "in_oversize", 4, 0x1a, },
+	{ "in_jabber", 4, 0x1b, },
+	{ "in_rx_error", 4, 0x1c, },
+	{ "in_fcs_error", 4, 0x1d, },
+	{ "out_octets", 8, 0x0e, },
+	{ "out_unicast", 4, 0x10, },
+	{ "out_broadcasts", 4, 0x13, },
+	{ "out_multicasts", 4, 0x12, },
+	{ "out_pause", 4, 0x15, },
+	{ "excessive", 4, 0x11, },
+	{ "collisions", 4, 0x1e, },
+	{ "deferred", 4, 0x05, },
+	{ "single", 4, 0x14, },
+	{ "multiple", 4, 0x17, },
+	{ "out_fcs_error", 4, 0x03, },
+	{ "late", 4, 0x1f, },
+	{ "hist_64bytes", 4, 0x08, },
+	{ "hist_65_127bytes", 4, 0x09, },
+	{ "hist_128_255bytes", 4, 0x0a, },
+	{ "hist_256_511bytes", 4, 0x0b, },
+	{ "hist_512_1023bytes", 4, 0x0c, },
+	{ "hist_1024_max_bytes", 4, 0x0d, },
+	{ "sw_in_discards", 4, 0x110, },
+	{ "sw_in_filtered", 2, 0x112, },
+	{ "sw_out_filtered", 2, 0x113, },
+};
+
+static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+
+	mutex_lock(&ps->eeprom_mutex);
+
+	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
+				  0xc000 | (addr & 0xff));
+	if (ret < 0)
+		goto error;
+
+	ret = mv88e6352_eeprom_busy_wait(ds);
+	if (ret < 0)
+		goto error;
+
+	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15);
+error:
+	mutex_unlock(&ps->eeprom_mutex);
+	return ret;
+}
+
+static int mv88e6352_get_eeprom(struct dsa_switch *ds,
+				struct ethtool_eeprom *eeprom, u8 *data)
+{
+	int offset;
+	int len;
+	int ret;
+
+	offset = eeprom->offset;
+	len = eeprom->len;
+	eeprom->len = 0;
+
+	eeprom->magic = 0xc3ec4951;
+
+	ret = mv88e6352_eeprom_load_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	if (offset & 1) {
+		int word;
+
+		word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+		if (word < 0)
+			return word;
+
+		*data++ = (word >> 8) & 0xff;
+
+		offset++;
+		len--;
+		eeprom->len++;
+	}
+
+	while (len >= 2) {
+		int word;
+
+		word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+		if (word < 0)
+			return word;
+
+		*data++ = word & 0xff;
+		*data++ = (word >> 8) & 0xff;
+
+		offset += 2;
+		len -= 2;
+		eeprom->len += 2;
+	}
+
+	if (len) {
+		int word;
+
+		word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+		if (word < 0)
+			return word;
+
+		*data++ = word & 0xff;
+
+		offset++;
+		len--;
+		eeprom->len++;
+	}
+
+	return 0;
+}
+
+static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds)
+{
+	int ret;
+
+	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14);
+	if (ret < 0)
+		return ret;
+
+	if (!(ret & 0x0400))
+		return -EROFS;
+
+	return 0;
+}
+
+static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
+				       u16 data)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+
+	mutex_lock(&ps->eeprom_mutex);
+
+	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data);
+	if (ret < 0)
+		goto error;
+
+	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
+				  0xb000 | (addr & 0xff));
+	if (ret < 0)
+		goto error;
+
+	ret = mv88e6352_eeprom_busy_wait(ds);
+error:
+	mutex_unlock(&ps->eeprom_mutex);
+	return ret;
+}
+
+static int mv88e6352_set_eeprom(struct dsa_switch *ds,
+				struct ethtool_eeprom *eeprom, u8 *data)
+{
+	int offset;
+	int ret;
+	int len;
+
+	if (eeprom->magic != 0xc3ec4951)
+		return -EINVAL;
+
+	ret = mv88e6352_eeprom_is_readonly(ds);
+	if (ret)
+		return ret;
+
+	offset = eeprom->offset;
+	len = eeprom->len;
+	eeprom->len = 0;
+
+	ret = mv88e6352_eeprom_load_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	if (offset & 1) {
+		int word;
+
+		word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+		if (word < 0)
+			return word;
+
+		word = (*data++ << 8) | (word & 0xff);
+
+		ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
+		if (ret < 0)
+			return ret;
+
+		offset++;
+		len--;
+		eeprom->len++;
+	}
+
+	while (len >= 2) {
+		int word;
+
+		word = *data++;
+		word |= *data++ << 8;
+
+		ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
+		if (ret < 0)
+			return ret;
+
+		offset += 2;
+		len -= 2;
+		eeprom->len += 2;
+	}
+
+	if (len) {
+		int word;
+
+		word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+		if (word < 0)
+			return word;
+
+		word = (word & 0xff00) | *data++;
+
+		ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
+		if (ret < 0)
+			return ret;
+
+		offset++;
+		len--;
+		eeprom->len++;
+	}
+
+	return 0;
+}
+
+static void
+mv88e6352_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+	mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6352_hw_stats),
+			      mv88e6352_hw_stats, port, data);
+}
+
+static void
+mv88e6352_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+	mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6352_hw_stats),
+				    mv88e6352_hw_stats, port, data);
+}
+
+static int mv88e6352_get_sset_count(struct dsa_switch *ds)
+{
+	return ARRAY_SIZE(mv88e6352_hw_stats);
+}
+
+struct dsa_switch_driver mv88e6352_switch_driver = {
+	.tag_protocol		= DSA_TAG_PROTO_EDSA,
+	.priv_size		= sizeof(struct mv88e6xxx_priv_state),
+	.probe			= mv88e6352_probe,
+	.setup			= mv88e6352_setup,
+	.set_addr		= mv88e6xxx_set_addr_indirect,
+	.phy_read		= mv88e6352_phy_read,
+	.phy_write		= mv88e6352_phy_write,
+	.poll_link		= mv88e6xxx_poll_link,
+	.get_strings		= mv88e6352_get_strings,
+	.get_ethtool_stats	= mv88e6352_get_ethtool_stats,
+	.get_sset_count		= mv88e6352_get_sset_count,
+#ifdef CONFIG_NET_DSA_HWMON
+	.get_temp		= mv88e6352_get_temp,
+	.get_temp_limit		= mv88e6352_get_temp_limit,
+	.set_temp_limit		= mv88e6352_set_temp_limit,
+	.get_temp_alarm		= mv88e6352_get_temp_alarm,
+#endif
+	.get_eeprom		= mv88e6352_get_eeprom,
+	.set_eeprom		= mv88e6352_set_eeprom,
+	.get_regs_len		= mv88e6xxx_get_regs_len,
+	.get_regs		= mv88e6xxx_get_regs,
+};
+
+MODULE_ALIAS("platform:mv88e6352");
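
In the EEPROM accessors above, 16-bit words map to the ethtool byte
stream little-endian: byte 2n is the low byte of word n, byte 2n+1 its
high byte. A worked example (values hypothetical): a 3-byte read at
offset 1 touches words 0 and 1 as

	data[0] = word0 >> 8;		/* odd leading byte */
	data[1] = word1 & 0xff;
	data[2] = word1 >> 8;
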
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index a6c90cf..da558d8 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -485,20 +485,60 @@
 	for (i = 0; i < nr_stats; i++) {
 		struct mv88e6xxx_hw_stat *s = stats + i;
 		u32 low;
-		u32 high;
+		u32 high = 0;
 
+		if (s->reg >= 0x100) {
+			int ret;
+
+			ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
+						 s->reg - 0x100);
+			if (ret < 0)
+				goto error;
+			low = ret;
+			if (s->sizeof_stat == 4) {
+				ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
+							 s->reg - 0x100 + 1);
+				if (ret < 0)
+					goto error;
+				high = ret;
+			}
+			data[i] = (((u64)high) << 16) | low;
+			continue;
+		}
 		mv88e6xxx_stats_read(ds, s->reg, &low);
 		if (s->sizeof_stat == 8)
 			mv88e6xxx_stats_read(ds, s->reg + 1, &high);
-		else
-			high = 0;
 
 		data[i] = (((u64)high) << 32) | low;
 	}
-
+error:
 	mutex_unlock(&ps->stats_mutex);
 }
 
+int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
+{
+	return 32 * sizeof(u16);
+}
+
+void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
+			struct ethtool_regs *regs, void *_p)
+{
+	u16 *p = _p;
+	int i;
+
+	regs->version = 0;
+
+	memset(p, 0xff, 32 * sizeof(u16));
+
+	for (i = 0; i < 32; i++) {
+		int ret;
+
+		ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
+		if (ret >= 0)
+			p[i] = ret;
+	}
+}
+
 static int __init mv88e6xxx_init(void)
 {
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
@@ -507,6 +547,9 @@
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
 	register_switch_driver(&mv88e6123_61_65_switch_driver);
 #endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
+	register_switch_driver(&mv88e6352_switch_driver);
+#endif
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
 	register_switch_driver(&mv88e6171_switch_driver);
 #endif
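
In the get_ethtool_stats change above, stats with s->reg >= 0x100 come
from per-port registers rather than the global statistics unit. A worked
example with hypothetical values: "sw_in_discards" has s->reg == 0x110,
so the low word is read from port register 0x10 and, because
sizeof_stat is 4, the high word from register 0x11:

	data[i] = ((u64)high << 16) | low;
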
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 5e5145a..a0780b0 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -37,6 +37,17 @@
 	 */
 	struct mutex	stats_mutex;
 
+	/* This mutex serializes phy access for chips with
+	 * indirect phy addressing. It is unused for chips
+	 * with direct phy access.
+	 */
+	struct mutex	phy_mutex;
+
+	/* This mutex serializes eeprom access for chips with
+	 * eeprom support.
+	 */
+	struct mutex	eeprom_mutex;
+
 	int		id; /* switch product id */
 };
 
@@ -67,9 +78,13 @@
 void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
 				 int nr_stats, struct mv88e6xxx_hw_stat *stats,
 				 int port, uint64_t *data);
+int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
+void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
+			struct ethtool_regs *regs, void *_p);
 
 extern struct dsa_switch_driver mv88e6131_switch_driver;
 extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
+extern struct dsa_switch_driver mv88e6352_switch_driver;
 extern struct dsa_switch_driver mv88e6171_switch_driver;
 
 #define REG_READ(addr, reg)						\
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 48775b8..dede43f 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -1285,7 +1285,7 @@
 		return err;
 	}
 
-	image_data = (u8 *) typhoon_fw->data;
+	image_data = typhoon_fw->data;
 	remaining = typhoon_fw->size;
 	if (remaining < sizeof(struct typhoon_file_header))
 		goto invalid_fw;
@@ -1343,7 +1343,7 @@
 	int i;
 	int err;
 
-	image_data = (u8 *) typhoon_fw->data;
+	image_data = typhoon_fw->data;
 	fHdr = (struct typhoon_file_header *) image_data;
 
 	/* Cannot just map the firmware image using pci_map_single() as
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 8319c99..7a5e4aa 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,7 +179,7 @@
 
 config AMD_XGBE
 	tristate "AMD 10GbE Ethernet driver"
-	depends on OF_NET
+	depends on OF_NET && HAS_IOMEM
 	select PHYLIB
 	select AMD_XGBE_PHY
 	select BITREVERSE
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index caade30..2fe8fc7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -207,6 +207,8 @@
 /* DMA channel register entry bit positions and sizes */
 #define DMA_CH_CR_PBLX8_INDEX		16
 #define DMA_CH_CR_PBLX8_WIDTH		1
+#define DMA_CH_CR_SPH_INDEX		24
+#define DMA_CH_CR_SPH_WIDTH		1
 #define DMA_CH_IER_AIE_INDEX		15
 #define DMA_CH_IER_AIE_WIDTH		1
 #define DMA_CH_IER_FBEE_INDEX		12
@@ -306,6 +308,9 @@
 #define MAC_MACA0LR			0x0304
 #define MAC_MACA1HR			0x0308
 #define MAC_MACA1LR			0x030c
+#define MAC_RSSCR			0x0c80
+#define MAC_RSSAR			0x0c88
+#define MAC_RSSDR			0x0c8c
 #define MAC_TSCR			0x0d00
 #define MAC_SSIR			0x0d04
 #define MAC_STSR			0x0d08
@@ -429,6 +434,8 @@
 #define MAC_RCR_CST_WIDTH		1
 #define MAC_RCR_DCRCC_INDEX		3
 #define MAC_RCR_DCRCC_WIDTH		1
+#define MAC_RCR_HDSMS_INDEX		12
+#define MAC_RCR_HDSMS_WIDTH		3
 #define MAC_RCR_IPC_INDEX		9
 #define MAC_RCR_IPC_WIDTH		1
 #define MAC_RCR_JE_INDEX		8
@@ -445,6 +452,24 @@
 #define MAC_RFCR_UP_WIDTH		1
 #define MAC_RQC0R_RXQ0EN_INDEX		0
 #define MAC_RQC0R_RXQ0EN_WIDTH		2
+#define MAC_RSSAR_ADDRT_INDEX		2
+#define MAC_RSSAR_ADDRT_WIDTH		1
+#define MAC_RSSAR_CT_INDEX		1
+#define MAC_RSSAR_CT_WIDTH		1
+#define MAC_RSSAR_OB_INDEX		0
+#define MAC_RSSAR_OB_WIDTH		1
+#define MAC_RSSAR_RSSIA_INDEX		8
+#define MAC_RSSAR_RSSIA_WIDTH		8
+#define MAC_RSSCR_IP2TE_INDEX		1
+#define MAC_RSSCR_IP2TE_WIDTH		1
+#define MAC_RSSCR_RSSE_INDEX		0
+#define MAC_RSSCR_RSSE_WIDTH		1
+#define MAC_RSSCR_TCP4TE_INDEX		2
+#define MAC_RSSCR_TCP4TE_WIDTH		1
+#define MAC_RSSCR_UDP4TE_INDEX		3
+#define MAC_RSSCR_UDP4TE_WIDTH		1
+#define MAC_RSSDR_DMCH_INDEX		0
+#define MAC_RSSDR_DMCH_WIDTH		4
 #define MAC_SSIR_SNSINC_INDEX		8
 #define MAC_SSIR_SNSINC_WIDTH		8
 #define MAC_SSIR_SSINC_INDEX		16
@@ -844,9 +869,13 @@
 #define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX	5
 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH	1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX	6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH	1
 
 #define RX_NORMAL_DESC0_OVT_INDEX		0
 #define RX_NORMAL_DESC0_OVT_WIDTH		16
+#define RX_NORMAL_DESC2_HL_INDEX		0
+#define RX_NORMAL_DESC2_HL_WIDTH		10
 #define RX_NORMAL_DESC3_CDA_INDEX		27
 #define RX_NORMAL_DESC3_CDA_WIDTH		1
 #define RX_NORMAL_DESC3_CTXT_INDEX		30
@@ -855,14 +884,27 @@
 #define RX_NORMAL_DESC3_ES_WIDTH		1
 #define RX_NORMAL_DESC3_ETLT_INDEX		16
 #define RX_NORMAL_DESC3_ETLT_WIDTH		4
+#define RX_NORMAL_DESC3_FD_INDEX		29
+#define RX_NORMAL_DESC3_FD_WIDTH		1
 #define RX_NORMAL_DESC3_INTE_INDEX		30
 #define RX_NORMAL_DESC3_INTE_WIDTH		1
+#define RX_NORMAL_DESC3_L34T_INDEX		20
+#define RX_NORMAL_DESC3_L34T_WIDTH		4
 #define RX_NORMAL_DESC3_LD_INDEX		28
 #define RX_NORMAL_DESC3_LD_WIDTH		1
 #define RX_NORMAL_DESC3_OWN_INDEX		31
 #define RX_NORMAL_DESC3_OWN_WIDTH		1
 #define RX_NORMAL_DESC3_PL_INDEX		0
 #define RX_NORMAL_DESC3_PL_WIDTH		14
+#define RX_NORMAL_DESC3_RSV_INDEX		26
+#define RX_NORMAL_DESC3_RSV_WIDTH		1
+
+#define RX_DESC3_L34T_IPV4_TCP			1
+#define RX_DESC3_L34T_IPV4_UDP			2
+#define RX_DESC3_L34T_IPV4_ICMP			3
+#define RX_DESC3_L34T_IPV6_TCP			9
+#define RX_DESC3_L34T_IPV6_UDP			10
+#define RX_DESC3_L34T_IPV6_ICMP			11
 
 #define RX_CONTEXT_DESC3_TSA_INDEX		4
 #define RX_CONTEXT_DESC3_TSA_WIDTH		1
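
The _INDEX/_WIDTH pairs above feed the driver's generic bit-field
helpers; a restatement of that idiom for illustration (desc3 stands in
for the third receive descriptor word):

	#define GET_BITS(var, index, width)				\
		(((var) >> (index)) & ((0x1 << (width)) - 1))

	unsigned int l34t;

	/* e.g. extract the L3/L4 packet type from a receive descriptor */
	l34t = GET_BITS(desc3, RX_NORMAL_DESC3_L34T_INDEX,
			RX_NORMAL_DESC3_L34T_WIDTH);
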
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 6fc5da0..e6b9f54 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -117,7 +117,7 @@
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
+static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
 
 static void xgbe_free_ring(struct xgbe_prv_data *pdata,
 			   struct xgbe_ring *ring)
@@ -131,13 +131,35 @@
 	if (ring->rdata) {
 		for (i = 0; i < ring->rdesc_count; i++) {
 			rdata = XGBE_GET_DESC_DATA(ring, i);
-			xgbe_unmap_skb(pdata, rdata);
+			xgbe_unmap_rdata(pdata, rdata);
 		}
 
 		kfree(ring->rdata);
 		ring->rdata = NULL;
 	}
 
+	if (ring->rx_hdr_pa.pages) {
+		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+		put_page(ring->rx_hdr_pa.pages);
+
+		ring->rx_hdr_pa.pages = NULL;
+		ring->rx_hdr_pa.pages_len = 0;
+		ring->rx_hdr_pa.pages_offset = 0;
+		ring->rx_hdr_pa.pages_dma = 0;
+	}
+
+	if (ring->rx_buf_pa.pages) {
+		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+		put_page(ring->rx_buf_pa.pages);
+
+		ring->rx_buf_pa.pages = NULL;
+		ring->rx_buf_pa.pages_len = 0;
+		ring->rx_buf_pa.pages_offset = 0;
+		ring->rx_buf_pa.pages_dma = 0;
+	}
+
 	if (ring->rdesc) {
 		dma_free_coherent(pdata->dev,
 				  (sizeof(struct xgbe_ring_desc) *
@@ -233,6 +255,96 @@
 	return ret;
 }
 
+static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+			    struct xgbe_page_alloc *pa, gfp_t gfp, int order)
+{
+	struct page *pages = NULL;
+	dma_addr_t pages_dma;
+	int ret;
+
+	/* Try to obtain pages, decreasing order if necessary */
+	gfp |= __GFP_COLD | __GFP_COMP;
+	while (order >= 0) {
+		pages = alloc_pages(gfp, order);
+		if (pages)
+			break;
+
+		order--;
+	}
+	if (!pages)
+		return -ENOMEM;
+
+	/* Map the pages */
+	pages_dma = dma_map_page(pdata->dev, pages, 0,
+				 PAGE_SIZE << order, DMA_FROM_DEVICE);
+	ret = dma_mapping_error(pdata->dev, pages_dma);
+	if (ret) {
+		put_page(pages);
+		return ret;
+	}
+
+	pa->pages = pages;
+	pa->pages_len = PAGE_SIZE << order;
+	pa->pages_offset = 0;
+	pa->pages_dma = pages_dma;
+
+	return 0;
+}
+
+static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
+				 struct xgbe_page_alloc *pa,
+				 unsigned int len)
+{
+	get_page(pa->pages);
+	bd->pa = *pa;
+
+	bd->dma = pa->pages_dma + pa->pages_offset;
+	bd->dma_len = len;
+
+	pa->pages_offset += len;
+	if ((pa->pages_offset + len) > pa->pages_len) {
+		/* This data descriptor is responsible for unmapping page(s) */
+		bd->pa_unmap = *pa;
+
+		/* Get a new allocation next time */
+		pa->pages = NULL;
+		pa->pages_len = 0;
+		pa->pages_offset = 0;
+		pa->pages_dma = 0;
+	}
+}
+
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+			      struct xgbe_ring *ring,
+			      struct xgbe_ring_data *rdata)
+{
+	int order, ret;
+
+	if (!ring->rx_hdr_pa.pages) {
+		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+		if (ret)
+			return ret;
+	}
+
+	if (!ring->rx_buf_pa.pages) {
+		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
+				       order);
+		if (ret)
+			return ret;
+	}
+
+	/* Set up the header page info */
+	xgbe_set_buffer_data(&rdata->rx_hdr, &ring->rx_hdr_pa,
+			     XGBE_SKB_ALLOC_SIZE);
+
+	/* Set up the buffer page info */
+	xgbe_set_buffer_data(&rdata->rx_buf, &ring->rx_buf_pa,
+			     pdata->rx_buf_size);
+
+	return 0;
+}
+
 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -281,8 +393,7 @@
 	struct xgbe_ring *ring;
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_ring_data *rdata;
-	dma_addr_t rdesc_dma, skb_dma;
-	struct sk_buff *skb = NULL;
+	dma_addr_t rdesc_dma;
 	unsigned int i, j;
 
 	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
@@ -302,22 +413,8 @@
 			rdata->rdesc = rdesc;
 			rdata->rdesc_dma = rdesc_dma;
 
-			/* Allocate skb & assign to each rdesc */
-			skb = dev_alloc_skb(pdata->rx_buf_size);
-			if (skb == NULL)
+			if (xgbe_map_rx_buffer(pdata, ring, rdata))
 				break;
-			skb_dma = dma_map_single(pdata->dev, skb->data,
-						 pdata->rx_buf_size,
-						 DMA_FROM_DEVICE);
-			if (dma_mapping_error(pdata->dev, skb_dma)) {
-				netdev_alert(pdata->netdev,
-					     "failed to do the dma map\n");
-				dev_kfree_skb_any(skb);
-				break;
-			}
-			rdata->skb = skb;
-			rdata->skb_dma = skb_dma;
-			rdata->skb_dma_len = pdata->rx_buf_size;
 
 			rdesc++;
 			rdesc_dma += sizeof(struct xgbe_ring_desc);
@@ -334,8 +431,8 @@
 	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
 }
 
-static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
-			   struct xgbe_ring_data *rdata)
+static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
+			     struct xgbe_ring_data *rdata)
 {
 	if (rdata->skb_dma) {
 		if (rdata->mapped_as_page) {
@@ -354,6 +451,29 @@
 		rdata->skb = NULL;
 	}
 
+	if (rdata->rx_hdr.pa.pages)
+		put_page(rdata->rx_hdr.pa.pages);
+
+	if (rdata->rx_hdr.pa_unmap.pages) {
+		dma_unmap_page(pdata->dev, rdata->rx_hdr.pa_unmap.pages_dma,
+			       rdata->rx_hdr.pa_unmap.pages_len,
+			       DMA_FROM_DEVICE);
+		put_page(rdata->rx_hdr.pa_unmap.pages);
+	}
+
+	if (rdata->rx_buf.pa.pages)
+		put_page(rdata->rx_buf.pa.pages);
+
+	if (rdata->rx_buf.pa_unmap.pages) {
+		dma_unmap_page(pdata->dev, rdata->rx_buf.pa_unmap.pages_dma,
+			       rdata->rx_buf.pa_unmap.pages_len,
+			       DMA_FROM_DEVICE);
+		put_page(rdata->rx_buf.pa_unmap.pages);
+	}
+
+	memset(&rdata->rx_hdr, 0, sizeof(rdata->rx_hdr));
+	memset(&rdata->rx_buf, 0, sizeof(rdata->rx_buf));
+
 	rdata->tso_header = 0;
 	rdata->len = 0;
 	rdata->interrupt = 0;
@@ -494,7 +614,7 @@
 err_out:
 	while (start_index < cur_index) {
 		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
-		xgbe_unmap_skb(pdata, rdata);
+		xgbe_unmap_rdata(pdata, rdata);
 	}
 
 	DBGPR("<--xgbe_map_tx_skb: count=0\n");
@@ -502,40 +622,25 @@
 	return 0;
 }
 
-static void xgbe_realloc_skb(struct xgbe_channel *channel)
+static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
-	struct sk_buff *skb = NULL;
-	dma_addr_t skb_dma;
 	int i;
 
-	DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
+	DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
 	      ring->rx.realloc_index);
 
 	for (i = 0; i < ring->dirty; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
 
 		/* Reset rdata values */
-		xgbe_unmap_skb(pdata, rdata);
+		xgbe_unmap_rdata(pdata, rdata);
 
-		/* Allocate skb & assign to each rdesc */
-		skb = dev_alloc_skb(pdata->rx_buf_size);
-		if (skb == NULL)
+		if (xgbe_map_rx_buffer(pdata, ring, rdata))
 			break;
-		skb_dma = dma_map_single(pdata->dev, skb->data,
-					 pdata->rx_buf_size, DMA_FROM_DEVICE);
-		if (dma_mapping_error(pdata->dev, skb_dma)) {
-			netdev_alert(pdata->netdev,
-				     "failed to do the dma map\n");
-			dev_kfree_skb_any(skb);
-			break;
-		}
-		rdata->skb = skb;
-		rdata->skb_dma = skb_dma;
-		rdata->skb_dma_len = pdata->rx_buf_size;
 
 		hw_if->rx_desc_reset(rdata);
 
@@ -543,7 +648,7 @@
 	}
 	ring->dirty = 0;
 
-	DBGPR("<--xgbe_realloc_skb\n");
+	DBGPR("<--xgbe_realloc_rx_buffer\n");
 }
 
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
@@ -553,8 +658,8 @@
 	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
 	desc_if->free_ring_resources = xgbe_free_ring_resources;
 	desc_if->map_tx_skb = xgbe_map_tx_skb;
-	desc_if->realloc_skb = xgbe_realloc_skb;
-	desc_if->unmap_skb = xgbe_unmap_skb;
+	desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
+	desc_if->unmap_rdata = xgbe_unmap_rdata;
 	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
 	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
 
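
Some arithmetic on the new page-sharing scheme (numbers hypothetical):
with a 32 KiB allocation (order 3) and a 2 KiB rx_buf_size, sixteen
descriptors slice one allocation:

	PAGE_SIZE << 3 = 32768; 32768 / 2048 = 16 slices
	after slice 16: pages_offset = 32768; 32768 + 2048 > pages_len

so the sixteenth descriptor also inherits pa_unmap and is the one that
finally unmaps and releases the DMA pages.
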
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 9da3a03..7daa2cd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -335,6 +335,161 @@
 	}
 }
 
+static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel;
+	unsigned int i;
+
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->rx_ring)
+			break;
+
+		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+	}
+
+	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+}
+
+static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+			      unsigned int index, unsigned int val)
+{
+	unsigned int wait;
+	int ret = 0;
+
+	mutex_lock(&pdata->rss_mutex);
+
+	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+	wait = 1000;
+	while (wait--) {
+		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+			goto unlock;
+
+		usleep_range(1000, 1500);
+	}
+
+	ret = -EBUSY;
+
+unlock:
+	mutex_unlock(&pdata->rss_mutex);
+
+	return ret;
+}
+
+static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
+{
+	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+	unsigned int *key = (unsigned int *)&pdata->rss_key;
+	int ret;
+
+	while (key_regs--) {
+		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
+					 key_regs, *key++);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+		ret = xgbe_write_rss_reg(pdata,
+					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
+					 pdata->rss_table[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
+{
+	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+	return xgbe_write_rss_hash_key(pdata);
+}
+
+static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
+				     const u32 *table)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
+
+	return xgbe_write_rss_lookup_table(pdata);
+}
+
+static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
+{
+	int ret;
+
+	if (!pdata->hw_feat.rss)
+		return -EOPNOTSUPP;
+
+	/* Program the hash key */
+	ret = xgbe_write_rss_hash_key(pdata);
+	if (ret)
+		return ret;
+
+	/* Program the lookup table */
+	ret = xgbe_write_rss_lookup_table(pdata);
+	if (ret)
+		return ret;
+
+	/* Set the RSS options */
+	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+	/* Enable RSS */
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+	return 0;
+}
+
+static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
+{
+	if (!pdata->hw_feat.rss)
+		return -EOPNOTSUPP;
+
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+
+	return 0;
+}
+
+static void xgbe_config_rss(struct xgbe_prv_data *pdata)
+{
+	int ret;
+
+	if (!pdata->hw_feat.rss)
+		return;
+
+	if (pdata->netdev->features & NETIF_F_RXHASH)
+		ret = xgbe_enable_rss(pdata);
+	else
+		ret = xgbe_disable_rss(pdata);
+
+	if (ret)
+		netdev_err(pdata->netdev,
+			   "error configuring RSS, RSS disabled\n");
+}
+
 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
 	unsigned int max_q_count, q_count;
@@ -465,17 +620,21 @@
 
 		if (channel->tx_ring) {
 			/* Enable the following Tx interrupts
-			 *   TIE  - Transmit Interrupt Enable (unless polling)
+			 *   TIE  - Transmit Interrupt Enable (unless using
+			 *          per channel interrupts)
 			 */
-			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+			if (!pdata->per_channel_irq)
+				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
 		}
 		if (channel->rx_ring) {
 			/* Enable following Rx interrupts
 			 *   RBUE - Receive Buffer Unavailable Enable
-			 *   RIE  - Receive Interrupt Enable
+			 *   RIE  - Receive Interrupt Enable (unless using
+			 *          per channel interrupts)
 			 */
 			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
-			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+			if (!pdata->per_channel_irq)
+				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
 		}
 
 		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
@@ -880,13 +1039,15 @@
 	rdesc->desc1 = 0;
 	rdesc->desc2 = 0;
 	rdesc->desc3 = 0;
+
+	/* Make sure ownership is written to the descriptor */
+	wmb();
 }
 
 static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 {
 	struct xgbe_ring *ring = channel->tx_ring;
 	struct xgbe_ring_data *rdata;
-	struct xgbe_ring_desc *rdesc;
 	int i;
 	int start_index = ring->cur;
 
@@ -895,26 +1056,11 @@
 	/* Initialize all descriptors */
 	for (i = 0; i < ring->rdesc_count; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, i);
-		rdesc = rdata->rdesc;
 
-		/* Initialize Tx descriptor
-		 *   Set buffer 1 (lo) address to zero
-		 *   Set buffer 1 (hi) address to zero
-		 *   Reset all other control bits (IC, TTSE, B2L & B1L)
-		 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
-		 *     etc)
-		 */
-		rdesc->desc0 = 0;
-		rdesc->desc1 = 0;
-		rdesc->desc2 = 0;
-		rdesc->desc3 = 0;
+		/* Initialize Tx descriptor */
+		xgbe_tx_desc_reset(rdata);
 	}
 
-	/* Make sure everything is written to the descriptor(s) before
-	 * telling the device about them
-	 */
-	wmb();
-
 	/* Update the total number of Tx descriptors */
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
 
@@ -933,19 +1079,19 @@
 	struct xgbe_ring_desc *rdesc = rdata->rdesc;
 
 	/* Reset the Rx descriptor
-	 *   Set buffer 1 (lo) address to dma address (lo)
-	 *   Set buffer 1 (hi) address to dma address (hi)
-	 *   Set buffer 2 (lo) address to zero
-	 *   Set buffer 2 (hi) address to zero and set control bits
-	 *     OWN and INTE
+	 *   Set buffer 1 (lo) address to header dma address (lo)
+	 *   Set buffer 1 (hi) address to header dma address (hi)
+	 *   Set buffer 2 (lo) address to buffer dma address (lo)
+	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
+	 *     set control bits OWN and INTE
 	 */
-	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
-	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
-	rdesc->desc2 = 0;
+	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx_hdr.dma));
+	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx_hdr.dma));
+	rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx_buf.dma));
+	rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx_buf.dma));
 
-	rdesc->desc3 = 0;
-	if (rdata->interrupt)
-		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+			  rdata->interrupt ? 1 : 0);
 
 	/* Since the Rx DMA engine is likely running, make sure everything
 	 * is written to the descriptor(s) before setting the OWN bit
@@ -964,7 +1110,6 @@
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
-	struct xgbe_ring_desc *rdesc;
 	unsigned int start_index = ring->cur;
 	unsigned int rx_coalesce, rx_frames;
 	unsigned int i;
@@ -977,34 +1122,16 @@
 	/* Initialize all descriptors */
 	for (i = 0; i < ring->rdesc_count; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, i);
-		rdesc = rdata->rdesc;
 
-		/* Initialize Rx descriptor
-		 *   Set buffer 1 (lo) address to dma address (lo)
-		 *   Set buffer 1 (hi) address to dma address (hi)
-		 *   Set buffer 2 (lo) address to zero
-		 *   Set buffer 2 (hi) address to zero and set control
-		 *     bits OWN and INTE appropriateley
-		 */
-		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
-		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
-		rdesc->desc2 = 0;
-		rdesc->desc3 = 0;
-		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
-		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
-		rdata->interrupt = 1;
-		if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
-			/* Clear interrupt on completion bit */
-			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
-					  0);
+		/* Set interrupt on completion bit as appropriate */
+		if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
 			rdata->interrupt = 0;
-		}
-	}
+		else
+			rdata->interrupt = 1;
 
-	/* Make sure everything is written to the descriptors before
-	 * telling the device about them
-	 */
-	wmb();
+		/* Initialize Rx descriptor */
+		xgbe_rx_desc_reset(rdata);
+	}
 
 	/* Update the total number of Rx descriptors */
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
@@ -1198,7 +1325,7 @@
 	xgbe_config_flow_control(pdata);
 }
 
-static void xgbe_pre_xmit(struct xgbe_channel *channel)
+static void xgbe_dev_xmit(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_ring *ring = channel->tx_ring;
@@ -1211,7 +1338,7 @@
 	int start_index = ring->cur;
 	int i;
 
-	DBGPR("-->xgbe_pre_xmit\n");
+	DBGPR("-->xgbe_dev_xmit\n");
 
 	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
 			      CSUM_ENABLE);
@@ -1410,7 +1537,7 @@
 	      channel->name, start_index & (ring->rdesc_count - 1),
 	      (ring->cur - 1) & (ring->rdesc_count - 1));
 
-	DBGPR("<--xgbe_pre_xmit\n");
+	DBGPR("<--xgbe_dev_xmit\n");
 }
 
 static int xgbe_dev_read(struct xgbe_channel *channel)
@@ -1420,7 +1547,7 @@
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_packet_data *packet = &ring->packet_data;
 	struct net_device *netdev = channel->pdata->netdev;
-	unsigned int err, etlt;
+	unsigned int err, etlt, l34t;
 
 	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
 
@@ -1454,6 +1581,32 @@
 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
 			       CONTEXT_NEXT, 1);
 
+	/* Get the header length */
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+		rdata->hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
+						   RX_NORMAL_DESC2, HL);
+
+	/* Get the RSS hash */
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       RSS_HASH, 1);
+
+		packet->rss_hash = le32_to_cpu(rdesc->desc1);
+
+		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+		switch (l34t) {
+		case RX_DESC3_L34T_IPV4_TCP:
+		case RX_DESC3_L34T_IPV4_UDP:
+		case RX_DESC3_L34T_IPV6_TCP:
+		case RX_DESC3_L34T_IPV6_UDP:
+			packet->rss_hash_type = PKT_HASH_TYPE_L4;
+			break;
+
+		default:
+			packet->rss_hash_type = PKT_HASH_TYPE_L3;
+		}
+	}
+
 	/* Get the packet length */
 	rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
 
@@ -2485,6 +2637,8 @@
 	xgbe_config_tx_coalesce(pdata);
 	xgbe_config_rx_buffer_size(pdata);
 	xgbe_config_tso_mode(pdata);
+	xgbe_config_sph_mode(pdata);
+	xgbe_config_rss(pdata);
 	desc_if->wrapper_tx_desc_init(pdata);
 	desc_if->wrapper_rx_desc_init(pdata);
 	xgbe_enable_dma_interrupts(pdata);
@@ -2561,7 +2715,7 @@
 	hw_if->powerup_rx = xgbe_powerup_rx;
 	hw_if->powerdown_rx = xgbe_powerdown_rx;
 
-	hw_if->pre_xmit = xgbe_pre_xmit;
+	hw_if->dev_xmit = xgbe_dev_xmit;
 	hw_if->dev_read = xgbe_dev_read;
 	hw_if->enable_int = xgbe_enable_int;
 	hw_if->disable_int = xgbe_disable_int;
@@ -2620,5 +2774,11 @@
 	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
 	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
 
+	/* For Receive Side Scaling */
+	hw_if->enable_rss = xgbe_enable_rss;
+	hw_if->disable_rss = xgbe_disable_rss;
+	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+
 	DBGPR("<--xgbe_init_function_ptrs\n");
 }
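
The RSS programming above goes through an indirect register handshake: the 32-bit
value is staged in MAC_RSSDR, the target index and type go into MAC_RSSAR, and the
OB ("operation busy") bit both starts the transfer and, once cleared by hardware,
signals completion. A minimal sketch of that poll-for-completion pattern, with
readl()/writel() standing in for the driver's XGMAC_IOREAD/XGMAC_IOWRITE macros
and the register offsets left as parameters:

	/* Sketch only (needs <linux/io.h>, <linux/bits.h>, <linux/delay.h>,
	 * <linux/errno.h>): data_off/addr_off and the OB bit position are
	 * placeholders for the real MAC_RSSDR/MAC_RSSAR layout.
	 */
	static int indirect_reg_write(void __iomem *base, unsigned int data_off,
				      unsigned int addr_off, u32 val, u32 cmd)
	{
		unsigned int wait = 1000;

		writel(val, base + data_off);		/* stage the data word */
		writel(cmd | BIT(0), base + addr_off);	/* index/type, then OB */

		while (wait--) {
			if (!(readl(base + addr_off) & BIT(0)))
				return 0;		/* hardware cleared OB */
			usleep_range(1000, 1500);
		}

		return -EBUSY;				/* transfer never completed */
	}
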
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 2349ea9..0544931 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -114,6 +114,7 @@
  *     THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/platform_device.h>
 #include <linux/spinlock.h>
 #include <linux/tcp.h>
 #include <linux/if_vlan.h>
@@ -126,9 +127,99 @@
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-static int xgbe_poll(struct napi_struct *, int);
+static int xgbe_one_poll(struct napi_struct *, int);
+static int xgbe_all_poll(struct napi_struct *, int);
 static void xgbe_set_rx_mode(struct net_device *);
 
+static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel_mem, *channel;
+	struct xgbe_ring *tx_ring, *rx_ring;
+	unsigned int count, i;
+	int ret = -ENOMEM;
+
+	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
+	if (!channel_mem)
+		goto err_channel;
+
+	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
+			  GFP_KERNEL);
+	if (!tx_ring)
+		goto err_tx_ring;
+
+	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
+			  GFP_KERNEL);
+	if (!rx_ring)
+		goto err_rx_ring;
+
+	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
+		channel->pdata = pdata;
+		channel->queue_index = i;
+		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+				    (DMA_CH_INC * i);
+
+		if (pdata->per_channel_irq) {
+			/* Get the DMA interrupt (offset 1) */
+			ret = platform_get_irq(pdata->pdev, i + 1);
+			if (ret < 0) {
+				netdev_err(pdata->netdev,
+					   "platform_get_irq %u failed\n",
+					   i + 1);
+				goto err_irq;
+			}
+
+			channel->dma_irq = ret;
+		}
+
+		if (i < pdata->tx_ring_count) {
+			spin_lock_init(&tx_ring->lock);
+			channel->tx_ring = tx_ring++;
+		}
+
+		if (i < pdata->rx_ring_count) {
+			spin_lock_init(&rx_ring->lock);
+			channel->rx_ring = rx_ring++;
+		}
+
+		DBGPR("  %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+		      channel->name, channel->queue_index, channel->dma_regs,
+		      channel->dma_irq, channel->tx_ring, channel->rx_ring);
+	}
+
+	pdata->channel = channel_mem;
+	pdata->channel_count = count;
+
+	return 0;
+
+err_irq:
+	kfree(rx_ring);
+
+err_rx_ring:
+	kfree(tx_ring);
+
+err_tx_ring:
+	kfree(channel_mem);
+
+err_channel:
+	return ret;
+}
+
+static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+{
+	if (!pdata->channel)
+		return;
+
+	kfree(pdata->channel->rx_ring);
+	kfree(pdata->channel->tx_ring);
+	kfree(pdata->channel);
+
+	pdata->channel = NULL;
+	pdata->channel_count = 0;
+}
+
 static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
 {
 	return (ring->rdesc_count - (ring->cur - ring->dirty));
@@ -144,8 +235,8 @@
 	}
 
 	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-	if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
-		rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
+	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
 	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
 		      ~(XGBE_RX_BUF_ALIGN - 1);
 
@@ -213,11 +304,7 @@
 	if (!dma_isr)
 		goto isr_done;
 
-	DBGPR("-->xgbe_isr\n");
-
 	DBGPR("  DMA_ISR = %08x\n", dma_isr);
-	DBGPR("  DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
-	DBGPR("  DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
 
 	for (i = 0; i < pdata->channel_count; i++) {
 		if (!(dma_isr & (1 << i)))
@@ -228,6 +315,10 @@
 		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
 		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
 
+		/* If we get a TI or RI interrupt that means per channel DMA
+		 * interrupts are not enabled, so we use the private data napi
+		 * structure, not the per channel napi structure
+		 */
 		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
 		    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
 			if (napi_schedule_prep(&pdata->napi)) {
@@ -270,12 +361,28 @@
 
 	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
 
-	DBGPR("<--xgbe_isr\n");
-
 isr_done:
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t xgbe_dma_isr(int irq, void *data)
+{
+	struct xgbe_channel *channel = data;
+
+	/* Per channel DMA interrupts are enabled, so we use the per
+	 * channel napi structure and not the private data napi structure
+	 */
+	if (napi_schedule_prep(&channel->napi)) {
+		/* Disable Tx and Rx interrupts */
+		disable_irq_nosync(channel->dma_irq);
+
+		/* Turn on polling */
+		__napi_schedule(&channel->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
 static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
 {
 	struct xgbe_channel *channel = container_of(timer,
@@ -283,18 +390,24 @@
 						    tx_timer);
 	struct xgbe_ring *ring = channel->tx_ring;
 	struct xgbe_prv_data *pdata = channel->pdata;
+	struct napi_struct *napi;
 	unsigned long flags;
 
 	DBGPR("-->xgbe_tx_timer\n");
 
+	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
 	spin_lock_irqsave(&ring->lock, flags);
 
-	if (napi_schedule_prep(&pdata->napi)) {
+	if (napi_schedule_prep(napi)) {
 		/* Disable Tx and Rx interrupts */
-		xgbe_disable_rx_tx_ints(pdata);
+		if (pdata->per_channel_irq)
+			disable_irq_nosync(channel->dma_irq);
+		else
+			xgbe_disable_rx_tx_ints(pdata);
 
 		/* Turn on polling */
-		__napi_schedule(&pdata->napi);
+		__napi_schedule(napi);
 	}
 
 	channel->tx_timer_active = 0;
@@ -430,18 +543,46 @@
 
 static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
 {
-	if (add)
-		netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
-			       NAPI_POLL_WEIGHT);
-	napi_enable(&pdata->napi);
+	struct xgbe_channel *channel;
+	unsigned int i;
+
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++) {
+			if (add)
+				netif_napi_add(pdata->netdev, &channel->napi,
+					       xgbe_one_poll, NAPI_POLL_WEIGHT);
+
+			napi_enable(&channel->napi);
+		}
+	} else {
+		if (add)
+			netif_napi_add(pdata->netdev, &pdata->napi,
+				       xgbe_all_poll, NAPI_POLL_WEIGHT);
+
+		napi_enable(&pdata->napi);
+	}
 }
 
 static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
 {
-	napi_disable(&pdata->napi);
+	struct xgbe_channel *channel;
+	unsigned int i;
 
-	if (del)
-		netif_napi_del(&pdata->napi);
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++) {
+			napi_disable(&channel->napi);
+
+			if (del)
+				netif_napi_del(&channel->napi);
+		}
+	} else {
+		napi_disable(&pdata->napi);
+
+		if (del)
+			netif_napi_del(&pdata->napi);
+	}
 }
 
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
@@ -472,7 +613,7 @@
 	DBGPR("<--xgbe_init_rx_coalesce\n");
 }
 
-static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	struct xgbe_channel *channel;
@@ -480,7 +621,7 @@
 	struct xgbe_ring_data *rdata;
 	unsigned int i, j;
 
-	DBGPR("-->xgbe_free_tx_skbuff\n");
+	DBGPR("-->xgbe_free_tx_data\n");
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -490,14 +631,14 @@
 
 		for (j = 0; j < ring->rdesc_count; j++) {
 			rdata = XGBE_GET_DESC_DATA(ring, j);
-			desc_if->unmap_skb(pdata, rdata);
+			desc_if->unmap_rdata(pdata, rdata);
 		}
 	}
 
-	DBGPR("<--xgbe_free_tx_skbuff\n");
+	DBGPR("<--xgbe_free_tx_data\n");
 }
 
-static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	struct xgbe_channel *channel;
@@ -505,7 +646,7 @@
 	struct xgbe_ring_data *rdata;
 	unsigned int i, j;
 
-	DBGPR("-->xgbe_free_rx_skbuff\n");
+	DBGPR("-->xgbe_free_rx_data\n");
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -515,11 +656,11 @@
 
 		for (j = 0; j < ring->rdesc_count; j++) {
 			rdata = XGBE_GET_DESC_DATA(ring, j);
-			desc_if->unmap_skb(pdata, rdata);
+			desc_if->unmap_rdata(pdata, rdata);
 		}
 	}
 
-	DBGPR("<--xgbe_free_rx_skbuff\n");
+	DBGPR("<--xgbe_free_rx_data\n");
 }
 
 static void xgbe_adjust_link(struct net_device *netdev)
@@ -754,7 +895,9 @@
 
 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
 {
+	struct xgbe_channel *channel;
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	unsigned int i;
 
 	DBGPR("-->xgbe_restart_dev\n");
 
@@ -763,10 +906,15 @@
 		return;
 
 	xgbe_stop(pdata);
-	synchronize_irq(pdata->irq_number);
+	synchronize_irq(pdata->dev_irq);
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++)
+			synchronize_irq(channel->dma_irq);
+	}
 
-	xgbe_free_tx_skbuff(pdata);
-	xgbe_free_rx_skbuff(pdata);
+	xgbe_free_tx_data(pdata);
+	xgbe_free_rx_data(pdata);
 
 	/* Issue software reset to device if requested */
 	if (reset)
@@ -1037,13 +1185,13 @@
 	packet->rdesc_count = 0;
 
 	if (xgbe_is_tso(skb)) {
-		/* TSO requires an extra desriptor if mss is different */
+		/* TSO requires an extra descriptor if mss is different */
 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
 			context_desc = 1;
 			packet->rdesc_count++;
 		}
 
-		/* TSO requires an extra desriptor for TSO header */
+		/* TSO requires an extra descriptor for TSO header */
 		packet->rdesc_count++;
 
 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
@@ -1091,6 +1239,9 @@
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
+	struct xgbe_channel *channel = NULL;
+	char dma_irq_name[IFNAMSIZ + 32];
+	unsigned int i = 0;
 	int ret;
 
 	DBGPR("-->xgbe_open\n");
@@ -1119,24 +1270,47 @@
 		goto err_ptpclk;
 	pdata->rx_buf_size = ret;
 
+	/* Allocate the channel and ring structures */
+	ret = xgbe_alloc_channels(pdata);
+	if (ret)
+		goto err_ptpclk;
+
 	/* Allocate the ring descriptors and buffers */
 	ret = desc_if->alloc_ring_resources(pdata);
 	if (ret)
-		goto err_ptpclk;
+		goto err_channels;
 
 	/* Initialize the device restart and Tx timestamp work struct */
 	INIT_WORK(&pdata->restart_work, xgbe_restart);
 	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 
 	/* Request interrupts */
-	ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
+	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
 			       netdev->name, pdata);
 	if (ret) {
 		netdev_alert(netdev, "error requesting irq %d\n",
-			     pdata->irq_number);
-		goto err_irq;
+			     pdata->dev_irq);
+		goto err_rings;
 	}
-	pdata->irq_number = netdev->irq;
+
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++) {
+			snprintf(dma_irq_name, sizeof(dma_irq_name) - 1,
+				 "%s-TxRx-%u", netdev_name(netdev),
+				 channel->queue_index);
+
+			ret = devm_request_irq(pdata->dev, channel->dma_irq,
+					       xgbe_dma_isr, 0, dma_irq_name,
+					       channel);
+			if (ret) {
+				netdev_alert(netdev,
+					     "error requesting irq %d\n",
+					     channel->dma_irq);
+				goto err_irq;
+			}
+		}
+	}
 
 	ret = xgbe_start(pdata);
 	if (ret)
@@ -1149,12 +1323,21 @@
 err_start:
 	hw_if->exit(pdata);
 
-	devm_free_irq(pdata->dev, pdata->irq_number, pdata);
-	pdata->irq_number = 0;
-
 err_irq:
+	if (pdata->per_channel_irq) {
+		/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+		for (i--, channel--; i < pdata->channel_count; i--, channel--)
+			devm_free_irq(pdata->dev, channel->dma_irq, channel);
+	}
+
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+err_rings:
 	desc_if->free_ring_resources(pdata);
 
+err_channels:
+	xgbe_free_channels(pdata);
+
 err_ptpclk:
 	clk_disable_unprepare(pdata->ptpclk);
 
@@ -1172,6 +1355,8 @@
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
+	struct xgbe_channel *channel;
+	unsigned int i;
 
 	DBGPR("-->xgbe_close\n");
 
@@ -1181,15 +1366,20 @@
 	/* Issue software reset to device */
 	hw_if->exit(pdata);
 
-	/* Free all the ring data */
+	/* Free the ring descriptors and buffers */
 	desc_if->free_ring_resources(pdata);
 
-	/* Release the interrupt */
-	if (pdata->irq_number != 0) {
-		devm_free_irq(pdata->dev, pdata->irq_number, pdata);
-		pdata->irq_number = 0;
+	/* Release the interrupts */
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++)
+			devm_free_irq(pdata->dev, channel->dma_irq, channel);
 	}
 
+	/* Free the channel and ring structures */
+	xgbe_free_channels(pdata);
+
 	/* Disable the clocks */
 	clk_disable_unprepare(pdata->ptpclk);
 	clk_disable_unprepare(pdata->sysclk);
@@ -1258,7 +1448,7 @@
 	xgbe_prep_tx_tstamp(pdata, skb, packet);
 
 	/* Configure required descriptor fields for transmission */
-	hw_if->pre_xmit(channel);
+	hw_if->dev_xmit(channel);
 
 #ifdef XGMAC_ENABLE_TX_PKT_DUMP
 	xgbe_print_pkt(netdev, skb, true);
@@ -1420,14 +1610,20 @@
 static void xgbe_poll_controller(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	struct xgbe_channel *channel;
+	unsigned int i;
 
 	DBGPR("-->xgbe_poll_controller\n");
 
-	disable_irq(pdata->irq_number);
-
-	xgbe_isr(pdata->irq_number, pdata);
-
-	enable_irq(pdata->irq_number);
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++)
+			xgbe_dma_isr(channel->dma_irq, channel);
+	} else {
+		disable_irq(pdata->dev_irq);
+		xgbe_isr(pdata->dev_irq, pdata);
+		enable_irq(pdata->dev_irq);
+	}
 
 	DBGPR("<--xgbe_poll_controller\n");
 }
@@ -1465,12 +1661,21 @@
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	netdev_features_t rxcsum, rxvlan, rxvlan_filter;
+	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+	int ret = 0;
 
+	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
 	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
 	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
 	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
 
+	if ((features & NETIF_F_RXHASH) && !rxhash)
+		ret = hw_if->enable_rss(pdata);
+	else if (!(features & NETIF_F_RXHASH) && rxhash)
+		ret = hw_if->disable_rss(pdata);
+	if (ret)
+		return ret;
+
 	if ((features & NETIF_F_RXCSUM) && !rxcsum)
 		hw_if->enable_rx_csum(pdata);
 	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
@@ -1524,7 +1729,7 @@
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
 
-	desc_if->realloc_skb(channel);
+	desc_if->realloc_rx_buffer(channel);
 
 	/* Update the Rx Tail Pointer Register with address of
 	 * the last cleaned entry */
@@ -1533,6 +1738,31 @@
 			  lower_32_bits(rdata->rdesc_dma));
 }
 
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+				       struct xgbe_ring_data *rdata,
+				       unsigned int *len)
+{
+	struct net_device *netdev = pdata->netdev;
+	struct sk_buff *skb;
+	u8 *packet;
+	unsigned int copy_len;
+
+	skb = netdev_alloc_skb_ip_align(netdev, rdata->rx_hdr.dma_len);
+	if (!skb)
+		return NULL;
+
+	packet = page_address(rdata->rx_hdr.pa.pages) +
+		 rdata->rx_hdr.pa.pages_offset;
+	copy_len = (rdata->hdr_len) ? rdata->hdr_len : *len;
+	copy_len = min(rdata->rx_hdr.dma_len, copy_len);
+	skb_copy_to_linear_data(skb, packet, copy_len);
+	skb_put(skb, copy_len);
+
+	*len -= copy_len;
+
+	return skb;
+}
+
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
@@ -1566,7 +1796,7 @@
 #endif
 
 		/* Free the SKB and reset the descriptor for re-use */
-		desc_if->unmap_skb(pdata, rdata);
+		desc_if->unmap_rdata(pdata, rdata);
 		hw_if->tx_desc_reset(rdata);
 
 		processed++;
@@ -1594,6 +1824,7 @@
 	struct xgbe_ring_data *rdata;
 	struct xgbe_packet_data *packet;
 	struct net_device *netdev = pdata->netdev;
+	struct napi_struct *napi;
 	struct sk_buff *skb;
 	struct skb_shared_hwtstamps *hwtstamps;
 	unsigned int incomplete, error, context_next, context;
@@ -1607,6 +1838,8 @@
 	if (!ring)
 		return 0;
 
+	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 	packet = &ring->packet_data;
 	while (packet_count < budget) {
@@ -1641,10 +1874,6 @@
 		ring->cur++;
 		ring->dirty++;
 
-		dma_unmap_single(pdata->dev, rdata->skb_dma,
-				 rdata->skb_dma_len, DMA_FROM_DEVICE);
-		rdata->skb_dma = 0;
-
 		incomplete = XGMAC_GET_BITS(packet->attributes,
 					    RX_PACKET_ATTRIBUTES,
 					    INCOMPLETE);
@@ -1668,32 +1897,39 @@
 
 		if (!context) {
 			put_len = rdata->len - len;
-			if (skb) {
-				if (pskb_expand_head(skb, 0, put_len,
-						     GFP_ATOMIC)) {
-					DBGPR("pskb_expand_head error\n");
-					if (incomplete) {
-						error = 1;
-						goto read_again;
-					}
-
-					dev_kfree_skb(skb);
-					goto next_packet;
-				}
-				memcpy(skb_tail_pointer(skb), rdata->skb->data,
-				       put_len);
-			} else {
-				skb = rdata->skb;
-				rdata->skb = NULL;
-			}
-			skb_put(skb, put_len);
 			len += put_len;
+
+			if (!skb) {
+				dma_sync_single_for_cpu(pdata->dev,
+							rdata->rx_hdr.dma,
+							rdata->rx_hdr.dma_len,
+							DMA_FROM_DEVICE);
+
+				skb = xgbe_create_skb(pdata, rdata, &put_len);
+				if (!skb) {
+					error = 1;
+					goto skip_data;
+				}
+			}
+
+			if (put_len) {
+				dma_sync_single_for_cpu(pdata->dev,
+							rdata->rx_buf.dma,
+							rdata->rx_buf.dma_len,
+							DMA_FROM_DEVICE);
+
+				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+						rdata->rx_buf.pa.pages,
+						rdata->rx_buf.pa.pages_offset,
+						put_len, rdata->rx_buf.dma_len);
+				rdata->rx_buf.pa.pages = NULL;
+			}
 		}
 
+skip_data:
 		if (incomplete || context_next)
 			goto read_again;
 
-		/* Stray Context Descriptor? */
 		if (!skb)
 			goto next_packet;
 
@@ -1733,13 +1969,18 @@
 			hwtstamps->hwtstamp = ns_to_ktime(nsec);
 		}
 
+		if (XGMAC_GET_BITS(packet->attributes,
+				   RX_PACKET_ATTRIBUTES, RSS_HASH))
+			skb_set_hash(skb, packet->rss_hash,
+				     packet->rss_hash_type);
+
 		skb->dev = netdev;
 		skb->protocol = eth_type_trans(skb, netdev);
 		skb_record_rx_queue(skb, channel->queue_index);
-		skb_mark_napi_id(skb, &pdata->napi);
+		skb_mark_napi_id(skb, napi);
 
 		netdev->last_rx = jiffies;
-		napi_gro_receive(&pdata->napi, skb);
+		napi_gro_receive(napi, skb);
 
 next_packet:
 		packet_count++;
@@ -1761,7 +2002,35 @@
 	return packet_count;
 }
 
-static int xgbe_poll(struct napi_struct *napi, int budget)
+static int xgbe_one_poll(struct napi_struct *napi, int budget)
+{
+	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
+						    napi);
+	int processed = 0;
+
+	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
+
+	/* Cleanup Tx ring first */
+	xgbe_tx_poll(channel);
+
+	/* Process Rx ring next */
+	processed = xgbe_rx_poll(channel, budget);
+
+	/* If we processed everything, we are done */
+	if (processed < budget) {
+		/* Turn off polling */
+		napi_complete(napi);
+
+		/* Enable Tx and Rx interrupts */
+		enable_irq(channel->dma_irq);
+	}
+
+	DBGPR("<--xgbe_one_poll: received = %d\n", processed);
+
+	return processed;
+}
+
+static int xgbe_all_poll(struct napi_struct *napi, int budget)
 {
 	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
 						   napi);
@@ -1770,7 +2039,7 @@
 	int processed, last_processed;
 	unsigned int i;
 
-	DBGPR("-->xgbe_poll: budget=%d\n", budget);
+	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
 
 	processed = 0;
 	ring_budget = budget / pdata->rx_ring_count;
@@ -1798,7 +2067,7 @@
 		xgbe_enable_rx_tx_ints(pdata);
 	}
 
-	DBGPR("<--xgbe_poll: received = %d\n", processed);
+	DBGPR("<--xgbe_all_poll: received = %d\n", processed);
 
 	return processed;
 }
@@ -1812,10 +2081,10 @@
 	while (count--) {
 		rdata = XGBE_GET_DESC_DATA(ring, idx);
 		rdesc = rdata->rdesc;
-		DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
-		      (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
-		      le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
-		      le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+		pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+			 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+			 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+			 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
 		idx++;
 	}
 }
@@ -1823,9 +2092,9 @@
 void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
 		       unsigned int idx)
 {
-	DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
-	      le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
-	      le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+	pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
+		 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
+		 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
 }
 
 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
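
With amd,per-channel-interrupt set, each DMA channel now owns an IRQ line and a
napi_struct: xgbe_dma_isr() masks its own line and schedules the channel's NAPI,
and xgbe_one_poll() re-enables the line once it completes under budget. A
stripped-down sketch of that pairing (struct chan, chan_clean_rx() and the field
names are hypothetical; needs <linux/interrupt.h> and <linux/netdevice.h>):

	/* Hypothetical per-channel context; only irq and napi matter here. */
	struct chan {
		int irq;
		struct napi_struct napi;
	};

	static int chan_clean_rx(struct chan *ch, int budget);	/* hypothetical */

	static irqreturn_t chan_isr(int irq, void *data)
	{
		struct chan *ch = data;

		if (napi_schedule_prep(&ch->napi)) {
			disable_irq_nosync(irq);	/* mask until poll completes */
			__napi_schedule(&ch->napi);
		}

		return IRQ_HANDLED;
	}

	static int chan_poll(struct napi_struct *napi, int budget)
	{
		struct chan *ch = container_of(napi, struct chan, napi);
		int done = chan_clean_rx(ch, budget);

		if (done < budget) {
			napi_complete(napi);	/* turn off polling */
			enable_irq(ch->irq);	/* re-arm the channel's line */
		}

		return done;
	}
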
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 49508ec..95d4453 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -452,9 +452,9 @@
 			     rx_usecs);
 		return -EINVAL;
 	}
-	if (rx_frames > pdata->channel->rx_ring->rdesc_count) {
+	if (rx_frames > pdata->rx_desc_count) {
 		netdev_alert(netdev, "rx-frames is limited to %d frames\n",
-			     pdata->channel->rx_ring->rdesc_count);
+			     pdata->rx_desc_count);
 		return -EINVAL;
 	}
 
@@ -462,9 +462,9 @@
 	tx_frames = ec->tx_max_coalesced_frames;
 
 	/* Check the bounds of values for Tx */
-	if (tx_frames > pdata->channel->tx_ring->rdesc_count) {
+	if (tx_frames > pdata->tx_desc_count) {
 		netdev_alert(netdev, "tx-frames is limited to %d frames\n",
-			     pdata->channel->tx_ring->rdesc_count);
+			     pdata->tx_desc_count);
 		return -EINVAL;
 	}
 
@@ -481,6 +481,75 @@
 	return 0;
 }
 
+static int xgbe_get_rxnfc(struct net_device *netdev,
+			  struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	switch (rxnfc->cmd) {
+	case ETHTOOL_GRXRINGS:
+		rxnfc->data = pdata->rx_ring_count;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	return sizeof(pdata->rss_key);
+}
+
+static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	return ARRAY_SIZE(pdata->rss_table);
+}
+
+static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	unsigned int i;
+
+	if (indir) {
+		for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+			indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
+						  MAC_RSSDR, DMCH);
+	}
+
+	if (key)
+		memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
+
+	return 0;
+}
+
+static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+			 const u8 *key)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	int ret;
+
+	if (indir) {
+		ret = hw_if->set_rss_lookup_table(pdata, indir);
+		if (ret)
+			return ret;
+	}
+
+	if (key) {
+		ret = hw_if->set_rss_hash_key(pdata, key);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int xgbe_get_ts_info(struct net_device *netdev,
 			    struct ethtool_ts_info *ts_info)
 {
@@ -526,6 +595,11 @@
 	.get_strings = xgbe_get_strings,
 	.get_ethtool_stats = xgbe_get_ethtool_stats,
 	.get_sset_count = xgbe_get_sset_count,
+	.get_rxnfc = xgbe_get_rxnfc,
+	.get_rxfh_key_size = xgbe_get_rxfh_key_size,
+	.get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
+	.get_rxfh = xgbe_get_rxfh,
+	.set_rxfh = xgbe_set_rxfh,
 	.get_ts_info = xgbe_get_ts_info,
 };
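
The new get/set_rxfh hooks expose the 40-byte hash key and the 256-entry
indirection table, after which steering reduces to a table lookup: the hardware
hashes each flow, indexes the table with the hash, and the entry's DMCH field
names the destination DMA channel. A sketch of that selection as programmed
through xgbe_set_rxfh() above (rss_pick_ring() is illustrative, not a driver
function):

	/* Sketch: map an RSS hash to an Rx ring via the indirection table;
	 * table[] holds per-entry DMA channel numbers and table_size is
	 * XGBE_RSS_MAX_TABLE_SIZE (256) here.
	 */
	static unsigned int rss_pick_ring(const u32 *table,
					  unsigned int table_size, u32 hash)
	{
		return table[hash % table_size];
	}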
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index f5a8fa0..05fbdf9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -133,60 +133,6 @@
 MODULE_VERSION(XGBE_DRV_VERSION);
 MODULE_DESCRIPTION(XGBE_DRV_DESC);
 
-static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
-{
-	struct xgbe_channel *channel_mem, *channel;
-	struct xgbe_ring *tx_ring, *rx_ring;
-	unsigned int count, i;
-
-	DBGPR("-->xgbe_alloc_rings\n");
-
-	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
-
-	channel_mem = devm_kcalloc(pdata->dev, count,
-				   sizeof(struct xgbe_channel), GFP_KERNEL);
-	if (!channel_mem)
-		return NULL;
-
-	tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
-			       sizeof(struct xgbe_ring), GFP_KERNEL);
-	if (!tx_ring)
-		return NULL;
-
-	rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
-			       sizeof(struct xgbe_ring), GFP_KERNEL);
-	if (!rx_ring)
-		return NULL;
-
-	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
-		snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
-		channel->pdata = pdata;
-		channel->queue_index = i;
-		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
-				    (DMA_CH_INC * i);
-
-		if (i < pdata->tx_ring_count) {
-			spin_lock_init(&tx_ring->lock);
-			channel->tx_ring = tx_ring++;
-		}
-
-		if (i < pdata->rx_ring_count) {
-			spin_lock_init(&rx_ring->lock);
-			channel->rx_ring = rx_ring++;
-		}
-
-		DBGPR("  %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
-		      channel->name, channel->queue_index, channel->dma_regs,
-		      channel->tx_ring, channel->rx_ring);
-	}
-
-	pdata->channel_count = count;
-
-	DBGPR("<--xgbe_alloc_rings\n");
-
-	return channel_mem;
-}
-
 static void xgbe_default_config(struct xgbe_prv_data *pdata)
 {
 	DBGPR("-->xgbe_default_config\n");
@@ -224,6 +170,7 @@
 	struct device *dev = &pdev->dev;
 	struct resource *res;
 	const u8 *mac_addr;
+	unsigned int i;
 	int ret;
 
 	DBGPR("--> xgbe_probe\n");
@@ -244,6 +191,7 @@
 
 	spin_lock_init(&pdata->lock);
 	mutex_init(&pdata->xpcs_mutex);
+	mutex_init(&pdata->rss_mutex);
 	spin_lock_init(&pdata->tstamp_lock);
 
 	/* Set and validate the number of descriptors for a ring */
@@ -318,12 +266,18 @@
 		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
 	}
 
+	/* Check for per channel interrupt support */
+	if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS))
+		pdata->per_channel_irq = 1;
+
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0) {
-		dev_err(dev, "platform_get_irq failed\n");
+		dev_err(dev, "platform_get_irq 0 failed\n");
 		goto err_io;
 	}
-	netdev->irq = ret;
+	pdata->dev_irq = ret;
+
+	netdev->irq = pdata->dev_irq;
 	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
 
 	/* Set all the function pointers */
@@ -383,13 +337,16 @@
 		goto err_io;
 	}
 
-	/* Allocate the rings for the DMA channels */
-	pdata->channel = xgbe_alloc_rings(pdata);
-	if (!pdata->channel) {
-		dev_err(dev, "ring allocation failed\n");
-		ret = -ENOMEM;
-		goto err_io;
-	}
+	/* Initialize RSS hash key and lookup table */
+	get_random_bytes(pdata->rss_key, sizeof(pdata->rss_key));
+
+	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
+		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+			       i % pdata->rx_ring_count);
+
+	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
 	/* Prepare to register with MDIO */
 	pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
@@ -421,6 +378,9 @@
 			      NETIF_F_HW_VLAN_CTAG_TX |
 			      NETIF_F_HW_VLAN_CTAG_FILTER;
 
+	if (pdata->hw_feat.rss)
+		netdev->hw_features |= NETIF_F_RXHASH;
+
 	netdev->vlan_features |= NETIF_F_SG |
 				 NETIF_F_IP_CSUM |
 				 NETIF_F_IPV6_CSUM |
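
The probe-time defaults above give an even spread: a random hash key plus a
lookup table whose entry i points at ring i % rx_ring_count, so with four Rx
rings the 256 entries cycle 0, 1, 2, 3, 0, 1, ... A sketch of that round-robin
fill in isolation:

	/* Sketch: default round-robin RSS indirection table. */
	static void rss_default_table(u32 *table, unsigned int entries,
				      unsigned int rings)
	{
		unsigned int i;

		for (i = 0; i < entries; i++)
			table[i] = i % rings;	/* 0, 1, ..., rings - 1, 0, ... */
	}
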
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 789957d..aa8da9f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -142,6 +142,8 @@
 
 #define XGBE_RX_MIN_BUF_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
 #define XGBE_RX_BUF_ALIGN	64
+#define XGBE_SKB_ALLOC_SIZE	256
+#define XGBE_SPH_HDSMS_SIZE	2	/* Keep in sync with SKB_ALLOC_SIZE */
 
 #define XGBE_MAX_DMA_CHANNELS	16
 #define XGBE_MAX_QUEUES		16
@@ -171,6 +173,7 @@
 /* Device-tree clock names */
 #define XGBE_DMA_CLOCK		"dma_clk"
 #define XGBE_PTP_CLOCK		"ptp_clk"
+#define XGBE_DMA_IRQS		"amd,per-channel-interrupt"
 
 /* Timestamp support - values based on 50MHz PTP clock
  *   50MHz => 20 nsec
@@ -212,6 +215,12 @@
 /* Maximum MAC address hash table size (256 bits = 8 bytes) */
 #define XGBE_MAC_HASH_TABLE_SIZE	8
 
+/* Receive Side Scaling */
+#define XGBE_RSS_HASH_KEY_SIZE		40
+#define XGBE_RSS_MAX_TABLE_SIZE		256
+#define XGBE_RSS_LOOKUP_TABLE_TYPE	0
+#define XGBE_RSS_HASH_KEY_TYPE		1
+
 struct xgbe_prv_data;
 
 struct xgbe_packet_data {
@@ -230,14 +239,35 @@
 	unsigned short vlan_ctag;
 
 	u64 rx_tstamp;
+
+	u32 rss_hash;
+	enum pkt_hash_types rss_hash_type;
 };
 
 /* Common Rx and Tx descriptor mapping */
 struct xgbe_ring_desc {
-	unsigned int desc0;
-	unsigned int desc1;
-	unsigned int desc2;
-	unsigned int desc3;
+	u32 desc0;
+	u32 desc1;
+	u32 desc2;
+	u32 desc3;
+};
+
+/* Page allocation related values */
+struct xgbe_page_alloc {
+	struct page *pages;
+	unsigned int pages_len;
+	unsigned int pages_offset;
+
+	dma_addr_t pages_dma;
+};
+
+/* Ring entry buffer data */
+struct xgbe_buffer_data {
+	struct xgbe_page_alloc pa;
+	struct xgbe_page_alloc pa_unmap;
+
+	dma_addr_t dma;
+	unsigned int dma_len;
 };
 
 /* Structure used to hold information related to the descriptor
@@ -253,6 +283,10 @@
 	unsigned int skb_dma_len;	/* Length of SKB DMA area */
 	unsigned int tso_header;        /* TSO header indicator */
 
+	struct xgbe_buffer_data rx_hdr;	/* Header locations */
+	struct xgbe_buffer_data rx_buf; /* Payload locations */
+
+	unsigned short hdr_len;		/* Length of received header */
 	unsigned short len;		/* Length of received Rx packet */
 
 	unsigned int interrupt;		/* Interrupt indicator */
@@ -291,6 +325,10 @@
 	 */
 	struct xgbe_ring_data *rdata;
 
+	/* Page allocation for RX buffers */
+	struct xgbe_page_alloc rx_hdr_pa;
+	struct xgbe_page_alloc rx_buf_pa;
+
 	/* Ring index values
 	 *  cur   - Tx: index of descriptor to be used for current transfer
 	 *          Rx: index of descriptor to check for packet availability
@@ -331,6 +369,12 @@
 	unsigned int queue_index;
 	void __iomem *dma_regs;
 
+	/* Per channel interrupt irq number */
+	int dma_irq;
+
+	/* Netdev related settings */
+	struct napi_struct napi;
+
 	unsigned int saved_ier;
 
 	unsigned int tx_timer_active;
@@ -456,7 +500,7 @@
 
 	int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
 	int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
-	void (*pre_xmit)(struct xgbe_channel *);
+	void (*dev_xmit)(struct xgbe_channel *);
 	int (*dev_read)(struct xgbe_channel *);
 	void (*tx_desc_init)(struct xgbe_channel *);
 	void (*rx_desc_init)(struct xgbe_channel *);
@@ -509,14 +553,20 @@
 	/* For Data Center Bridging config */
 	void (*config_dcb_tc)(struct xgbe_prv_data *);
 	void (*config_dcb_pfc)(struct xgbe_prv_data *);
+
+	/* For Receive Side Scaling */
+	int (*enable_rss)(struct xgbe_prv_data *);
+	int (*disable_rss)(struct xgbe_prv_data *);
+	int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
+	int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
 };
 
 struct xgbe_desc_if {
 	int (*alloc_ring_resources)(struct xgbe_prv_data *);
 	void (*free_ring_resources)(struct xgbe_prv_data *);
 	int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
-	void (*realloc_skb)(struct xgbe_channel *);
-	void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *);
+	void (*realloc_rx_buffer)(struct xgbe_channel *);
+	void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
 	void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
 	void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
 };
@@ -581,7 +631,11 @@
 	/* XPCS indirect addressing mutex */
 	struct mutex xpcs_mutex;
 
-	int irq_number;
+	/* RSS addressing mutex */
+	struct mutex rss_mutex;
+
+	int dev_irq;
+	unsigned int per_channel_irq;
 
 	struct xgbe_hw_if hw_if;
 	struct xgbe_desc_if desc_if;
@@ -624,7 +678,7 @@
 	unsigned int rx_riwt;
 	unsigned int rx_frames;
 
-	/* Current MTU */
+	/* Current Rx buffer size */
 	unsigned int rx_buf_size;
 
 	/* Flow control settings */
@@ -632,6 +686,11 @@
 	unsigned int tx_pause;
 	unsigned int rx_pause;
 
+	/* Receive Side Scaling settings */
+	u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
+	u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
+	u32 rss_options;
+
 	/* MDIO settings */
 	struct module *phy_module;
 	char *mii_bus_id;
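
XGBE_SPH_HDSMS_SIZE above feeds the MAC_RCR HDSMS field written by
xgbe_config_sph_mode(); on this XGMAC family the field appears to encode the
maximum split-header size as 64 << HDSMS, which would be why the value 2 has to
track the 256-byte XGBE_SKB_ALLOC_SIZE. A sketch of that assumed encoding:

	/* Assumed encoding: HDSMS 0 -> 64B, 1 -> 128B, 2 -> 256B, ... */
	static inline unsigned int sph_hdsms_to_bytes(unsigned int hdsms)
	{
		return 64U << hdsms;
	}
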
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 1236696..83a5028 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -761,10 +761,6 @@
 	ndev = pdata->ndev;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
-	if (!res) {
-		dev_err(dev, "Resource enet_csr not defined\n");
-		return -ENODEV;
-	}
 	pdata->base_addr = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pdata->base_addr)) {
 		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
@@ -772,10 +768,6 @@
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
-	if (!res) {
-		dev_err(dev, "Resource ring_csr not defined\n");
-		return -ENODEV;
-	}
 	pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pdata->ring_csr_addr)) {
 		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
@@ -783,10 +775,6 @@
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
-	if (!res) {
-		dev_err(dev, "Resource ring_cmd not defined\n");
-		return -ENODEV;
-	}
 	pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pdata->ring_cmd_addr)) {
 		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
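
The xgene cleanup works because devm_ioremap_resource() already validates its
resource argument: a NULL (or non-memory) res yields an -EINVAL error pointer
along with its own dev_err() message, so the hand-rolled NULL checks were
redundant. The canonical pattern is simply:

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
	base = devm_ioremap_resource(dev, res);	/* copes with res == NULL */
	if (IS_ERR(base))
		return PTR_ERR(base);
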
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 40beef5..e9af4af 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1139,7 +1139,7 @@
 		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
 
 	prefetch(&fp->sb_running_index[SM_RX_ID]);
-	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
 
 	return IRQ_HANDLED;
 }
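
napi_schedule_irqoff(), new this cycle, is meant for callers that already run
with local interrupts disabled, as a non-threaded hardirq handler like the one
above does; it skips the local_irq_save()/restore pair that plain
napi_schedule() pays on every interrupt. It expands to roughly:

	/* Roughly what napi_schedule_irqoff() does; safe only when local
	 * interrupts are already disabled (e.g. in a hardirq handler).
	 */
	static inline void napi_kick_irqoff(struct napi_struct *napi)
	{
		if (napi_schedule_prep(napi))
			__napi_schedule_irqoff(napi);
	}
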
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 74fbf9e..c4bd025 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1931,7 +1931,7 @@
 			for_each_cos_in_tx_queue(fp, cos)
 				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
 			prefetch(&fp->sb_running_index[SM_RX_ID]);
-			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+			napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
 			status &= ~mask;
 		}
 	}
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index e285f38..0771967 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -216,14 +216,10 @@
 	ioaddr = (unsigned long)
 		nubus_slot_addr(slot) | (((slot&0xf) << 20) + DEFAULTIOBASE);
 	{
-		unsigned long flags;
 		int card_present;
 
-		local_irq_save(flags);
-		card_present = (hwreg_present((void*) ioaddr+4) &&
-				hwreg_present((void*) ioaddr + DATA_PORT));
-		local_irq_restore(flags);
-
+		card_present = (hwreg_present((void *)ioaddr + 4) &&
+				hwreg_present((void *)ioaddr + DATA_PORT));
 		if (!card_present)
 			goto out;
 	}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4fdf0aa..86dccb26 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -173,10 +173,12 @@
 static int gfar_init_bds(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *txbdp;
 	struct rxbd8 *rxbdp;
+	u32 __iomem *rfbptr;
 	int i, j;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
@@ -201,6 +203,7 @@
 		txbdp->status |= TXBD_WRAP;
 	}
 
+	rfbptr = &regs->rfbptr0;
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
 		rx_queue->cur_rx = rx_queue->rx_bd_base;
@@ -227,6 +230,8 @@
 			rxbdp++;
 		}
 
+		rx_queue->rfbptr = rfbptr;
+		rfbptr += 2;
 	}
 
 	return 0;
@@ -336,6 +341,20 @@
 	}
 }
 
+static void gfar_init_rqprm(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 __iomem *baddr;
+	int i;
+
+	baddr = &regs->rqprm0;
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
+			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
+		baddr++;
+	}
+}
+
 static void gfar_rx_buff_size_config(struct gfar_private *priv)
 {
 	int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
@@ -396,6 +415,13 @@
 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
 		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
 
+	/* Clear the LFC bit */
+	gfar_write(&regs->rctrl, rctrl);
+	/* Init flow control threshold values */
+	gfar_init_rqprm(priv);
+	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
+	rctrl |= RCTRL_LFC;
+
 	/* Init rctrl based on our settings */
 	gfar_write(&regs->rctrl, rctrl);
 }
@@ -1687,6 +1713,9 @@
 	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
 	priv->phydev->advertising = priv->phydev->supported;
 
+	/* Add support for flow control, but don't advertise it by default */
+	priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+
 	return 0;
 }
 
@@ -2856,6 +2885,10 @@
 		/* Setup the new bdp */
 		gfar_new_rxbdp(rx_queue, bdp, newskb);
 
+		/* Update Last Free RxBD pointer for LFC */
+		if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
+			gfar_write(rx_queue->rfbptr, (u32)bdp);
+
 		/* Update to the next pointer */
 		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 
@@ -3370,7 +3403,11 @@
 		if (phydev->asym_pause)
 			rmt_adv |= LPA_PAUSE_ASYM;
 
-		lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+		lcl_adv = 0;
+		if (phydev->advertising & ADVERTISED_Pause)
+			lcl_adv |= ADVERTISE_PAUSE_CAP;
+		if (phydev->advertising & ADVERTISED_Asym_Pause)
+			lcl_adv |= ADVERTISE_PAUSE_ASYM;
 
 		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
 		if (flowctrl & FLOW_CTRL_TX)
@@ -3386,6 +3423,9 @@
 {
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	struct phy_device *phydev = priv->phydev;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	int i;
+	struct rxbd8 *bdp;
 
 	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
 		return;
@@ -3394,6 +3434,7 @@
 		u32 tempval1 = gfar_read(&regs->maccfg1);
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
+		u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
 
 		if (phydev->duplex != priv->oldduplex) {
 			if (!(phydev->duplex))
@@ -3438,6 +3479,26 @@
 		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
 		tempval1 |= gfar_get_flowctrl_cfg(priv);
 
+		/* Turn last free buffer recording on */
+		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
+			for (i = 0; i < priv->num_rx_queues; i++) {
+				rx_queue = priv->rx_queue[i];
+				bdp = rx_queue->cur_rx;
+				/* skip to previous bd */
+				bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
+					      rx_queue->rx_bd_base,
+					      rx_queue->rx_ring_size);
+
+				if (rx_queue->rfbptr)
+					gfar_write(rx_queue->rfbptr, (u32)bdp);
+			}
+
+			priv->tx_actual_en = 1;
+		}
+
+		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
+			priv->tx_actual_en = 0;
+
 		gfar_write(&regs->maccfg1, tempval1);
 		gfar_write(&regs->maccfg2, tempval);
 		gfar_write(&regs->ecntrl, ecntrl);
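
The gianfar changes wire up hardware pause-frame generation (LFC): each Rx
queue's rqprm register holds the ring size with a free-buffer threshold in the
top byte, and rfbptr tracks the last free RxBD, so the MAC can count remaining
buffers and emit pause frames when that count drops below the threshold. A
sketch of the rqprm composition used by gfar_init_rqprm() above (constants per
gianfar.h below):

	/* Sketch: rqprm = ring_size | (threshold << FBTHR_SHIFT). */
	static u32 gfar_rqprm_val(u32 ring_size, u32 free_buf_thr)
	{
		return ring_size | (free_buf_thr << 24);	/* FBTHR_SHIFT == 24 */
	}
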
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 2805cfb..b581b88 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -99,6 +99,10 @@
 #define GFAR_MAX_FIFO_STARVE	511
 #define GFAR_MAX_FIFO_STARVE_OFF 511
 
+#define FBTHR_SHIFT        24
+#define DEFAULT_RX_LFC_THR  16
+#define DEFAULT_LFC_PTVVAL  4
+
 #define DEFAULT_RX_BUFFER_SIZE  1536
 #define TX_RING_MOD_MASK(size) (size-1)
 #define RX_RING_MOD_MASK(size) (size-1)
@@ -145,9 +149,7 @@
 		| SUPPORTED_Autoneg \
 		| SUPPORTED_MII)
 
-#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \
-		| SUPPORTED_Pause \
-		| SUPPORTED_Asym_Pause)
+#define GFAR_SUPPORTED_GBIT SUPPORTED_1000baseT_Full
 
 /* TBI register addresses */
 #define MII_TBICON		0x11
@@ -275,6 +277,7 @@
 
 #define RCTRL_TS_ENABLE 	0x01000000
 #define RCTRL_PAL_MASK		0x001f0000
+#define RCTRL_LFC		0x00004000
 #define RCTRL_VLEX		0x00002000
 #define RCTRL_FILREN		0x00001000
 #define RCTRL_GHTX		0x00000400
@@ -851,7 +854,32 @@
 	u8	res23c[248];
 	u32	attr;		/* 0x.bf8 - Attributes Register */
 	u32	attreli;	/* 0x.bfc - Attributes Extract Length and Extract Index Register */
-	u8	res24[688];
+	u32	rqprm0;	/* 0x.c00 - Receive queue parameters register 0 */
+	u32	rqprm1;	/* 0x.c04 - Receive queue parameters register 1 */
+	u32	rqprm2;	/* 0x.c08 - Receive queue parameters register 2 */
+	u32	rqprm3;	/* 0x.c0c - Receive queue parameters register 3 */
+	u32	rqprm4;	/* 0x.c10 - Receive queue parameters register 4 */
+	u32	rqprm5;	/* 0x.c14 - Receive queue parameters register 5 */
+	u32	rqprm6;	/* 0x.c18 - Receive queue parameters register 6 */
+	u32	rqprm7;	/* 0x.c1c - Receive queue parameters register 7 */
+	u8	res24[36];
+	u32	rfbptr0; /* 0x.c44 - Last free RxBD pointer for ring 0 */
+	u8	res24a[4];
+	u32	rfbptr1; /* 0x.c4c - Last free RxBD pointer for ring 1 */
+	u8	res24b[4];
+	u32	rfbptr2; /* 0x.c54 - Last free RxBD pointer for ring 2 */
+	u8	res24c[4];
+	u32	rfbptr3; /* 0x.c5c - Last free RxBD pointer for ring 3 */
+	u8	res24d[4];
+	u32	rfbptr4; /* 0x.c64 - Last free RxBD pointer for ring 4 */
+	u8	res24e[4];
+	u32	rfbptr5; /* 0x.c6c - Last free RxBD pointer for ring 5 */
+	u8	res24f[4];
+	u32	rfbptr6; /* 0x.c74 - Last free RxBD pointer for ring 6 */
+	u8	res24g[4];
+	u32	rfbptr7; /* 0x.c7c - Last free RxBD pointer for ring 7 */
+	u8	res24h[4];
+	u8	res24x[556];
 	u32	isrg0;		/* 0x.eb0 - Interrupt steering group 0 register */
 	u32	isrg1;		/* 0x.eb4 - Interrupt steering group 1 register */
 	u32	isrg2;		/* 0x.eb8 - Interrupt steering group 2 register */
@@ -1011,6 +1039,7 @@
 	/* RX Coalescing values */
 	unsigned char rxcoalescing;
 	unsigned long rxic;
+	u32 __iomem *rfbptr;
 };
 
 enum gfar_irqinfo_id {
@@ -1101,6 +1130,7 @@
 	unsigned int num_tx_queues;
 	unsigned int num_rx_queues;
 	unsigned int num_grps;
+	int tx_actual_en;
 
 	/* Network Statistics */
 	struct gfar_extra_stats extra_stats;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 76d7070..3e1a9c1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -579,8 +579,13 @@
 			u32 tempval;
 			tempval = gfar_read(&regs->maccfg1);
 			tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
-			if (priv->tx_pause_en)
+
+			priv->tx_actual_en = 0;
+			if (priv->tx_pause_en) {
+				priv->tx_actual_en = 1;
 				tempval |= MACCFG1_TX_FLOW;
+			}
+
 			if (priv->rx_pause_en)
 				tempval |= MACCFG1_RX_FLOW;
 			gfar_write(&regs->maccfg1, tempval);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 15f289f..a65bc43 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -33,8 +33,8 @@
  * This file needs to comply with the Linux Kernel coding style.
  */
 
-#define I40E_FW_API_VERSION_MAJOR  0x0001
-#define I40E_FW_API_VERSION_MINOR  0x0002
+#define I40E_FW_API_VERSION_MAJOR	0x0001
+#define I40E_FW_API_VERSION_MINOR	0x0002
 
 struct i40e_aq_desc {
 	__le16 flags;
@@ -66,216 +66,216 @@
  */
 
 /* command flags and offsets*/
-#define I40E_AQ_FLAG_DD_SHIFT  0
-#define I40E_AQ_FLAG_CMP_SHIFT 1
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_VFE_SHIFT 3
-#define I40E_AQ_FLAG_LB_SHIFT  9
-#define I40E_AQ_FLAG_RD_SHIFT  10
-#define I40E_AQ_FLAG_VFC_SHIFT 11
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT  13
-#define I40E_AQ_FLAG_EI_SHIFT  14
-#define I40E_AQ_FLAG_FE_SHIFT  15
+#define I40E_AQ_FLAG_DD_SHIFT	0
+#define I40E_AQ_FLAG_CMP_SHIFT	1
+#define I40E_AQ_FLAG_ERR_SHIFT	2
+#define I40E_AQ_FLAG_VFE_SHIFT	3
+#define I40E_AQ_FLAG_LB_SHIFT	9
+#define I40E_AQ_FLAG_RD_SHIFT	10
+#define I40E_AQ_FLAG_VFC_SHIFT	11
+#define I40E_AQ_FLAG_BUF_SHIFT	12
+#define I40E_AQ_FLAG_SI_SHIFT	13
+#define I40E_AQ_FLAG_EI_SHIFT	14
+#define I40E_AQ_FLAG_FE_SHIFT	15
 
-#define I40E_AQ_FLAG_DD  (1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
-#define I40E_AQ_FLAG_LB  (1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
-#define I40E_AQ_FLAG_RD  (1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI  (1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
-#define I40E_AQ_FLAG_EI  (1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
-#define I40E_AQ_FLAG_FE  (1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
+#define I40E_AQ_FLAG_DD		(1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
+#define I40E_AQ_FLAG_CMP	(1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
+#define I40E_AQ_FLAG_ERR	(1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
+#define I40E_AQ_FLAG_VFE	(1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
+#define I40E_AQ_FLAG_LB		(1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
+#define I40E_AQ_FLAG_RD		(1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
+#define I40E_AQ_FLAG_VFC	(1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
+#define I40E_AQ_FLAG_BUF	(1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI		(1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
+#define I40E_AQ_FLAG_EI		(1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
+#define I40E_AQ_FLAG_FE		(1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
 
 /* error codes */
 enum i40e_admin_queue_err {
-	I40E_AQ_RC_OK       = 0,    /* success */
-	I40E_AQ_RC_EPERM    = 1,    /* Operation not permitted */
-	I40E_AQ_RC_ENOENT   = 2,    /* No such element */
-	I40E_AQ_RC_ESRCH    = 3,    /* Bad opcode */
-	I40E_AQ_RC_EINTR    = 4,    /* operation interrupted */
-	I40E_AQ_RC_EIO      = 5,    /* I/O error */
-	I40E_AQ_RC_ENXIO    = 6,    /* No such resource */
-	I40E_AQ_RC_E2BIG    = 7,    /* Arg too long */
-	I40E_AQ_RC_EAGAIN   = 8,    /* Try again */
-	I40E_AQ_RC_ENOMEM   = 9,    /* Out of memory */
-	I40E_AQ_RC_EACCES   = 10,   /* Permission denied */
-	I40E_AQ_RC_EFAULT   = 11,   /* Bad address */
-	I40E_AQ_RC_EBUSY    = 12,   /* Device or resource busy */
-	I40E_AQ_RC_EEXIST   = 13,   /* object already exists */
-	I40E_AQ_RC_EINVAL   = 14,   /* Invalid argument */
-	I40E_AQ_RC_ENOTTY   = 15,   /* Not a typewriter */
-	I40E_AQ_RC_ENOSPC   = 16,   /* No space left or alloc failure */
-	I40E_AQ_RC_ENOSYS   = 17,   /* Function not implemented */
-	I40E_AQ_RC_ERANGE   = 18,   /* Parameter out of range */
-	I40E_AQ_RC_EFLUSHED = 19,   /* Cmd flushed because of prev cmd error */
-	I40E_AQ_RC_BAD_ADDR = 20,   /* Descriptor contains a bad pointer */
-	I40E_AQ_RC_EMODE    = 21,   /* Op not allowed in current dev mode */
-	I40E_AQ_RC_EFBIG    = 22,   /* File too large */
+	I40E_AQ_RC_OK		= 0,  /* success */
+	I40E_AQ_RC_EPERM	= 1,  /* Operation not permitted */
+	I40E_AQ_RC_ENOENT	= 2,  /* No such element */
+	I40E_AQ_RC_ESRCH	= 3,  /* Bad opcode */
+	I40E_AQ_RC_EINTR	= 4,  /* operation interrupted */
+	I40E_AQ_RC_EIO		= 5,  /* I/O error */
+	I40E_AQ_RC_ENXIO	= 6,  /* No such resource */
+	I40E_AQ_RC_E2BIG	= 7,  /* Arg too long */
+	I40E_AQ_RC_EAGAIN	= 8,  /* Try again */
+	I40E_AQ_RC_ENOMEM	= 9,  /* Out of memory */
+	I40E_AQ_RC_EACCES	= 10, /* Permission denied */
+	I40E_AQ_RC_EFAULT	= 11, /* Bad address */
+	I40E_AQ_RC_EBUSY	= 12, /* Device or resource busy */
+	I40E_AQ_RC_EEXIST	= 13, /* object already exists */
+	I40E_AQ_RC_EINVAL	= 14, /* Invalid argument */
+	I40E_AQ_RC_ENOTTY	= 15, /* Not a typewriter */
+	I40E_AQ_RC_ENOSPC	= 16, /* No space left or alloc failure */
+	I40E_AQ_RC_ENOSYS	= 17, /* Function not implemented */
+	I40E_AQ_RC_ERANGE	= 18, /* Parameter out of range */
+	I40E_AQ_RC_EFLUSHED	= 19, /* Cmd flushed due to prev cmd error */
+	I40E_AQ_RC_BAD_ADDR	= 20, /* Descriptor contains a bad pointer */
+	I40E_AQ_RC_EMODE	= 21, /* Op not allowed in current dev mode */
+	I40E_AQ_RC_EFBIG	= 22, /* File too large */
 };
 
 /* Admin Queue command opcodes */
 enum i40e_admin_queue_opc {
 	/* aq commands */
-	i40e_aqc_opc_get_version      = 0x0001,
-	i40e_aqc_opc_driver_version   = 0x0002,
-	i40e_aqc_opc_queue_shutdown   = 0x0003,
-	i40e_aqc_opc_set_pf_context   = 0x0004,
+	i40e_aqc_opc_get_version	= 0x0001,
+	i40e_aqc_opc_driver_version	= 0x0002,
+	i40e_aqc_opc_queue_shutdown	= 0x0003,
+	i40e_aqc_opc_set_pf_context	= 0x0004,
 
 	/* resource ownership */
-	i40e_aqc_opc_request_resource = 0x0008,
-	i40e_aqc_opc_release_resource = 0x0009,
+	i40e_aqc_opc_request_resource	= 0x0008,
+	i40e_aqc_opc_release_resource	= 0x0009,
 
-	i40e_aqc_opc_list_func_capabilities = 0x000A,
-	i40e_aqc_opc_list_dev_capabilities  = 0x000B,
+	i40e_aqc_opc_list_func_capabilities	= 0x000A,
+	i40e_aqc_opc_list_dev_capabilities	= 0x000B,
 
-	i40e_aqc_opc_set_cppm_configuration = 0x0103,
-	i40e_aqc_opc_set_arp_proxy_entry    = 0x0104,
-	i40e_aqc_opc_set_ns_proxy_entry     = 0x0105,
+	i40e_aqc_opc_set_cppm_configuration	= 0x0103,
+	i40e_aqc_opc_set_arp_proxy_entry	= 0x0104,
+	i40e_aqc_opc_set_ns_proxy_entry		= 0x0105,
 
 	/* LAA */
-	i40e_aqc_opc_mng_laa                = 0x0106,   /* AQ obsolete */
-	i40e_aqc_opc_mac_address_read       = 0x0107,
-	i40e_aqc_opc_mac_address_write      = 0x0108,
+	i40e_aqc_opc_mng_laa		= 0x0106,   /* AQ obsolete */
+	i40e_aqc_opc_mac_address_read	= 0x0107,
+	i40e_aqc_opc_mac_address_write	= 0x0108,
 
 	/* PXE */
-	i40e_aqc_opc_clear_pxe_mode         = 0x0110,
+	i40e_aqc_opc_clear_pxe_mode	= 0x0110,
 
 	/* internal switch commands */
-	i40e_aqc_opc_get_switch_config         = 0x0200,
-	i40e_aqc_opc_add_statistics            = 0x0201,
-	i40e_aqc_opc_remove_statistics         = 0x0202,
-	i40e_aqc_opc_set_port_parameters       = 0x0203,
-	i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+	i40e_aqc_opc_get_switch_config		= 0x0200,
+	i40e_aqc_opc_add_statistics		= 0x0201,
+	i40e_aqc_opc_remove_statistics		= 0x0202,
+	i40e_aqc_opc_set_port_parameters	= 0x0203,
+	i40e_aqc_opc_get_switch_resource_alloc	= 0x0204,
 
-	i40e_aqc_opc_add_vsi                = 0x0210,
-	i40e_aqc_opc_update_vsi_parameters  = 0x0211,
-	i40e_aqc_opc_get_vsi_parameters     = 0x0212,
+	i40e_aqc_opc_add_vsi			= 0x0210,
+	i40e_aqc_opc_update_vsi_parameters	= 0x0211,
+	i40e_aqc_opc_get_vsi_parameters		= 0x0212,
 
-	i40e_aqc_opc_add_pv                = 0x0220,
-	i40e_aqc_opc_update_pv_parameters  = 0x0221,
-	i40e_aqc_opc_get_pv_parameters     = 0x0222,
+	i40e_aqc_opc_add_pv			= 0x0220,
+	i40e_aqc_opc_update_pv_parameters	= 0x0221,
+	i40e_aqc_opc_get_pv_parameters		= 0x0222,
 
-	i40e_aqc_opc_add_veb               = 0x0230,
-	i40e_aqc_opc_update_veb_parameters = 0x0231,
-	i40e_aqc_opc_get_veb_parameters    = 0x0232,
+	i40e_aqc_opc_add_veb			= 0x0230,
+	i40e_aqc_opc_update_veb_parameters	= 0x0231,
+	i40e_aqc_opc_get_veb_parameters		= 0x0232,
 
-	i40e_aqc_opc_delete_element  = 0x0243,
+	i40e_aqc_opc_delete_element		= 0x0243,
 
-	i40e_aqc_opc_add_macvlan                  = 0x0250,
-	i40e_aqc_opc_remove_macvlan               = 0x0251,
-	i40e_aqc_opc_add_vlan                     = 0x0252,
-	i40e_aqc_opc_remove_vlan                  = 0x0253,
-	i40e_aqc_opc_set_vsi_promiscuous_modes    = 0x0254,
-	i40e_aqc_opc_add_tag                      = 0x0255,
-	i40e_aqc_opc_remove_tag                   = 0x0256,
-	i40e_aqc_opc_add_multicast_etag           = 0x0257,
-	i40e_aqc_opc_remove_multicast_etag        = 0x0258,
-	i40e_aqc_opc_update_tag                   = 0x0259,
-	i40e_aqc_opc_add_control_packet_filter    = 0x025A,
-	i40e_aqc_opc_remove_control_packet_filter = 0x025B,
-	i40e_aqc_opc_add_cloud_filters            = 0x025C,
-	i40e_aqc_opc_remove_cloud_filters         = 0x025D,
+	i40e_aqc_opc_add_macvlan		= 0x0250,
+	i40e_aqc_opc_remove_macvlan		= 0x0251,
+	i40e_aqc_opc_add_vlan			= 0x0252,
+	i40e_aqc_opc_remove_vlan		= 0x0253,
+	i40e_aqc_opc_set_vsi_promiscuous_modes	= 0x0254,
+	i40e_aqc_opc_add_tag			= 0x0255,
+	i40e_aqc_opc_remove_tag			= 0x0256,
+	i40e_aqc_opc_add_multicast_etag		= 0x0257,
+	i40e_aqc_opc_remove_multicast_etag	= 0x0258,
+	i40e_aqc_opc_update_tag			= 0x0259,
+	i40e_aqc_opc_add_control_packet_filter	= 0x025A,
+	i40e_aqc_opc_remove_control_packet_filter	= 0x025B,
+	i40e_aqc_opc_add_cloud_filters		= 0x025C,
+	i40e_aqc_opc_remove_cloud_filters	= 0x025D,
 
-	i40e_aqc_opc_add_mirror_rule    = 0x0260,
-	i40e_aqc_opc_delete_mirror_rule = 0x0261,
+	i40e_aqc_opc_add_mirror_rule	= 0x0260,
+	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
 
 	/* DCB commands */
-	i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
-	i40e_aqc_opc_dcb_updated    = 0x0302,
+	i40e_aqc_opc_dcb_ignore_pfc	= 0x0301,
+	i40e_aqc_opc_dcb_updated	= 0x0302,
 
 	/* TX scheduler */
-	i40e_aqc_opc_configure_vsi_bw_limit            = 0x0400,
-	i40e_aqc_opc_configure_vsi_ets_sla_bw_limit    = 0x0406,
-	i40e_aqc_opc_configure_vsi_tc_bw               = 0x0407,
-	i40e_aqc_opc_query_vsi_bw_config               = 0x0408,
-	i40e_aqc_opc_query_vsi_ets_sla_config          = 0x040A,
-	i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+	i40e_aqc_opc_configure_vsi_bw_limit		= 0x0400,
+	i40e_aqc_opc_configure_vsi_ets_sla_bw_limit	= 0x0406,
+	i40e_aqc_opc_configure_vsi_tc_bw		= 0x0407,
+	i40e_aqc_opc_query_vsi_bw_config		= 0x0408,
+	i40e_aqc_opc_query_vsi_ets_sla_config		= 0x040A,
+	i40e_aqc_opc_configure_switching_comp_bw_limit	= 0x0410,
 
-	i40e_aqc_opc_enable_switching_comp_ets             = 0x0413,
-	i40e_aqc_opc_modify_switching_comp_ets             = 0x0414,
-	i40e_aqc_opc_disable_switching_comp_ets            = 0x0415,
-	i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
-	i40e_aqc_opc_configure_switching_comp_bw_config    = 0x0417,
-	i40e_aqc_opc_query_switching_comp_ets_config       = 0x0418,
-	i40e_aqc_opc_query_port_ets_config                 = 0x0419,
-	i40e_aqc_opc_query_switching_comp_bw_config        = 0x041A,
-	i40e_aqc_opc_suspend_port_tx                       = 0x041B,
-	i40e_aqc_opc_resume_port_tx                        = 0x041C,
-	i40e_aqc_opc_configure_partition_bw                = 0x041D,
+	i40e_aqc_opc_enable_switching_comp_ets			= 0x0413,
+	i40e_aqc_opc_modify_switching_comp_ets			= 0x0414,
+	i40e_aqc_opc_disable_switching_comp_ets			= 0x0415,
+	i40e_aqc_opc_configure_switching_comp_ets_bw_limit	= 0x0416,
+	i40e_aqc_opc_configure_switching_comp_bw_config		= 0x0417,
+	i40e_aqc_opc_query_switching_comp_ets_config		= 0x0418,
+	i40e_aqc_opc_query_port_ets_config			= 0x0419,
+	i40e_aqc_opc_query_switching_comp_bw_config		= 0x041A,
+	i40e_aqc_opc_suspend_port_tx				= 0x041B,
+	i40e_aqc_opc_resume_port_tx				= 0x041C,
+	i40e_aqc_opc_configure_partition_bw			= 0x041D,
 
 	/* hmc */
-	i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
-	i40e_aqc_opc_set_hmc_resource_profile   = 0x0501,
+	i40e_aqc_opc_query_hmc_resource_profile	= 0x0500,
+	i40e_aqc_opc_set_hmc_resource_profile	= 0x0501,
 
 	/* phy commands */
-	i40e_aqc_opc_get_phy_abilities   = 0x0600,
-	i40e_aqc_opc_set_phy_config      = 0x0601,
-	i40e_aqc_opc_set_mac_config      = 0x0603,
-	i40e_aqc_opc_set_link_restart_an = 0x0605,
-	i40e_aqc_opc_get_link_status     = 0x0607,
-	i40e_aqc_opc_set_phy_int_mask    = 0x0613,
-	i40e_aqc_opc_get_local_advt_reg  = 0x0614,
-	i40e_aqc_opc_set_local_advt_reg  = 0x0615,
-	i40e_aqc_opc_get_partner_advt    = 0x0616,
-	i40e_aqc_opc_set_lb_modes        = 0x0618,
-	i40e_aqc_opc_get_phy_wol_caps    = 0x0621,
-	i40e_aqc_opc_set_phy_debug	 = 0x0622,
-	i40e_aqc_opc_upload_ext_phy_fm   = 0x0625,
+	i40e_aqc_opc_get_phy_abilities		= 0x0600,
+	i40e_aqc_opc_set_phy_config		= 0x0601,
+	i40e_aqc_opc_set_mac_config		= 0x0603,
+	i40e_aqc_opc_set_link_restart_an	= 0x0605,
+	i40e_aqc_opc_get_link_status		= 0x0607,
+	i40e_aqc_opc_set_phy_int_mask		= 0x0613,
+	i40e_aqc_opc_get_local_advt_reg		= 0x0614,
+	i40e_aqc_opc_set_local_advt_reg		= 0x0615,
+	i40e_aqc_opc_get_partner_advt		= 0x0616,
+	i40e_aqc_opc_set_lb_modes		= 0x0618,
+	i40e_aqc_opc_get_phy_wol_caps		= 0x0621,
+	i40e_aqc_opc_set_phy_debug		= 0x0622,
+	i40e_aqc_opc_upload_ext_phy_fm		= 0x0625,
 
 	/* NVM commands */
-	i40e_aqc_opc_nvm_read         = 0x0701,
-	i40e_aqc_opc_nvm_erase        = 0x0702,
-	i40e_aqc_opc_nvm_update       = 0x0703,
-	i40e_aqc_opc_nvm_config_read  = 0x0704,
-	i40e_aqc_opc_nvm_config_write = 0x0705,
+	i40e_aqc_opc_nvm_read			= 0x0701,
+	i40e_aqc_opc_nvm_erase			= 0x0702,
+	i40e_aqc_opc_nvm_update			= 0x0703,
+	i40e_aqc_opc_nvm_config_read		= 0x0704,
+	i40e_aqc_opc_nvm_config_write		= 0x0705,
 
 	/* virtualization commands */
-	i40e_aqc_opc_send_msg_to_pf   = 0x0801,
-	i40e_aqc_opc_send_msg_to_vf   = 0x0802,
-	i40e_aqc_opc_send_msg_to_peer = 0x0803,
+	i40e_aqc_opc_send_msg_to_pf		= 0x0801,
+	i40e_aqc_opc_send_msg_to_vf		= 0x0802,
+	i40e_aqc_opc_send_msg_to_peer		= 0x0803,
 
 	/* alternate structure */
-	i40e_aqc_opc_alternate_write          = 0x0900,
-	i40e_aqc_opc_alternate_write_indirect = 0x0901,
-	i40e_aqc_opc_alternate_read           = 0x0902,
-	i40e_aqc_opc_alternate_read_indirect  = 0x0903,
-	i40e_aqc_opc_alternate_write_done     = 0x0904,
-	i40e_aqc_opc_alternate_set_mode       = 0x0905,
-	i40e_aqc_opc_alternate_clear_port     = 0x0906,
+	i40e_aqc_opc_alternate_write		= 0x0900,
+	i40e_aqc_opc_alternate_write_indirect	= 0x0901,
+	i40e_aqc_opc_alternate_read		= 0x0902,
+	i40e_aqc_opc_alternate_read_indirect	= 0x0903,
+	i40e_aqc_opc_alternate_write_done	= 0x0904,
+	i40e_aqc_opc_alternate_set_mode		= 0x0905,
+	i40e_aqc_opc_alternate_clear_port	= 0x0906,
 
 	/* LLDP commands */
-	i40e_aqc_opc_lldp_get_mib    = 0x0A00,
-	i40e_aqc_opc_lldp_update_mib = 0x0A01,
-	i40e_aqc_opc_lldp_add_tlv    = 0x0A02,
-	i40e_aqc_opc_lldp_update_tlv = 0x0A03,
-	i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
-	i40e_aqc_opc_lldp_stop       = 0x0A05,
-	i40e_aqc_opc_lldp_start      = 0x0A06,
+	i40e_aqc_opc_lldp_get_mib	= 0x0A00,
+	i40e_aqc_opc_lldp_update_mib	= 0x0A01,
+	i40e_aqc_opc_lldp_add_tlv	= 0x0A02,
+	i40e_aqc_opc_lldp_update_tlv	= 0x0A03,
+	i40e_aqc_opc_lldp_delete_tlv	= 0x0A04,
+	i40e_aqc_opc_lldp_stop		= 0x0A05,
+	i40e_aqc_opc_lldp_start		= 0x0A06,
 
 	/* Tunnel commands */
-	i40e_aqc_opc_add_udp_tunnel       = 0x0B00,
-	i40e_aqc_opc_del_udp_tunnel       = 0x0B01,
-	i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+	i40e_aqc_opc_add_udp_tunnel	= 0x0B00,
+	i40e_aqc_opc_del_udp_tunnel	= 0x0B01,
+	i40e_aqc_opc_tunnel_key_structure	= 0x0B10,
 
 	/* Async Events */
-	i40e_aqc_opc_event_lan_overflow = 0x1001,
+	i40e_aqc_opc_event_lan_overflow		= 0x1001,
 
 	/* OEM commands */
-	i40e_aqc_opc_oem_parameter_change     = 0xFE00,
-	i40e_aqc_opc_oem_device_status_change = 0xFE01,
+	i40e_aqc_opc_oem_parameter_change	= 0xFE00,
+	i40e_aqc_opc_oem_device_status_change	= 0xFE01,
 
 	/* debug commands */
-	i40e_aqc_opc_debug_get_deviceid     = 0xFF00,
-	i40e_aqc_opc_debug_set_mode         = 0xFF01,
-	i40e_aqc_opc_debug_read_reg         = 0xFF03,
-	i40e_aqc_opc_debug_write_reg        = 0xFF04,
-	i40e_aqc_opc_debug_modify_reg       = 0xFF07,
-	i40e_aqc_opc_debug_dump_internals   = 0xFF08,
-	i40e_aqc_opc_debug_modify_internals = 0xFF09,
+	i40e_aqc_opc_debug_get_deviceid		= 0xFF00,
+	i40e_aqc_opc_debug_set_mode		= 0xFF01,
+	i40e_aqc_opc_debug_read_reg		= 0xFF03,
+	i40e_aqc_opc_debug_write_reg		= 0xFF04,
+	i40e_aqc_opc_debug_modify_reg		= 0xFF07,
+	i40e_aqc_opc_debug_dump_internals	= 0xFF08,
+	i40e_aqc_opc_debug_modify_internals	= 0xFF09,
 };
 
 /* command structures and indirect data structures */
@@ -302,7 +302,7 @@
 /* This macro is used extensively to ensure that command structures are 16
  * bytes in length as they have to map to the raw array of that size.
  */
-#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
+#define I40E_CHECK_CMD_LENGTH(X)	I40E_CHECK_STRUCT_LEN(16, X)
 
 /* internal (0x00XX) commands */
 
@@ -320,22 +320,22 @@
 
 /* Send driver version (indirect 0x0002) */
 struct i40e_aqc_driver_version {
-	u8     driver_major_ver;
-	u8     driver_minor_ver;
-	u8     driver_build_ver;
-	u8     driver_subbuild_ver;
-	u8     reserved[4];
-	__le32 address_high;
-	__le32 address_low;
+	u8	driver_major_ver;
+	u8	driver_minor_ver;
+	u8	driver_build_ver;
+	u8	driver_subbuild_ver;
+	u8	reserved[4];
+	__le32	address_high;
+	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
 
 /* Queue Shutdown (direct 0x0003) */
 struct i40e_aqc_queue_shutdown {
-	__le32     driver_unloading;
-#define I40E_AQ_DRIVER_UNLOADING    0x1
-	u8     reserved[12];
+	__le32	driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING	0x1
+	u8	reserved[12];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
@@ -351,19 +351,19 @@
 /* Request resource ownership (direct 0x0008)
  * Release resource ownership (direct 0x0009)
  */
-#define I40E_AQ_RESOURCE_NVM               1
-#define I40E_AQ_RESOURCE_SDP               2
-#define I40E_AQ_RESOURCE_ACCESS_READ       1
-#define I40E_AQ_RESOURCE_ACCESS_WRITE      2
-#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT  3000
-#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+#define I40E_AQ_RESOURCE_NVM			1
+#define I40E_AQ_RESOURCE_SDP			2
+#define I40E_AQ_RESOURCE_ACCESS_READ		1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE		2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT	3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT	180000
 
 struct i40e_aqc_request_resource {
-	__le16 resource_id;
-	__le16 access_type;
-	__le32 timeout;
-	__le32 resource_number;
-	u8     reserved[4];
+	__le16	resource_id;
+	__le16	access_type;
+	__le32	timeout;
+	__le32	resource_number;
+	u8	reserved[4];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
@@ -373,7 +373,7 @@
  */
 struct i40e_aqc_list_capabilites {
 	u8 command_flags;
-#define I40E_AQ_LIST_CAP_PF_INDEX_EN     1
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN	1
 	u8 pf_index;
 	u8 reserved[2];
 	__le32 count;
@@ -384,123 +384,123 @@
 I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
 
 struct i40e_aqc_list_capabilities_element_resp {
-	__le16 id;
-	u8     major_rev;
-	u8     minor_rev;
-	__le32 number;
-	__le32 logical_id;
-	__le32 phys_id;
-	u8     reserved[16];
+	__le16	id;
+	u8	major_rev;
+	u8	minor_rev;
+	__le32	number;
+	__le32	logical_id;
+	__le32	phys_id;
+	u8	reserved[16];
 };
 
 /* list of caps */
 
-#define I40E_AQ_CAP_ID_SWITCH_MODE      0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE         0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE      0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP       0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID  0x0005
-#define I40E_AQ_CAP_ID_ALTERNATE_RAM    0x0006
-#define I40E_AQ_CAP_ID_SRIOV            0x0012
-#define I40E_AQ_CAP_ID_VF               0x0013
-#define I40E_AQ_CAP_ID_VMDQ             0x0014
-#define I40E_AQ_CAP_ID_8021QBG          0x0015
-#define I40E_AQ_CAP_ID_8021QBR          0x0016
-#define I40E_AQ_CAP_ID_VSI              0x0017
-#define I40E_AQ_CAP_ID_DCB              0x0018
-#define I40E_AQ_CAP_ID_FCOE             0x0021
-#define I40E_AQ_CAP_ID_RSS              0x0040
-#define I40E_AQ_CAP_ID_RXQ              0x0041
-#define I40E_AQ_CAP_ID_TXQ              0x0042
-#define I40E_AQ_CAP_ID_MSIX             0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX          0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR    0x0045
-#define I40E_AQ_CAP_ID_1588             0x0046
-#define I40E_AQ_CAP_ID_IWARP            0x0051
-#define I40E_AQ_CAP_ID_LED              0x0061
-#define I40E_AQ_CAP_ID_SDP              0x0062
-#define I40E_AQ_CAP_ID_MDIO             0x0063
-#define I40E_AQ_CAP_ID_FLEX10           0x00F1
-#define I40E_AQ_CAP_ID_CEM              0x00F2
+#define I40E_AQ_CAP_ID_SWITCH_MODE	0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE		0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE	0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP	0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID	0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM	0x0006
+#define I40E_AQ_CAP_ID_SRIOV		0x0012
+#define I40E_AQ_CAP_ID_VF		0x0013
+#define I40E_AQ_CAP_ID_VMDQ		0x0014
+#define I40E_AQ_CAP_ID_8021QBG		0x0015
+#define I40E_AQ_CAP_ID_8021QBR		0x0016
+#define I40E_AQ_CAP_ID_VSI		0x0017
+#define I40E_AQ_CAP_ID_DCB		0x0018
+#define I40E_AQ_CAP_ID_FCOE		0x0021
+#define I40E_AQ_CAP_ID_RSS		0x0040
+#define I40E_AQ_CAP_ID_RXQ		0x0041
+#define I40E_AQ_CAP_ID_TXQ		0x0042
+#define I40E_AQ_CAP_ID_MSIX		0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX		0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR	0x0045
+#define I40E_AQ_CAP_ID_1588		0x0046
+#define I40E_AQ_CAP_ID_IWARP		0x0051
+#define I40E_AQ_CAP_ID_LED		0x0061
+#define I40E_AQ_CAP_ID_SDP		0x0062
+#define I40E_AQ_CAP_ID_MDIO		0x0063
+#define I40E_AQ_CAP_ID_FLEX10		0x00F1
+#define I40E_AQ_CAP_ID_CEM		0x00F2
 
 /* Set CPPM Configuration (direct 0x0103) */
 struct i40e_aqc_cppm_configuration {
-	__le16 command_flags;
-#define I40E_AQ_CPPM_EN_LTRC    0x0800
-#define I40E_AQ_CPPM_EN_DMCTH   0x1000
-#define I40E_AQ_CPPM_EN_DMCTLX  0x2000
-#define I40E_AQ_CPPM_EN_HPTC    0x4000
-#define I40E_AQ_CPPM_EN_DMARC   0x8000
-	__le16 ttlx;
-	__le32 dmacr;
-	__le16 dmcth;
-	u8     hptc;
-	u8     reserved;
-	__le32 pfltrc;
+	__le16	command_flags;
+#define I40E_AQ_CPPM_EN_LTRC	0x0800
+#define I40E_AQ_CPPM_EN_DMCTH	0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX	0x2000
+#define I40E_AQ_CPPM_EN_HPTC	0x4000
+#define I40E_AQ_CPPM_EN_DMARC	0x8000
+	__le16	ttlx;
+	__le32	dmacr;
+	__le16	dmcth;
+	u8	hptc;
+	u8	reserved;
+	__le32	pfltrc;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
 
 /* Set ARP Proxy command / response (indirect 0x0104) */
 struct i40e_aqc_arp_proxy_data {
-	__le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4           0x0008
-#define I40E_AQ_ARP_UNSUP_CTL           0x0010
-#define I40E_AQ_ARP_ENA                 0x0020
-#define I40E_AQ_ARP_ADD_IPV4            0x0040
-#define I40E_AQ_ARP_DEL_IPV4            0x0080
-	__le16 table_id;
-	__le32 pfpm_proxyfc;
-	__le32 ip_addr;
-	u8     mac_addr[6];
+	__le16	command_flags;
+#define I40E_AQ_ARP_INIT_IPV4	0x0008
+#define I40E_AQ_ARP_UNSUP_CTL	0x0010
+#define I40E_AQ_ARP_ENA		0x0020
+#define I40E_AQ_ARP_ADD_IPV4	0x0040
+#define I40E_AQ_ARP_DEL_IPV4	0x0080
+	__le16	table_id;
+	__le32	pfpm_proxyfc;
+	__le32	ip_addr;
+	u8	mac_addr[6];
 };
 
 /* Set NS Proxy Table Entry Command (indirect 0x0105) */
 struct i40e_aqc_ns_proxy_data {
-	__le16 table_idx_mac_addr_0;
-	__le16 table_idx_mac_addr_1;
-	__le16 table_idx_ipv6_0;
-	__le16 table_idx_ipv6_1;
-	__le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0             0x0100
-#define I40E_AQ_NS_PROXY_DEL_0             0x0200
-#define I40E_AQ_NS_PROXY_ADD_1             0x0400
-#define I40E_AQ_NS_PROXY_DEL_1             0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0        0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0        0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1        0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1        0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ       0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL     0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL      0x0004
-	u8     mac_addr_0[6];
-	u8     mac_addr_1[6];
-	u8     local_mac_addr[6];
-	u8     ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
-	u8     ipv6_addr_1[16];
+	__le16	table_idx_mac_addr_0;
+	__le16	table_idx_mac_addr_1;
+	__le16	table_idx_ipv6_0;
+	__le16	table_idx_ipv6_1;
+	__le16	control;
+#define I40E_AQ_NS_PROXY_ADD_0		0x0100
+#define I40E_AQ_NS_PROXY_DEL_0		0x0200
+#define I40E_AQ_NS_PROXY_ADD_1		0x0400
+#define I40E_AQ_NS_PROXY_DEL_1		0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0	0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0	0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1	0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1	0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ	0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL	0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL	0x0004
+	u8	mac_addr_0[6];
+	u8	mac_addr_1[6];
+	u8	local_mac_addr[6];
+	u8	ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+	u8	ipv6_addr_1[16];
 };
 
 /* Manage LAA Command (0x0106) - obsolete */
 struct i40e_aqc_mng_laa {
 	__le16	command_flags;
-#define I40E_AQ_LAA_FLAG_WR   0x8000
-	u8     reserved[2];
-	__le32 sal;
-	__le16 sah;
-	u8     reserved2[6];
+#define I40E_AQ_LAA_FLAG_WR	0x8000
+	u8	reserved[2];
+	__le32	sal;
+	__le16	sah;
+	u8	reserved2[6];
 };
 
 /* Manage MAC Address Read Command (indirect 0x0107) */
 struct i40e_aqc_mac_address_read {
 	__le16	command_flags;
-#define I40E_AQC_LAN_ADDR_VALID   0x10
-#define I40E_AQC_SAN_ADDR_VALID   0x20
-#define I40E_AQC_PORT_ADDR_VALID  0x40
-#define I40E_AQC_WOL_ADDR_VALID   0x80
-#define I40E_AQC_ADDR_VALID_MASK  0xf0
-	u8     reserved[6];
-	__le32 addr_high;
-	__le32 addr_low;
+#define I40E_AQC_LAN_ADDR_VALID		0x10
+#define I40E_AQC_SAN_ADDR_VALID		0x20
+#define I40E_AQC_PORT_ADDR_VALID	0x40
+#define I40E_AQC_WOL_ADDR_VALID		0x80
+#define I40E_AQC_ADDR_VALID_MASK	0xf0
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
@@ -516,14 +516,14 @@
 
 /* Manage MAC Address Write Command (0x0108) */
 struct i40e_aqc_mac_address_write {
-	__le16 command_flags;
-#define I40E_AQC_WRITE_TYPE_LAA_ONLY    0x0000
-#define I40E_AQC_WRITE_TYPE_LAA_WOL     0x4000
-#define I40E_AQC_WRITE_TYPE_PORT        0x8000
-#define I40E_AQC_WRITE_TYPE_MASK        0xc000
-	__le16 mac_sah;
-	__le32 mac_sal;
-	u8     reserved[8];
+	__le16	command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY	0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL	0x4000
+#define I40E_AQC_WRITE_TYPE_PORT	0x8000
+#define I40E_AQC_WRITE_TYPE_MASK	0xc000
+	__le16	mac_sah;
+	__le32	mac_sal;
+	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
@@ -544,10 +544,10 @@
  * command
  */
 struct i40e_aqc_switch_seid {
-	__le16 seid;
-	u8     reserved[6];
-	__le32 addr_high;
-	__le32 addr_low;
+	__le16	seid;
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
@@ -556,34 +556,34 @@
  * uses i40e_aqc_switch_seid for the descriptor
  */
 struct i40e_aqc_get_switch_config_header_resp {
-	__le16 num_reported;
-	__le16 num_total;
-	u8     reserved[12];
+	__le16	num_reported;
+	__le16	num_total;
+	u8	reserved[12];
 };
 
 struct i40e_aqc_switch_config_element_resp {
-	u8     element_type;
-#define I40E_AQ_SW_ELEM_TYPE_MAC        1
-#define I40E_AQ_SW_ELEM_TYPE_PF         2
-#define I40E_AQ_SW_ELEM_TYPE_VF         3
-#define I40E_AQ_SW_ELEM_TYPE_EMP        4
-#define I40E_AQ_SW_ELEM_TYPE_BMC        5
-#define I40E_AQ_SW_ELEM_TYPE_PV         16
-#define I40E_AQ_SW_ELEM_TYPE_VEB        17
-#define I40E_AQ_SW_ELEM_TYPE_PA         18
-#define I40E_AQ_SW_ELEM_TYPE_VSI        19
-	u8     revision;
-#define I40E_AQ_SW_ELEM_REV_1           1
-	__le16 seid;
-	__le16 uplink_seid;
-	__le16 downlink_seid;
-	u8     reserved[3];
-	u8     connection_type;
-#define I40E_AQ_CONN_TYPE_REGULAR       0x1
-#define I40E_AQ_CONN_TYPE_DEFAULT       0x2
-#define I40E_AQ_CONN_TYPE_CASCADED      0x3
-	__le16 scheduler_id;
-	__le16 element_info;
+	u8	element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC	1
+#define I40E_AQ_SW_ELEM_TYPE_PF		2
+#define I40E_AQ_SW_ELEM_TYPE_VF		3
+#define I40E_AQ_SW_ELEM_TYPE_EMP	4
+#define I40E_AQ_SW_ELEM_TYPE_BMC	5
+#define I40E_AQ_SW_ELEM_TYPE_PV		16
+#define I40E_AQ_SW_ELEM_TYPE_VEB	17
+#define I40E_AQ_SW_ELEM_TYPE_PA		18
+#define I40E_AQ_SW_ELEM_TYPE_VSI	19
+	u8	revision;
+#define I40E_AQ_SW_ELEM_REV_1		1
+	__le16	seid;
+	__le16	uplink_seid;
+	__le16	downlink_seid;
+	u8	reserved[3];
+	u8	connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR	0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT	0x2
+#define I40E_AQ_CONN_TYPE_CASCADED	0x3
+	__le16	scheduler_id;
+	__le16	element_info;
 };
 
 /* Get Switch Configuration (indirect 0x0200)
@@ -591,73 +591,73 @@
  *    the first in the array is the header, remainder are elements
  */
 struct i40e_aqc_get_switch_config_resp {
-	struct i40e_aqc_get_switch_config_header_resp header;
-	struct i40e_aqc_switch_config_element_resp    element[1];
+	struct i40e_aqc_get_switch_config_header_resp	header;
+	struct i40e_aqc_switch_config_element_resp	element[1];
 };
 
 /* Add Statistics (direct 0x0201)
  * Remove Statistics (direct 0x0202)
  */
 struct i40e_aqc_add_remove_statistics {
-	__le16 seid;
-	__le16 vlan;
-	__le16 stat_index;
-	u8     reserved[10];
+	__le16	seid;
+	__le16	vlan;
+	__le16	stat_index;
+	u8	reserved[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
 
 /* Set Port Parameters command (direct 0x0203) */
 struct i40e_aqc_set_port_parameters {
-	__le16 command_flags;
-#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS   1
-#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS  2 /* must set! */
-#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA    4
-	__le16 bad_frame_vsi;
-	__le16 default_seid;        /* reserved for command */
-	u8     reserved[10];
+	__le16	command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS	1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS	2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA	4
+	__le16	bad_frame_vsi;
+	__le16	default_seid;        /* reserved for command */
+	u8	reserved[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
 
 /* Get Switch Resource Allocation (indirect 0x0204) */
 struct i40e_aqc_get_switch_resource_alloc {
-	u8     num_entries;         /* reserved for command */
-	u8     reserved[7];
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	num_entries;         /* reserved for command */
+	u8	reserved[7];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
 
 /* expect an array of these structs in the response buffer */
 struct i40e_aqc_switch_resource_alloc_element_resp {
-	u8     resource_type;
-#define I40E_AQ_RESOURCE_TYPE_VEB                 0x0
-#define I40E_AQ_RESOURCE_TYPE_VSI                 0x1
-#define I40E_AQ_RESOURCE_TYPE_MACADDR             0x2
-#define I40E_AQ_RESOURCE_TYPE_STAG                0x3
-#define I40E_AQ_RESOURCE_TYPE_ETAG                0x4
-#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH      0x5
-#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH        0x6
-#define I40E_AQ_RESOURCE_TYPE_VLAN                0x7
-#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY      0x8
-#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY     0x9
-#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL      0xA
-#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE         0xB
-#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS          0xC
-#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS        0xD
-#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS   0xF
-#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS          0x10
-#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS         0x11
-#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS            0x12
-#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS        0x13
-	u8     reserved1;
-	__le16 guaranteed;
-	__le16 total;
-	__le16 used;
-	__le16 total_unalloced;
-	u8     reserved2[6];
+	u8	resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB		0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI		0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR		0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG		0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG		0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH	0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH	0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN		0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY	0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY	0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL	0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE	0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS	0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS	0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS	0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS	0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS	0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS		0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS	0x13
+	u8	reserved1;
+	__le16	guaranteed;
+	__le16	total;
+	__le16	used;
+	__le16	total_unalloced;
+	u8	reserved2[6];
 };
 
 /* Add VSI (indirect 0x0210)
@@ -671,24 +671,24 @@
  *     uses the same completion and data structure as Add VSI
  */
 struct i40e_aqc_add_get_update_vsi {
-	__le16 uplink_seid;
-	u8     connection_type;
-#define I40E_AQ_VSI_CONN_TYPE_NORMAL            0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT           0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED          0x3
-	u8     reserved1;
-	u8     vf_id;
-	u8     reserved2;
-	__le16 vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT          0x0
-#define I40E_AQ_VSI_TYPE_MASK           (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
-#define I40E_AQ_VSI_TYPE_VF             0x0
-#define I40E_AQ_VSI_TYPE_VMDQ2          0x1
-#define I40E_AQ_VSI_TYPE_PF             0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG        0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV    0x4
-	__le32 addr_high;
-	__le32 addr_low;
+	__le16	uplink_seid;
+	u8	connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL	0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT	0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED	0x3
+	u8	reserved1;
+	u8	vf_id;
+	u8	reserved2;
+	__le16	vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT		0x0
+#define I40E_AQ_VSI_TYPE_MASK		(0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF		0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2		0x1
+#define I40E_AQ_VSI_TYPE_PF		0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG	0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV	0x4
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
@@ -706,121 +706,121 @@
 
 struct i40e_aqc_vsi_properties_data {
 	/* first 96 byte are written by SW */
-	__le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID       0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID     0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID         0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID       0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID   0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID    0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID    0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID    0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID     0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID        0x0200
+	__le16	valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID		0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID		0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID		0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID		0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID	0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID	0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID	0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID	0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID		0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID		0x0200
 	/* switch section */
-	__le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT             0x0000
-#define I40E_AQ_VSI_SW_ID_MASK              (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG     0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB     0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB     0x4000
-	u8     sw_reserved[2];
+	__le16	switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT		0x0000
+#define I40E_AQ_VSI_SW_ID_MASK		(0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG	0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB	0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB	0x4000
+	u8	sw_reserved[2];
 	/* security section */
-	u8     sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD    0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK    0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK     0x04
-	u8     sec_reserved;
+	u8	sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD	0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK	0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK	0x04
+	u8	sec_reserved;
 	/* VLAN section */
-	__le16 pvid; /* VLANS include priority bits */
-	__le16 fcoe_pvid;
-	u8     port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT        0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK         (0x03 << \
-						I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED       0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED     0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL          0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID       0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT        0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK         (0x3 << \
-					I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH     0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP       0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR          0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING      0x18
-	u8     pvlan_reserved[3];
+	__le16	pvid; /* VLANs include priority bits */
+	__le16	fcoe_pvid;
+	u8	port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT	0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK	(0x03 << \
+					 I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED	0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED	0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL	0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID	0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT	0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << \
+					 I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH	0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP	0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR	0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING	0x18
+	u8	pvlan_reserved[3];
 	/* ingress egress up sections */
-	__le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT      0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT      3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT      6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT      9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT      12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT      15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT      18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT      21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
-	__le32 egress_table;   /* same defines as for ingress table */
+	__le32	ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT	0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT	3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT	6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT	9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT	12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT	15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT	18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT	21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+	__le32	egress_table;   /* same defines as for ingress table */
 	/* cascaded PV section */
-	__le16 cas_pv_tag;
-	u8     cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT      0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK       (0x03 << \
-						I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE      0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE     0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY       0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG      0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE      0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
-	u8     cas_pv_reserved;
+	__le16	cas_pv_tag;
+	u8	cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT		0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK		(0x03 << \
+						 I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE		0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE		0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY		0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG		0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE		0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG	0x40
+	u8	cas_pv_reserved;
 	/* queue mapping section */
-	__le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG          0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG       0x1
-	__le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT             0x0
-#define I40E_AQ_VSI_QUEUE_MASK              (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
-	__le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT     0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK      (0x1FF << \
-						I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT     9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK      (0x7 << \
-						I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+	__le16	mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG	0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG	0x1
+	__le16	queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT		0x0
+#define I40E_AQ_VSI_QUEUE_MASK		(0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+	__le16	tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT	0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK	(0x1FF << \
+					 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT	9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK	(0x7 << \
+					 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 	/* queueing option section */
-	u8     queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA         0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA        0x20
-	u8     queueing_opt_reserved[3];
+	u8	queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA	0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA	0x20
+	u8	queueing_opt_reserved[3];
 	/* scheduler section */
-	u8     up_enable_bits;
-	u8     sched_reserved;
+	u8	up_enable_bits;
+	u8	sched_reserved;
 	/* outer up section */
-	__le32 outer_up_table; /* same structure and defines as ingress table */
-	u8     cmd_reserved[8];
+	__le32	outer_up_table; /* same structure and defines as ingress tbl */
+	u8	cmd_reserved[8];
 	/* last 32 bytes are written by FW */
-	__le16 qs_handle[8];
+	__le16	qs_handle[8];
 #define I40E_AQ_VSI_QS_HANDLE_INVALID	0xFFFF
-	__le16 stat_counter_idx;
-	__le16 sched_id;
-	u8     resp_reserved[12];
+	__le16	stat_counter_idx;
+	__le16	sched_id;
+	u8	resp_reserved[12];
 };
 
 I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
@@ -830,26 +830,26 @@
  * (IS_CTRL_PORT only works on add PV)
  */
 struct i40e_aqc_add_update_pv {
-	__le16 command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE                0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN    0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN    0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT           0x8
-	__le16 uplink_seid;
-	__le16 connected_seid;
-	u8     reserved[10];
+	__le16	command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE		0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN	0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN	0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT		0x8
+	__le16	uplink_seid;
+	__le16	connected_seid;
+	u8	reserved[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
 
 struct i40e_aqc_add_update_pv_completion {
 	/* reserved for update; for add also encodes error if rc == ENOSPC */
-	__le16 pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV               0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED            0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER          0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY            0x8
-	u8     reserved[14];
+	__le16	pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV	0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED	0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER	0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY	0x8
+	u8	reserved[14];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
@@ -859,48 +859,48 @@
  */
 
 struct i40e_aqc_get_pv_params_completion {
-	__le16 seid;
-	__le16 default_stag;
-	__le16 pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE            0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG  0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG  0x4
-	u8     reserved[8];
-	__le16 default_port_seid;
+	__le16	seid;
+	__le16	default_stag;
+	__le16	pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE			0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG	0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG	0x4
+	u8	reserved[8];
+	__le16	default_port_seid;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
 
 /* Add VEB (direct 0x0230) */
 struct i40e_aqc_add_veb {
-	__le16 uplink_seid;
-	__le16 downlink_seid;
-	__le16 veb_flags;
-#define I40E_AQC_ADD_VEB_FLOATING           0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT    1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK     (0x3 << \
+	__le16	uplink_seid;
+	__le16	downlink_seid;
+	__le16	veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING		0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT	1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK		(0x3 << \
 					I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT  0x2
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA     0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER   0x8
-	u8     enable_tcs;
-	u8     reserved[9];
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT	0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA		0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER	0x8
+	u8	enable_tcs;
+	u8	reserved[9];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
 
 struct i40e_aqc_add_veb_completion {
-	u8     reserved[6];
-	__le16 switch_seid;
+	u8	reserved[6];
+	__le16	switch_seid;
 	/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
-	__le16 veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB              0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED            0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER          0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY            0x8
-	__le16 statistic_index;
-	__le16 vebs_used;
-	__le16 vebs_free;
+	__le16	veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB		0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED		0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER	0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY		0x8
+	__le16	statistic_index;
+	__le16	vebs_used;
+	__le16	vebs_free;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
@@ -909,13 +909,13 @@
  * uses i40e_aqc_switch_seid for the descriptor
  */
 struct i40e_aqc_get_veb_parameters_completion {
-	__le16 seid;
-	__le16 switch_id;
-	__le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
-	__le16 statistic_index;
-	__le16 vebs_used;
-	__le16 vebs_free;
-	u8     reserved[4];
+	__le16	seid;
+	__le16	switch_id;
+	__le16	veb_flags; /* only the first/last flags from 0x0230 are valid */
+	__le16	statistic_index;
+	__le16	vebs_used;
+	__le16	vebs_free;
+	u8	reserved[4];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
@@ -928,37 +928,37 @@
 
 /* used for the command for most vlan commands */
 struct i40e_aqc_macvlan {
-	__le16 num_addresses;
-	__le16 seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK   (0x3FF << \
+	__le16	num_addresses;
+	__le16	seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK	(0x3FF << \
 					I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
-#define I40E_AQC_MACVLAN_CMD_SEID_VALID      0x8000
-	__le32 addr_high;
-	__le32 addr_low;
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID		0x8000
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
 
 /* indirect data for command and response */
 struct i40e_aqc_add_macvlan_element_data {
-	u8     mac_addr[6];
-	__le16 vlan_tag;
-	__le16 flags;
-#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH     0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH        0x0002
-#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN       0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE          0x0008
-	__le16 queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT  0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK   (0x7FF << \
+	u8	mac_addr[6];
+	__le16	vlan_tag;
+	__le16	flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH	0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH		0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN	0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE		0x0008
+	__le16	queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT	0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK		(0x7FF << \
 					I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
 	/* response section */
-	u8     match_method;
-#define I40E_AQC_MM_PERFECT_MATCH             0x01
-#define I40E_AQC_MM_HASH_MATCH                0x02
-#define I40E_AQC_MM_ERR_NO_RES                0xFF
-	u8     reserved1[3];
+	u8	match_method;
+#define I40E_AQC_MM_PERFECT_MATCH	0x01
+#define I40E_AQC_MM_HASH_MATCH		0x02
+#define I40E_AQC_MM_ERR_NO_RES		0xFF
+	u8	reserved1[3];
 };
 
 struct i40e_aqc_add_remove_macvlan_completion {
@@ -978,19 +978,19 @@
  */
 
 struct i40e_aqc_remove_macvlan_element_data {
-	u8     mac_addr[6];
-	__le16 vlan_tag;
-	u8     flags;
-#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH      0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH         0x02
-#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN        0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS           0x10
-	u8     reserved[3];
+	u8	mac_addr[6];
+	__le16	vlan_tag;
+	u8	flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH	0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH		0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN	0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS		0x10
+	u8	reserved[3];
 	/* reply section */
-	u8     error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS         0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL            0xFF
-	u8     reply_reserved[3];
+	u8	error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS		0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL		0xFF
+	u8	reply_reserved[3];
 };
 
 /* Add VLAN (indirect 0x0252)
@@ -998,59 +998,58 @@
  * use the generic i40e_aqc_macvlan for the command
  */
 struct i40e_aqc_add_remove_vlan_element_data {
-	__le16 vlan_tag;
-	u8     vlan_flags;
+	__le16	vlan_tag;
+	u8	vlan_flags;
 /* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL             0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT       1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK        (0x3 << \
-						I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR     0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY     0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY   0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT           3
-#define I40E_AQC_VLAN_PTYPE_MASK            (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI     0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI     0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI   0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI    0x18
+#define I40E_AQC_ADD_VLAN_LOCAL			0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT		1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK	(0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR		0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY		0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY	0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT		3
+#define I40E_AQC_VLAN_PTYPE_MASK	(0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI		0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI		0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI	0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI	0x18
 /* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL            0x1
-	u8     reserved;
-	u8     result;
+#define I40E_AQC_REMOVE_VLAN_ALL	0x1
+	u8	reserved;
+	u8	result;
 /* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS       0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST  0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+#define I40E_AQC_ADD_VLAN_SUCCESS	0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST	0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE	0xFF
 /* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS    0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL       0xFF
-	u8     reserved1[3];
+#define I40E_AQC_REMOVE_VLAN_SUCCESS	0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL	0xFF
+	u8	reserved1[3];
 };
 
 struct i40e_aqc_add_remove_vlan_completion {
-	u8     reserved[4];
-	__le16 vlans_used;
-	__le16 vlans_free;
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	reserved[4];
+	__le16	vlans_used;
+	__le16	vlans_free;
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 /* Set VSI Promiscuous Modes (direct 0x0254) */
 struct i40e_aqc_set_vsi_promiscuous_modes {
-	__le16 promiscuous_flags;
-	__le16 valid_flags;
+	__le16	promiscuous_flags;
+	__le16	valid_flags;
 /* flags used for both fields above */
-#define I40E_AQC_SET_VSI_PROMISC_UNICAST     0x01
-#define I40E_AQC_SET_VSI_PROMISC_MULTICAST   0x02
-#define I40E_AQC_SET_VSI_PROMISC_BROADCAST   0x04
-#define I40E_AQC_SET_VSI_DEFAULT             0x08
-#define I40E_AQC_SET_VSI_PROMISC_VLAN        0x10
-	__le16 seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK      0x3FF
-	__le16 vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_VALID          0x8000
-	u8     reserved[8];
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST	0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST	0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST	0x04
+#define I40E_AQC_SET_VSI_DEFAULT		0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN		0x10
+	__le16	seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK		0x3FF
+	__le16	vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID		0x8000
+	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1059,23 +1058,23 @@
  * Uses generic i40e_aqc_add_remove_tag_completion for completion
  */
 struct i40e_aqc_add_tag {
-	__le16 flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE     0x0001
-	__le16 seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK   (0x3FF << \
+	__le16	flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE		0x0001
+	__le16	seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
 					I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
-	__le16 tag;
-	__le16 queue_number;
-	u8     reserved[8];
+	__le16	tag;
+	__le16	queue_number;
+	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
 
 struct i40e_aqc_add_remove_tag_completion {
-	u8     reserved[12];
-	__le16 tags_used;
-	__le16 tags_free;
+	u8	reserved[12];
+	__le16	tags_used;
+	__le16	tags_free;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
@@ -1084,12 +1083,12 @@
  * Uses generic i40e_aqc_add_remove_tag_completion for completion
  */
 struct i40e_aqc_remove_tag {
-	__le16 seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK   (0x3FF << \
+	__le16	seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
 					I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
-	__le16 tag;
-	u8     reserved[12];
+	__le16	tag;
+	u8	reserved[12];
 };
 
 /* Add multicast E-Tag (direct 0x0257)
@@ -1097,22 +1096,22 @@
  * and no external data
  */
 struct i40e_aqc_add_remove_mcast_etag {
-	__le16 pv_seid;
-	__le16 etag;
-	u8     num_unicast_etags;
-	u8     reserved[3];
-	__le32 addr_high;          /* address of array of 2-byte s-tags */
-	__le32 addr_low;
+	__le16	pv_seid;
+	__le16	etag;
+	u8	num_unicast_etags;
+	u8	reserved[3];
+	__le32	addr_high;          /* address of array of 2-byte s-tags */
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
 
 struct i40e_aqc_add_remove_mcast_etag_completion {
-	u8     reserved[4];
-	__le16 mcast_etags_used;
-	__le16 mcast_etags_free;
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	reserved[4];
+	__le16	mcast_etags_used;
+	__le16	mcast_etags_free;
+	__le32	addr_high;
+	__le32	addr_low;
 
 };
 
@@ -1120,21 +1119,21 @@
 
 /* Update S/E-Tag (direct 0x0259) */
 struct i40e_aqc_update_tag {
-	__le16 seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK   (0x3FF << \
+	__le16	seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
 					I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
-	__le16 old_tag;
-	__le16 new_tag;
-	u8     reserved[10];
+	__le16	old_tag;
+	__le16	new_tag;
+	u8	reserved[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
 
 struct i40e_aqc_update_tag_completion {
-	u8     reserved[12];
-	__le16 tags_used;
-	__le16 tags_free;
+	u8	reserved[12];
+	__le16	tags_used;
+	__le16	tags_free;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
@@ -1145,30 +1144,30 @@
  * and the generic direct completion structure
  */
 struct i40e_aqc_add_remove_control_packet_filter {
-	u8     mac[6];
-	__le16 etype;
-	__le16 flags;
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC    0x0001
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP          0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE      0x0004
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX            0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX            0x0000
-	__le16 seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK   (0x3FF << \
+	u8	mac[6];
+	__le16	etype;
+	__le16	flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC	0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP		0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE	0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX		0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX		0x0000
+	__le16	seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK	(0x3FF << \
 				I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
-	__le16 queue;
-	u8     reserved[2];
+	__le16	queue;
+	u8	reserved[2];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
 
 struct i40e_aqc_add_remove_control_packet_filter_completion {
-	__le16 mac_etype_used;
-	__le16 etype_used;
-	__le16 mac_etype_free;
-	__le16 etype_free;
-	u8     reserved[8];
+	__le16	mac_etype_used;
+	__le16	etype_used;
+	__le16	mac_etype_free;
+	__le16	etype_free;
+	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
@@ -1179,23 +1178,23 @@
  * and the generic indirect completion structure
  */
 struct i40e_aqc_add_remove_cloud_filters {
-	u8     num_filters;
-	u8     reserved;
-	__le16 seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK   (0x3FF << \
+	u8	num_filters;
+	u8	reserved;
+	__le16	seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK	(0x3FF << \
 					I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
-	u8     reserved2[4];
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	reserved2[4];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
 
 struct i40e_aqc_add_remove_cloud_filters_element_data {
-	u8     outer_mac[6];
-	u8     inner_mac[6];
-	__le16 inner_vlan;
+	u8	outer_mac[6];
+	u8	inner_mac[6];
+	__le16	inner_vlan;
 	union {
 		struct {
 			u8 reserved[12];
@@ -1205,49 +1204,49 @@
 			u8 data[16];
 		} v6;
 	} ipaddr;
-	__le16 flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT                 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK                  (0x3F << \
+	__le16	flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT			0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK			(0x3F << \
 					I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
 /* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP                   0x0001
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP			0x0001
 /* 0x0002 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN            0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID     0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN		0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID	0x0004
 /* 0x0005 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID           0x0006
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID		0x0006
 /* 0x0007 reserved */
 /* 0x0008 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC                  0x0009
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC                  0x000A
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC      0x000B
-#define I40E_AQC_ADD_CLOUD_FILTER_IIP                   0x000C
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC			0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC			0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC	0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP			0x000C
 
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE               0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT                    6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK                     0x00C0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4                   0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6                   0x0100
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE		0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT			6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK			0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4			0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6			0x0100
 
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT               9
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK                0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN               0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC          1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE                 2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                  3
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT		9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK		0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN		0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC		1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE			2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP			3
 
-	__le32 tenant_id;
-	u8     reserved[4];
-	__le16 queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT                  0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK                   (0x3F << \
-					I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
-	u8     reserved2[14];
+	__le32	tenant_id;
+	u8	reserved[4];
+	__le16	queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT		0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK		(0x3F << \
+						 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+	u8	reserved2[14];
 	/* response section */
-	u8     allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS         0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL            0xFF
-	u8     response_reserved[7];
+	u8	allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS	0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL		0xFF
+	u8	response_reserved[7];
 };
 
 struct i40e_aqc_remove_cloud_filters_completion {
@@ -1269,14 +1268,14 @@
 struct i40e_aqc_add_delete_mirror_rule {
 	__le16 seid;
 	__le16 rule_type;
-#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT            0
-#define I40E_AQC_MIRROR_RULE_TYPE_MASK             (0x7 << \
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT		0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK		(0x7 << \
 						I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS    1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS     2
-#define I40E_AQC_MIRROR_RULE_TYPE_VLAN             3
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS      4
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS       5
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS	1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS	2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN		3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS	4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS	5
 	__le16 num_entries;
 	__le16 destination;  /* VSI for add, rule id for delete */
 	__le32 addr_high;    /* address of array of 2-byte VSI or VLAN ids */
@@ -1286,12 +1285,12 @@
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
 
 struct i40e_aqc_add_delete_mirror_rule_completion {
-	u8     reserved[2];
-	__le16 rule_id;  /* only used on add */
-	__le16 mirror_rules_used;
-	__le16 mirror_rules_free;
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	reserved[2];
+	__le16	rule_id;  /* only used on add */
+	__le16	mirror_rules_used;
+	__le16	mirror_rules_free;
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
@@ -1302,11 +1301,11 @@
  *    the command and response use the same descriptor structure
  */
 struct i40e_aqc_pfc_ignore {
-	u8     tc_bitmap;
-	u8     command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET    0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR  0x0
-	u8     reserved[14];
+	u8	tc_bitmap;
+	u8	command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET		0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR	0x0
+	u8	reserved[14];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
@@ -1321,10 +1320,10 @@
  * this generic struct to pass the SEID in param0
  */
 struct i40e_aqc_tx_sched_ind {
-	__le16 vsi_seid;
-	u8     reserved[6];
-	__le32 addr_high;
-	__le32 addr_low;
+	__le16	vsi_seid;
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
@@ -1336,12 +1335,12 @@
 
 /* Configure VSI BW limits (direct 0x0400) */
 struct i40e_aqc_configure_vsi_bw_limit {
-	__le16 vsi_seid;
-	u8     reserved[2];
-	__le16 credit;
-	u8     reserved1[2];
-	u8     max_credit; /* 0-3, limit = 2^max */
-	u8     reserved2[7];
+	__le16	vsi_seid;
+	u8	reserved[2];
+	__le16	credit;
+	u8	reserved1[2];
+	u8	max_credit; /* 0-3, limit = 2^max */
+	u8	reserved2[7];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
@@ -1350,58 +1349,58 @@
  *    responds with i40e_aqc_qs_handles_resp
  */
 struct i40e_aqc_configure_vsi_ets_sla_bw_data {
-	u8     tc_valid_bits;
-	u8     reserved[15];
-	__le16 tc_bw_credits[8]; /* FW writesback QS handles here */
+	u8	tc_valid_bits;
+	u8	reserved[15];
+	__le16	tc_bw_credits[8]; /* FW writes back QS handles here */
 
 	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
-	__le16 tc_bw_max[2];
-	u8     reserved1[28];
+	__le16	tc_bw_max[2];
+	u8	reserved1[28];
 };
 
 /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
  *    responds with i40e_aqc_qs_handles_resp
  */
 struct i40e_aqc_configure_vsi_tc_bw_data {
-	u8     tc_valid_bits;
-	u8     reserved[3];
-	u8     tc_bw_credits[8];
-	u8     reserved1[4];
-	__le16 qs_handles[8];
+	u8	tc_valid_bits;
+	u8	reserved[3];
+	u8	tc_bw_credits[8];
+	u8	reserved1[4];
+	__le16	qs_handles[8];
 };
 
 /* Query vsi bw configuration (indirect 0x0408) */
 struct i40e_aqc_query_vsi_bw_config_resp {
-	u8     tc_valid_bits;
-	u8     tc_suspended_bits;
-	u8     reserved[14];
-	__le16 qs_handles[8];
-	u8     reserved1[4];
-	__le16 port_bw_limit;
-	u8     reserved2[2];
-	u8     max_bw; /* 0-3, limit = 2^max */
-	u8     reserved3[23];
+	u8	tc_valid_bits;
+	u8	tc_suspended_bits;
+	u8	reserved[14];
+	__le16	qs_handles[8];
+	u8	reserved1[4];
+	__le16	port_bw_limit;
+	u8	reserved2[2];
+	u8	max_bw; /* 0-3, limit = 2^max */
+	u8	reserved3[23];
 };
 
 /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
 struct i40e_aqc_query_vsi_ets_sla_config_resp {
-	u8     tc_valid_bits;
-	u8     reserved[3];
-	u8     share_credits[8];
-	__le16 credits[8];
+	u8	tc_valid_bits;
+	u8	reserved[3];
+	u8	share_credits[8];
+	__le16	credits[8];
 
 	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
-	__le16 tc_bw_max[2];
+	__le16	tc_bw_max[2];
 };
 
 /* Configure Switching Component Bandwidth Limit (direct 0x0410) */
 struct i40e_aqc_configure_switching_comp_bw_limit {
-	__le16 seid;
-	u8     reserved[2];
-	__le16 credit;
-	u8     reserved1[2];
-	u8     max_bw; /* 0-3, limit = 2^max */
-	u8     reserved2[7];
+	__le16	seid;
+	u8	reserved[2];
+	__le16	credit;
+	u8	reserved1[2];
+	u8	max_bw; /* 0-3, limit = 2^max */
+	u8	reserved2[7];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
@@ -1411,75 +1410,75 @@
  * Disable Physical Port ETS (indirect 0x0415)
  */
 struct i40e_aqc_configure_switching_comp_ets_data {
-	u8     reserved[4];
-	u8     tc_valid_bits;
-	u8     seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK     0x1
-	u8     tc_strict_priority_flags;
-	u8     reserved1[17];
-	u8     tc_bw_share_credits[8];
-	u8     reserved2[96];
+	u8	reserved[4];
+	u8	tc_valid_bits;
+	u8	seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK	0x1
+	u8	tc_strict_priority_flags;
+	u8	reserved1[17];
+	u8	tc_bw_share_credits[8];
+	u8	reserved2[96];
 };
 
 /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
 struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
-	u8     tc_valid_bits;
-	u8     reserved[15];
-	__le16 tc_bw_credit[8];
+	u8	tc_valid_bits;
+	u8	reserved[15];
+	__le16	tc_bw_credit[8];
 
 	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
-	__le16 tc_bw_max[2];
-	u8     reserved1[28];
+	__le16	tc_bw_max[2];
+	u8	reserved1[28];
 };
 
 /* Configure Switching Component Bandwidth Allocation per Tc
  * (indirect 0x0417)
  */
 struct i40e_aqc_configure_switching_comp_bw_config_data {
-	u8     tc_valid_bits;
-	u8     reserved[2];
-	u8     absolute_credits; /* bool */
-	u8     tc_bw_share_credits[8];
-	u8     reserved1[20];
+	u8	tc_valid_bits;
+	u8	reserved[2];
+	u8	absolute_credits; /* bool */
+	u8	tc_bw_share_credits[8];
+	u8	reserved1[20];
 };
 
 /* Query Switching Component Configuration (indirect 0x0418) */
 struct i40e_aqc_query_switching_comp_ets_config_resp {
-	u8     tc_valid_bits;
-	u8     reserved[35];
-	__le16 port_bw_limit;
-	u8     reserved1[2];
-	u8     tc_bw_max; /* 0-3, limit = 2^max */
-	u8     reserved2[23];
+	u8	tc_valid_bits;
+	u8	reserved[35];
+	__le16	port_bw_limit;
+	u8	reserved1[2];
+	u8	tc_bw_max; /* 0-3, limit = 2^max */
+	u8	reserved2[23];
 };
 
 /* Query PhysicalPort ETS Configuration (indirect 0x0419) */
 struct i40e_aqc_query_port_ets_config_resp {
-	u8     reserved[4];
-	u8     tc_valid_bits;
-	u8     reserved1;
-	u8     tc_strict_priority_bits;
-	u8     reserved2;
-	u8     tc_bw_share_credits[8];
-	__le16 tc_bw_limits[8];
+	u8	reserved[4];
+	u8	tc_valid_bits;
+	u8	reserved1;
+	u8	tc_strict_priority_bits;
+	u8	reserved2;
+	u8	tc_bw_share_credits[8];
+	__le16	tc_bw_limits[8];
 
 	/* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
-	__le16 tc_bw_max[2];
-	u8     reserved3[32];
+	__le16	tc_bw_max[2];
+	u8	reserved3[32];
 };
 
 /* Query Switching Component Bandwidth Allocation per Traffic Type
  * (indirect 0x041A)
  */
 struct i40e_aqc_query_switching_comp_bw_config_resp {
-	u8     tc_valid_bits;
-	u8     reserved[2];
-	u8     absolute_credits_enable; /* bool */
-	u8     tc_bw_share_credits[8];
-	__le16 tc_bw_limits[8];
+	u8	tc_valid_bits;
+	u8	reserved[2];
+	u8	absolute_credits_enable; /* bool */
+	u8	tc_bw_share_credits[8];
+	__le16	tc_bw_limits[8];
 
 	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
-	__le16 tc_bw_max[2];
+	__le16	tc_bw_max[2];
 };
 
 /* Suspend/resume port TX traffic
@@ -1490,37 +1489,37 @@
  * (indirect 0x041D)
  */
 struct i40e_aqc_configure_partition_bw_data {
-	__le16 pf_valid_bits;
-	u8     min_bw[16];      /* guaranteed bandwidth */
-	u8     max_bw[16];      /* bandwidth limit */
+	__le16	pf_valid_bits;
+	u8	min_bw[16];      /* guaranteed bandwidth */
+	u8	max_bw[16];      /* bandwidth limit */
 };
 
 /* Get and set the active HMC resource profile and status.
  * (direct 0x0500) and (direct 0x0501)
  */
 struct i40e_aq_get_set_hmc_resource_profile {
-	u8     pm_profile;
-	u8     pe_vf_enabled;
-	u8     reserved[14];
+	u8	pm_profile;
+	u8	pe_vf_enabled;
+	u8	reserved[14];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
 
 enum i40e_aq_hmc_profile {
 	/* I40E_HMC_PROFILE_NO_CHANGE    = 0, reserved */
-	I40E_HMC_PROFILE_DEFAULT     = 1,
-	I40E_HMC_PROFILE_FAVOR_VF    = 2,
-	I40E_HMC_PROFILE_EQUAL       = 3,
+	I40E_HMC_PROFILE_DEFAULT	= 1,
+	I40E_HMC_PROFILE_FAVOR_VF	= 2,
+	I40E_HMC_PROFILE_EQUAL		= 3,
 };
 
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK       0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK    0x3F
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK	0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK	0x3F
 
 /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
 
 /* set in param0 for get phy abilities to report qualified modules */
-#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES  0x0001
-#define I40E_AQ_PHY_REPORT_INITIAL_VALUES     0x0002
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES	0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES	0x0002
 
 enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_SGMII			= 0x0,
@@ -1578,147 +1577,147 @@
 };
 
 struct i40e_aq_get_phy_abilities_resp {
-	__le32 phy_type;       /* bitmap using the above enum for offsets */
-	u8     link_speed;     /* bitmap using the above enum bit patterns */
-	u8     abilities;
-#define I40E_AQ_PHY_FLAG_PAUSE_TX         0x01
-#define I40E_AQ_PHY_FLAG_PAUSE_RX         0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER        0x04
-#define I40E_AQ_PHY_LINK_ENABLED		  0x08
-#define I40E_AQ_PHY_AN_ENABLED			  0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL      0x20
-	__le16 eee_capability;
-#define I40E_AQ_EEE_100BASE_TX       0x0002
-#define I40E_AQ_EEE_1000BASE_T       0x0004
-#define I40E_AQ_EEE_10GBASE_T        0x0008
-#define I40E_AQ_EEE_1000BASE_KX      0x0010
-#define I40E_AQ_EEE_10GBASE_KX4      0x0020
-#define I40E_AQ_EEE_10GBASE_KR       0x0040
-	__le32 eeer_val;
-	u8     d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA  0x01
-	u8     reserved[3];
-	u8     phy_id[4];
-	u8     module_type[3];
-	u8     qualified_module_count;
-#define I40E_AQ_PHY_MAX_QMS          16
-	struct i40e_aqc_module_desc  qualified_module[I40E_AQ_PHY_MAX_QMS];
+	__le32	phy_type;       /* bitmap using the above enum for offsets */
+	u8	link_speed;     /* bitmap using the above enum bit patterns */
+	u8	abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX	0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX	0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER	0x04
+#define I40E_AQ_PHY_LINK_ENABLED	0x08
+#define I40E_AQ_PHY_AN_ENABLED		0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL	0x20
+	__le16	eee_capability;
+#define I40E_AQ_EEE_100BASE_TX		0x0002
+#define I40E_AQ_EEE_1000BASE_T		0x0004
+#define I40E_AQ_EEE_10GBASE_T		0x0008
+#define I40E_AQ_EEE_1000BASE_KX		0x0010
+#define I40E_AQ_EEE_10GBASE_KX4		0x0020
+#define I40E_AQ_EEE_10GBASE_KR		0x0040
+	__le32	eeer_val;
+	u8	d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA	0x01
+	u8	reserved[3];
+	u8	phy_id[4];
+	u8	module_type[3];
+	u8	qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS		16
+	struct i40e_aqc_module_desc	qualified_module[I40E_AQ_PHY_MAX_QMS];
 };
 
 /* Set PHY Config (direct 0x0601) */
 struct i40e_aq_set_phy_config { /* same bits as above in all */
-	__le32 phy_type;
-	u8     link_speed;
-	u8     abilities;
+	__le32	phy_type;
+	u8	link_speed;
+	u8	abilities;
 /* bits 0-2 use the values from get_phy_abilities_resp */
 #define I40E_AQ_PHY_ENABLE_LINK		0x08
 #define I40E_AQ_PHY_ENABLE_AN		0x10
 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK	0x20
-	__le16 eee_capability;
-	__le32 eeer;
-	u8     low_power_ctrl;
-	u8     reserved[3];
+	__le16	eee_capability;
+	__le32	eeer;
+	u8	low_power_ctrl;
+	u8	reserved[3];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
 
 /* Set MAC Config command data structure (direct 0x0603) */
 struct i40e_aq_set_mac_config {
-	__le16 max_frame_size;
-	u8     params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN           0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK      0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT     3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE      0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX   0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX   0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX   0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX   0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX   0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX   0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX   0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX   0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX   0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX   0x1
-	u8     tx_timer_priority; /* bitmap */
-	__le16 tx_timer_value;
-	__le16 fc_refresh_threshold;
-	u8     reserved[8];
+	__le16	max_frame_size;
+	u8	params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN		0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK	0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT	3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE	0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX	0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX	0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX	0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX	0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX	0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX	0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX	0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX	0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX	0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX	0x1
+	u8	tx_timer_priority; /* bitmap */
+	__le16	tx_timer_value;
+	__le16	fc_refresh_threshold;
+	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
 
 /* Restart Auto-Negotiation (direct 0x605) */
 struct i40e_aqc_set_link_restart_an {
-	u8     command;
-#define I40E_AQ_PHY_RESTART_AN  0x02
-#define I40E_AQ_PHY_LINK_ENABLE 0x04
-	u8     reserved[15];
+	u8	command;
+#define I40E_AQ_PHY_RESTART_AN	0x02
+#define I40E_AQ_PHY_LINK_ENABLE	0x04
+	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
 
 /* Get Link Status cmd & response data structure (direct 0x0607) */
 struct i40e_aqc_get_link_status {
-	__le16 command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK             0x3
-#define I40E_AQ_LSE_NOP              0x0
-#define I40E_AQ_LSE_DISABLE          0x2
-#define I40E_AQ_LSE_ENABLE           0x3
+	__le16	command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK		0x3
+#define I40E_AQ_LSE_NOP			0x0
+#define I40E_AQ_LSE_DISABLE		0x2
+#define I40E_AQ_LSE_ENABLE		0x3
 /* only response uses this flag */
-#define I40E_AQ_LSE_IS_ENABLED       0x1
-	u8     phy_type;    /* i40e_aq_phy_type   */
-	u8     link_speed;  /* i40e_aq_link_speed */
-	u8     link_info;
-#define I40E_AQ_LINK_UP              0x01
-#define I40E_AQ_LINK_FAULT           0x02
-#define I40E_AQ_LINK_FAULT_TX        0x04
-#define I40E_AQ_LINK_FAULT_RX        0x08
-#define I40E_AQ_LINK_FAULT_REMOTE    0x10
-#define I40E_AQ_MEDIA_AVAILABLE      0x40
-#define I40E_AQ_SIGNAL_DETECT        0x80
-	u8     an_info;
-#define I40E_AQ_AN_COMPLETED         0x01
-#define I40E_AQ_LP_AN_ABILITY        0x02
-#define I40E_AQ_PD_FAULT             0x04
-#define I40E_AQ_FEC_EN               0x08
-#define I40E_AQ_PHY_LOW_POWER        0x10
-#define I40E_AQ_LINK_PAUSE_TX        0x20
-#define I40E_AQ_LINK_PAUSE_RX        0x40
-#define I40E_AQ_QUALIFIED_MODULE     0x80
-	u8     ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM  0x01
-#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
-#define I40E_AQ_LINK_TX_SHIFT        0x02
-#define I40E_AQ_LINK_TX_MASK         (0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE       0x00
-#define I40E_AQ_LINK_TX_DRAINED      0x01
-#define I40E_AQ_LINK_TX_FLUSHED      0x03
-#define I40E_AQ_LINK_FORCED_40G      0x10
-	u8     loopback;         /* use defines from i40e_aqc_set_lb_mode */
-	__le16 max_frame_size;
-	u8     config;
-#define I40E_AQ_CONFIG_CRC_ENA       0x04
-#define I40E_AQ_CONFIG_PACING_MASK   0x78
-	u8     reserved[5];
+#define I40E_AQ_LSE_IS_ENABLED		0x1
+	u8	phy_type;    /* i40e_aq_phy_type   */
+	u8	link_speed;  /* i40e_aq_link_speed */
+	u8	link_info;
+#define I40E_AQ_LINK_UP			0x01
+#define I40E_AQ_LINK_FAULT		0x02
+#define I40E_AQ_LINK_FAULT_TX		0x04
+#define I40E_AQ_LINK_FAULT_RX		0x08
+#define I40E_AQ_LINK_FAULT_REMOTE	0x10
+#define I40E_AQ_MEDIA_AVAILABLE		0x40
+#define I40E_AQ_SIGNAL_DETECT		0x80
+	u8	an_info;
+#define I40E_AQ_AN_COMPLETED		0x01
+#define I40E_AQ_LP_AN_ABILITY		0x02
+#define I40E_AQ_PD_FAULT		0x04
+#define I40E_AQ_FEC_EN			0x08
+#define I40E_AQ_PHY_LOW_POWER		0x10
+#define I40E_AQ_LINK_PAUSE_TX		0x20
+#define I40E_AQ_LINK_PAUSE_RX		0x40
+#define I40E_AQ_QUALIFIED_MODULE	0x80
+	u8	ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM	0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS	0x02
+#define I40E_AQ_LINK_TX_SHIFT		0x02
+#define I40E_AQ_LINK_TX_MASK		(0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE		0x00
+#define I40E_AQ_LINK_TX_DRAINED		0x01
+#define I40E_AQ_LINK_TX_FLUSHED		0x03
+#define I40E_AQ_LINK_FORCED_40G		0x10
+	u8	loopback; /* use defines from i40e_aqc_set_lb_mode */
+	__le16	max_frame_size;
+	u8	config;
+#define I40E_AQ_CONFIG_CRC_ENA		0x04
+#define I40E_AQ_CONFIG_PACING_MASK	0x78
+	u8	reserved[5];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
 
 /* Set event mask command (direct 0x613) */
 struct i40e_aqc_set_phy_int_mask {
-	u8     reserved[8];
-	__le16 event_mask;
-#define I40E_AQ_EVENT_LINK_UPDOWN       0x0002
-#define I40E_AQ_EVENT_MEDIA_NA          0x0004
-#define I40E_AQ_EVENT_LINK_FAULT        0x0008
-#define I40E_AQ_EVENT_PHY_TEMP_ALARM    0x0010
-#define I40E_AQ_EVENT_EXCESSIVE_ERRORS  0x0020
-#define I40E_AQ_EVENT_SIGNAL_DETECT     0x0040
-#define I40E_AQ_EVENT_AN_COMPLETED      0x0080
-#define I40E_AQ_EVENT_MODULE_QUAL_FAIL  0x0100
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
-	u8     reserved1[6];
+	u8	reserved[8];
+	__le16	event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN	0x0002
+#define I40E_AQ_EVENT_MEDIA_NA		0x0004
+#define I40E_AQ_EVENT_LINK_FAULT	0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM	0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS	0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT	0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED	0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL	0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED	0x0200
+	u8	reserved1[6];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
@@ -1728,27 +1727,27 @@
  * Get Link Partner AN advt register (direct 0x0616)
  */
 struct i40e_aqc_an_advt_reg {
-	__le32 local_an_reg0;
-	__le16 local_an_reg1;
-	u8     reserved[10];
+	__le32	local_an_reg0;
+	__le16	local_an_reg1;
+	u8	reserved[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
 
 /* Set Loopback mode (0x0618) */
 struct i40e_aqc_set_lb_mode {
-	__le16 lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL   0x01
-#define I40E_AQ_LB_PHY_REMOTE  0x02
-#define I40E_AQ_LB_MAC_LOCAL   0x04
-	u8     reserved[14];
+	__le16	lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL	0x01
+#define I40E_AQ_LB_PHY_REMOTE	0x02
+#define I40E_AQ_LB_MAC_LOCAL	0x04
+	u8	reserved[14];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
 
 /* Set PHY Debug command (0x0622) */
 struct i40e_aqc_set_phy_debug {
-	u8     command_flags;
+	u8	command_flags;
 #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL	0x02
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT	2
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK	(0x03 << \
@@ -1757,15 +1756,15 @@
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD	0x01
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT	0x02
 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW	0x10
-	u8     reserved[15];
+	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
 
 enum i40e_aq_phy_reg_type {
-	I40E_AQC_PHY_REG_INTERNAL         = 0x1,
-	I40E_AQC_PHY_REG_EXERNAL_BASET    = 0x2,
-	I40E_AQC_PHY_REG_EXERNAL_MODULE   = 0x3
+	I40E_AQC_PHY_REG_INTERNAL	= 0x1,
+	I40E_AQC_PHY_REG_EXERNAL_BASET	= 0x2,
+	I40E_AQC_PHY_REG_EXERNAL_MODULE	= 0x3
 };
 
 /* NVM Read command (indirect 0x0701)
@@ -1773,40 +1772,40 @@
  * NVM Update commands (indirect 0x0703)
  */
 struct i40e_aqc_nvm_update {
-	u8     command_flags;
-#define I40E_AQ_NVM_LAST_CMD    0x01
-#define I40E_AQ_NVM_FLASH_ONLY  0x80
-	u8     module_pointer;
-	__le16 length;
-	__le32 offset;
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	command_flags;
+#define I40E_AQ_NVM_LAST_CMD	0x01
+#define I40E_AQ_NVM_FLASH_ONLY	0x80
+	u8	module_pointer;
+	__le16	length;
+	__le32	offset;
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
 
 /* NVM Config Read (indirect 0x0704) */
 struct i40e_aqc_nvm_config_read {
-	__le16 cmd_flags;
+	__le16	cmd_flags;
 #define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK	1
 #define ANVM_READ_SINGLE_FEATURE		0
 #define ANVM_READ_MULTIPLE_FEATURES		1
-	__le16 element_count;
-	__le16 element_id;		/* Feature/field ID */
-	u8     reserved[2];
-	__le32 address_high;
-	__le32 address_low;
+	__le16	element_count;
+	__le16	element_id; /* Feature/field ID */
+	u8	reserved[2];
+	__le32	address_high;
+	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
 
 /* NVM Config Write (indirect 0x0705) */
 struct i40e_aqc_nvm_config_write {
-	__le16 cmd_flags;
-	__le16 element_count;
-	u8     reserved[4];
-	__le32 address_high;
-	__le32 address_low;
+	__le16	cmd_flags;
+	__le16	element_count;
+	u8	reserved[4];
+	__le32	address_high;
+	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
@@ -1831,10 +1830,10 @@
  * Send to Peer PF command (indirect 0x0803)
  */
 struct i40e_aqc_pf_vf_message {
-	__le32 id;
-	u8     reserved[4];
-	__le32 addr_high;
-	__le32 addr_low;
+	__le32	id;
+	u8	reserved[4];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
@@ -1870,22 +1869,22 @@
  * uses i40e_aq_desc
  */
 struct i40e_aqc_alternate_write_done {
-	__le16 cmd_flags;
+	__le16	cmd_flags;
 #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK	1
 #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY	0
 #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI	1
 #define I40E_AQ_ALTERNATE_RESET_NEEDED		2
-	u8     reserved[14];
+	u8	reserved[14];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
 
 /* Set OEM mode (direct 0x0905) */
 struct i40e_aqc_alternate_set_mode {
-	__le32 mode;
+	__le32	mode;
 #define I40E_AQ_ALTERNATE_MODE_NONE	0
 #define I40E_AQ_ALTERNATE_MODE_OEM	1
-	u8     reserved[12];
+	u8	reserved[12];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
@@ -1896,33 +1895,33 @@
 
 /* Lan Queue Overflow Event (direct, 0x1001) */
 struct i40e_aqc_lan_overflow {
-	__le32 prtdcb_rupto;
-	__le32 otx_ctl;
-	u8     reserved[8];
+	__le32	prtdcb_rupto;
+	__le32	otx_ctl;
+	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
 
 /* Get LLDP MIB (indirect 0x0A00) */
 struct i40e_aqc_lldp_get_mib {
-	u8     type;
-	u8     reserved1;
-#define I40E_AQ_LLDP_MIB_TYPE_MASK                      0x3
-#define I40E_AQ_LLDP_MIB_LOCAL                          0x0
-#define I40E_AQ_LLDP_MIB_REMOTE                         0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE               0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK                   0xC
-#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT                  0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE         0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR               0x1
-#define I40E_AQ_LLDP_TX_SHIFT              0x4
-#define I40E_AQ_LLDP_TX_MASK               (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+	u8	type;
+	u8	reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK		0x3
+#define I40E_AQ_LLDP_MIB_LOCAL			0x0
+#define I40E_AQ_LLDP_MIB_REMOTE			0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE	0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK		0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT		0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE	0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR	0x1
+#define I40E_AQ_LLDP_TX_SHIFT			0x4
+#define I40E_AQ_LLDP_TX_MASK			(0x03 << I40E_AQ_LLDP_TX_SHIFT)
 /* TX pause flags use I40E_AQ_LINK_TX_* above */
-	__le16 local_len;
-	__le16 remote_len;
-	u8     reserved2[2];
-	__le32 addr_high;
-	__le32 addr_low;
+	__le16	local_len;
+	__le16	remote_len;
+	u8	reserved2[2];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
@@ -1931,12 +1930,12 @@
  * also used for the event (with type in the command field)
  */
 struct i40e_aqc_lldp_update_mib {
-	u8     command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE          0x0
-#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE         0x1
-	u8     reserved[7];
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE	0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE	0x1
+	u8	reserved[7];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
@@ -1945,35 +1944,35 @@
  * Delete LLDP TLV (indirect 0x0A04)
  */
 struct i40e_aqc_lldp_add_tlv {
-	u8     type; /* only nearest bridge and non-TPMR from 0x0A00 */
-	u8     reserved1[1];
-	__le16 len;
-	u8     reserved2[4];
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	type; /* only nearest bridge and non-TPMR from 0x0A00 */
+	u8	reserved1[1];
+	__le16	len;
+	u8	reserved2[4];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
 
 /* Update LLDP TLV (indirect 0x0A03) */
 struct i40e_aqc_lldp_update_tlv {
-	u8     type; /* only nearest bridge and non-TPMR from 0x0A00 */
-	u8     reserved;
-	__le16 old_len;
-	__le16 new_offset;
-	__le16 new_len;
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	type; /* only nearest bridge and non-TPMR from 0x0A00 */
+	u8	reserved;
+	__le16	old_len;
+	__le16	new_offset;
+	__le16	new_len;
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
 
 /* Stop LLDP (direct 0x0A05) */
 struct i40e_aqc_lldp_stop {
-	u8     command;
-#define I40E_AQ_LLDP_AGENT_STOP                 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN             0x1
-	u8     reserved[15];
+	u8	command;
+#define I40E_AQ_LLDP_AGENT_STOP		0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN	0x1
+	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
@@ -1981,9 +1980,9 @@
 /* Start LLDP (direct 0x0A06) */
 
 struct i40e_aqc_lldp_start {
-	u8     command;
-#define I40E_AQ_LLDP_AGENT_START                0x1
-	u8     reserved[15];
+	u8	command;
+#define I40E_AQ_LLDP_AGENT_START	0x1
+	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
@@ -1994,44 +1993,44 @@
 
 /* Add Udp Tunnel command and completion (direct 0x0B00) */
 struct i40e_aqc_add_udp_tunnel {
-	__le16 udp_port;
-	u8     reserved0[3];
-	u8     protocol_type;
+	__le16	udp_port;
+	u8	reserved0[3];
+	u8	protocol_type;
 #define I40E_AQC_TUNNEL_TYPE_VXLAN	0x00
 #define I40E_AQC_TUNNEL_TYPE_NGE	0x01
 #define I40E_AQC_TUNNEL_TYPE_TEREDO	0x10
-	u8     reserved1[10];
+	u8	reserved1[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
 
 struct i40e_aqc_add_udp_tunnel_completion {
-	__le16 udp_port;
-	u8     filter_entry_index;
-	u8     multiple_pfs;
-#define I40E_AQC_SINGLE_PF	0x0
-#define I40E_AQC_MULTIPLE_PFS	0x1
-	u8     total_filters;
-	u8     reserved[11];
+	__le16	udp_port;
+	u8	filter_entry_index;
+	u8	multiple_pfs;
+#define I40E_AQC_SINGLE_PF		0x0
+#define I40E_AQC_MULTIPLE_PFS		0x1
+	u8	total_filters;
+	u8	reserved[11];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
 
 /* remove UDP Tunnel command (0x0B01) */
 struct i40e_aqc_remove_udp_tunnel {
-	u8     reserved[2];
-	u8     index; /* 0 to 15 */
-	u8     reserved2[13];
+	u8	reserved[2];
+	u8	index; /* 0 to 15 */
+	u8	reserved2[13];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
 
 struct i40e_aqc_del_udp_tunnel_completion {
-	__le16 udp_port;
-	u8     index; /* 0 to 15 */
-	u8     multiple_pfs;
-	u8     total_filters_used;
-	u8     reserved1[11];
+	__le16	udp_port;
+	u8	index; /* 0 to 15 */
+	u8	multiple_pfs;
+	u8	total_filters_used;
+	u8	reserved1[11];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
@@ -2044,11 +2043,11 @@
 	u8	key1_len;  /* 0 to 15 */
 	u8	key2_len;  /* 0 to 15 */
 	u8	flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE	0x01
 /* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS    0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED   0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS	0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED	0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN	0x03
 	u8	network_key_index;
 #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN		0x0
 #define I40E_AQC_NETWORK_KEY_INDEX_NGE			0x1
@@ -2061,21 +2060,21 @@
 
 /* OEM mode commands (direct 0xFE0x) */
 struct i40e_aqc_oem_param_change {
-	__le32 param_type;
-#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL   0
-#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL   1
-#define I40E_AQ_OEM_PARAM_MAC           2
-	__le32 param_value1;
-	u8     param_value2[8];
+	__le32	param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL	0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL	1
+#define I40E_AQ_OEM_PARAM_MAC		2
+	__le32	param_value1;
+	u8	param_value2[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
 
 struct i40e_aqc_oem_state_change {
-	__le32 state;
-#define I40E_AQ_OEM_STATE_LINK_DOWN  0x0
-#define I40E_AQ_OEM_STATE_LINK_UP    0x1
-	u8     reserved[12];
+	__le32	state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN	0x0
+#define I40E_AQ_OEM_STATE_LINK_UP	0x1
+	u8	reserved[12];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
@@ -2087,18 +2086,18 @@
 /* set test mode (0xFF01, internal) */
 
 struct i40e_acq_set_test_mode {
-	u8     mode;
-#define I40E_AQ_TEST_PARTIAL    0
-#define I40E_AQ_TEST_FULL       1
-#define I40E_AQ_TEST_NVM        2
-	u8     reserved[3];
-	u8     command;
-#define I40E_AQ_TEST_OPEN        0
-#define I40E_AQ_TEST_CLOSE       1
-#define I40E_AQ_TEST_INC         2
-	u8     reserved2[3];
-	__le32 address_high;
-	__le32 address_low;
+	u8	mode;
+#define I40E_AQ_TEST_PARTIAL	0
+#define I40E_AQ_TEST_FULL	1
+#define I40E_AQ_TEST_NVM	2
+	u8	reserved[3];
+	u8	command;
+#define I40E_AQ_TEST_OPEN	0
+#define I40E_AQ_TEST_CLOSE	1
+#define I40E_AQ_TEST_INC	2
+	u8	reserved2[3];
+	__le32	address_high;
+	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
@@ -2151,21 +2150,21 @@
 #define I40E_AQ_CLUSTER_ID_ALTRAM	11
 
 struct i40e_aqc_debug_dump_internals {
-	u8     cluster_id;
-	u8     table_id;
-	__le16 data_size;
-	__le32 idx;
-	__le32 address_high;
-	__le32 address_low;
+	u8	cluster_id;
+	u8	table_id;
+	__le16	data_size;
+	__le32	idx;
+	__le32	address_high;
+	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
 
 struct i40e_aqc_debug_modify_internals {
-	u8     cluster_id;
-	u8     cluster_specific_params[7];
-	__le32 address_high;
-	__le32 address_low;
+	u8	cluster_id;
+	u8	cluster_specific_params[7];
+	__le32	address_high;
+	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
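The I40E_CHECK_CMD_LENGTH() invocations throughout this header are compile-time size assertions: every direct admin queue command must fit exactly into the 16-byte parameter area of struct i40e_aq_desc, so the whitespace-only reindentation above cannot silently change a layout without breaking the build. A sketch of the idiom, assuming a definition along these lines earlier in the header (the exact macro may differ in detail):

	/* Forces a divide-by-zero at compile time if struct X is not
	 * exactly n bytes; otherwise it only declares an unused enum.
	 */
	#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
		{ i40e_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
	#define I40E_CHECK_CMD_LENGTH(X)	I40E_CHECK_STRUCT_LEN(16, X)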
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 30056b2..c49416c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -50,6 +50,7 @@
 		case I40E_DEV_ID_QSFP_A:
 		case I40E_DEV_ID_QSFP_B:
 		case I40E_DEV_ID_QSFP_C:
+		case I40E_DEV_ID_10G_BASE_T:
 			hw->mac.type = I40E_MAC_XL710;
 			break;
 		case I40E_DEV_ID_VF:
@@ -1420,6 +1421,33 @@
 }
 
 /**
+ * i40e_aq_set_phy_int_mask
+ * @hw: pointer to the hw struct
+ * @mask: interrupt mask to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set link interrupt mask.
+ **/
+i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+				     u16 mask,
+				     struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_phy_int_mask *cmd =
+		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_phy_int_mask);
+
+	cmd->event_mask = cpu_to_le16(mask);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
  * i40e_aq_add_vsi
  * @hw: pointer to the hw struct
  * @vsi_ctx: pointer to a vsi context struct
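The new i40e_aq_set_phy_int_mask() follows the driver's usual direct-command pattern: fill a default descriptor for the 0x0613 opcode, overlay the command struct on desc.params.raw, set the little-endian event mask, and send with no data buffer. Later hunks in this merge call it from probe and from the post-reset rebuild to restrict firmware link events to the two the driver acts on:

	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);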
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 7067f4b..a03f459 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -895,90 +895,6 @@
 }
 
 /**
- * i40e_dbg_dump_stats - handles dump stats write into command datum
- * @pf: the i40e_pf created in command write
- * @stats: the stats structure to be dumped
- **/
-static void i40e_dbg_dump_stats(struct i40e_pf *pf,
-				struct i40e_hw_port_stats *stats)
-{
-	int i;
-
-	dev_info(&pf->pdev->dev, "  stats:\n");
-	dev_info(&pf->pdev->dev,
-		 "    crc_errors = \t\t%lld \tillegal_bytes = \t%lld \terror_bytes = \t\t%lld\n",
-		 stats->crc_errors, stats->illegal_bytes, stats->error_bytes);
-	dev_info(&pf->pdev->dev,
-		 "    mac_local_faults = \t%lld \tmac_remote_faults = \t%lld \trx_length_errors = \t%lld\n",
-		 stats->mac_local_faults, stats->mac_remote_faults,
-		 stats->rx_length_errors);
-	dev_info(&pf->pdev->dev,
-		 "    link_xon_rx = \t\t%lld \tlink_xoff_rx = \t\t%lld \tlink_xon_tx = \t\t%lld\n",
-		 stats->link_xon_rx, stats->link_xoff_rx, stats->link_xon_tx);
-	dev_info(&pf->pdev->dev,
-		 "    link_xoff_tx = \t\t%lld \trx_size_64 = \t\t%lld \trx_size_127 = \t\t%lld\n",
-		 stats->link_xoff_tx, stats->rx_size_64, stats->rx_size_127);
-	dev_info(&pf->pdev->dev,
-		 "    rx_size_255 = \t\t%lld \trx_size_511 = \t\t%lld \trx_size_1023 = \t\t%lld\n",
-		 stats->rx_size_255, stats->rx_size_511, stats->rx_size_1023);
-	dev_info(&pf->pdev->dev,
-		 "    rx_size_big = \t\t%lld \trx_undersize = \t\t%lld \trx_jabber = \t\t%lld\n",
-		 stats->rx_size_big, stats->rx_undersize, stats->rx_jabber);
-	dev_info(&pf->pdev->dev,
-		 "    rx_fragments = \t\t%lld \trx_oversize = \t\t%lld \ttx_size_64 = \t\t%lld\n",
-		 stats->rx_fragments, stats->rx_oversize, stats->tx_size_64);
-	dev_info(&pf->pdev->dev,
-		 "    tx_size_127 = \t\t%lld \ttx_size_255 = \t\t%lld \ttx_size_511 = \t\t%lld\n",
-		 stats->tx_size_127, stats->tx_size_255, stats->tx_size_511);
-	dev_info(&pf->pdev->dev,
-		 "    tx_size_1023 = \t\t%lld \ttx_size_big = \t\t%lld \tmac_short_packet_dropped = \t%lld\n",
-		 stats->tx_size_1023, stats->tx_size_big,
-		 stats->mac_short_packet_dropped);
-	for (i = 0; i < 8; i += 4) {
-		dev_info(&pf->pdev->dev,
-			 "    priority_xon_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
-			 i, stats->priority_xon_rx[i],
-			 i+1, stats->priority_xon_rx[i+1],
-			 i+2, stats->priority_xon_rx[i+2],
-			 i+3, stats->priority_xon_rx[i+3]);
-	}
-	for (i = 0; i < 8; i += 4) {
-		dev_info(&pf->pdev->dev,
-			 "    priority_xoff_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
-			 i, stats->priority_xoff_rx[i],
-			 i+1, stats->priority_xoff_rx[i+1],
-			 i+2, stats->priority_xoff_rx[i+2],
-			 i+3, stats->priority_xoff_rx[i+3]);
-	}
-	for (i = 0; i < 8; i += 4) {
-		dev_info(&pf->pdev->dev,
-			 "    priority_xon_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
-			 i, stats->priority_xon_tx[i],
-			 i+1, stats->priority_xon_tx[i+1],
-			 i+2, stats->priority_xon_tx[i+2],
-			 i+3, stats->priority_xon_rx[i+3]);
-	}
-	for (i = 0; i < 8; i += 4) {
-		dev_info(&pf->pdev->dev,
-			 "    priority_xoff_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
-			 i, stats->priority_xoff_tx[i],
-			 i+1, stats->priority_xoff_tx[i+1],
-			 i+2, stats->priority_xoff_tx[i+2],
-			 i+3, stats->priority_xoff_tx[i+3]);
-	}
-	for (i = 0; i < 8; i += 4) {
-		dev_info(&pf->pdev->dev,
-			 "    priority_xon_2_xoff[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
-			 i, stats->priority_xon_2_xoff[i],
-			 i+1, stats->priority_xon_2_xoff[i+1],
-			 i+2, stats->priority_xon_2_xoff[i+2],
-			 i+3, stats->priority_xon_2_xoff[i+3]);
-	}
-
-	i40e_dbg_dump_eth_stats(pf, &stats->eth);
-}
-
-/**
  * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
  * @pf: the i40e_pf created in command write
  * @seid: the seid the user put in
@@ -1342,11 +1258,6 @@
 					 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
 				dev_info(&pf->pdev->dev, "dump desc aq\n");
 			}
-		} else if (strncmp(&cmd_buf[5], "stats", 5) == 0) {
-			dev_info(&pf->pdev->dev, "pf stats:\n");
-			i40e_dbg_dump_stats(pf, &pf->stats);
-			dev_info(&pf->pdev->dev, "pf stats_offsets:\n");
-			i40e_dbg_dump_stats(pf, &pf->stats_offsets);
 		} else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
 			dev_info(&pf->pdev->dev,
 				 "core reset count: %d\n", pf->corer_count);
@@ -1464,8 +1375,8 @@
 		} else {
 			dev_info(&pf->pdev->dev,
 				 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
-			dev_info(&pf->pdev->dev, "dump switch, dump vsi [seid] or\n");
-			dev_info(&pf->pdev->dev, "dump stats\n");
+			dev_info(&pf->pdev->dev, "dump switch\n");
+			dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
 			dev_info(&pf->pdev->dev, "dump reset stats\n");
 			dev_info(&pf->pdev->dev, "dump port\n");
 			dev_info(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1dda467..b6e745f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -264,6 +264,14 @@
 			ecmd->supported = SUPPORTED_10000baseKR_Full;
 			ecmd->advertising = ADVERTISED_10000baseKR_Full;
 			break;
+		case I40E_DEV_ID_10G_BASE_T:
+			ecmd->supported = SUPPORTED_10000baseT_Full |
+					  SUPPORTED_1000baseT_Full |
+					  SUPPORTED_100baseT_Full;
+			ecmd->advertising = ADVERTISED_10000baseT_Full |
+					    ADVERTISED_1000baseT_Full |
+					    ADVERTISED_100baseT_Full;
+			break;
 		default:
 			/* all the rest are 10G/1G */
 			ecmd->supported = SUPPORTED_10000baseT_Full |
@@ -322,9 +330,13 @@
 	case I40E_PHY_TYPE_10GBASE_CR1:
 	case I40E_PHY_TYPE_10GBASE_T:
 		ecmd->supported = SUPPORTED_Autoneg |
-				  SUPPORTED_10000baseT_Full;
+				  SUPPORTED_10000baseT_Full |
+				  SUPPORTED_1000baseT_Full |
+				  SUPPORTED_100baseT_Full;
 		ecmd->advertising = ADVERTISED_Autoneg |
-				    ADVERTISED_10000baseT_Full;
+				    ADVERTISED_10000baseT_Full |
+				    ADVERTISED_1000baseT_Full |
+				    ADVERTISED_100baseT_Full;
 		break;
 	case I40E_PHY_TYPE_XAUI:
 	case I40E_PHY_TYPE_XFI:
@@ -335,14 +347,22 @@
 	case I40E_PHY_TYPE_1000BASE_KX:
 	case I40E_PHY_TYPE_1000BASE_T:
 		ecmd->supported = SUPPORTED_Autoneg |
-				  SUPPORTED_1000baseT_Full;
+				  SUPPORTED_10000baseT_Full |
+				  SUPPORTED_1000baseT_Full |
+				  SUPPORTED_100baseT_Full;
 		ecmd->advertising = ADVERTISED_Autoneg |
-				    ADVERTISED_1000baseT_Full;
+				    ADVERTISED_10000baseT_Full |
+				    ADVERTISED_1000baseT_Full |
+				    ADVERTISED_100baseT_Full;
 		break;
 	case I40E_PHY_TYPE_100BASE_TX:
 		ecmd->supported = SUPPORTED_Autoneg |
+				  SUPPORTED_10000baseT_Full |
+				  SUPPORTED_1000baseT_Full |
 				  SUPPORTED_100baseT_Full;
 		ecmd->advertising = ADVERTISED_Autoneg |
+				    ADVERTISED_10000baseT_Full |
+				    ADVERTISED_1000baseT_Full |
 				    ADVERTISED_100baseT_Full;
 		break;
 	case I40E_PHY_TYPE_SGMII:
@@ -426,6 +446,9 @@
 		case I40E_LINK_SPEED_1GB:
 			ethtool_cmd_speed_set(ecmd, SPEED_1000);
 			break;
+		case I40E_LINK_SPEED_100MB:
+			ethtool_cmd_speed_set(ecmd, SPEED_100);
+			break;
 		default:
 			break;
 		}
@@ -528,7 +551,7 @@
 		}
 		/* If autoneg is currently enabled */
 		if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
-			config.abilities = abilities.abilities |
+			config.abilities = abilities.abilities &
 					   ~I40E_AQ_PHY_ENABLE_AN;
 			change = true;
 		}
@@ -1551,7 +1574,6 @@
 		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
 	} else if (ec->rx_coalesce_usecs == 0) {
 		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
-		i40e_irq_dynamic_disable(vsi, vector);
 		if (ec->use_adaptive_rx_coalesce)
 			netif_info(pf, drv, netdev,
 				   "Rx-secs=0, need to disable adaptive-Rx for a complete disable\n");
@@ -1566,7 +1588,6 @@
 		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
 	} else if (ec->tx_coalesce_usecs == 0) {
 		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
-		i40e_irq_dynamic_disable(vsi, vector);
 		if (ec->use_adaptive_tx_coalesce)
 			netif_info(pf, drv, netdev,
 				   "Tx-secs=0, need to disable adaptive-Tx for a complete disable\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c3a7f4a..1a98e23 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_BUILD 21
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -74,6 +74,7 @@
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
 	/* required last entry */
 	{0, }
 };
@@ -812,7 +813,10 @@
 	struct i40e_eth_stats *oes;
 	struct i40e_eth_stats *es;     /* device's eth stats */
 	u32 tx_restart, tx_busy;
+	struct i40e_ring *p;
 	u32 rx_page, rx_buf;
+	u64 bytes, packets;
+	unsigned int start;
 	u64 rx_p, rx_b;
 	u64 tx_p, tx_b;
 	u16 q;
@@ -836,10 +840,6 @@
 	rx_buf = 0;
 	rcu_read_lock();
 	for (q = 0; q < vsi->num_queue_pairs; q++) {
-		struct i40e_ring *p;
-		u64 bytes, packets;
-		unsigned int start;
-
 		/* locate Tx ring */
 		p = ACCESS_ONCE(vsi->tx_rings[q]);
 
@@ -2462,10 +2462,14 @@
 	}
 
 	/* Now associate this queue with this PCI function */
-	if (vsi->type == I40E_VSI_VMDQ2)
+	if (vsi->type == I40E_VSI_VMDQ2) {
 		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
-	else
+		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+			   I40E_QTX_CTL_VFVM_INDX_MASK;
+	} else {
 		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+	}
+
 	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
 		    I40E_QTX_CTL_PF_INDX_MASK);
 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
@@ -3440,7 +3444,7 @@
 		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
 			break;
 
-		udelay(10);
+		usleep_range(10, 20);
 	}
 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
 		return -ETIMEDOUT;
@@ -3466,7 +3470,7 @@
 		/* warn the TX unit of coming changes */
 		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
 		if (!enable)
-			udelay(10);
+			usleep_range(10, 20);
 
 		for (j = 0; j < 50; j++) {
 			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
@@ -3526,7 +3530,7 @@
 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
 			break;
 
-		udelay(10);
+		usleep_range(10, 20);
 	}
 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
 		return -ETIMEDOUT;
@@ -4449,6 +4453,9 @@
 	case I40E_LINK_SPEED_1GB:
 		strlcpy(speed, "1000 Mbps", SPEED_SIZE);
 		break;
+	case I40E_LINK_SPEED_100MB:
+		strlcpy(speed, "100 Mbps", SPEED_SIZE);
+		break;
 	default:
 		break;
 	}
@@ -4479,12 +4486,8 @@
 static int i40e_up_complete(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
-	u8 set_fc_aq_fail = 0;
 	int err;
 
-	/* force flow control off */
-	i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
-
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
 		i40e_vsi_configure_msix(vsi);
 	else
@@ -5212,6 +5215,9 @@
 	int flush_wait_retry = 50;
 	int reg;
 
+	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
+		return;
+
 	if (time_after(jiffies, pf->fd_flush_timestamp +
 				(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
 		set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
@@ -5273,6 +5279,9 @@
 	if (test_bit(__I40E_DOWN, &pf->state))
 		return;
 
+	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
+		return;
+
 	if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
 	    (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
 	    (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
@@ -5353,14 +5362,21 @@
 static void i40e_link_event(struct i40e_pf *pf)
 {
 	bool new_link, old_link;
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 
-	new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
+	/* set this to force the get_link_status call to refresh state */
+	pf->hw.phy.get_link_info = true;
+
 	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
+	new_link = i40e_get_link_status(&pf->hw);
 
-	if (new_link == old_link)
+	if (new_link == old_link &&
+	    (test_bit(__I40E_DOWN, &vsi->state) ||
+	     new_link == netif_carrier_ok(vsi->netdev)))
 		return;
-	if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
-		i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);
+
+	if (!test_bit(__I40E_DOWN, &vsi->state))
+		i40e_print_link_message(vsi, new_link);
 
 	/* Notify the base of the switch tree connected to
 	 * the link.  Floating VEBs are not notified.
@@ -5368,7 +5384,7 @@
 	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
 		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
 	else
-		i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
+		i40e_vsi_link_event(vsi, new_link);
 
 	if (pf->vf)
 		i40e_vc_notify_link_state(pf);
@@ -5525,33 +5541,20 @@
 	memcpy(&pf->hw.phy.link_info_old, hw_link_info,
 	       sizeof(pf->hw.phy.link_info_old));
 
+	/* Do a new status request to re-enable LSE reporting
+	 * and load new status information into the hw struct
+	 * This completely ignores any state information
+	 * in the ARQ event info, instead choosing to always
+	 * issue the AQ update link status command.
+	 */
+	i40e_link_event(pf);
+
 	/* check for unqualified module, if link is down */
 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
 	    (!(status->link_info & I40E_AQ_LINK_UP)))
 		dev_err(&pf->pdev->dev,
 			"The driver failed to link because an unqualified module was detected.\n");
-
-	/* update link status */
-	hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
-	hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
-	hw_link_info->link_info = status->link_info;
-	hw_link_info->an_info = status->an_info;
-	hw_link_info->ext_info = status->ext_info;
-	hw_link_info->lse_enable =
-		le16_to_cpu(status->command_flags) &
-			    I40E_AQ_LSE_ENABLE;
-
-	/* process the event */
-	i40e_link_event(pf);
-
-	/* Do a new status request to re-enable LSE reporting
-	 * and load new status information into the hw struct,
-	 * then see if the status changed while processing the
-	 * initial event.
-	 */
-	i40e_update_link_info(&pf->hw, true);
-	i40e_link_event(pf);
 }
 
 /**
@@ -5967,6 +5970,7 @@
 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 {
 	struct i40e_hw *hw = &pf->hw;
+	u8 set_fc_aq_fail = 0;
 	i40e_status ret;
 	u32 v;
 
@@ -6038,6 +6042,20 @@
 	if (ret)
 		goto end_core_reset;
 
+	/* driver is only interested in link up/down and module qualification
+	 * reports from firmware
+	 */
+	ret = i40e_aq_set_phy_int_mask(&pf->hw,
+				       I40E_AQ_EVENT_LINK_UPDOWN |
+				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+	if (ret)
+		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+
+	/* make sure our flow control settings are restored */
+	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
+	if (ret)
+		dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+
 	/* Rebuild the VSIs and VEBs that existed before reset.
 	 * They are still in our local switch element arrays, so only
 	 * need to rebuild the switch model in the HW.
@@ -6092,6 +6110,13 @@
 		}
 	}
 
+	msleep(75);
+	ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+			 pf->hw.aq.asq_last_status);
+	}
+
 	/* reinit the misc interrupt */
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
 		ret = i40e_setup_misc_vector(pf);
@@ -6149,12 +6174,13 @@
 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
-		u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
 				I40E_GL_MDET_TX_EVENT_SHIFT;
-		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
-				I40E_GL_MDET_TX_QUEUE_SHIFT;
+		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+				I40E_GL_MDET_TX_QUEUE_SHIFT) -
+				pf->hw.func_caps.base_queue;
 		if (netif_msg_tx_err(pf))
 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
 				 event, queue, pf_num, vf_num);
@@ -6167,8 +6193,9 @@
 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
 				I40E_GL_MDET_RX_EVENT_SHIFT;
-		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
-				I40E_GL_MDET_RX_QUEUE_SHIFT;
+		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+				I40E_GL_MDET_RX_QUEUE_SHIFT) -
+				pf->hw.func_caps.base_queue;
 		if (netif_msg_rx_err(pf))
 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
 				 event, queue, func);
@@ -6305,6 +6332,8 @@
 #endif
 	i40e_clean_adminq_subtask(pf);
 
+	i40e_link_event(pf);
+
 	i40e_service_event_complete(pf);
 
 	/* If the tasks have taken longer than one timer cycle or there
@@ -6676,6 +6705,7 @@
 {
 	i40e_status err = 0;
 	struct i40e_hw *hw = &pf->hw;
+	int other_vecs = 0;
 	int v_budget, i;
 	int vec;
 
@@ -6701,10 +6731,10 @@
 	 */
 	pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
 	pf->num_vmdq_msix = pf->num_vmdq_qps;
-	v_budget = 1 + pf->num_lan_msix;
-	v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
+	other_vecs = 1;
+	other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
-		v_budget++;
+		other_vecs++;
 
 #ifdef I40E_FCOE
 	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
@@ -6714,7 +6744,9 @@
 
 #endif
 	/* Scale down if necessary, and the rings will share vectors */
-	v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
+	pf->num_lan_msix = min_t(int, pf->num_lan_msix,
+			(hw->func_caps.num_msix_vectors - other_vecs));
+	v_budget = pf->num_lan_msix + other_vecs;
 
 	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
 				   GFP_KERNEL);
@@ -7931,8 +7963,8 @@
 						 vsi->num_q_vectors, vsi->idx);
 	if (vsi->base_vector < 0) {
 		dev_info(&pf->pdev->dev,
-			 "failed to get queue tracking for VSI %d, err=%d\n",
-			 vsi->seid, vsi->base_vector);
+			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
+			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
 		i40e_vsi_free_q_vectors(vsi);
 		ret = -ENOENT;
 		goto vector_setup_out;
@@ -7968,8 +8000,9 @@
 
 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
 	if (ret < 0) {
-		dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
-			 vsi->seid, ret);
+		dev_info(&pf->pdev->dev,
+			 "failed to get tracking for %d queues for VSI %d err=%d\n",
+			 vsi->alloc_queue_pairs, vsi->seid, ret);
 		goto err_vsi;
 	}
 	vsi->base_queue = ret;
@@ -8098,8 +8131,9 @@
 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
 				vsi->idx);
 	if (ret < 0) {
-		dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
-			 vsi->seid, ret);
+		dev_info(&pf->pdev->dev,
+			 "failed to get tracking for %d queues for VSI %d err=%d\n",
+			 vsi->alloc_queue_pairs, vsi->seid, ret);
 		goto err_vsi;
 	}
 	vsi->base_queue = ret;
@@ -8719,6 +8753,14 @@
 	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
 				  I40E_AQ_AN_COMPLETED) ? true : false);
 
+	/* fill in link information and enable LSE reporting */
+	i40e_update_link_info(&pf->hw, true);
+	i40e_link_event(pf);
+
+	/* Initialize user-specific link properties */
+	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
+				  I40E_AQ_AN_COMPLETED) ? true : false);
+
 	i40e_ptp_init(pf);
 
 	return ret;
@@ -8987,6 +9029,11 @@
 	hw->bus.func = PCI_FUNC(pdev->devfn);
 	pf->instance = pfs_found;
 
+	if (debug != -1) {
+		pf->msg_enable = pf->hw.debug_mask;
+		pf->msg_enable = debug;
+	}
+
 	/* do a special CORER for clearing PXE mode once at init */
 	if (hw->revision_id == 0 &&
 	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
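
Editor's note: the probe path now seeds pf->msg_enable from a debug module parameter, with -1 meaning "keep the default" (the first of the two added assignments is dead; it is overwritten on the next line). A hedged sketch of the conventional netif message-level pattern this resembles; the I40E_DEF_MSG default here is an assumption:

	#include <linux/module.h>
	#include <linux/netdevice.h>

	static int debug = -1;	/* -1 keeps the driver's default message level */
	module_param(debug, int, 0);
	MODULE_PARM_DESC(debug, "Debug level");

	/* assumed default bits; the real driver keeps its own mask */
	#define I40E_DEF_MSG (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

	static u32 example_msg_enable(void)
	{
		/* netif_msg_init() falls back to the default for out-of-range input */
		return netif_msg_init(debug, I40E_DEF_MSG);
	}
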
@@ -9158,6 +9205,22 @@
 		}
 	}
 
+	/* driver is only interested in link up/down and module qualification
+	 * reports from firmware
+	 */
+	err = i40e_aq_set_phy_int_mask(&pf->hw,
+				       I40E_AQ_EVENT_LINK_UPDOWN |
+				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+	if (err)
+		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+
+	msleep(75);
+	err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+	if (err) {
+		dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+			 pf->hw.aq.asq_last_status);
+	}
+
 	/* The main driver is (mostly) up and happy. We need to set this state
 	 * before setting up the misc vector or we get a race and the vector
 	 * ends up disabled forever.
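
Editor's note: the new probe code narrows firmware PHY event reporting to just link up/down and module-qualification failures, then restarts autonegotiation so an initial link event is guaranteed to arrive; the msleep(75) gives the firmware time to latch the mask before the restart. The mask is a plain bitmask of permitted events; a minimal sketch (bit values assumed for illustration):

	#include <stdbool.h>
	#include <stdint.h>

	/* assumed illustrative values; the real ones live in i40e_adminq_cmd.h */
	#define AQ_EVENT_LINK_UPDOWN      0x0002
	#define AQ_EVENT_MODULE_QUAL_FAIL 0x0020

	/* firmware-side view: only post events whose bit is set in the mask */
	static bool event_allowed(uint16_t mask, uint16_t event_bit)
	{
		return (mask & event_bit) != 0;
	}
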
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 0988b5c..246c278 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -84,6 +84,8 @@
 				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
 				  bool atomic_reset);
+i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+				     struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index ce04d90..3a237c3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -43,6 +43,7 @@
 #define I40E_DEV_ID_QSFP_A		0x1583
 #define I40E_DEV_ID_QSFP_B		0x1584
 #define I40E_DEV_ID_QSFP_C		0x1585
+#define I40E_DEV_ID_10G_BASE_T		0x1586
 #define I40E_DEV_ID_VF			0x154C
 #define I40E_DEV_ID_VF_HV		0x1571
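
Editor's note: defining I40E_DEV_ID_10G_BASE_T (0x1586) only names the new 10GBASE-T part; for the driver to bind, the ID must also appear in the PCI device table in i40e_main.c. A sketch of the usual wiring (the table name and neighbouring entry follow the driver's convention and are assumptions here; the ID macros come from i40e_type.h above):

	#include <linux/pci.h>

	static const struct pci_device_id i40e_pci_tbl[] = {
		{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
		{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
		/* required last entry */
		{0, }
	};
	MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
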
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4eeed26..fff3c27 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -674,7 +674,7 @@
 		 * that the requested op was completed
 		 * successfully
 		 */
-		udelay(10);
+		usleep_range(10, 20);
 		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
 		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
 			rsd = true;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index e656ea7..ff1b163 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -33,8 +33,8 @@
  * This file needs to comply with the Linux Kernel coding style.
  */
 
-#define I40E_FW_API_VERSION_MAJOR  0x0001
-#define I40E_FW_API_VERSION_MINOR  0x0002
+#define I40E_FW_API_VERSION_MAJOR	0x0001
+#define I40E_FW_API_VERSION_MINOR	0x0002
 #define I40E_FW_API_VERSION_A0_MINOR  0x0000
 
 struct i40e_aq_desc {
@@ -67,216 +67,216 @@
  */
 
 /* command flags and offsets */
-#define I40E_AQ_FLAG_DD_SHIFT  0
-#define I40E_AQ_FLAG_CMP_SHIFT 1
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_VFE_SHIFT 3
-#define I40E_AQ_FLAG_LB_SHIFT  9
-#define I40E_AQ_FLAG_RD_SHIFT  10
-#define I40E_AQ_FLAG_VFC_SHIFT 11
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT  13
-#define I40E_AQ_FLAG_EI_SHIFT  14
-#define I40E_AQ_FLAG_FE_SHIFT  15
+#define I40E_AQ_FLAG_DD_SHIFT	0
+#define I40E_AQ_FLAG_CMP_SHIFT	1
+#define I40E_AQ_FLAG_ERR_SHIFT	2
+#define I40E_AQ_FLAG_VFE_SHIFT	3
+#define I40E_AQ_FLAG_LB_SHIFT	9
+#define I40E_AQ_FLAG_RD_SHIFT	10
+#define I40E_AQ_FLAG_VFC_SHIFT	11
+#define I40E_AQ_FLAG_BUF_SHIFT	12
+#define I40E_AQ_FLAG_SI_SHIFT	13
+#define I40E_AQ_FLAG_EI_SHIFT	14
+#define I40E_AQ_FLAG_FE_SHIFT	15
 
-#define I40E_AQ_FLAG_DD  (1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
-#define I40E_AQ_FLAG_LB  (1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
-#define I40E_AQ_FLAG_RD  (1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI  (1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
-#define I40E_AQ_FLAG_EI  (1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
-#define I40E_AQ_FLAG_FE  (1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
+#define I40E_AQ_FLAG_DD		(1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
+#define I40E_AQ_FLAG_CMP	(1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
+#define I40E_AQ_FLAG_ERR	(1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
+#define I40E_AQ_FLAG_VFE	(1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
+#define I40E_AQ_FLAG_LB		(1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
+#define I40E_AQ_FLAG_RD		(1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
+#define I40E_AQ_FLAG_VFC	(1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
+#define I40E_AQ_FLAG_BUF	(1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI		(1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
+#define I40E_AQ_FLAG_EI		(1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
+#define I40E_AQ_FLAG_FE		(1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
 
 /* error codes */
 enum i40e_admin_queue_err {
-	I40E_AQ_RC_OK       = 0,    /* success */
-	I40E_AQ_RC_EPERM    = 1,    /* Operation not permitted */
-	I40E_AQ_RC_ENOENT   = 2,    /* No such element */
-	I40E_AQ_RC_ESRCH    = 3,    /* Bad opcode */
-	I40E_AQ_RC_EINTR    = 4,    /* operation interrupted */
-	I40E_AQ_RC_EIO      = 5,    /* I/O error */
-	I40E_AQ_RC_ENXIO    = 6,    /* No such resource */
-	I40E_AQ_RC_E2BIG    = 7,    /* Arg too long */
-	I40E_AQ_RC_EAGAIN   = 8,    /* Try again */
-	I40E_AQ_RC_ENOMEM   = 9,    /* Out of memory */
-	I40E_AQ_RC_EACCES   = 10,   /* Permission denied */
-	I40E_AQ_RC_EFAULT   = 11,   /* Bad address */
-	I40E_AQ_RC_EBUSY    = 12,   /* Device or resource busy */
-	I40E_AQ_RC_EEXIST   = 13,   /* object already exists */
-	I40E_AQ_RC_EINVAL   = 14,   /* Invalid argument */
-	I40E_AQ_RC_ENOTTY   = 15,   /* Not a typewriter */
-	I40E_AQ_RC_ENOSPC   = 16,   /* No space left or alloc failure */
-	I40E_AQ_RC_ENOSYS   = 17,   /* Function not implemented */
-	I40E_AQ_RC_ERANGE   = 18,   /* Parameter out of range */
-	I40E_AQ_RC_EFLUSHED = 19,   /* Cmd flushed because of prev cmd error */
-	I40E_AQ_RC_BAD_ADDR = 20,   /* Descriptor contains a bad pointer */
-	I40E_AQ_RC_EMODE    = 21,   /* Op not allowed in current dev mode */
-	I40E_AQ_RC_EFBIG    = 22,   /* File too large */
+	I40E_AQ_RC_OK		= 0,  /* success */
+	I40E_AQ_RC_EPERM	= 1,  /* Operation not permitted */
+	I40E_AQ_RC_ENOENT	= 2,  /* No such element */
+	I40E_AQ_RC_ESRCH	= 3,  /* Bad opcode */
+	I40E_AQ_RC_EINTR	= 4,  /* operation interrupted */
+	I40E_AQ_RC_EIO		= 5,  /* I/O error */
+	I40E_AQ_RC_ENXIO	= 6,  /* No such resource */
+	I40E_AQ_RC_E2BIG	= 7,  /* Arg too long */
+	I40E_AQ_RC_EAGAIN	= 8,  /* Try again */
+	I40E_AQ_RC_ENOMEM	= 9,  /* Out of memory */
+	I40E_AQ_RC_EACCES	= 10, /* Permission denied */
+	I40E_AQ_RC_EFAULT	= 11, /* Bad address */
+	I40E_AQ_RC_EBUSY	= 12, /* Device or resource busy */
+	I40E_AQ_RC_EEXIST	= 13, /* object already exists */
+	I40E_AQ_RC_EINVAL	= 14, /* Invalid argument */
+	I40E_AQ_RC_ENOTTY	= 15, /* Not a typewriter */
+	I40E_AQ_RC_ENOSPC	= 16, /* No space left or alloc failure */
+	I40E_AQ_RC_ENOSYS	= 17, /* Function not implemented */
+	I40E_AQ_RC_ERANGE	= 18, /* Parameter out of range */
+	I40E_AQ_RC_EFLUSHED	= 19, /* Cmd flushed due to prev cmd error */
+	I40E_AQ_RC_BAD_ADDR	= 20, /* Descriptor contains a bad pointer */
+	I40E_AQ_RC_EMODE	= 21, /* Op not allowed in current dev mode */
+	I40E_AQ_RC_EFBIG	= 22, /* File too large */
 };
 
 /* Admin Queue command opcodes */
 enum i40e_admin_queue_opc {
 	/* aq commands */
-	i40e_aqc_opc_get_version      = 0x0001,
-	i40e_aqc_opc_driver_version   = 0x0002,
-	i40e_aqc_opc_queue_shutdown   = 0x0003,
-	i40e_aqc_opc_set_pf_context   = 0x0004,
+	i40e_aqc_opc_get_version	= 0x0001,
+	i40e_aqc_opc_driver_version	= 0x0002,
+	i40e_aqc_opc_queue_shutdown	= 0x0003,
+	i40e_aqc_opc_set_pf_context	= 0x0004,
 
 	/* resource ownership */
-	i40e_aqc_opc_request_resource = 0x0008,
-	i40e_aqc_opc_release_resource = 0x0009,
+	i40e_aqc_opc_request_resource	= 0x0008,
+	i40e_aqc_opc_release_resource	= 0x0009,
 
-	i40e_aqc_opc_list_func_capabilities = 0x000A,
-	i40e_aqc_opc_list_dev_capabilities  = 0x000B,
+	i40e_aqc_opc_list_func_capabilities	= 0x000A,
+	i40e_aqc_opc_list_dev_capabilities	= 0x000B,
 
-	i40e_aqc_opc_set_cppm_configuration = 0x0103,
-	i40e_aqc_opc_set_arp_proxy_entry    = 0x0104,
-	i40e_aqc_opc_set_ns_proxy_entry     = 0x0105,
+	i40e_aqc_opc_set_cppm_configuration	= 0x0103,
+	i40e_aqc_opc_set_arp_proxy_entry	= 0x0104,
+	i40e_aqc_opc_set_ns_proxy_entry		= 0x0105,
 
 	/* LAA */
-	i40e_aqc_opc_mng_laa                = 0x0106,   /* AQ obsolete */
-	i40e_aqc_opc_mac_address_read       = 0x0107,
-	i40e_aqc_opc_mac_address_write      = 0x0108,
+	i40e_aqc_opc_mng_laa		= 0x0106,   /* AQ obsolete */
+	i40e_aqc_opc_mac_address_read	= 0x0107,
+	i40e_aqc_opc_mac_address_write	= 0x0108,
 
 	/* PXE */
-	i40e_aqc_opc_clear_pxe_mode         = 0x0110,
+	i40e_aqc_opc_clear_pxe_mode	= 0x0110,
 
 	/* internal switch commands */
-	i40e_aqc_opc_get_switch_config         = 0x0200,
-	i40e_aqc_opc_add_statistics            = 0x0201,
-	i40e_aqc_opc_remove_statistics         = 0x0202,
-	i40e_aqc_opc_set_port_parameters       = 0x0203,
-	i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+	i40e_aqc_opc_get_switch_config		= 0x0200,
+	i40e_aqc_opc_add_statistics		= 0x0201,
+	i40e_aqc_opc_remove_statistics		= 0x0202,
+	i40e_aqc_opc_set_port_parameters	= 0x0203,
+	i40e_aqc_opc_get_switch_resource_alloc	= 0x0204,
 
-	i40e_aqc_opc_add_vsi                = 0x0210,
-	i40e_aqc_opc_update_vsi_parameters  = 0x0211,
-	i40e_aqc_opc_get_vsi_parameters     = 0x0212,
+	i40e_aqc_opc_add_vsi			= 0x0210,
+	i40e_aqc_opc_update_vsi_parameters	= 0x0211,
+	i40e_aqc_opc_get_vsi_parameters		= 0x0212,
 
-	i40e_aqc_opc_add_pv                = 0x0220,
-	i40e_aqc_opc_update_pv_parameters  = 0x0221,
-	i40e_aqc_opc_get_pv_parameters     = 0x0222,
+	i40e_aqc_opc_add_pv			= 0x0220,
+	i40e_aqc_opc_update_pv_parameters	= 0x0221,
+	i40e_aqc_opc_get_pv_parameters		= 0x0222,
 
-	i40e_aqc_opc_add_veb               = 0x0230,
-	i40e_aqc_opc_update_veb_parameters = 0x0231,
-	i40e_aqc_opc_get_veb_parameters    = 0x0232,
+	i40e_aqc_opc_add_veb			= 0x0230,
+	i40e_aqc_opc_update_veb_parameters	= 0x0231,
+	i40e_aqc_opc_get_veb_parameters		= 0x0232,
 
-	i40e_aqc_opc_delete_element  = 0x0243,
+	i40e_aqc_opc_delete_element		= 0x0243,
 
-	i40e_aqc_opc_add_macvlan                  = 0x0250,
-	i40e_aqc_opc_remove_macvlan               = 0x0251,
-	i40e_aqc_opc_add_vlan                     = 0x0252,
-	i40e_aqc_opc_remove_vlan                  = 0x0253,
-	i40e_aqc_opc_set_vsi_promiscuous_modes    = 0x0254,
-	i40e_aqc_opc_add_tag                      = 0x0255,
-	i40e_aqc_opc_remove_tag                   = 0x0256,
-	i40e_aqc_opc_add_multicast_etag           = 0x0257,
-	i40e_aqc_opc_remove_multicast_etag        = 0x0258,
-	i40e_aqc_opc_update_tag                   = 0x0259,
-	i40e_aqc_opc_add_control_packet_filter    = 0x025A,
-	i40e_aqc_opc_remove_control_packet_filter = 0x025B,
-	i40e_aqc_opc_add_cloud_filters            = 0x025C,
-	i40e_aqc_opc_remove_cloud_filters         = 0x025D,
+	i40e_aqc_opc_add_macvlan		= 0x0250,
+	i40e_aqc_opc_remove_macvlan		= 0x0251,
+	i40e_aqc_opc_add_vlan			= 0x0252,
+	i40e_aqc_opc_remove_vlan		= 0x0253,
+	i40e_aqc_opc_set_vsi_promiscuous_modes	= 0x0254,
+	i40e_aqc_opc_add_tag			= 0x0255,
+	i40e_aqc_opc_remove_tag			= 0x0256,
+	i40e_aqc_opc_add_multicast_etag		= 0x0257,
+	i40e_aqc_opc_remove_multicast_etag	= 0x0258,
+	i40e_aqc_opc_update_tag			= 0x0259,
+	i40e_aqc_opc_add_control_packet_filter	= 0x025A,
+	i40e_aqc_opc_remove_control_packet_filter	= 0x025B,
+	i40e_aqc_opc_add_cloud_filters		= 0x025C,
+	i40e_aqc_opc_remove_cloud_filters	= 0x025D,
 
-	i40e_aqc_opc_add_mirror_rule    = 0x0260,
-	i40e_aqc_opc_delete_mirror_rule = 0x0261,
+	i40e_aqc_opc_add_mirror_rule	= 0x0260,
+	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
 
 	/* DCB commands */
-	i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
-	i40e_aqc_opc_dcb_updated    = 0x0302,
+	i40e_aqc_opc_dcb_ignore_pfc	= 0x0301,
+	i40e_aqc_opc_dcb_updated	= 0x0302,
 
 	/* TX scheduler */
-	i40e_aqc_opc_configure_vsi_bw_limit            = 0x0400,
-	i40e_aqc_opc_configure_vsi_ets_sla_bw_limit    = 0x0406,
-	i40e_aqc_opc_configure_vsi_tc_bw               = 0x0407,
-	i40e_aqc_opc_query_vsi_bw_config               = 0x0408,
-	i40e_aqc_opc_query_vsi_ets_sla_config          = 0x040A,
-	i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+	i40e_aqc_opc_configure_vsi_bw_limit		= 0x0400,
+	i40e_aqc_opc_configure_vsi_ets_sla_bw_limit	= 0x0406,
+	i40e_aqc_opc_configure_vsi_tc_bw		= 0x0407,
+	i40e_aqc_opc_query_vsi_bw_config		= 0x0408,
+	i40e_aqc_opc_query_vsi_ets_sla_config		= 0x040A,
+	i40e_aqc_opc_configure_switching_comp_bw_limit	= 0x0410,
 
-	i40e_aqc_opc_enable_switching_comp_ets             = 0x0413,
-	i40e_aqc_opc_modify_switching_comp_ets             = 0x0414,
-	i40e_aqc_opc_disable_switching_comp_ets            = 0x0415,
-	i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
-	i40e_aqc_opc_configure_switching_comp_bw_config    = 0x0417,
-	i40e_aqc_opc_query_switching_comp_ets_config       = 0x0418,
-	i40e_aqc_opc_query_port_ets_config                 = 0x0419,
-	i40e_aqc_opc_query_switching_comp_bw_config        = 0x041A,
-	i40e_aqc_opc_suspend_port_tx                       = 0x041B,
-	i40e_aqc_opc_resume_port_tx                        = 0x041C,
-	i40e_aqc_opc_configure_partition_bw                = 0x041D,
+	i40e_aqc_opc_enable_switching_comp_ets			= 0x0413,
+	i40e_aqc_opc_modify_switching_comp_ets			= 0x0414,
+	i40e_aqc_opc_disable_switching_comp_ets			= 0x0415,
+	i40e_aqc_opc_configure_switching_comp_ets_bw_limit	= 0x0416,
+	i40e_aqc_opc_configure_switching_comp_bw_config		= 0x0417,
+	i40e_aqc_opc_query_switching_comp_ets_config		= 0x0418,
+	i40e_aqc_opc_query_port_ets_config			= 0x0419,
+	i40e_aqc_opc_query_switching_comp_bw_config		= 0x041A,
+	i40e_aqc_opc_suspend_port_tx				= 0x041B,
+	i40e_aqc_opc_resume_port_tx				= 0x041C,
+	i40e_aqc_opc_configure_partition_bw			= 0x041D,
 
 	/* hmc */
-	i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
-	i40e_aqc_opc_set_hmc_resource_profile   = 0x0501,
+	i40e_aqc_opc_query_hmc_resource_profile	= 0x0500,
+	i40e_aqc_opc_set_hmc_resource_profile	= 0x0501,
 
 	/* phy commands */
-	i40e_aqc_opc_get_phy_abilities   = 0x0600,
-	i40e_aqc_opc_set_phy_config      = 0x0601,
-	i40e_aqc_opc_set_mac_config      = 0x0603,
-	i40e_aqc_opc_set_link_restart_an = 0x0605,
-	i40e_aqc_opc_get_link_status     = 0x0607,
-	i40e_aqc_opc_set_phy_int_mask    = 0x0613,
-	i40e_aqc_opc_get_local_advt_reg  = 0x0614,
-	i40e_aqc_opc_set_local_advt_reg  = 0x0615,
-	i40e_aqc_opc_get_partner_advt    = 0x0616,
-	i40e_aqc_opc_set_lb_modes        = 0x0618,
-	i40e_aqc_opc_get_phy_wol_caps    = 0x0621,
-	i40e_aqc_opc_set_phy_debug	 = 0x0622,
-	i40e_aqc_opc_upload_ext_phy_fm   = 0x0625,
+	i40e_aqc_opc_get_phy_abilities		= 0x0600,
+	i40e_aqc_opc_set_phy_config		= 0x0601,
+	i40e_aqc_opc_set_mac_config		= 0x0603,
+	i40e_aqc_opc_set_link_restart_an	= 0x0605,
+	i40e_aqc_opc_get_link_status		= 0x0607,
+	i40e_aqc_opc_set_phy_int_mask		= 0x0613,
+	i40e_aqc_opc_get_local_advt_reg		= 0x0614,
+	i40e_aqc_opc_set_local_advt_reg		= 0x0615,
+	i40e_aqc_opc_get_partner_advt		= 0x0616,
+	i40e_aqc_opc_set_lb_modes		= 0x0618,
+	i40e_aqc_opc_get_phy_wol_caps		= 0x0621,
+	i40e_aqc_opc_set_phy_debug		= 0x0622,
+	i40e_aqc_opc_upload_ext_phy_fm		= 0x0625,
 
 	/* NVM commands */
-	i40e_aqc_opc_nvm_read         = 0x0701,
-	i40e_aqc_opc_nvm_erase        = 0x0702,
-	i40e_aqc_opc_nvm_update       = 0x0703,
-	i40e_aqc_opc_nvm_config_read  = 0x0704,
-	i40e_aqc_opc_nvm_config_write = 0x0705,
+	i40e_aqc_opc_nvm_read			= 0x0701,
+	i40e_aqc_opc_nvm_erase			= 0x0702,
+	i40e_aqc_opc_nvm_update			= 0x0703,
+	i40e_aqc_opc_nvm_config_read		= 0x0704,
+	i40e_aqc_opc_nvm_config_write		= 0x0705,
 
 	/* virtualization commands */
-	i40e_aqc_opc_send_msg_to_pf   = 0x0801,
-	i40e_aqc_opc_send_msg_to_vf   = 0x0802,
-	i40e_aqc_opc_send_msg_to_peer = 0x0803,
+	i40e_aqc_opc_send_msg_to_pf		= 0x0801,
+	i40e_aqc_opc_send_msg_to_vf		= 0x0802,
+	i40e_aqc_opc_send_msg_to_peer		= 0x0803,
 
 	/* alternate structure */
-	i40e_aqc_opc_alternate_write          = 0x0900,
-	i40e_aqc_opc_alternate_write_indirect = 0x0901,
-	i40e_aqc_opc_alternate_read           = 0x0902,
-	i40e_aqc_opc_alternate_read_indirect  = 0x0903,
-	i40e_aqc_opc_alternate_write_done     = 0x0904,
-	i40e_aqc_opc_alternate_set_mode       = 0x0905,
-	i40e_aqc_opc_alternate_clear_port     = 0x0906,
+	i40e_aqc_opc_alternate_write		= 0x0900,
+	i40e_aqc_opc_alternate_write_indirect	= 0x0901,
+	i40e_aqc_opc_alternate_read		= 0x0902,
+	i40e_aqc_opc_alternate_read_indirect	= 0x0903,
+	i40e_aqc_opc_alternate_write_done	= 0x0904,
+	i40e_aqc_opc_alternate_set_mode		= 0x0905,
+	i40e_aqc_opc_alternate_clear_port	= 0x0906,
 
 	/* LLDP commands */
-	i40e_aqc_opc_lldp_get_mib    = 0x0A00,
-	i40e_aqc_opc_lldp_update_mib = 0x0A01,
-	i40e_aqc_opc_lldp_add_tlv    = 0x0A02,
-	i40e_aqc_opc_lldp_update_tlv = 0x0A03,
-	i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
-	i40e_aqc_opc_lldp_stop       = 0x0A05,
-	i40e_aqc_opc_lldp_start      = 0x0A06,
+	i40e_aqc_opc_lldp_get_mib	= 0x0A00,
+	i40e_aqc_opc_lldp_update_mib	= 0x0A01,
+	i40e_aqc_opc_lldp_add_tlv	= 0x0A02,
+	i40e_aqc_opc_lldp_update_tlv	= 0x0A03,
+	i40e_aqc_opc_lldp_delete_tlv	= 0x0A04,
+	i40e_aqc_opc_lldp_stop		= 0x0A05,
+	i40e_aqc_opc_lldp_start		= 0x0A06,
 
 	/* Tunnel commands */
-	i40e_aqc_opc_add_udp_tunnel       = 0x0B00,
-	i40e_aqc_opc_del_udp_tunnel       = 0x0B01,
-	i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+	i40e_aqc_opc_add_udp_tunnel	= 0x0B00,
+	i40e_aqc_opc_del_udp_tunnel	= 0x0B01,
+	i40e_aqc_opc_tunnel_key_structure	= 0x0B10,
 
 	/* Async Events */
-	i40e_aqc_opc_event_lan_overflow = 0x1001,
+	i40e_aqc_opc_event_lan_overflow		= 0x1001,
 
 	/* OEM commands */
-	i40e_aqc_opc_oem_parameter_change     = 0xFE00,
-	i40e_aqc_opc_oem_device_status_change = 0xFE01,
+	i40e_aqc_opc_oem_parameter_change	= 0xFE00,
+	i40e_aqc_opc_oem_device_status_change	= 0xFE01,
 
 	/* debug commands */
-	i40e_aqc_opc_debug_get_deviceid     = 0xFF00,
-	i40e_aqc_opc_debug_set_mode         = 0xFF01,
-	i40e_aqc_opc_debug_read_reg         = 0xFF03,
-	i40e_aqc_opc_debug_write_reg        = 0xFF04,
-	i40e_aqc_opc_debug_modify_reg       = 0xFF07,
-	i40e_aqc_opc_debug_dump_internals   = 0xFF08,
-	i40e_aqc_opc_debug_modify_internals = 0xFF09,
+	i40e_aqc_opc_debug_get_deviceid		= 0xFF00,
+	i40e_aqc_opc_debug_set_mode		= 0xFF01,
+	i40e_aqc_opc_debug_read_reg		= 0xFF03,
+	i40e_aqc_opc_debug_write_reg		= 0xFF04,
+	i40e_aqc_opc_debug_modify_reg		= 0xFF07,
+	i40e_aqc_opc_debug_dump_internals	= 0xFF08,
+	i40e_aqc_opc_debug_modify_internals	= 0xFF09,
 };
 
 /* command structures and indirect data structures */
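
Editor's note: the realignment above (spaces to tabs in the i40evf copy of the admin queue header) changes no values; the point is keeping the VF header in step with the PF one. The flag and opcode constants land in the first two __le16 fields of the 16-byte struct i40e_aq_desc. A simplified, hedged sketch of composing a descriptor (no endianness handling; the driver uses cpu_to_le16 for these fields):

	#include <stdint.h>

	/* simplified mirror of the descriptor head; the real fields are __le16 */
	struct aq_desc_sketch {
		uint16_t flags;
		uint16_t opcode;
	};

	enum { OPC_GET_SWITCH_CONFIG = 0x0200 };	/* an indirect command */

	#define AQ_FLAG_BUF (1u << 12)	/* an external buffer is attached */
	#define AQ_FLAG_RD  (1u << 10)	/* set when firmware must read that buffer */

	static void fill_desc(struct aq_desc_sketch *desc)
	{
		desc->opcode = OPC_GET_SWITCH_CONFIG;
		/* the response comes back in the attached buffer, so BUF alone
		 * is enough; a command sending data to firmware adds AQ_FLAG_RD
		 */
		desc->flags = AQ_FLAG_BUF;
	}
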
@@ -303,7 +303,7 @@
 /* This macro is used extensively to ensure that command structures are 16
  * bytes in length as they have to map to the raw array of that size.
  */
-#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
+#define I40E_CHECK_CMD_LENGTH(X)	I40E_CHECK_STRUCT_LEN(16, X)
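
Editor's note: I40E_CHECK_CMD_LENGTH delegates to I40E_CHECK_STRUCT_LEN, a compile-time assertion: if a command struct drifts from 16 bytes, the build fails instead of silently corrupting the raw descriptor array. One classic way to express such a check in plain C, shown here as a hedged sketch (the kernel macro's exact expansion differs):

	/* a negative array size forces a compile error when the size is wrong */
	#define CHECK_STRUCT_LEN(n, X) \
		typedef char check_len_##X[(sizeof(struct X) == (n)) ? 1 : -1]

	struct demo { char raw[16]; };
	CHECK_STRUCT_LEN(16, demo);	/* compiles */
	/* CHECK_STRUCT_LEN(8, demo); would fail to compile */
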
 
 /* internal (0x00XX) commands */
 
@@ -321,22 +321,22 @@
 
 /* Send driver version (indirect 0x0002) */
 struct i40e_aqc_driver_version {
-	u8     driver_major_ver;
-	u8     driver_minor_ver;
-	u8     driver_build_ver;
-	u8     driver_subbuild_ver;
-	u8     reserved[4];
-	__le32 address_high;
-	__le32 address_low;
+	u8	driver_major_ver;
+	u8	driver_minor_ver;
+	u8	driver_build_ver;
+	u8	driver_subbuild_ver;
+	u8	reserved[4];
+	__le32	address_high;
+	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
 
 /* Queue Shutdown (direct 0x0003) */
 struct i40e_aqc_queue_shutdown {
-	__le32     driver_unloading;
-#define I40E_AQ_DRIVER_UNLOADING    0x1
-	u8     reserved[12];
+	__le32	driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING	0x1
+	u8	reserved[12];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
@@ -352,19 +352,19 @@
 /* Request resource ownership (direct 0x0008)
  * Release resource ownership (direct 0x0009)
  */
-#define I40E_AQ_RESOURCE_NVM               1
-#define I40E_AQ_RESOURCE_SDP               2
-#define I40E_AQ_RESOURCE_ACCESS_READ       1
-#define I40E_AQ_RESOURCE_ACCESS_WRITE      2
-#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT  3000
-#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+#define I40E_AQ_RESOURCE_NVM			1
+#define I40E_AQ_RESOURCE_SDP			2
+#define I40E_AQ_RESOURCE_ACCESS_READ		1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE		2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT	3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT	180000
 
 struct i40e_aqc_request_resource {
-	__le16 resource_id;
-	__le16 access_type;
-	__le32 timeout;
-	__le32 resource_number;
-	u8     reserved[4];
+	__le16	resource_id;
+	__le16	access_type;
+	__le32	timeout;
+	__le32	resource_number;
+	u8	reserved[4];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
@@ -374,7 +374,7 @@
  */
 struct i40e_aqc_list_capabilites {
 	u8 command_flags;
-#define I40E_AQ_LIST_CAP_PF_INDEX_EN     1
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN	1
 	u8 pf_index;
 	u8 reserved[2];
 	__le32 count;
@@ -385,123 +385,123 @@
 I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
 
 struct i40e_aqc_list_capabilities_element_resp {
-	__le16 id;
-	u8     major_rev;
-	u8     minor_rev;
-	__le32 number;
-	__le32 logical_id;
-	__le32 phys_id;
-	u8     reserved[16];
+	__le16	id;
+	u8	major_rev;
+	u8	minor_rev;
+	__le32	number;
+	__le32	logical_id;
+	__le32	phys_id;
+	u8	reserved[16];
 };
 
 /* list of caps */
 
-#define I40E_AQ_CAP_ID_SWITCH_MODE      0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE         0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE      0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP       0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID  0x0005
-#define I40E_AQ_CAP_ID_ALTERNATE_RAM    0x0006
-#define I40E_AQ_CAP_ID_SRIOV            0x0012
-#define I40E_AQ_CAP_ID_VF               0x0013
-#define I40E_AQ_CAP_ID_VMDQ             0x0014
-#define I40E_AQ_CAP_ID_8021QBG          0x0015
-#define I40E_AQ_CAP_ID_8021QBR          0x0016
-#define I40E_AQ_CAP_ID_VSI              0x0017
-#define I40E_AQ_CAP_ID_DCB              0x0018
-#define I40E_AQ_CAP_ID_FCOE             0x0021
-#define I40E_AQ_CAP_ID_RSS              0x0040
-#define I40E_AQ_CAP_ID_RXQ              0x0041
-#define I40E_AQ_CAP_ID_TXQ              0x0042
-#define I40E_AQ_CAP_ID_MSIX             0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX          0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR    0x0045
-#define I40E_AQ_CAP_ID_1588             0x0046
-#define I40E_AQ_CAP_ID_IWARP            0x0051
-#define I40E_AQ_CAP_ID_LED              0x0061
-#define I40E_AQ_CAP_ID_SDP              0x0062
-#define I40E_AQ_CAP_ID_MDIO             0x0063
-#define I40E_AQ_CAP_ID_FLEX10           0x00F1
-#define I40E_AQ_CAP_ID_CEM              0x00F2
+#define I40E_AQ_CAP_ID_SWITCH_MODE	0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE		0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE	0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP	0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID	0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM	0x0006
+#define I40E_AQ_CAP_ID_SRIOV		0x0012
+#define I40E_AQ_CAP_ID_VF		0x0013
+#define I40E_AQ_CAP_ID_VMDQ		0x0014
+#define I40E_AQ_CAP_ID_8021QBG		0x0015
+#define I40E_AQ_CAP_ID_8021QBR		0x0016
+#define I40E_AQ_CAP_ID_VSI		0x0017
+#define I40E_AQ_CAP_ID_DCB		0x0018
+#define I40E_AQ_CAP_ID_FCOE		0x0021
+#define I40E_AQ_CAP_ID_RSS		0x0040
+#define I40E_AQ_CAP_ID_RXQ		0x0041
+#define I40E_AQ_CAP_ID_TXQ		0x0042
+#define I40E_AQ_CAP_ID_MSIX		0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX		0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR	0x0045
+#define I40E_AQ_CAP_ID_1588		0x0046
+#define I40E_AQ_CAP_ID_IWARP		0x0051
+#define I40E_AQ_CAP_ID_LED		0x0061
+#define I40E_AQ_CAP_ID_SDP		0x0062
+#define I40E_AQ_CAP_ID_MDIO		0x0063
+#define I40E_AQ_CAP_ID_FLEX10		0x00F1
+#define I40E_AQ_CAP_ID_CEM		0x00F2
 
 /* Set CPPM Configuration (direct 0x0103) */
 struct i40e_aqc_cppm_configuration {
-	__le16 command_flags;
-#define I40E_AQ_CPPM_EN_LTRC    0x0800
-#define I40E_AQ_CPPM_EN_DMCTH   0x1000
-#define I40E_AQ_CPPM_EN_DMCTLX  0x2000
-#define I40E_AQ_CPPM_EN_HPTC    0x4000
-#define I40E_AQ_CPPM_EN_DMARC   0x8000
-	__le16 ttlx;
-	__le32 dmacr;
-	__le16 dmcth;
-	u8     hptc;
-	u8     reserved;
-	__le32 pfltrc;
+	__le16	command_flags;
+#define I40E_AQ_CPPM_EN_LTRC	0x0800
+#define I40E_AQ_CPPM_EN_DMCTH	0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX	0x2000
+#define I40E_AQ_CPPM_EN_HPTC	0x4000
+#define I40E_AQ_CPPM_EN_DMARC	0x8000
+	__le16	ttlx;
+	__le32	dmacr;
+	__le16	dmcth;
+	u8	hptc;
+	u8	reserved;
+	__le32	pfltrc;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
 
 /* Set ARP Proxy command / response (indirect 0x0104) */
 struct i40e_aqc_arp_proxy_data {
-	__le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4           0x0008
-#define I40E_AQ_ARP_UNSUP_CTL           0x0010
-#define I40E_AQ_ARP_ENA                 0x0020
-#define I40E_AQ_ARP_ADD_IPV4            0x0040
-#define I40E_AQ_ARP_DEL_IPV4            0x0080
-	__le16 table_id;
-	__le32 pfpm_proxyfc;
-	__le32 ip_addr;
-	u8     mac_addr[6];
+	__le16	command_flags;
+#define I40E_AQ_ARP_INIT_IPV4	0x0008
+#define I40E_AQ_ARP_UNSUP_CTL	0x0010
+#define I40E_AQ_ARP_ENA		0x0020
+#define I40E_AQ_ARP_ADD_IPV4	0x0040
+#define I40E_AQ_ARP_DEL_IPV4	0x0080
+	__le16	table_id;
+	__le32	pfpm_proxyfc;
+	__le32	ip_addr;
+	u8	mac_addr[6];
 };
 
 /* Set NS Proxy Table Entry Command (indirect 0x0105) */
 struct i40e_aqc_ns_proxy_data {
-	__le16 table_idx_mac_addr_0;
-	__le16 table_idx_mac_addr_1;
-	__le16 table_idx_ipv6_0;
-	__le16 table_idx_ipv6_1;
-	__le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0             0x0100
-#define I40E_AQ_NS_PROXY_DEL_0             0x0200
-#define I40E_AQ_NS_PROXY_ADD_1             0x0400
-#define I40E_AQ_NS_PROXY_DEL_1             0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0        0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0        0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1        0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1        0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ       0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL     0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL      0x0004
-	u8     mac_addr_0[6];
-	u8     mac_addr_1[6];
-	u8     local_mac_addr[6];
-	u8     ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
-	u8     ipv6_addr_1[16];
+	__le16	table_idx_mac_addr_0;
+	__le16	table_idx_mac_addr_1;
+	__le16	table_idx_ipv6_0;
+	__le16	table_idx_ipv6_1;
+	__le16	control;
+#define I40E_AQ_NS_PROXY_ADD_0		0x0100
+#define I40E_AQ_NS_PROXY_DEL_0		0x0200
+#define I40E_AQ_NS_PROXY_ADD_1		0x0400
+#define I40E_AQ_NS_PROXY_DEL_1		0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0	0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0	0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1	0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1	0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ	0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL	0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL	0x0004
+	u8	mac_addr_0[6];
+	u8	mac_addr_1[6];
+	u8	local_mac_addr[6];
+	u8	ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+	u8	ipv6_addr_1[16];
 };
 
 /* Manage LAA Command (0x0106) - obsolete */
 struct i40e_aqc_mng_laa {
 	__le16	command_flags;
-#define I40E_AQ_LAA_FLAG_WR   0x8000
-	u8     reserved[2];
-	__le32 sal;
-	__le16 sah;
-	u8     reserved2[6];
+#define I40E_AQ_LAA_FLAG_WR	0x8000
+	u8	reserved[2];
+	__le32	sal;
+	__le16	sah;
+	u8	reserved2[6];
 };
 
 /* Manage MAC Address Read Command (indirect 0x0107) */
 struct i40e_aqc_mac_address_read {
 	__le16	command_flags;
-#define I40E_AQC_LAN_ADDR_VALID   0x10
-#define I40E_AQC_SAN_ADDR_VALID   0x20
-#define I40E_AQC_PORT_ADDR_VALID  0x40
-#define I40E_AQC_WOL_ADDR_VALID   0x80
-#define I40E_AQC_ADDR_VALID_MASK  0xf0
-	u8     reserved[6];
-	__le32 addr_high;
-	__le32 addr_low;
+#define I40E_AQC_LAN_ADDR_VALID		0x10
+#define I40E_AQC_SAN_ADDR_VALID		0x20
+#define I40E_AQC_PORT_ADDR_VALID	0x40
+#define I40E_AQC_WOL_ADDR_VALID		0x80
+#define I40E_AQC_ADDR_VALID_MASK	0xf0
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
@@ -517,14 +517,14 @@
 
 /* Manage MAC Address Write Command (0x0108) */
 struct i40e_aqc_mac_address_write {
-	__le16 command_flags;
-#define I40E_AQC_WRITE_TYPE_LAA_ONLY    0x0000
-#define I40E_AQC_WRITE_TYPE_LAA_WOL     0x4000
-#define I40E_AQC_WRITE_TYPE_PORT        0x8000
-#define I40E_AQC_WRITE_TYPE_MASK        0xc000
-	__le16 mac_sah;
-	__le32 mac_sal;
-	u8     reserved[8];
+	__le16	command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY	0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL	0x4000
+#define I40E_AQC_WRITE_TYPE_PORT	0x8000
+#define I40E_AQC_WRITE_TYPE_MASK	0xc000
+	__le16	mac_sah;
+	__le32	mac_sal;
+	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
@@ -545,10 +545,10 @@
  * command
  */
 struct i40e_aqc_switch_seid {
-	__le16 seid;
-	u8     reserved[6];
-	__le32 addr_high;
-	__le32 addr_low;
+	__le16	seid;
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
@@ -557,34 +557,34 @@
  * uses i40e_aqc_switch_seid for the descriptor
  */
 struct i40e_aqc_get_switch_config_header_resp {
-	__le16 num_reported;
-	__le16 num_total;
-	u8     reserved[12];
+	__le16	num_reported;
+	__le16	num_total;
+	u8	reserved[12];
 };
 
 struct i40e_aqc_switch_config_element_resp {
-	u8     element_type;
-#define I40E_AQ_SW_ELEM_TYPE_MAC        1
-#define I40E_AQ_SW_ELEM_TYPE_PF         2
-#define I40E_AQ_SW_ELEM_TYPE_VF         3
-#define I40E_AQ_SW_ELEM_TYPE_EMP        4
-#define I40E_AQ_SW_ELEM_TYPE_BMC        5
-#define I40E_AQ_SW_ELEM_TYPE_PV         16
-#define I40E_AQ_SW_ELEM_TYPE_VEB        17
-#define I40E_AQ_SW_ELEM_TYPE_PA         18
-#define I40E_AQ_SW_ELEM_TYPE_VSI        19
-	u8     revision;
-#define I40E_AQ_SW_ELEM_REV_1           1
-	__le16 seid;
-	__le16 uplink_seid;
-	__le16 downlink_seid;
-	u8     reserved[3];
-	u8     connection_type;
-#define I40E_AQ_CONN_TYPE_REGULAR       0x1
-#define I40E_AQ_CONN_TYPE_DEFAULT       0x2
-#define I40E_AQ_CONN_TYPE_CASCADED      0x3
-	__le16 scheduler_id;
-	__le16 element_info;
+	u8	element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC	1
+#define I40E_AQ_SW_ELEM_TYPE_PF		2
+#define I40E_AQ_SW_ELEM_TYPE_VF		3
+#define I40E_AQ_SW_ELEM_TYPE_EMP	4
+#define I40E_AQ_SW_ELEM_TYPE_BMC	5
+#define I40E_AQ_SW_ELEM_TYPE_PV		16
+#define I40E_AQ_SW_ELEM_TYPE_VEB	17
+#define I40E_AQ_SW_ELEM_TYPE_PA		18
+#define I40E_AQ_SW_ELEM_TYPE_VSI	19
+	u8	revision;
+#define I40E_AQ_SW_ELEM_REV_1		1
+	__le16	seid;
+	__le16	uplink_seid;
+	__le16	downlink_seid;
+	u8	reserved[3];
+	u8	connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR	0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT	0x2
+#define I40E_AQ_CONN_TYPE_CASCADED	0x3
+	__le16	scheduler_id;
+	__le16	element_info;
 };
 
 /* Get Switch Configuration (indirect 0x0200)
@@ -592,73 +592,73 @@
  *    the first in the array is the header, remainder are elements
  */
 struct i40e_aqc_get_switch_config_resp {
-	struct i40e_aqc_get_switch_config_header_resp header;
-	struct i40e_aqc_switch_config_element_resp    element[1];
+	struct i40e_aqc_get_switch_config_header_resp	header;
+	struct i40e_aqc_switch_config_element_resp	element[1];
 };
 
 /* Add Statistics (direct 0x0201)
  * Remove Statistics (direct 0x0202)
  */
 struct i40e_aqc_add_remove_statistics {
-	__le16 seid;
-	__le16 vlan;
-	__le16 stat_index;
-	u8     reserved[10];
+	__le16	seid;
+	__le16	vlan;
+	__le16	stat_index;
+	u8	reserved[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
 
 /* Set Port Parameters command (direct 0x0203) */
 struct i40e_aqc_set_port_parameters {
-	__le16 command_flags;
-#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS   1
-#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS  2 /* must set! */
-#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA    4
-	__le16 bad_frame_vsi;
-	__le16 default_seid;        /* reserved for command */
-	u8     reserved[10];
+	__le16	command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS	1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS	2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA	4
+	__le16	bad_frame_vsi;
+	__le16	default_seid;        /* reserved for command */
+	u8	reserved[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
 
 /* Get Switch Resource Allocation (indirect 0x0204) */
 struct i40e_aqc_get_switch_resource_alloc {
-	u8     num_entries;         /* reserved for command */
-	u8     reserved[7];
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	num_entries;         /* reserved for command */
+	u8	reserved[7];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
 
 /* expect an array of these structs in the response buffer */
 struct i40e_aqc_switch_resource_alloc_element_resp {
-	u8     resource_type;
-#define I40E_AQ_RESOURCE_TYPE_VEB                 0x0
-#define I40E_AQ_RESOURCE_TYPE_VSI                 0x1
-#define I40E_AQ_RESOURCE_TYPE_MACADDR             0x2
-#define I40E_AQ_RESOURCE_TYPE_STAG                0x3
-#define I40E_AQ_RESOURCE_TYPE_ETAG                0x4
-#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH      0x5
-#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH        0x6
-#define I40E_AQ_RESOURCE_TYPE_VLAN                0x7
-#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY      0x8
-#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY     0x9
-#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL      0xA
-#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE         0xB
-#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS          0xC
-#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS        0xD
-#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS   0xF
-#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS          0x10
-#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS         0x11
-#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS            0x12
-#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS        0x13
-	u8     reserved1;
-	__le16 guaranteed;
-	__le16 total;
-	__le16 used;
-	__le16 total_unalloced;
-	u8     reserved2[6];
+	u8	resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB		0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI		0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR		0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG		0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG		0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH	0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH	0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN		0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY	0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY	0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL	0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE	0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS	0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS	0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS	0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS	0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS	0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS		0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS	0x13
+	u8	reserved1;
+	__le16	guaranteed;
+	__le16	total;
+	__le16	used;
+	__le16	total_unalloced;
+	u8	reserved2[6];
 };
 
 /* Add VSI (indirect 0x0210)
@@ -672,24 +672,24 @@
  *     uses the same completion and data structure as Add VSI
  */
 struct i40e_aqc_add_get_update_vsi {
-	__le16 uplink_seid;
-	u8     connection_type;
-#define I40E_AQ_VSI_CONN_TYPE_NORMAL            0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT           0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED          0x3
-	u8     reserved1;
-	u8     vf_id;
-	u8     reserved2;
-	__le16 vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT          0x0
-#define I40E_AQ_VSI_TYPE_MASK           (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
-#define I40E_AQ_VSI_TYPE_VF             0x0
-#define I40E_AQ_VSI_TYPE_VMDQ2          0x1
-#define I40E_AQ_VSI_TYPE_PF             0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG        0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV    0x4
-	__le32 addr_high;
-	__le32 addr_low;
+	__le16	uplink_seid;
+	u8	connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL	0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT	0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED	0x3
+	u8	reserved1;
+	u8	vf_id;
+	u8	reserved2;
+	__le16	vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT		0x0
+#define I40E_AQ_VSI_TYPE_MASK		(0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF		0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2		0x1
+#define I40E_AQ_VSI_TYPE_PF		0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG	0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV	0x4
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
@@ -707,121 +707,121 @@
 
 struct i40e_aqc_vsi_properties_data {
 	/* first 96 byte are written by SW */
-	__le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID       0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID     0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID         0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID       0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID   0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID    0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID    0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID    0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID     0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID        0x0200
+	__le16	valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID		0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID		0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID		0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID		0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID	0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID	0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID	0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID	0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID		0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID		0x0200
 	/* switch section */
-	__le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT             0x0000
-#define I40E_AQ_VSI_SW_ID_MASK              (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG     0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB     0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB     0x4000
-	u8     sw_reserved[2];
+	__le16	switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT		0x0000
+#define I40E_AQ_VSI_SW_ID_MASK		(0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG	0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB	0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB	0x4000
+	u8	sw_reserved[2];
 	/* security section */
-	u8     sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD    0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK    0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK     0x04
-	u8     sec_reserved;
+	u8	sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD	0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK	0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK	0x04
+	u8	sec_reserved;
 	/* VLAN section */
-	__le16 pvid; /* VLANS include priority bits */
-	__le16 fcoe_pvid;
-	u8     port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT        0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK         (0x03 << \
-						I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED       0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED     0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL          0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID       0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT        0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK         (0x3 << \
-					I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH     0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP       0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR          0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING      0x18
-	u8     pvlan_reserved[3];
+	__le16	pvid; /* VLANS include priority bits */
+	__le16	fcoe_pvid;
+	u8	port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT	0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK	(0x03 << \
+					 I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED	0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED	0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL	0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID	0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT	0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << \
+					 I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH	0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP	0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR	0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING	0x18
+	u8	pvlan_reserved[3];
 	/* ingress egress up sections */
-	__le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT      0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT      3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT      6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT      9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT      12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT      15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT      18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT      21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK       (0x7 << \
-					I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
-	__le32 egress_table;   /* same defines as for ingress table */
+	__le32	ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT	0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT	3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT	6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT	9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT	12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT	15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT	18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT	21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+	__le32	egress_table;   /* same defines as for ingress table */
 	/* cascaded PV section */
-	__le16 cas_pv_tag;
-	u8     cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT      0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK       (0x03 << \
-						I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE      0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE     0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY       0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG      0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE      0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
-	u8     cas_pv_reserved;
+	__le16	cas_pv_tag;
+	u8	cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT		0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK		(0x03 << \
+						 I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE		0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE		0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY		0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG		0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE		0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG	0x40
+	u8	cas_pv_reserved;
 	/* queue mapping section */
-	__le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG          0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG       0x1
-	__le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT             0x0
-#define I40E_AQ_VSI_QUEUE_MASK              (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
-	__le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT     0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK      (0x1FF << \
-						I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT     9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK      (0x7 << \
-						I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+	__le16	mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG	0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG	0x1
+	__le16	queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT		0x0
+#define I40E_AQ_VSI_QUEUE_MASK		(0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+	__le16	tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT	0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK	(0x1FF << \
+					 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT	9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK	(0x7 << \
+					 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 	/* queueing option section */
-	u8     queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA         0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA        0x20
-	u8     queueing_opt_reserved[3];
+	u8	queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA	0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA	0x20
+	u8	queueing_opt_reserved[3];
 	/* scheduler section */
-	u8     up_enable_bits;
-	u8     sched_reserved;
+	u8	up_enable_bits;
+	u8	sched_reserved;
 	/* outer up section */
-	__le32 outer_up_table; /* same structure and defines as ingress table */
-	u8     cmd_reserved[8];
+	__le32	outer_up_table; /* same structure and defines as ingress tbl */
+	u8	cmd_reserved[8];
 	/* last 32 bytes are written by FW */
-	__le16 qs_handle[8];
+	__le16	qs_handle[8];
 #define I40E_AQ_VSI_QS_HANDLE_INVALID	0xFFFF
-	__le16 stat_counter_idx;
-	__le16 sched_id;
-	u8     resp_reserved[12];
+	__le16	stat_counter_idx;
+	__le16	sched_id;
+	u8	resp_reserved[12];
 };
 
 I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
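
Editor's note: ingress_table (and the matching egress and outer tables) packs eight 3-bit user-priority entries into a single __le32, which is why each UPn above gets a shift plus a 0x7 mask. A standalone sketch of reading and writing one field, using the UP2 values from the hunk:

	#include <stdint.h>
	#include <stdio.h>

	#define UP_TABLE_UP2_SHIFT 6
	#define UP_TABLE_UP2_MASK  (0x7u << UP_TABLE_UP2_SHIFT)

	int main(void)
	{
		uint32_t table = 0;

		/* map user priority 2 to traffic class 5 */
		table = (table & ~UP_TABLE_UP2_MASK) | (5u << UP_TABLE_UP2_SHIFT);
		printf("UP2 -> TC %u\n",
		       (table & UP_TABLE_UP2_MASK) >> UP_TABLE_UP2_SHIFT);
		return 0;
	}
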
@@ -831,26 +831,26 @@
  * (IS_CTRL_PORT only works on add PV)
  */
 struct i40e_aqc_add_update_pv {
-	__le16 command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE                0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN    0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN    0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT           0x8
-	__le16 uplink_seid;
-	__le16 connected_seid;
-	u8     reserved[10];
+	__le16	command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE		0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN	0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN	0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT		0x8
+	__le16	uplink_seid;
+	__le16	connected_seid;
+	u8	reserved[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
 
 struct i40e_aqc_add_update_pv_completion {
 	/* reserved for update; for add also encodes error if rc == ENOSPC */
-	__le16 pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV               0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED            0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER          0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY            0x8
-	u8     reserved[14];
+	__le16	pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV	0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED	0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER	0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY	0x8
+	u8	reserved[14];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
@@ -860,48 +860,48 @@
  */
 
 struct i40e_aqc_get_pv_params_completion {
-	__le16 seid;
-	__le16 default_stag;
-	__le16 pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE            0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG  0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG  0x4
-	u8     reserved[8];
-	__le16 default_port_seid;
+	__le16	seid;
+	__le16	default_stag;
+	__le16	pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE			0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG	0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG	0x4
+	u8	reserved[8];
+	__le16	default_port_seid;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
 
 /* Add VEB (direct 0x0230) */
 struct i40e_aqc_add_veb {
-	__le16 uplink_seid;
-	__le16 downlink_seid;
-	__le16 veb_flags;
-#define I40E_AQC_ADD_VEB_FLOATING           0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT    1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK     (0x3 << \
+	__le16	uplink_seid;
+	__le16	downlink_seid;
+	__le16	veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING		0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT	1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK		(0x3 << \
 					I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT  0x2
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA     0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER   0x8
-	u8     enable_tcs;
-	u8     reserved[9];
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT	0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA		0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER	0x8
+	u8	enable_tcs;
+	u8	reserved[9];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
 
 struct i40e_aqc_add_veb_completion {
-	u8     reserved[6];
-	__le16 switch_seid;
+	u8	reserved[6];
+	__le16	switch_seid;
 	/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
-	__le16 veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB              0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED            0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER          0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY            0x8
-	__le16 statistic_index;
-	__le16 vebs_used;
-	__le16 vebs_free;
+	__le16	veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB		0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED		0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER	0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY		0x8
+	__le16	statistic_index;
+	__le16	vebs_used;
+	__le16	vebs_free;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
@@ -910,13 +910,13 @@
  * uses i40e_aqc_switch_seid for the descriptor
  */
 struct i40e_aqc_get_veb_parameters_completion {
-	__le16 seid;
-	__le16 switch_id;
-	__le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
-	__le16 statistic_index;
-	__le16 vebs_used;
-	__le16 vebs_free;
-	u8     reserved[4];
+	__le16	seid;
+	__le16	switch_id;
+	__le16	veb_flags; /* only the first/last flags from 0x0230 is valid */
+	__le16	statistic_index;
+	__le16	vebs_used;
+	__le16	vebs_free;
+	u8	reserved[4];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
@@ -929,37 +929,37 @@
 
 /* used for the command for most vlan commands */
 struct i40e_aqc_macvlan {
-	__le16 num_addresses;
-	__le16 seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK   (0x3FF << \
+	__le16	num_addresses;
+	__le16	seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK	(0x3FF << \
 					I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
-#define I40E_AQC_MACVLAN_CMD_SEID_VALID      0x8000
-	__le32 addr_high;
-	__le32 addr_low;
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID		0x8000
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
 
 /* indirect data for command and response */
 struct i40e_aqc_add_macvlan_element_data {
-	u8     mac_addr[6];
-	__le16 vlan_tag;
-	__le16 flags;
-#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH     0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH        0x0002
-#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN       0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE          0x0008
-	__le16 queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT  0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK   (0x7FF << \
+	u8	mac_addr[6];
+	__le16	vlan_tag;
+	__le16	flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH	0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH		0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN	0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE		0x0008
+	__le16	queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT	0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK		(0x7FF << \
 					I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
 	/* response section */
-	u8     match_method;
-#define I40E_AQC_MM_PERFECT_MATCH             0x01
-#define I40E_AQC_MM_HASH_MATCH                0x02
-#define I40E_AQC_MM_ERR_NO_RES                0xFF
-	u8     reserved1[3];
+	u8	match_method;
+#define I40E_AQC_MM_PERFECT_MATCH	0x01
+#define I40E_AQC_MM_HASH_MATCH		0x02
+#define I40E_AQC_MM_ERR_NO_RES		0xFF
+	u8	reserved1[3];
 };
 
 struct i40e_aqc_add_remove_macvlan_completion {
@@ -979,19 +979,19 @@
  */
 
 struct i40e_aqc_remove_macvlan_element_data {
-	u8     mac_addr[6];
-	__le16 vlan_tag;
-	u8     flags;
-#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH      0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH         0x02
-#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN        0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS           0x10
-	u8     reserved[3];
+	u8	mac_addr[6];
+	__le16	vlan_tag;
+	u8	flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH	0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH		0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN	0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS		0x10
+	u8	reserved[3];
 	/* reply section */
-	u8     error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS         0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL            0xFF
-	u8     reply_reserved[3];
+	u8	error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS		0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL		0xFF
+	u8	reply_reserved[3];
 };
 
 /* Add VLAN (indirect 0x0252)
@@ -999,59 +999,58 @@
  * use the generic i40e_aqc_macvlan for the command
  */
 struct i40e_aqc_add_remove_vlan_element_data {
-	__le16 vlan_tag;
-	u8     vlan_flags;
+	__le16	vlan_tag;
+	u8	vlan_flags;
 /* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL             0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT       1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK        (0x3 << \
-						I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR     0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY     0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY   0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT           3
-#define I40E_AQC_VLAN_PTYPE_MASK            (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI     0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI     0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI   0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI    0x18
+#define I40E_AQC_ADD_VLAN_LOCAL			0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT		1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK	(0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR		0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY		0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY	0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT		3
+#define I40E_AQC_VLAN_PTYPE_MASK	(0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI		0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI		0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI	0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI	0x18
 /* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL            0x1
-	u8     reserved;
-	u8     result;
+#define I40E_AQC_REMOVE_VLAN_ALL	0x1
+	u8	reserved;
+	u8	result;
 /* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS       0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST  0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+#define I40E_AQC_ADD_VLAN_SUCCESS	0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST	0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE	0xFF
 /* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS    0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL       0xFF
-	u8     reserved1[3];
+#define I40E_AQC_REMOVE_VLAN_SUCCESS	0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL	0xFF
+	u8	reserved1[3];
 };
 
 struct i40e_aqc_add_remove_vlan_completion {
-	u8     reserved[4];
-	__le16 vlans_used;
-	__le16 vlans_free;
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	reserved[4];
+	__le16	vlans_used;
+	__le16	vlans_free;
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 /* Set VSI Promiscuous Modes (direct 0x0254) */
 struct i40e_aqc_set_vsi_promiscuous_modes {
-	__le16 promiscuous_flags;
-	__le16 valid_flags;
+	__le16	promiscuous_flags;
+	__le16	valid_flags;
 /* flags used for both fields above */
-#define I40E_AQC_SET_VSI_PROMISC_UNICAST     0x01
-#define I40E_AQC_SET_VSI_PROMISC_MULTICAST   0x02
-#define I40E_AQC_SET_VSI_PROMISC_BROADCAST   0x04
-#define I40E_AQC_SET_VSI_DEFAULT             0x08
-#define I40E_AQC_SET_VSI_PROMISC_VLAN        0x10
-	__le16 seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK      0x3FF
-	__le16 vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_VALID          0x8000
-	u8     reserved[8];
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST	0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST	0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST	0x04
+#define I40E_AQC_SET_VSI_DEFAULT		0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN		0x10
+	__le16	seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK		0x3FF
+	__le16	vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID		0x8000
+	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1060,23 +1059,23 @@
  * Uses generic i40e_aqc_add_remove_tag_completion for completion
  */
 struct i40e_aqc_add_tag {
-	__le16 flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE     0x0001
-	__le16 seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK   (0x3FF << \
+	__le16	flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE		0x0001
+	__le16	seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
 					I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
-	__le16 tag;
-	__le16 queue_number;
-	u8     reserved[8];
+	__le16	tag;
+	__le16	queue_number;
+	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
 
 struct i40e_aqc_add_remove_tag_completion {
-	u8     reserved[12];
-	__le16 tags_used;
-	__le16 tags_free;
+	u8	reserved[12];
+	__le16	tags_used;
+	__le16	tags_free;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
@@ -1085,12 +1084,12 @@
  * Uses generic i40e_aqc_add_remove_tag_completion for completion
  */
 struct i40e_aqc_remove_tag {
-	__le16 seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK   (0x3FF << \
+	__le16	seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
 					I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
-	__le16 tag;
-	u8     reserved[12];
+	__le16	tag;
+	u8	reserved[12];
 };
 
 /* Add multicast E-Tag (direct 0x0257)
@@ -1098,22 +1097,22 @@
  * and no external data
  */
 struct i40e_aqc_add_remove_mcast_etag {
-	__le16 pv_seid;
-	__le16 etag;
-	u8     num_unicast_etags;
-	u8     reserved[3];
-	__le32 addr_high;          /* address of array of 2-byte s-tags */
-	__le32 addr_low;
+	__le16	pv_seid;
+	__le16	etag;
+	u8	num_unicast_etags;
+	u8	reserved[3];
+	__le32	addr_high;          /* address of array of 2-byte s-tags */
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
 
 struct i40e_aqc_add_remove_mcast_etag_completion {
-	u8     reserved[4];
-	__le16 mcast_etags_used;
-	__le16 mcast_etags_free;
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	reserved[4];
+	__le16	mcast_etags_used;
+	__le16	mcast_etags_free;
+	__le32	addr_high;
+	__le32	addr_low;
 
 };
 
@@ -1121,21 +1120,21 @@
 
 /* Update S/E-Tag (direct 0x0259) */
 struct i40e_aqc_update_tag {
-	__le16 seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK   (0x3FF << \
+	__le16	seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
 					I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
-	__le16 old_tag;
-	__le16 new_tag;
-	u8     reserved[10];
+	__le16	old_tag;
+	__le16	new_tag;
+	u8	reserved[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
 
 struct i40e_aqc_update_tag_completion {
-	u8     reserved[12];
-	__le16 tags_used;
-	__le16 tags_free;
+	u8	reserved[12];
+	__le16	tags_used;
+	__le16	tags_free;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
@@ -1146,30 +1145,30 @@
  * and the generic direct completion structure
  */
 struct i40e_aqc_add_remove_control_packet_filter {
-	u8     mac[6];
-	__le16 etype;
-	__le16 flags;
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC    0x0001
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP          0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE      0x0004
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX            0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX            0x0000
-	__le16 seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK   (0x3FF << \
+	u8	mac[6];
+	__le16	etype;
+	__le16	flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC	0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP		0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE	0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX		0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX		0x0000
+	__le16	seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK	(0x3FF << \
 				I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
-	__le16 queue;
-	u8     reserved[2];
+	__le16	queue;
+	u8	reserved[2];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
 
 struct i40e_aqc_add_remove_control_packet_filter_completion {
-	__le16 mac_etype_used;
-	__le16 etype_used;
-	__le16 mac_etype_free;
-	__le16 etype_free;
-	u8     reserved[8];
+	__le16	mac_etype_used;
+	__le16	etype_used;
+	__le16	mac_etype_free;
+	__le16	etype_free;
+	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
@@ -1180,23 +1179,23 @@
  * and the generic indirect completion structure
  */
 struct i40e_aqc_add_remove_cloud_filters {
-	u8     num_filters;
-	u8     reserved;
-	__le16 seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK   (0x3FF << \
+	u8	num_filters;
+	u8	reserved;
+	__le16	seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK	(0x3FF << \
 					I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
-	u8     reserved2[4];
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	reserved2[4];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
 
 struct i40e_aqc_add_remove_cloud_filters_element_data {
-	u8     outer_mac[6];
-	u8     inner_mac[6];
-	__le16 inner_vlan;
+	u8	outer_mac[6];
+	u8	inner_mac[6];
+	__le16	inner_vlan;
 	union {
 		struct {
 			u8 reserved[12];
@@ -1206,52 +1205,49 @@
 			u8 data[16];
 		} v6;
 	} ipaddr;
-	__le16 flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT                 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK                  (0x3F << \
+	__le16	flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT			0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK			(0x3F << \
 					I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE               0x0002
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE        0x0004
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL        0x0007
 /* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP                   0x0001
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP			0x0001
 /* 0x0002 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN            0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID     0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN		0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID	0x0004
 /* 0x0005 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID           0x0006
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID		0x0006
 /* 0x0007 reserved */
 /* 0x0008 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC                  0x0009
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC                  0x000A
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC      0x000B
-#define I40E_AQC_ADD_CLOUD_FILTER_IIP                   0x000C
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC			0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC			0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC	0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP			0x000C
 
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE               0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT                    6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK                     0x00C0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4                   0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6                   0x0100
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE		0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT			6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK			0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4			0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6			0x0100
 
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT               9
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK                0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN               0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC          1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE                 2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                  3
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT		9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK		0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN		0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC		1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE			2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP			3
 
-	__le32 tenant_id;
-	u8     reserved[4];
-	__le16 queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT                  0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK                   (0x3F << \
-					I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
-	u8     reserved2[14];
+	__le32	tenant_id;
+	u8	reserved[4];
+	__le16	queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT		0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK		(0x3F << \
+						 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+	u8	reserved2[14];
 	/* response section */
-	u8     allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS         0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL            0xFF
-	u8     response_reserved[7];
+	u8	allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS	0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL		0xFF
+	u8	response_reserved[7];
 };
 
 struct i40e_aqc_remove_cloud_filters_completion {
@@ -1273,14 +1269,14 @@
 struct i40e_aqc_add_delete_mirror_rule {
 	__le16 seid;
 	__le16 rule_type;
-#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT            0
-#define I40E_AQC_MIRROR_RULE_TYPE_MASK             (0x7 << \
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT		0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK		(0x7 << \
 						I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS    1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS     2
-#define I40E_AQC_MIRROR_RULE_TYPE_VLAN             3
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS      4
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS       5
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS	1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS	2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN		3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS	4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS	5
 	__le16 num_entries;
 	__le16 destination;  /* VSI for add, rule id for delete */
 	__le32 addr_high;    /* address of array of 2-byte VSI or VLAN ids */
@@ -1290,12 +1286,12 @@
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
 
 struct i40e_aqc_add_delete_mirror_rule_completion {
-	u8     reserved[2];
-	__le16 rule_id;  /* only used on add */
-	__le16 mirror_rules_used;
-	__le16 mirror_rules_free;
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	reserved[2];
+	__le16	rule_id;  /* only used on add */
+	__le16	mirror_rules_used;
+	__le16	mirror_rules_free;
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
@@ -1306,11 +1302,11 @@
  *    the command and response use the same descriptor structure
  */
 struct i40e_aqc_pfc_ignore {
-	u8     tc_bitmap;
-	u8     command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET    0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR  0x0
-	u8     reserved[14];
+	u8	tc_bitmap;
+	u8	command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET		0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR	0x0
+	u8	reserved[14];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
@@ -1325,10 +1321,10 @@
  * this generic struct to pass the SEID in param0
  */
 struct i40e_aqc_tx_sched_ind {
-	__le16 vsi_seid;
-	u8     reserved[6];
-	__le32 addr_high;
-	__le32 addr_low;
+	__le16	vsi_seid;
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
@@ -1340,12 +1336,12 @@
 
 /* Configure VSI BW limits (direct 0x0400) */
 struct i40e_aqc_configure_vsi_bw_limit {
-	__le16 vsi_seid;
-	u8     reserved[2];
-	__le16 credit;
-	u8     reserved1[2];
-	u8     max_credit; /* 0-3, limit = 2^max */
-	u8     reserved2[7];
+	__le16	vsi_seid;
+	u8	reserved[2];
+	__le16	credit;
+	u8	reserved1[2];
+	u8	max_credit; /* 0-3, limit = 2^max */
+	u8	reserved2[7];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
@@ -1354,58 +1350,58 @@
  *    responds with i40e_aqc_qs_handles_resp
  */
 struct i40e_aqc_configure_vsi_ets_sla_bw_data {
-	u8     tc_valid_bits;
-	u8     reserved[15];
-	__le16 tc_bw_credits[8]; /* FW writesback QS handles here */
+	u8	tc_valid_bits;
+	u8	reserved[15];
+	__le16	tc_bw_credits[8]; /* FW writes back QS handles here */
 
 	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
-	__le16 tc_bw_max[2];
-	u8     reserved1[28];
+	__le16	tc_bw_max[2];
+	u8	reserved1[28];
 };
 
 /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
  *    responds with i40e_aqc_qs_handles_resp
  */
 struct i40e_aqc_configure_vsi_tc_bw_data {
-	u8     tc_valid_bits;
-	u8     reserved[3];
-	u8     tc_bw_credits[8];
-	u8     reserved1[4];
-	__le16 qs_handles[8];
+	u8	tc_valid_bits;
+	u8	reserved[3];
+	u8	tc_bw_credits[8];
+	u8	reserved1[4];
+	__le16	qs_handles[8];
 };
 
 /* Query vsi bw configuration (indirect 0x0408) */
 struct i40e_aqc_query_vsi_bw_config_resp {
-	u8     tc_valid_bits;
-	u8     tc_suspended_bits;
-	u8     reserved[14];
-	__le16 qs_handles[8];
-	u8     reserved1[4];
-	__le16 port_bw_limit;
-	u8     reserved2[2];
-	u8     max_bw; /* 0-3, limit = 2^max */
-	u8     reserved3[23];
+	u8	tc_valid_bits;
+	u8	tc_suspended_bits;
+	u8	reserved[14];
+	__le16	qs_handles[8];
+	u8	reserved1[4];
+	__le16	port_bw_limit;
+	u8	reserved2[2];
+	u8	max_bw; /* 0-3, limit = 2^max */
+	u8	reserved3[23];
 };
 
 /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
 struct i40e_aqc_query_vsi_ets_sla_config_resp {
-	u8     tc_valid_bits;
-	u8     reserved[3];
-	u8     share_credits[8];
-	__le16 credits[8];
+	u8	tc_valid_bits;
+	u8	reserved[3];
+	u8	share_credits[8];
+	__le16	credits[8];
 
 	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
-	__le16 tc_bw_max[2];
+	__le16	tc_bw_max[2];
 };
 
 /* Configure Switching Component Bandwidth Limit (direct 0x0410) */
 struct i40e_aqc_configure_switching_comp_bw_limit {
-	__le16 seid;
-	u8     reserved[2];
-	__le16 credit;
-	u8     reserved1[2];
-	u8     max_bw; /* 0-3, limit = 2^max */
-	u8     reserved2[7];
+	__le16	seid;
+	u8	reserved[2];
+	__le16	credit;
+	u8	reserved1[2];
+	u8	max_bw; /* 0-3, limit = 2^max */
+	u8	reserved2[7];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
@@ -1415,75 +1411,75 @@
  * Disable Physical Port ETS (indirect 0x0415)
  */
 struct i40e_aqc_configure_switching_comp_ets_data {
-	u8     reserved[4];
-	u8     tc_valid_bits;
-	u8     seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK     0x1
-	u8     tc_strict_priority_flags;
-	u8     reserved1[17];
-	u8     tc_bw_share_credits[8];
-	u8     reserved2[96];
+	u8	reserved[4];
+	u8	tc_valid_bits;
+	u8	seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK	0x1
+	u8	tc_strict_priority_flags;
+	u8	reserved1[17];
+	u8	tc_bw_share_credits[8];
+	u8	reserved2[96];
 };
 
 /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
 struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
-	u8     tc_valid_bits;
-	u8     reserved[15];
-	__le16 tc_bw_credit[8];
+	u8	tc_valid_bits;
+	u8	reserved[15];
+	__le16	tc_bw_credit[8];
 
 	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
-	__le16 tc_bw_max[2];
-	u8     reserved1[28];
+	__le16	tc_bw_max[2];
+	u8	reserved1[28];
 };
 
 /* Configure Switching Component Bandwidth Allocation per Tc
  * (indirect 0x0417)
  */
 struct i40e_aqc_configure_switching_comp_bw_config_data {
-	u8     tc_valid_bits;
-	u8     reserved[2];
-	u8     absolute_credits; /* bool */
-	u8     tc_bw_share_credits[8];
-	u8     reserved1[20];
+	u8	tc_valid_bits;
+	u8	reserved[2];
+	u8	absolute_credits; /* bool */
+	u8	tc_bw_share_credits[8];
+	u8	reserved1[20];
 };
 
 /* Query Switching Component Configuration (indirect 0x0418) */
 struct i40e_aqc_query_switching_comp_ets_config_resp {
-	u8     tc_valid_bits;
-	u8     reserved[35];
-	__le16 port_bw_limit;
-	u8     reserved1[2];
-	u8     tc_bw_max; /* 0-3, limit = 2^max */
-	u8     reserved2[23];
+	u8	tc_valid_bits;
+	u8	reserved[35];
+	__le16	port_bw_limit;
+	u8	reserved1[2];
+	u8	tc_bw_max; /* 0-3, limit = 2^max */
+	u8	reserved2[23];
 };
 
 /* Query PhysicalPort ETS Configuration (indirect 0x0419) */
 struct i40e_aqc_query_port_ets_config_resp {
-	u8     reserved[4];
-	u8     tc_valid_bits;
-	u8     reserved1;
-	u8     tc_strict_priority_bits;
-	u8     reserved2;
-	u8     tc_bw_share_credits[8];
-	__le16 tc_bw_limits[8];
+	u8	reserved[4];
+	u8	tc_valid_bits;
+	u8	reserved1;
+	u8	tc_strict_priority_bits;
+	u8	reserved2;
+	u8	tc_bw_share_credits[8];
+	__le16	tc_bw_limits[8];
 
 	/* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
-	__le16 tc_bw_max[2];
-	u8     reserved3[32];
+	__le16	tc_bw_max[2];
+	u8	reserved3[32];
 };
 
 /* Query Switching Component Bandwidth Allocation per Traffic Type
  * (indirect 0x041A)
  */
 struct i40e_aqc_query_switching_comp_bw_config_resp {
-	u8     tc_valid_bits;
-	u8     reserved[2];
-	u8     absolute_credits_enable; /* bool */
-	u8     tc_bw_share_credits[8];
-	__le16 tc_bw_limits[8];
+	u8	tc_valid_bits;
+	u8	reserved[2];
+	u8	absolute_credits_enable; /* bool */
+	u8	tc_bw_share_credits[8];
+	__le16	tc_bw_limits[8];
 
 	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
-	__le16 tc_bw_max[2];
+	__le16	tc_bw_max[2];
 };
 
 /* Suspend/resume port TX traffic
@@ -1494,37 +1490,37 @@
  * (indirect 0x041D)
  */
 struct i40e_aqc_configure_partition_bw_data {
-	__le16 pf_valid_bits;
-	u8     min_bw[16];      /* guaranteed bandwidth */
-	u8     max_bw[16];      /* bandwidth limit */
+	__le16	pf_valid_bits;
+	u8	min_bw[16];      /* guaranteed bandwidth */
+	u8	max_bw[16];      /* bandwidth limit */
 };
 
 /* Get and set the active HMC resource profile and status.
  * (direct 0x0500) and (direct 0x0501)
  */
 struct i40e_aq_get_set_hmc_resource_profile {
-	u8     pm_profile;
-	u8     pe_vf_enabled;
-	u8     reserved[14];
+	u8	pm_profile;
+	u8	pe_vf_enabled;
+	u8	reserved[14];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
 
 enum i40e_aq_hmc_profile {
 	/* I40E_HMC_PROFILE_NO_CHANGE    = 0, reserved */
-	I40E_HMC_PROFILE_DEFAULT     = 1,
-	I40E_HMC_PROFILE_FAVOR_VF    = 2,
-	I40E_HMC_PROFILE_EQUAL       = 3,
+	I40E_HMC_PROFILE_DEFAULT	= 1,
+	I40E_HMC_PROFILE_FAVOR_VF	= 2,
+	I40E_HMC_PROFILE_EQUAL		= 3,
 };
 
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK       0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK    0x3F
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK	0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK	0x3F
 
 /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
 
 /* set in param0 for get phy abilities to report qualified modules */
-#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES  0x0001
-#define I40E_AQ_PHY_REPORT_INITIAL_VALUES     0x0002
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES	0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES	0x0002
 
 enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_SGMII			= 0x0,
@@ -1582,147 +1578,147 @@
 };
 
 struct i40e_aq_get_phy_abilities_resp {
-	__le32 phy_type;       /* bitmap using the above enum for offsets */
-	u8     link_speed;     /* bitmap using the above enum bit patterns */
-	u8     abilities;
-#define I40E_AQ_PHY_FLAG_PAUSE_TX         0x01
-#define I40E_AQ_PHY_FLAG_PAUSE_RX         0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER        0x04
-#define I40E_AQ_PHY_LINK_ENABLED		  0x08
-#define I40E_AQ_PHY_AN_ENABLED			  0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL      0x20
-	__le16 eee_capability;
-#define I40E_AQ_EEE_100BASE_TX       0x0002
-#define I40E_AQ_EEE_1000BASE_T       0x0004
-#define I40E_AQ_EEE_10GBASE_T        0x0008
-#define I40E_AQ_EEE_1000BASE_KX      0x0010
-#define I40E_AQ_EEE_10GBASE_KX4      0x0020
-#define I40E_AQ_EEE_10GBASE_KR       0x0040
-	__le32 eeer_val;
-	u8     d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA  0x01
-	u8     reserved[3];
-	u8     phy_id[4];
-	u8     module_type[3];
-	u8     qualified_module_count;
-#define I40E_AQ_PHY_MAX_QMS          16
-	struct i40e_aqc_module_desc  qualified_module[I40E_AQ_PHY_MAX_QMS];
+	__le32	phy_type;       /* bitmap using the above enum for offsets */
+	u8	link_speed;     /* bitmap using the above enum bit patterns */
+	u8	abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX	0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX	0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER	0x04
+#define I40E_AQ_PHY_LINK_ENABLED	0x08
+#define I40E_AQ_PHY_AN_ENABLED		0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL	0x20
+	__le16	eee_capability;
+#define I40E_AQ_EEE_100BASE_TX		0x0002
+#define I40E_AQ_EEE_1000BASE_T		0x0004
+#define I40E_AQ_EEE_10GBASE_T		0x0008
+#define I40E_AQ_EEE_1000BASE_KX		0x0010
+#define I40E_AQ_EEE_10GBASE_KX4		0x0020
+#define I40E_AQ_EEE_10GBASE_KR		0x0040
+	__le32	eeer_val;
+	u8	d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA	0x01
+	u8	reserved[3];
+	u8	phy_id[4];
+	u8	module_type[3];
+	u8	qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS		16
+	struct i40e_aqc_module_desc	qualified_module[I40E_AQ_PHY_MAX_QMS];
 };
 
 /* Set PHY Config (direct 0x0601) */
 struct i40e_aq_set_phy_config { /* same bits as above in all */
-	__le32 phy_type;
-	u8     link_speed;
-	u8     abilities;
+	__le32	phy_type;
+	u8	link_speed;
+	u8	abilities;
 /* bits 0-2 use the values from get_phy_abilities_resp */
 #define I40E_AQ_PHY_ENABLE_LINK		0x08
 #define I40E_AQ_PHY_ENABLE_AN		0x10
 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK	0x20
-	__le16 eee_capability;
-	__le32 eeer;
-	u8     low_power_ctrl;
-	u8     reserved[3];
+	__le16	eee_capability;
+	__le32	eeer;
+	u8	low_power_ctrl;
+	u8	reserved[3];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
 
 /* Set MAC Config command data structure (direct 0x0603) */
 struct i40e_aq_set_mac_config {
-	__le16 max_frame_size;
-	u8     params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN           0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK      0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT     3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE      0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX   0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX   0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX   0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX   0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX   0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX   0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX   0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX   0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX   0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX   0x1
-	u8     tx_timer_priority; /* bitmap */
-	__le16 tx_timer_value;
-	__le16 fc_refresh_threshold;
-	u8     reserved[8];
+	__le16	max_frame_size;
+	u8	params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN		0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK	0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT	3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE	0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX	0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX	0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX	0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX	0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX	0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX	0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX	0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX	0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX	0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX	0x1
+	u8	tx_timer_priority; /* bitmap */
+	__le16	tx_timer_value;
+	__le16	fc_refresh_threshold;
+	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
 
 /* Restart Auto-Negotiation (direct 0x605) */
 struct i40e_aqc_set_link_restart_an {
-	u8     command;
-#define I40E_AQ_PHY_RESTART_AN  0x02
-#define I40E_AQ_PHY_LINK_ENABLE 0x04
-	u8     reserved[15];
+	u8	command;
+#define I40E_AQ_PHY_RESTART_AN	0x02
+#define I40E_AQ_PHY_LINK_ENABLE	0x04
+	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
 
 /* Get Link Status cmd & response data structure (direct 0x0607) */
 struct i40e_aqc_get_link_status {
-	__le16 command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK             0x3
-#define I40E_AQ_LSE_NOP              0x0
-#define I40E_AQ_LSE_DISABLE          0x2
-#define I40E_AQ_LSE_ENABLE           0x3
+	__le16	command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK		0x3
+#define I40E_AQ_LSE_NOP			0x0
+#define I40E_AQ_LSE_DISABLE		0x2
+#define I40E_AQ_LSE_ENABLE		0x3
 /* only response uses this flag */
-#define I40E_AQ_LSE_IS_ENABLED       0x1
-	u8     phy_type;    /* i40e_aq_phy_type   */
-	u8     link_speed;  /* i40e_aq_link_speed */
-	u8     link_info;
-#define I40E_AQ_LINK_UP              0x01
-#define I40E_AQ_LINK_FAULT           0x02
-#define I40E_AQ_LINK_FAULT_TX        0x04
-#define I40E_AQ_LINK_FAULT_RX        0x08
-#define I40E_AQ_LINK_FAULT_REMOTE    0x10
-#define I40E_AQ_MEDIA_AVAILABLE      0x40
-#define I40E_AQ_SIGNAL_DETECT        0x80
-	u8     an_info;
-#define I40E_AQ_AN_COMPLETED         0x01
-#define I40E_AQ_LP_AN_ABILITY        0x02
-#define I40E_AQ_PD_FAULT             0x04
-#define I40E_AQ_FEC_EN               0x08
-#define I40E_AQ_PHY_LOW_POWER        0x10
-#define I40E_AQ_LINK_PAUSE_TX        0x20
-#define I40E_AQ_LINK_PAUSE_RX        0x40
-#define I40E_AQ_QUALIFIED_MODULE     0x80
-	u8     ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM  0x01
-#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
-#define I40E_AQ_LINK_TX_SHIFT        0x02
-#define I40E_AQ_LINK_TX_MASK         (0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE       0x00
-#define I40E_AQ_LINK_TX_DRAINED      0x01
-#define I40E_AQ_LINK_TX_FLUSHED      0x03
-#define I40E_AQ_LINK_FORCED_40G      0x10
-	u8     loopback;         /* use defines from i40e_aqc_set_lb_mode */
-	__le16 max_frame_size;
-	u8     config;
-#define I40E_AQ_CONFIG_CRC_ENA       0x04
-#define I40E_AQ_CONFIG_PACING_MASK   0x78
-	u8     reserved[5];
+#define I40E_AQ_LSE_IS_ENABLED		0x1
+	u8	phy_type;    /* i40e_aq_phy_type   */
+	u8	link_speed;  /* i40e_aq_link_speed */
+	u8	link_info;
+#define I40E_AQ_LINK_UP			0x01
+#define I40E_AQ_LINK_FAULT		0x02
+#define I40E_AQ_LINK_FAULT_TX		0x04
+#define I40E_AQ_LINK_FAULT_RX		0x08
+#define I40E_AQ_LINK_FAULT_REMOTE	0x10
+#define I40E_AQ_MEDIA_AVAILABLE		0x40
+#define I40E_AQ_SIGNAL_DETECT		0x80
+	u8	an_info;
+#define I40E_AQ_AN_COMPLETED		0x01
+#define I40E_AQ_LP_AN_ABILITY		0x02
+#define I40E_AQ_PD_FAULT		0x04
+#define I40E_AQ_FEC_EN			0x08
+#define I40E_AQ_PHY_LOW_POWER		0x10
+#define I40E_AQ_LINK_PAUSE_TX		0x20
+#define I40E_AQ_LINK_PAUSE_RX		0x40
+#define I40E_AQ_QUALIFIED_MODULE	0x80
+	u8	ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM	0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS	0x02
+#define I40E_AQ_LINK_TX_SHIFT		0x02
+#define I40E_AQ_LINK_TX_MASK		(0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE		0x00
+#define I40E_AQ_LINK_TX_DRAINED		0x01
+#define I40E_AQ_LINK_TX_FLUSHED		0x03
+#define I40E_AQ_LINK_FORCED_40G		0x10
+	u8	loopback; /* use defines from i40e_aqc_set_lb_mode */
+	__le16	max_frame_size;
+	u8	config;
+#define I40E_AQ_CONFIG_CRC_ENA		0x04
+#define I40E_AQ_CONFIG_PACING_MASK	0x78
+	u8	reserved[5];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
 
 /* Set event mask command (direct 0x613) */
 struct i40e_aqc_set_phy_int_mask {
-	u8     reserved[8];
-	__le16 event_mask;
-#define I40E_AQ_EVENT_LINK_UPDOWN       0x0002
-#define I40E_AQ_EVENT_MEDIA_NA          0x0004
-#define I40E_AQ_EVENT_LINK_FAULT        0x0008
-#define I40E_AQ_EVENT_PHY_TEMP_ALARM    0x0010
-#define I40E_AQ_EVENT_EXCESSIVE_ERRORS  0x0020
-#define I40E_AQ_EVENT_SIGNAL_DETECT     0x0040
-#define I40E_AQ_EVENT_AN_COMPLETED      0x0080
-#define I40E_AQ_EVENT_MODULE_QUAL_FAIL  0x0100
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
-	u8     reserved1[6];
+	u8	reserved[8];
+	__le16	event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN	0x0002
+#define I40E_AQ_EVENT_MEDIA_NA		0x0004
+#define I40E_AQ_EVENT_LINK_FAULT	0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM	0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS	0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT	0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED	0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL	0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED	0x0200
+	u8	reserved1[6];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
@@ -1732,27 +1728,27 @@
  * Get Link Partner AN advt register (direct 0x0616)
  */
 struct i40e_aqc_an_advt_reg {
-	__le32 local_an_reg0;
-	__le16 local_an_reg1;
-	u8     reserved[10];
+	__le32	local_an_reg0;
+	__le16	local_an_reg1;
+	u8	reserved[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
 
 /* Set Loopback mode (0x0618) */
 struct i40e_aqc_set_lb_mode {
-	__le16 lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL   0x01
-#define I40E_AQ_LB_PHY_REMOTE  0x02
-#define I40E_AQ_LB_MAC_LOCAL   0x04
-	u8     reserved[14];
+	__le16	lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL	0x01
+#define I40E_AQ_LB_PHY_REMOTE	0x02
+#define I40E_AQ_LB_MAC_LOCAL	0x04
+	u8	reserved[14];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
 
 /* Set PHY Debug command (0x0622) */
 struct i40e_aqc_set_phy_debug {
-	u8     command_flags;
+	u8	command_flags;
 #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL	0x02
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT	2
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK	(0x03 << \
@@ -1761,15 +1757,15 @@
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD	0x01
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT	0x02
 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW	0x10
-	u8     reserved[15];
+	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
 
 enum i40e_aq_phy_reg_type {
-	I40E_AQC_PHY_REG_INTERNAL         = 0x1,
-	I40E_AQC_PHY_REG_EXERNAL_BASET    = 0x2,
-	I40E_AQC_PHY_REG_EXERNAL_MODULE   = 0x3
+	I40E_AQC_PHY_REG_INTERNAL	= 0x1,
+	I40E_AQC_PHY_REG_EXERNAL_BASET	= 0x2,
+	I40E_AQC_PHY_REG_EXERNAL_MODULE	= 0x3
 };
 
 /* NVM Read command (indirect 0x0701)
@@ -1777,40 +1773,40 @@
  * NVM Update commands (indirect 0x0703)
  */
 struct i40e_aqc_nvm_update {
-	u8     command_flags;
-#define I40E_AQ_NVM_LAST_CMD    0x01
-#define I40E_AQ_NVM_FLASH_ONLY  0x80
-	u8     module_pointer;
-	__le16 length;
-	__le32 offset;
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	command_flags;
+#define I40E_AQ_NVM_LAST_CMD	0x01
+#define I40E_AQ_NVM_FLASH_ONLY	0x80
+	u8	module_pointer;
+	__le16	length;
+	__le32	offset;
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
 
 /* NVM Config Read (indirect 0x0704) */
 struct i40e_aqc_nvm_config_read {
-	__le16 cmd_flags;
+	__le16	cmd_flags;
 #define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK	1
 #define ANVM_READ_SINGLE_FEATURE		0
 #define ANVM_READ_MULTIPLE_FEATURES		1
-	__le16 element_count;
-	__le16 element_id;		/* Feature/field ID */
-	u8     reserved[2];
-	__le32 address_high;
-	__le32 address_low;
+	__le16	element_count;
+	__le16	element_id; /* Feature/field ID */
+	u8	reserved[2];
+	__le32	address_high;
+	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
 
 /* NVM Config Write (indirect 0x0705) */
 struct i40e_aqc_nvm_config_write {
-	__le16 cmd_flags;
-	__le16 element_count;
-	u8     reserved[4];
-	__le32 address_high;
-	__le32 address_low;
+	__le16	cmd_flags;
+	__le16	element_count;
+	u8	reserved[4];
+	__le32	address_high;
+	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
@@ -1835,10 +1831,10 @@
  * Send to Peer PF command (indirect 0x0803)
  */
 struct i40e_aqc_pf_vf_message {
-	__le32 id;
-	u8     reserved[4];
-	__le32 addr_high;
-	__le32 addr_low;
+	__le32	id;
+	u8	reserved[4];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
@@ -1874,22 +1870,22 @@
  * uses i40e_aq_desc
  */
 struct i40e_aqc_alternate_write_done {
-	__le16 cmd_flags;
+	__le16	cmd_flags;
 #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK	1
 #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY	0
 #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI	1
 #define I40E_AQ_ALTERNATE_RESET_NEEDED		2
-	u8     reserved[14];
+	u8	reserved[14];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
 
 /* Set OEM mode (direct 0x0905) */
 struct i40e_aqc_alternate_set_mode {
-	__le32 mode;
+	__le32	mode;
 #define I40E_AQ_ALTERNATE_MODE_NONE	0
 #define I40E_AQ_ALTERNATE_MODE_OEM	1
-	u8     reserved[12];
+	u8	reserved[12];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
@@ -1900,33 +1896,33 @@
 
 /* Lan Queue Overflow Event (direct, 0x1001) */
 struct i40e_aqc_lan_overflow {
-	__le32 prtdcb_rupto;
-	__le32 otx_ctl;
-	u8     reserved[8];
+	__le32	prtdcb_rupto;
+	__le32	otx_ctl;
+	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
 
 /* Get LLDP MIB (indirect 0x0A00) */
 struct i40e_aqc_lldp_get_mib {
-	u8     type;
-	u8     reserved1;
-#define I40E_AQ_LLDP_MIB_TYPE_MASK                      0x3
-#define I40E_AQ_LLDP_MIB_LOCAL                          0x0
-#define I40E_AQ_LLDP_MIB_REMOTE                         0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE               0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK                   0xC
-#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT                  0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE         0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR               0x1
-#define I40E_AQ_LLDP_TX_SHIFT              0x4
-#define I40E_AQ_LLDP_TX_MASK               (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+	u8	type;
+	u8	reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK		0x3
+#define I40E_AQ_LLDP_MIB_LOCAL			0x0
+#define I40E_AQ_LLDP_MIB_REMOTE			0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE	0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK		0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT		0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE	0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR	0x1
+#define I40E_AQ_LLDP_TX_SHIFT			0x4
+#define I40E_AQ_LLDP_TX_MASK			(0x03 << I40E_AQ_LLDP_TX_SHIFT)
 /* TX pause flags use I40E_AQ_LINK_TX_* above */
-	__le16 local_len;
-	__le16 remote_len;
-	u8     reserved2[2];
-	__le32 addr_high;
-	__le32 addr_low;
+	__le16	local_len;
+	__le16	remote_len;
+	u8	reserved2[2];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
@@ -1935,12 +1931,12 @@
  * also used for the event (with type in the command field)
  */
 struct i40e_aqc_lldp_update_mib {
-	u8     command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE          0x0
-#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE         0x1
-	u8     reserved[7];
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE	0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE	0x1
+	u8	reserved[7];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
@@ -1949,35 +1945,35 @@
  * Delete LLDP TLV (indirect 0x0A04)
  */
 struct i40e_aqc_lldp_add_tlv {
-	u8     type; /* only nearest bridge and non-TPMR from 0x0A00 */
-	u8     reserved1[1];
-	__le16 len;
-	u8     reserved2[4];
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	type; /* only nearest bridge and non-TPMR from 0x0A00 */
+	u8	reserved1[1];
+	__le16	len;
+	u8	reserved2[4];
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
 
 /* Update LLDP TLV (indirect 0x0A03) */
 struct i40e_aqc_lldp_update_tlv {
-	u8     type; /* only nearest bridge and non-TPMR from 0x0A00 */
-	u8     reserved;
-	__le16 old_len;
-	__le16 new_offset;
-	__le16 new_len;
-	__le32 addr_high;
-	__le32 addr_low;
+	u8	type; /* only nearest bridge and non-TPMR from 0x0A00 */
+	u8	reserved;
+	__le16	old_len;
+	__le16	new_offset;
+	__le16	new_len;
+	__le32	addr_high;
+	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
 
 /* Stop LLDP (direct 0x0A05) */
 struct i40e_aqc_lldp_stop {
-	u8     command;
-#define I40E_AQ_LLDP_AGENT_STOP                 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN             0x1
-	u8     reserved[15];
+	u8	command;
+#define I40E_AQ_LLDP_AGENT_STOP		0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN	0x1
+	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
@@ -1985,9 +1981,9 @@
 /* Start LLDP (direct 0x0A06) */
 
 struct i40e_aqc_lldp_start {
-	u8     command;
-#define I40E_AQ_LLDP_AGENT_START                0x1
-	u8     reserved[15];
+	u8	command;
+#define I40E_AQ_LLDP_AGENT_START	0x1
+	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
@@ -1998,13 +1994,13 @@
 
 /* Add Udp Tunnel command and completion (direct 0x0B00) */
 struct i40e_aqc_add_udp_tunnel {
-	__le16 udp_port;
-	u8     reserved0[3];
-	u8     protocol_type;
+	__le16	udp_port;
+	u8	reserved0[3];
+	u8	protocol_type;
 #define I40E_AQC_TUNNEL_TYPE_VXLAN	0x00
 #define I40E_AQC_TUNNEL_TYPE_NGE	0x01
 #define I40E_AQC_TUNNEL_TYPE_TEREDO	0x10
-	u8     reserved1[10];
+	u8	reserved1[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
@@ -2013,8 +2009,8 @@
 	__le16 udp_port;
 	u8	filter_entry_index;
 	u8	multiple_pfs;
-#define I40E_AQC_SINGLE_PF				0x0
-#define I40E_AQC_MULTIPLE_PFS			0x1
+#define I40E_AQC_SINGLE_PF		0x0
+#define I40E_AQC_MULTIPLE_PFS		0x1
 	u8	total_filters;
 	u8	reserved[11];
 };
@@ -2023,23 +2019,19 @@
 
 /* remove UDP Tunnel command (0x0B01) */
 struct i40e_aqc_remove_udp_tunnel {
-	u8     reserved[2];
-	u8     index; /* 0 to 15 */
-	u8     pf_filters;
-	u8     total_filters;
-	u8     reserved2[11];
+	u8	reserved[2];
+	u8	index; /* 0 to 15 */
+	u8	reserved2[13];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
 
 struct i40e_aqc_del_udp_tunnel_completion {
-	__le16 udp_port;
-	u8     index; /* 0 to 15 */
-	u8     multiple_pfs;
-	u8     total_filters_used;
-	u8     reserved;
-	u8     tunnels_free;
-	u8     reserved1[9];
+	__le16	udp_port;
+	u8	index; /* 0 to 15 */
+	u8	multiple_pfs;
+	u8	total_filters_used;
+	u8	reserved1[11];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
@@ -2068,11 +2060,11 @@
 	u8	key1_len;  /* 0 to 15 */
 	u8	key2_len;  /* 0 to 15 */
 	u8	flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE	0x01
 /* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS    0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED   0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS	0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED	0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN	0x03
 	u8	network_key_index;
 #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN		0x0
 #define I40E_AQC_NETWORK_KEY_INDEX_NGE			0x1
@@ -2085,21 +2077,21 @@
 
 /* OEM mode commands (direct 0xFE0x) */
 struct i40e_aqc_oem_param_change {
-	__le32 param_type;
-#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL   0
-#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL   1
-#define I40E_AQ_OEM_PARAM_MAC           2
-	__le32 param_value1;
-	u8     param_value2[8];
+	__le32	param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL	0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL	1
+#define I40E_AQ_OEM_PARAM_MAC		2
+	__le32	param_value1;
+	u8	param_value2[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
 
 struct i40e_aqc_oem_state_change {
-	__le32 state;
-#define I40E_AQ_OEM_STATE_LINK_DOWN  0x0
-#define I40E_AQ_OEM_STATE_LINK_UP    0x1
-	u8     reserved[12];
+	__le32	state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN	0x0
+#define I40E_AQ_OEM_STATE_LINK_UP	0x1
+	u8	reserved[12];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
@@ -2111,18 +2103,18 @@
 /* set test mode (0xFF01, internal) */
 
 struct i40e_acq_set_test_mode {
-	u8     mode;
-#define I40E_AQ_TEST_PARTIAL    0
-#define I40E_AQ_TEST_FULL       1
-#define I40E_AQ_TEST_NVM        2
-	u8     reserved[3];
-	u8     command;
-#define I40E_AQ_TEST_OPEN        0
-#define I40E_AQ_TEST_CLOSE       1
-#define I40E_AQ_TEST_INC         2
-	u8     reserved2[3];
-	__le32 address_high;
-	__le32 address_low;
+	u8	mode;
+#define I40E_AQ_TEST_PARTIAL	0
+#define I40E_AQ_TEST_FULL	1
+#define I40E_AQ_TEST_NVM	2
+	u8	reserved[3];
+	u8	command;
+#define I40E_AQ_TEST_OPEN	0
+#define I40E_AQ_TEST_CLOSE	1
+#define I40E_AQ_TEST_INC	2
+	u8	reserved2[3];
+	__le32	address_high;
+	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
@@ -2175,21 +2167,21 @@
 #define I40E_AQ_CLUSTER_ID_ALTRAM	11
 
 struct i40e_aqc_debug_dump_internals {
-	u8     cluster_id;
-	u8     table_id;
-	__le16 data_size;
-	__le32 idx;
-	__le32 address_high;
-	__le32 address_low;
+	u8	cluster_id;
+	u8	table_id;
+	__le16	data_size;
+	__le32	idx;
+	__le32	address_high;
+	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
 
 struct i40e_aqc_debug_modify_internals {
-	u8     cluster_id;
-	u8     cluster_specific_params[7];
-	__le32 address_high;
-	__le32 address_low;
+	u8	cluster_id;
+	u8	cluster_specific_params[7];
+	__le32	address_high;
+	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
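Every realigned admin queue struct above is guarded by I40E_CHECK_CMD_LENGTH,
whose definition lies outside this hunk. As a rough sketch (the exact macro
body in the header may differ), the usual pattern is a zero-cost compile-time
assertion that the struct is exactly 16 bytes (the size of one AQ descriptor's
parameter area), via a division that becomes divide-by-zero on a size
mismatch:

#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
	{ i40e_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
#define I40E_CHECK_CMD_LENGTH(X)	I40E_CHECK_STRUCT_LEN(16, X)

A struct whose layout drifts from 16 bytes then fails to build instead of
silently corrupting the command format on the wire.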
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 9525605..28c40c5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -50,6 +50,7 @@
 		case I40E_DEV_ID_QSFP_A:
 		case I40E_DEV_ID_QSFP_B:
 		case I40E_DEV_ID_QSFP_C:
+		case I40E_DEV_ID_10G_BASE_T:
 			hw->mac.type = I40E_MAC_XL710;
 			break;
 		case I40E_DEV_ID_VF:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 1537643..8fe34fc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -43,6 +43,7 @@
 #define I40E_DEV_ID_QSFP_A		0x1583
 #define I40E_DEV_ID_QSFP_B		0x1584
 #define I40E_DEV_ID_QSFP_C		0x1585
+#define I40E_DEV_ID_10G_BASE_T		0x1586
 #define I40E_DEV_ID_VF		0x154C
 #define I40E_DEV_ID_VF_HV		0x1571
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index c51bc7a..b2f01eb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1494,7 +1494,7 @@
 
 	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
 				&adapter->crit_section))
-		udelay(500);
+		usleep_range(500, 1000);
 
 	if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
 		dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
@@ -1647,10 +1647,8 @@
 					   v_msg->v_retval, event.msg_buf,
 					   event.msg_size);
 		if (pending != 0) {
-			dev_info(&adapter->pdev->dev,
-				 "%s: ARQ: Pending events %d\n",
-				 __func__, pending);
 			memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
+			event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
 		}
 	} while (pending);
 
@@ -1980,7 +1978,7 @@
 		if ((rstat == I40E_VFR_VFACTIVE) ||
 		    (rstat == I40E_VFR_COMPLETED))
 			return 0;
-		udelay(10);
+		usleep_range(10, 20);
 	}
 	return -EBUSY;
 }
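The two hunks above replace udelay() with usleep_range(); in sleepable
context the latter yields the CPU and lets the timer subsystem coalesce
wakeups instead of busy-spinning. A minimal sketch of the resulting polling
pattern (the wait_for_reset_done() helper and my_reset_complete() check are
hypothetical stand-ins for driver code):

#include <linux/delay.h>

static int wait_for_reset_done(struct my_adapter *adapter)
{
	int i;

	for (i = 0; i < 100; i++) {
		if (my_reset_complete(adapter))	/* hypothetical check */
			return 0;
		usleep_range(10, 20);	/* sleep 10-20 us, don't busy-spin */
	}
	return -EBUSY;
}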
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ade067d..ccc3ce2 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2558,11 +2558,10 @@
 				MVNETA_GMAC_FORCE_LINK_DOWN);
 			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
 			mvneta_port_up(pp);
-			netdev_info(pp->dev, "link up\n");
 		} else {
 			mvneta_port_down(pp);
-			netdev_info(pp->dev, "link down\n");
 		}
+		phy_print_status(phydev);
 	}
 }
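phy_print_status() emits the link up/down message in the standard phylib
format, making the driver's hand-rolled netdev_info() calls redundant. A
minimal adjust_link callback in the same style (the mydev_* names and the
priv layout are hypothetical):

#include <linux/phy.h>

static void mydev_adjust_link(struct net_device *ndev)
{
	struct mydev_priv *priv = netdev_priv(ndev);	/* hypothetical priv */
	struct phy_device *phydev = priv->phydev;

	if (phydev->link)
		mydev_port_up(ndev);	/* hypothetical MAC reconfiguration */
	else
		mydev_port_down(ndev);

	/* Logs "Link is Up - ..." or "Link is Down" on each state change */
	phy_print_status(phydev);
}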
 
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index c3b209c..21ddece 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -106,6 +106,7 @@
 #define SDMA_CMD_ERD		(1 << 7)
 
 /* Bit definitions of the Port Config Reg */
+#define PCR_DUPLEX_FULL		(1 << 15)
 #define PCR_HS			(1 << 12)
 #define PCR_EN			(1 << 7)
 #define PCR_PM			(1 << 0)
@@ -113,11 +114,17 @@
 /* Bit definitions of the Port Config Extend Reg */
 #define PCXR_2BSM		(1 << 28)
 #define PCXR_DSCP_EN		(1 << 21)
+#define PCXR_RMII_EN		(1 << 20)
+#define PCXR_AN_SPEED_DIS	(1 << 19)
+#define PCXR_SPEED_100		(1 << 18)
 #define PCXR_MFL_1518		(0 << 14)
 #define PCXR_MFL_1536		(1 << 14)
 #define PCXR_MFL_2048		(2 << 14)
 #define PCXR_MFL_64K		(3 << 14)
+#define PCXR_FLOWCTL_DIS	(1 << 12)
 #define PCXR_FLP		(1 << 11)
+#define PCXR_AN_FLOWCTL_DIS	(1 << 10)
+#define PCXR_AN_DUPLEX_DIS	(1 << 9)
 #define PCXR_PRIO_TX_OFF	3
 #define PCXR_TX_HIGH_PRI	(7 << PCXR_PRIO_TX_OFF)
 
@@ -170,7 +177,6 @@
 #define LINK_UP			(1 << 3)
 
 /* Bit definitions for work to be done */
-#define WORK_LINK		(1 << 0)
 #define WORK_TX_DONE		(1 << 1)
 
 /*
@@ -197,6 +203,9 @@
 struct pxa168_eth_private {
 	int port_num;		/* User Ethernet port number    */
 	int phy_addr;
+	int phy_speed;
+	int phy_duplex;
+	phy_interface_t phy_intf;
 
 	int rx_resource_err;	/* Rx ring resource error flag */
 
@@ -269,11 +278,11 @@
 static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
 static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
 static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static int pxa168_init_phy(struct net_device *dev);
 static void eth_port_reset(struct net_device *dev);
 static void eth_port_start(struct net_device *dev);
 static int pxa168_eth_open(struct net_device *dev);
 static int pxa168_eth_stop(struct net_device *dev);
-static int ethernet_phy_setup(struct net_device *dev);
 
 static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
 {
@@ -305,26 +314,6 @@
 		netdev_err(pep->dev, "%s : DMA Stuck\n", __func__);
 }
 
-static int ethernet_phy_get(struct pxa168_eth_private *pep)
-{
-	unsigned int reg_data;
-
-	reg_data = rdl(pep, PHY_ADDRESS);
-
-	return (reg_data >> (5 * pep->port_num)) & 0x1f;
-}
-
-static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
-{
-	u32 reg_data;
-	int addr_shift = 5 * pep->port_num;
-
-	reg_data = rdl(pep, PHY_ADDRESS);
-	reg_data &= ~(0x1f << addr_shift);
-	reg_data |= (phy_addr & 0x1f) << addr_shift;
-	wrl(pep, PHY_ADDRESS, reg_data);
-}
-
 static void rxq_refill(struct net_device *dev)
 {
 	struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -655,14 +644,7 @@
 	struct pxa168_eth_private *pep = netdev_priv(dev);
 	int tx_curr_desc, rx_curr_desc;
 
-	/* Perform PHY reset, if there is a PHY. */
-	if (pep->phy != NULL) {
-		struct ethtool_cmd cmd;
-
-		pxa168_get_settings(pep->dev, &cmd);
-		phy_init_hw(pep->phy);
-		pxa168_set_settings(pep->dev, &cmd);
-	}
+	phy_start(pep->phy);
 
 	/* Assignment of Tx CTRP of given queue */
 	tx_curr_desc = pep->tx_curr_desc_q;
@@ -717,6 +699,8 @@
 	val = rdl(pep, PORT_CONFIG);
 	val &= ~PCR_EN;
 	wrl(pep, PORT_CONFIG, val);
+
+	phy_stop(pep->phy);
 }
 
 /*
@@ -884,43 +868,9 @@
 	}
 	if (icr & ICR_RXBUF)
 		ret = 1;
-	if (icr & ICR_MII_CH) {
-		pep->work_todo |= WORK_LINK;
-		ret = 1;
-	}
 	return ret;
 }
 
-static void handle_link_event(struct pxa168_eth_private *pep)
-{
-	struct net_device *dev = pep->dev;
-	u32 port_status;
-	int speed;
-	int duplex;
-	int fc;
-
-	port_status = rdl(pep, PORT_STATUS);
-	if (!(port_status & LINK_UP)) {
-		if (netif_carrier_ok(dev)) {
-			netdev_info(dev, "link down\n");
-			netif_carrier_off(dev);
-			txq_reclaim(dev, 1);
-		}
-		return;
-	}
-	if (port_status & PORT_SPEED_100)
-		speed = 100;
-	else
-		speed = 10;
-
-	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
-	fc = (port_status & FLOW_CONTROL_DISABLED) ? 0 : 1;
-	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
-		    speed, duplex ? "full" : "half", fc ? "en" : "dis");
-	if (!netif_carrier_ok(dev))
-		netif_carrier_on(dev);
-}
-
 static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
@@ -978,8 +928,11 @@
 		skb_size = PCXR_MFL_64K;
 
 	/* Extended Port Configuration */
-	wrl(pep,
-	    PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
+	wrl(pep, PORT_CONFIG_EXT,
+	    PCXR_AN_SPEED_DIS |		 /* Disable HW AN */
+	    PCXR_AN_DUPLEX_DIS |
+	    PCXR_AN_FLOWCTL_DIS |
+	    PCXR_2BSM |			 /* Two byte prefix aligns IP hdr */
 	    PCXR_DSCP_EN |		 /* Enable DSCP in IP */
 	    skb_size | PCXR_FLP |	 /* do not force link pass */
 	    PCXR_TX_HIGH_PRI);		 /* Transmit - high priority queue */
@@ -987,6 +940,69 @@
 	return 0;
 }
 
+static void pxa168_eth_adjust_link(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct phy_device *phy = pep->phy;
+	u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
+	u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);
+
+	cfg = cfg_o & ~PCR_DUPLEX_FULL;
+	cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN);
+
+	if (phy->interface == PHY_INTERFACE_MODE_RMII)
+		cfgext |= PCXR_RMII_EN;
+	if (phy->speed == SPEED_100)
+		cfgext |= PCXR_SPEED_100;
+	if (phy->duplex)
+		cfg |= PCR_DUPLEX_FULL;
+	if (!phy->pause)
+		cfgext |= PCXR_FLOWCTL_DIS;
+
+	/* Bail out if nothing has changed */
+	if (cfg == cfg_o && cfgext == cfgext_o)
+		return;
+
+	wrl(pep, PORT_CONFIG, cfg);
+	wrl(pep, PORT_CONFIG_EXT, cfgext);
+
+	phy_print_status(phy);
+}
+
+static int pxa168_init_phy(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct ethtool_cmd cmd;
+	int err;
+
+	if (pep->phy)
+		return 0;
+
+	pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+	if (!pep->phy)
+		return -ENODEV;
+
+	err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link,
+				 pep->phy_intf);
+	if (err)
+		return err;
+
+	err = pxa168_get_settings(dev, &cmd);
+	if (err)
+		return err;
+
+	cmd.phy_address = pep->phy_addr;
+	cmd.speed = pep->phy_speed;
+	cmd.duplex = pep->phy_duplex;
+	cmd.advertising = PHY_BASIC_FEATURES;
+	cmd.autoneg = AUTONEG_ENABLE;
+
+	if (cmd.speed != 0)
+		cmd.autoneg = AUTONEG_DISABLE;
+
+	return pxa168_set_settings(dev, &cmd);
+}
+
 static int pxa168_init_hw(struct pxa168_eth_private *pep)
 {
 	int err = 0;
@@ -1133,6 +1149,10 @@
 	struct pxa168_eth_private *pep = netdev_priv(dev);
 	int err;
 
+	err = pxa168_init_phy(dev);
+	if (err)
+		return err;
+
 	err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
 	if (err) {
 		dev_err(&dev->dev, "can't assign irq\n");
@@ -1231,10 +1251,6 @@
 	struct net_device *dev = pep->dev;
 	int work_done = 0;
 
-	if (unlikely(pep->work_todo & WORK_LINK)) {
-		pep->work_todo &= ~(WORK_LINK);
-		handle_link_event(pep);
-	}
 	/*
 	 * We call txq_reclaim every time since interrupts are disabled in NAPI
 	 * and due to this we miss the TX_DONE interrupt, which is not updated in
@@ -1357,77 +1373,6 @@
 	return -EOPNOTSUPP;
 }
 
-static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
-{
-	struct mii_bus *bus = pep->smi_bus;
-	struct phy_device *phydev;
-	int start;
-	int num;
-	int i;
-
-	if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
-		/* Scan entire range */
-		start = ethernet_phy_get(pep);
-		num = 32;
-	} else {
-		/* Use phy addr specific to platform */
-		start = phy_addr & 0x1f;
-		num = 1;
-	}
-	phydev = NULL;
-	for (i = 0; i < num; i++) {
-		int addr = (start + i) & 0x1f;
-		if (bus->phy_map[addr] == NULL)
-			mdiobus_scan(bus, addr);
-
-		if (phydev == NULL) {
-			phydev = bus->phy_map[addr];
-			if (phydev != NULL)
-				ethernet_phy_set_addr(pep, addr);
-		}
-	}
-
-	return phydev;
-}
-
-static void phy_init(struct pxa168_eth_private *pep)
-{
-	struct phy_device *phy = pep->phy;
-
-	phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
-
-	if (pep->pd && pep->pd->speed != 0) {
-		phy->autoneg = AUTONEG_DISABLE;
-		phy->advertising = 0;
-		phy->speed = pep->pd->speed;
-		phy->duplex = pep->pd->duplex;
-	} else {
-		phy->autoneg = AUTONEG_ENABLE;
-		phy->speed = 0;
-		phy->duplex = 0;
-		phy->supported &= PHY_BASIC_FEATURES;
-		phy->advertising = phy->supported | ADVERTISED_Autoneg;
-	}
-
-	phy_start_aneg(phy);
-}
-
-static int ethernet_phy_setup(struct net_device *dev)
-{
-	struct pxa168_eth_private *pep = netdev_priv(dev);
-
-	if (pep->pd && pep->pd->init)
-		pep->pd->init();
-
-	pep->phy = phy_scan(pep, pep->phy_addr & 0x1f);
-	if (pep->phy != NULL)
-		phy_init(pep);
-
-	update_hash_table_mac_address(pep, NULL, dev->dev_addr);
-
-	return 0;
-}
-
 static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -1505,16 +1450,14 @@
 	pep = netdev_priv(dev);
 	pep->dev = dev;
 	pep->clk = clk;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res == NULL) {
-		err = -ENODEV;
-		goto err_netdev;
-	}
 	pep->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(pep->base)) {
 		err = -ENOMEM;
 		goto err_netdev;
 	}
+
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	BUG_ON(!res);
 	dev->irq = res->start;
@@ -1552,13 +1495,23 @@
 
 		pep->port_num = pep->pd->port_number;
 		pep->phy_addr = pep->pd->phy_addr;
+		pep->phy_speed = pep->pd->speed;
+		pep->phy_duplex = pep->pd->duplex;
+		pep->phy_intf = pep->pd->intf;
+
+		if (pep->pd->init)
+			pep->pd->init();
 	} else if (pdev->dev.of_node) {
 		of_property_read_u32(pdev->dev.of_node, "port-id",
 				     &pep->port_num);
 
 		np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
-		if (np)
-			of_property_read_u32(np, "reg", &pep->phy_addr);
+		if (!np) {
+			dev_err(&pdev->dev, "missing phy-handle\n");
+			return -EINVAL;
+		}
+		of_property_read_u32(np, "reg", &pep->phy_addr);
+		pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
 	}
 
 	/* Hardware supports only 3 ports */
@@ -1588,9 +1541,6 @@
 		goto err_free_mdio;
 
 	pxa168_init_hw(pep);
-	err = ethernet_phy_setup(dev);
-	if (err)
-		goto err_mdiobus;
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	err = register_netdev(dev);
 	if (err)
@@ -1621,13 +1571,13 @@
 				  pep->htpr, pep->htpr_dma);
 		pep->htpr = NULL;
 	}
+	if (pep->phy)
+		phy_disconnect(pep->phy);
 	if (pep->clk) {
 		clk_disable(pep->clk);
 		clk_put(pep->clk);
 		pep->clk = NULL;
 	}
-	if (pep->phy != NULL)
-		phy_detach(pep->phy);
 
 	iounmap(pep->base);
 	pep->base = NULL;
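The pxa168 rework above delegates link management to phylib: the PHY is
connected once with phy_connect_direct(), started from ndo_open, stopped
from ndo_stop, and disconnected on remove. Sketched with hypothetical
mydev_* names:

static int mydev_open(struct net_device *ndev)
{
	struct mydev_priv *priv = netdev_priv(ndev);
	int err;

	err = phy_connect_direct(ndev, priv->phydev, mydev_adjust_link,
				 priv->phy_interface);
	if (err)
		return err;

	phy_start(priv->phydev);	/* kick off the phylib state machine */
	return 0;
}

static int mydev_stop(struct net_device *ndev)
{
	struct mydev_priv *priv = netdev_priv(ndev);

	phy_stop(priv->phydev);		/* halt the state machine first */
	return 0;
}

phy_disconnect() then undoes the phy_connect_direct() pairing when the
device is torn down, as the remove-path hunk above shows.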
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index b16e1b9..3c05e58 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -990,11 +990,11 @@
 	{
 		.opcode = MLX4_CMD_CONFIG_DEV,
 		.has_inbox = false,
-		.has_outbox = false,
+		.has_outbox = true,
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_CMD_EPERM_wrapper
+		.wrapper = mlx4_CONFIG_DEV_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_ALLOC_RES,
@@ -1338,6 +1338,15 @@
 		.verify = NULL,
 		.wrapper = mlx4_QUERY_IF_STAT_wrapper
 	},
+	{
+		.opcode = MLX4_CMD_ACCESS_REG,
+		.has_inbox = true,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_ACCESS_REG_wrapper,
+	},
 	/* Native multicast commands are not available for guests */
 	{
 		.opcode = MLX4_CMD_QP_ATTACH,
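Every entry in this table names a wrapper with one shared prototype that
mediates the command when a VF issues it. For orientation, a sketch of the
simplest wrapper (the one CONFIG_DEV no longer uses), assuming the usual
mlx4 wrapper signature:

static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
				  struct mlx4_vhcr *vhcr,
				  struct mlx4_cmd_mailbox *inbox,
				  struct mlx4_cmd_mailbox *outbox,
				  struct mlx4_cmd_info *cmd)
{
	return -EPERM;	/* refuse the command from a guest/VF */
}

With the change above, CONFIG_DEV routes to mlx4_CONFIG_DEV_wrapper (now
with an outbox) instead of being refused outright, and ACCESS_REG gains a
wrapped entry of its own.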
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 57dda95..9990144 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -35,52 +35,6 @@
 
 #include "mlx4_en.h"
 
-int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
-	int port_up = 0;
-	int err = 0;
-
-	if (priv->hwtstamp_config.tx_type == tx_type &&
-	    priv->hwtstamp_config.rx_filter == rx_filter)
-		return 0;
-
-	mutex_lock(&mdev->state_lock);
-	if (priv->port_up) {
-		port_up = 1;
-		mlx4_en_stop_port(dev, 1);
-	}
-
-	mlx4_en_free_resources(priv);
-
-	en_warn(priv, "Changing Time Stamp configuration\n");
-
-	priv->hwtstamp_config.tx_type = tx_type;
-	priv->hwtstamp_config.rx_filter = rx_filter;
-
-	if (rx_filter != HWTSTAMP_FILTER_NONE)
-		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
-	else
-		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
-
-	err = mlx4_en_alloc_resources(priv);
-	if (err) {
-		en_err(priv, "Failed reallocating port resources\n");
-		goto out;
-	}
-	if (port_up) {
-		err = mlx4_en_start_port(dev);
-		if (err)
-			en_err(priv, "Failed starting port\n");
-	}
-
-out:
-	mutex_unlock(&mdev->state_lock);
-	netdev_features_change(dev);
-	return err;
-}
-
 /* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
  */
 static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ae83da9..8ea4d5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -35,6 +35,7 @@
 #include <linux/ethtool.h>
 #include <linux/netdevice.h>
 #include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
 #include <linux/in.h>
 #include <net/ip.h>
 
@@ -374,7 +375,302 @@
 	}
 }
 
-static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static u32 mlx4_en_autoneg_get(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	u32 autoneg = AUTONEG_DISABLE;
+
+	if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
+	    (priv->port_state.flags & MLX4_EN_PORT_ANE))
+		autoneg = AUTONEG_ENABLE;
+
+	return autoneg;
+}
+
+static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg)
+{
+	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
+
+	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
+			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
+			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
+			return SUPPORTED_TP;
+	}
+
+	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
+			 | MLX4_PROT_MASK(MLX4_10GBASE_SR)
+			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
+			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
+			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
+			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
+			return SUPPORTED_FIBRE;
+	}
+
+	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
+			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
+			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
+			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
+			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
+			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
+			return SUPPORTED_Backplane;
+	}
+	return 0;
+}
+
+static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
+{
+	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);
+
+	if (!eth_proto) /* link down */
+		eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
+
+	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
+			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
+			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
+			return PORT_TP;
+	}
+
+	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
+			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
+			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
+			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
+			return PORT_FIBRE;
+	}
+
+	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
+			 | MLX4_PROT_MASK(MLX4_56GBASE_CR4)
+			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
+			return PORT_DA;
+	}
+
+	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
+			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
+			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
+			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
+			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
+			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
+			return PORT_NONE;
+	}
+	return PORT_OTHER;
+}
+
+#define MLX4_LINK_MODES_SZ \
+	(FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)
+
+enum ethtool_report {
+	SUPPORTED = 0,
+	ADVERTISED = 1,
+	SPEED = 2
+};
+
+/* Translates mlx4 link modes to the equivalent ethtool link modes/speeds */
+static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = {
+	[MLX4_100BASE_TX] = {
+		SUPPORTED_100baseT_Full,
+		ADVERTISED_100baseT_Full,
+		SPEED_100
+		},
+
+	[MLX4_1000BASE_T] = {
+		SUPPORTED_1000baseT_Full,
+		ADVERTISED_1000baseT_Full,
+		SPEED_1000
+		},
+	[MLX4_1000BASE_CX_SGMII] = {
+		SUPPORTED_1000baseKX_Full,
+		ADVERTISED_1000baseKX_Full,
+		SPEED_1000
+		},
+	[MLX4_1000BASE_KX] = {
+		SUPPORTED_1000baseKX_Full,
+		ADVERTISED_1000baseKX_Full,
+		SPEED_1000
+		},
+
+	[MLX4_10GBASE_T] = {
+		SUPPORTED_10000baseT_Full,
+		ADVERTISED_10000baseT_Full,
+		SPEED_10000
+		},
+	[MLX4_10GBASE_CX4] = {
+		SUPPORTED_10000baseKX4_Full,
+		ADVERTISED_10000baseKX4_Full,
+		SPEED_10000
+		},
+	[MLX4_10GBASE_KX4] = {
+		SUPPORTED_10000baseKX4_Full,
+		ADVERTISED_10000baseKX4_Full,
+		SPEED_10000
+		},
+	[MLX4_10GBASE_KR] = {
+		SUPPORTED_10000baseKR_Full,
+		ADVERTISED_10000baseKR_Full,
+		SPEED_10000
+		},
+	[MLX4_10GBASE_CR] = {
+		SUPPORTED_10000baseKR_Full,
+		ADVERTISED_10000baseKR_Full,
+		SPEED_10000
+		},
+	[MLX4_10GBASE_SR] = {
+		SUPPORTED_10000baseKR_Full,
+		ADVERTISED_10000baseKR_Full,
+		SPEED_10000
+		},
+
+	[MLX4_20GBASE_KR2] = {
+		SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full,
+		ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full,
+		SPEED_20000
+		},
+
+	[MLX4_40GBASE_CR4] = {
+		SUPPORTED_40000baseCR4_Full,
+		ADVERTISED_40000baseCR4_Full,
+		SPEED_40000
+		},
+	[MLX4_40GBASE_KR4] = {
+		SUPPORTED_40000baseKR4_Full,
+		ADVERTISED_40000baseKR4_Full,
+		SPEED_40000
+		},
+	[MLX4_40GBASE_SR4] = {
+		SUPPORTED_40000baseSR4_Full,
+		ADVERTISED_40000baseSR4_Full,
+		SPEED_40000
+		},
+
+	[MLX4_56GBASE_KR4] = {
+		SUPPORTED_56000baseKR4_Full,
+		ADVERTISED_56000baseKR4_Full,
+		SPEED_56000
+		},
+	[MLX4_56GBASE_CR4] = {
+		SUPPORTED_56000baseCR4_Full,
+		ADVERTISED_56000baseCR4_Full,
+		SPEED_56000
+		},
+	[MLX4_56GBASE_SR4] = {
+		SUPPORTED_56000baseSR4_Full,
+		ADVERTISED_56000baseSR4_Full,
+		SPEED_56000
+		},
+};
+
+static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report)
+{
+	int i;
+	u32 link_modes = 0;
+
+	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
+		if (eth_proto & MLX4_PROT_MASK(i))
+			link_modes |= ptys2ethtool_map[i][report];
+	}
+	return link_modes;
+}
+
+static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report)
+{
+	int i;
+	u32 ptys_modes = 0;
+
+	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
+		if (ptys2ethtool_map[i][report] & link_modes)
+			ptys_modes |= 1 << i;
+	}
+	return ptys_modes;
+}
+
+/* Convert actual speed (SPEED_XXX) to ptys link modes */
+static u32 speed2ptys_link_modes(u32 speed)
+{
+	int i;
+	u32 ptys_modes = 0;
+
+	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
+		if (ptys2ethtool_map[i][SPEED] == speed)
+			ptys_modes |= 1 << i;
+	}
+	return ptys_modes;
+}
+
+static int ethtool_get_ptys_settings(struct net_device *dev,
+				     struct ethtool_cmd *cmd)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_ptys_reg ptys_reg;
+	u32 eth_proto;
+	int ret;
+
+	memset(&ptys_reg, 0, sizeof(ptys_reg));
+	ptys_reg.local_port = priv->port;
+	ptys_reg.proto_mask = MLX4_PTYS_EN;
+	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
+				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
+	if (ret) {
+		en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
+			ret);
+		return ret;
+	}
+	en_dbg(DRV, priv, "ptys_reg.proto_mask       %x\n",
+	       ptys_reg.proto_mask);
+	en_dbg(DRV, priv, "ptys_reg.eth_proto_cap    %x\n",
+	       be32_to_cpu(ptys_reg.eth_proto_cap));
+	en_dbg(DRV, priv, "ptys_reg.eth_proto_admin  %x\n",
+	       be32_to_cpu(ptys_reg.eth_proto_admin));
+	en_dbg(DRV, priv, "ptys_reg.eth_proto_oper   %x\n",
+	       be32_to_cpu(ptys_reg.eth_proto_oper));
+	en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
+	       be32_to_cpu(ptys_reg.eth_proto_lp_adv));
+
+	cmd->supported = 0;
+	cmd->advertising = 0;
+
+	cmd->supported |= ptys_get_supported_port(&ptys_reg);
+
+	eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
+	cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED);
+
+	eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
+	cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED);
+
+	cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+	cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0;
+
+	cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ?
+		ADVERTISED_Asym_Pause : 0;
+
+	cmd->port = ptys_get_active_port(&ptys_reg);
+	cmd->transceiver = (SUPPORTED_TP & cmd->supported) ?
+		XCVR_EXTERNAL : XCVR_INTERNAL;
+
+	if (mlx4_en_autoneg_get(dev)) {
+		cmd->supported |= SUPPORTED_Autoneg;
+		cmd->advertising |= ADVERTISED_Autoneg;
+	}
+
+	cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
+		AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+	eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
+	cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED);
+
+	cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
+			ADVERTISED_Autoneg : 0;
+
+	cmd->phy_address = 0;
+	cmd->mdio_support = 0;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+	cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+
+	return ret;
+}
+
+static void ethtool_get_default_settings(struct net_device *dev,
+					 struct ethtool_cmd *cmd)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	int trans_type;
@@ -382,18 +678,7 @@
 	cmd->autoneg = AUTONEG_DISABLE;
 	cmd->supported = SUPPORTED_10000baseT_Full;
 	cmd->advertising = ADVERTISED_10000baseT_Full;
-
-	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
-		return -ENOMEM;
-
-	trans_type = priv->port_state.transciver;
-	if (netif_carrier_ok(dev)) {
-		ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
-		cmd->duplex = DUPLEX_FULL;
-	} else {
-		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
-		cmd->duplex = DUPLEX_UNKNOWN;
-	}
+	trans_type = priv->port_state.transceiver;
 
 	if (trans_type > 0 && trans_type <= 0xC) {
 		cmd->port = PORT_FIBRE;
@@ -409,17 +694,118 @@
 		cmd->port = -1;
 		cmd->transceiver = -1;
 	}
+}
+
+static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int ret = -EINVAL;
+
+	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+		return -ENOMEM;
+
+	en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
+	       priv->port_state.flags & MLX4_EN_PORT_ANC,
+	       priv->port_state.flags & MLX4_EN_PORT_ANE);
+
+	if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
+		ret = ethtool_get_ptys_settings(dev, cmd);
+	if (ret) /* ETH PROT CTRL is not supported or PTYS CMD failed */
+		ethtool_get_default_settings(dev, cmd);
+
+	if (netif_carrier_ok(dev)) {
+		ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
+		cmd->duplex = DUPLEX_FULL;
+	} else {
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+		cmd->duplex = DUPLEX_UNKNOWN;
+	}
 	return 0;
 }
 
+/* Calculate PTYS admin according to ethtool speed (SPEED_XXX) */
+static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
+				   __be32 proto_cap)
+{
+	__be32 proto_admin = 0;
+
+	if (!speed) { /* Speed = 0 ==> Reset Link modes */
+		proto_admin = proto_cap;
+		en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
+			be32_to_cpu(proto_cap));
+	} else {
+		u32 ptys_link_modes = speed2ptys_link_modes(speed);
+
+		proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
+		en_info(priv, "Setting Speed to %d\n", speed);
+	}
+	return proto_admin;
+}
+
 static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
-	if ((cmd->autoneg == AUTONEG_ENABLE) ||
-	    (ethtool_cmd_speed(cmd) != SPEED_10000) ||
-	    (cmd->duplex != DUPLEX_FULL))
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_ptys_reg ptys_reg;
+	__be32 proto_admin;
+	int ret;
+
+	u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED);
+	int speed = ethtool_cmd_speed(cmd);
+
+	en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n",
+	       speed, cmd->advertising, cmd->autoneg, cmd->duplex);
+
+	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
+	    (cmd->autoneg == AUTONEG_ENABLE) || (cmd->duplex == DUPLEX_HALF))
 		return -EINVAL;
 
-	/* Nothing to change */
+	memset(&ptys_reg, 0, sizeof(ptys_reg));
+	ptys_reg.local_port = priv->port;
+	ptys_reg.proto_mask = MLX4_PTYS_EN;
+	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
+				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
+	if (ret) {
+		en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
+			ret);
+		return 0;
+	}
+
+	proto_admin = cpu_to_be32(ptys_adv);
+	if (speed >= 0 && speed != priv->port_state.link_speed)
+		/* If speed was set then speed decides :-) */
+		proto_admin = speed_set_ptys_admin(priv, speed,
+						   ptys_reg.eth_proto_cap);
+
+	proto_admin &= ptys_reg.eth_proto_cap;
+
+	if (proto_admin == ptys_reg.eth_proto_admin)
+		return 0; /* Nothing to change */
+
+	if (!proto_admin) {
+		en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
+		return -EINVAL; /* nothing to change due to bad input */
+	}
+
+	en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
+	       be32_to_cpu(proto_admin));
+
+	ptys_reg.eth_proto_admin = proto_admin;
+	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
+				   &ptys_reg);
+	if (ret) {
+		en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
+			be32_to_cpu(ptys_reg.eth_proto_admin), ret);
+		return ret;
+	}
+
+	en_warn(priv, "Port link mode changed, restarting port...\n");
+	mutex_lock(&priv->mdev->state_lock);
+	if (priv->port_up) {
+		mlx4_en_stop_port(dev, 1);
+		if (mlx4_en_start_port(dev))
+			en_err(priv, "Failed restarting port %d\n", priv->port);
+	}
+	mutex_unlock(&priv->mdev->state_lock);
 	return 0;
 }
 
@@ -596,6 +982,7 @@
 	int err = 0;
 
 	rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
+	rss_rings = 1 << ilog2(rss_rings);
 
 	while (n--) {
 		ring_index[n] = rss_map->qps[n % rss_rings].qpn -
@@ -1309,6 +1696,86 @@
 	return ret;
 }
 
+static int mlx4_en_get_module_info(struct net_device *dev,
+				   struct ethtool_modinfo *modinfo)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int ret;
+	u8 data[4];
+
+	/* Read first 2 bytes to get Module & REV ID */
+	ret = mlx4_get_module_info(mdev->dev, priv->port,
+				   0/*offset*/, 2/*size*/, data);
+	if (ret < 2)
+		return -EIO;
+
+	switch (data[0] /* identifier */) {
+	case MLX4_MODULE_ID_QSFP:
+		modinfo->type = ETH_MODULE_SFF_8436;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+		break;
+	case MLX4_MODULE_ID_QSFP_PLUS:
+		if (data[1] >= 0x3) { /* revision id */
+			modinfo->type = ETH_MODULE_SFF_8636;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+		} else {
+			modinfo->type = ETH_MODULE_SFF_8436;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+		}
+		break;
+	case MLX4_MODULE_ID_QSFP28:
+		modinfo->type = ETH_MODULE_SFF_8636;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+		break;
+	case MLX4_MODULE_ID_SFP:
+		modinfo->type = ETH_MODULE_SFF_8472;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	return 0;
+}
+
+static int mlx4_en_get_module_eeprom(struct net_device *dev,
+				     struct ethtool_eeprom *ee,
+				     u8 *data)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int offset = ee->offset;
+	int i = 0, ret;
+
+	if (ee->len == 0)
+		return -EINVAL;
+
+	memset(data, 0, ee->len);
+
+	while (i < ee->len) {
+		en_dbg(DRV, priv,
+		       "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
+		       i, offset, ee->len - i);
+
+		ret = mlx4_get_module_info(mdev->dev, priv->port,
+					   offset, ee->len - i, data + i);
+
+		if (!ret) /* Done reading */
+			return 0;
+
+		if (ret < 0) {
+			en_err(priv,
+			       "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
+			       i, offset, ee->len - i, ret);
+			return 0;
+		}
+
+		i += ret;
+		offset += ret;
+	}
+	return 0;
+}
 
 const struct ethtool_ops mlx4_en_ethtool_ops = {
 	.get_drvinfo = mlx4_en_get_drvinfo,
@@ -1341,6 +1808,8 @@
 	.get_priv_flags = mlx4_en_get_priv_flags,
 	.get_tunable		= mlx4_en_get_tunable,
 	.set_tunable		= mlx4_en_set_tunable,
+	.get_module_info = mlx4_en_get_module_info,
+	.get_module_eeprom = mlx4_en_get_module_eeprom
 };
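
A minimal sketch (not part of the patch) of how the PTYS/ethtool translation helpers above compose; the link-mode values are hypothetical:

	/* PTYS capability bits, as read from eth_proto_cap (hypothetical): */
	u32 eth_proto = MLX4_PROT_MASK(MLX4_10GBASE_KR) |
			MLX4_PROT_MASK(MLX4_40GBASE_CR4);

	/* Maps to SUPPORTED_10000baseKR_Full | SUPPORTED_40000baseCR4_Full */
	u32 supported = ptys2ethtool_link_modes(eth_proto, SUPPORTED);

	/* The reverse direction turns an ethtool advertising mask back
	 * into PTYS admin bits (here, bit MLX4_40GBASE_CR4):
	 */
	u32 ptys_adv = ethtool2ptys_link_modes(ADVERTISED_40000baseCR4_Full,
					       ADVERTISED);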
 
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 2091ae88..9f16f75 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -221,15 +221,12 @@
 {
 	struct mlx4_en_dev *mdev;
 	int i;
-	int err;
 
 	printk_once(KERN_INFO "%s", mlx4_en_version);
 
 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
-	if (!mdev) {
-		err = -ENOMEM;
+	if (!mdev)
 		goto err_free_res;
-	}
 
 	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
 		goto err_free_dev;
@@ -264,8 +261,7 @@
 	}
 
 	/* Build device profile according to supplied module parameters */
-	err = mlx4_en_get_profile(mdev);
-	if (err) {
+	if (mlx4_en_get_profile(mdev)) {
 		mlx4_err(mdev, "Bad module parameters, aborting\n");
 		goto err_mr;
 	}
@@ -286,10 +282,8 @@
 	 * Note: we cannot use the shared workqueue because of deadlocks caused
 	 *       by the rtnl lock */
 	mdev->workqueue = create_singlethread_workqueue("mlx4_en");
-	if (!mdev->workqueue) {
-		err = -ENOMEM;
+	if (!mdev->workqueue)
 		goto err_mr;
-	}
 
 	/* At this stage all non-port specific tasks are complete:
 	 * mark the card state as up */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f3032fe..0efbae9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -575,7 +575,7 @@
 	struct mlx4_mac_entry *entry;
 	int index = 0;
 	int err = 0;
-	u64 reg_id;
+	u64 reg_id = 0;
 	int *qpn = &priv->base_qpn;
 	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
 
@@ -1843,8 +1843,7 @@
 		}
 		local_bh_enable();
 
-		while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
-			msleep(1);
+		napi_synchronize(&cq->napi);
 		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 		mlx4_en_deactivate_cq(priv, cq);
 
@@ -2157,7 +2156,7 @@
 		return -ERANGE;
 	}
 
-	if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
+	if (mlx4_en_reset_config(dev, config, dev->features)) {
 		config.tx_type = HWTSTAMP_TX_OFF;
 		config.rx_filter = HWTSTAMP_FILTER_NONE;
 	}
@@ -2190,6 +2189,16 @@
 		netdev_features_t features)
 {
 	struct mlx4_en_priv *priv = netdev_priv(netdev);
+	int ret = 0;
+
+	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
+		en_info(priv, "Turn %s RX vlan strip offload\n",
+			(features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
+		ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
+					   features);
+		if (ret)
+			return ret;
+	}
 
 	if (features & NETIF_F_LOOPBACK)
 		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
@@ -2431,6 +2440,21 @@
 
 	priv = netdev_priv(dev);
 	memset(priv, 0, sizeof(struct mlx4_en_priv));
+	spin_lock_init(&priv->stats_lock);
+	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
+	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
+#ifdef CONFIG_MLX4_EN_VXLAN
+	INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
+	INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
+#endif
+#ifdef CONFIG_RFS_ACCEL
+	INIT_LIST_HEAD(&priv->filters);
+	spin_lock_init(&priv->filters_lock);
+#endif
+
 	priv->dev = dev;
 	priv->mdev = mdev;
 	priv->ddev = &mdev->pdev->dev;
@@ -2462,16 +2486,6 @@
 	priv->cqe_size = mdev->dev->caps.cqe_size;
 	priv->mac_index = -1;
 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
-	spin_lock_init(&priv->stats_lock);
-	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
-	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
-	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
-	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
-	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
-#ifdef CONFIG_MLX4_EN_VXLAN
-	INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
-	INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
-#endif
 #ifdef CONFIG_MLX4_EN_DCB
 	if (!mlx4_is_slave(priv->mdev->dev)) {
 		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
@@ -2514,11 +2528,6 @@
 	if (err)
 		goto out;
 
-#ifdef CONFIG_RFS_ACCEL
-	INIT_LIST_HEAD(&priv->filters);
-	spin_lock_init(&priv->filters_lock);
-#endif
-
 	/* Initialize time stamping config */
 	priv->hwtstamp_config.flags = 0;
 	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
@@ -2559,7 +2568,8 @@
 	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 			NETIF_F_HW_VLAN_CTAG_FILTER;
-	dev->hw_features |= NETIF_F_LOOPBACK;
+	dev->hw_features |= NETIF_F_LOOPBACK |
+			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
 	if (mdev->dev->caps.steering_mode ==
 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -2633,3 +2643,79 @@
 	return err;
 }
 
+int mlx4_en_reset_config(struct net_device *dev,
+			 struct hwtstamp_config ts_config,
+			 netdev_features_t features)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int port_up = 0;
+	int err = 0;
+
+	if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
+	    priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
+	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX))
+		return 0; /* Nothing to change */
+
+	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
+	    (features & NETIF_F_HW_VLAN_CTAG_RX) &&
+	    (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
+		en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&mdev->state_lock);
+	if (priv->port_up) {
+		port_up = 1;
+		mlx4_en_stop_port(dev, 1);
+	}
+
+	mlx4_en_free_resources(priv);
+
+	en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
+		ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+	priv->hwtstamp_config.tx_type = ts_config.tx_type;
+	priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+
+	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
+		if (features & NETIF_F_HW_VLAN_CTAG_RX)
+			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+		else
+			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+	} else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
+		/* RX time-stamping is OFF, update the RX vlan offload
+		 * to the latest wanted state
+		 */
+		if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
+			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+		else
+			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+	}
+
+	/* RX vlan offload and RX time-stamping can't co-exist!
+	 * Regardless of the caller's choice, turn off RX vlan
+	 * offload when RX time-stamping is on.
+	 */
+	if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+			en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
+		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+	}
+
+	err = mlx4_en_alloc_resources(priv);
+	if (err) {
+		en_err(priv, "Failed reallocating port resources\n");
+		goto out;
+	}
+	if (port_up) {
+		err = mlx4_en_start_port(dev);
+		if (err)
+			en_err(priv, "Failed starting port\n");
+	}
+
+out:
+	mutex_unlock(&mdev->state_lock);
+	netdev_features_change(dev);
+	return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 0a0261d..134b12e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -91,21 +91,37 @@
 	 * already synchronized, no need for locking */
 	state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
 	switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
+	case MLX4_EN_100M_SPEED:
+		state->link_speed = SPEED_100;
+		break;
 	case MLX4_EN_1G_SPEED:
-		state->link_speed = 1000;
+		state->link_speed = SPEED_1000;
 		break;
 	case MLX4_EN_10G_SPEED_XAUI:
 	case MLX4_EN_10G_SPEED_XFI:
-		state->link_speed = 10000;
+		state->link_speed = SPEED_10000;
+		break;
+	case MLX4_EN_20G_SPEED:
+		state->link_speed = SPEED_20000;
 		break;
 	case MLX4_EN_40G_SPEED:
-		state->link_speed = 40000;
+		state->link_speed = SPEED_40000;
+		break;
+	case MLX4_EN_56G_SPEED:
+		state->link_speed = SPEED_56000;
 		break;
 	default:
 		state->link_speed = -1;
 		break;
 	}
-	state->transciver = qport_context->transceiver;
+
+	state->transceiver = qport_context->transceiver;
+
+	state->flags = 0; /* Reset and recalculate the port flags */
+	state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ?
+		MLX4_EN_PORT_ANC : 0;
+	state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ?
+		MLX4_EN_PORT_ANE : 0;
 
 out:
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 745090b..040da4b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -53,22 +53,49 @@
 	MLX4_MCAST_ENABLE       = 2,
 };
 
+enum mlx4_link_mode {
+	MLX4_1000BASE_CX_SGMII	 = 0,
+	MLX4_1000BASE_KX	 = 1,
+	MLX4_10GBASE_CX4	 = 2,
+	MLX4_10GBASE_KX4	 = 3,
+	MLX4_10GBASE_KR		 = 4,
+	MLX4_20GBASE_KR2	 = 5,
+	MLX4_40GBASE_CR4	 = 6,
+	MLX4_40GBASE_KR4	 = 7,
+	MLX4_56GBASE_KR4	 = 8,
+	MLX4_10GBASE_CR		 = 12,
+	MLX4_10GBASE_SR		 = 13,
+	MLX4_40GBASE_SR4	 = 15,
+	MLX4_56GBASE_CR4	 = 17,
+	MLX4_56GBASE_SR4	 = 18,
+	MLX4_100BASE_TX		 = 24,
+	MLX4_1000BASE_T		 = 25,
+	MLX4_10GBASE_T		 = 26,
+};
+
+#define MLX4_PROT_MASK(link_mode) (1<<link_mode)
+
 enum {
-	MLX4_EN_1G_SPEED	= 0x02,
-	MLX4_EN_10G_SPEED_XFI	= 0x01,
+	MLX4_EN_100M_SPEED	= 0x04,
 	MLX4_EN_10G_SPEED_XAUI	= 0x00,
+	MLX4_EN_10G_SPEED_XFI	= 0x01,
+	MLX4_EN_1G_SPEED	= 0x02,
+	MLX4_EN_20G_SPEED	= 0x08,
 	MLX4_EN_40G_SPEED	= 0x40,
+	MLX4_EN_56G_SPEED	= 0x20,
 	MLX4_EN_OTHER_SPEED	= 0x0f,
 };
 
 struct mlx4_en_query_port_context {
 	u8 link_up;
 #define MLX4_EN_LINK_UP_MASK	0x80
-	u8 reserved;
+#define MLX4_EN_ANC_MASK	0x40
+	u8 autoneg;
+#define MLX4_EN_AUTONEG_MASK	0x80
 	__be16 mtu;
 	u8 reserved2;
 	u8 link_speed;
-#define MLX4_EN_SPEED_MASK	0x43
+#define MLX4_EN_SPEED_MASK	0x6f
 	u16 reserved3[5];
 	__be64 mac;
 	u8 transceiver;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 01660c5..b173a0c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -74,7 +74,7 @@
 	page_alloc->page_size = PAGE_SIZE << order;
 	page_alloc->page = page;
 	page_alloc->dma = dma;
-	page_alloc->page_offset = frag_info->frag_align;
+	page_alloc->page_offset = 0;
 	/* Not doing get_page() for each frag is a big win
 	 * on asymmetric workloads. Note we cannot use atomic_set().
 	 */
@@ -119,7 +119,6 @@
 
 out:
 	while (i--) {
-		frag_info = &priv->frag_info[i];
 		if (page_alloc[i].page != ring_alloc[i].page) {
 			dma_unmap_page(priv->ddev, page_alloc[i].dma,
 				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
@@ -157,7 +156,7 @@
 		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
 
 		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
-				     frag_info, GFP_KERNEL))
+				     frag_info, GFP_KERNEL | __GFP_COLD))
 			goto out;
 	}
 	return 0;
@@ -269,7 +268,7 @@
 
 			if (mlx4_en_prepare_rx_desc(priv, ring,
 						    ring->actual_size,
-						    GFP_KERNEL)) {
+						    GFP_KERNEL | __GFP_COLD)) {
 				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
 					en_err(priv, "Failed to allocate enough rx buffers\n");
 					return -ENOMEM;
@@ -636,7 +635,8 @@
 	int index = ring->prod & ring->size_mask;
 
 	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
-		if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC))
+		if (mlx4_en_prepare_rx_desc(priv, ring, index,
+					    GFP_ATOMIC | __GFP_COLD))
 			break;
 		ring->prod++;
 		index = ring->prod & ring->size_mask;
@@ -879,8 +879,8 @@
 	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
 
-	if (priv->port_up)
-		napi_schedule(&cq->napi);
+	if (likely(priv->port_up))
+		napi_schedule_irqoff(&cq->napi);
 	else
 		mlx4_en_arm_cq(priv, cq);
 }
@@ -946,15 +946,8 @@
 			(eff_mtu > buf_size + frag_sizes[i]) ?
 				frag_sizes[i] : eff_mtu - buf_size;
 		priv->frag_info[i].frag_prefix_size = buf_size;
-		if (!i)	{
-			priv->frag_info[i].frag_align = NET_IP_ALIGN;
-			priv->frag_info[i].frag_stride =
-				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
-		} else {
-			priv->frag_info[i].frag_align = 0;
-			priv->frag_info[i].frag_stride =
-				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
-		}
+		priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i],
+						       SMP_CACHE_BYTES);
 		buf_size += priv->frag_info[i].frag_size;
 		i++;
 	}
@@ -967,11 +960,10 @@
 	       eff_mtu, priv->num_frags);
 	for (i = 0; i < priv->num_frags; i++) {
 		en_err(priv,
-		       "  frag:%d - size:%d prefix:%d align:%d stride:%d\n",
+		       "  frag:%d - size:%d prefix:%d stride:%d\n",
 		       i,
 		       priv->frag_info[i].frag_size,
 		       priv->frag_info[i].frag_prefix_size,
-		       priv->frag_info[i].frag_align,
 		       priv->frag_info[i].frag_stride);
 	}
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 49d5afc..2d8ee66 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -129,11 +129,15 @@
 	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
 		return -ENOMEM;
 
-	/* The device supports 1G, 10G and 40G speeds */
-	if (priv->port_state.link_speed != 1000 &&
-	    priv->port_state.link_speed != 10000 &&
-	    priv->port_state.link_speed != 40000)
+	/* The device supports 100M, 1G, 10G, 20G, 40G and 56G speeds */
+	if (priv->port_state.link_speed != SPEED_100 &&
+	    priv->port_state.link_speed != SPEED_1000 &&
+	    priv->port_state.link_speed != SPEED_10000 &&
+	    priv->port_state.link_speed != SPEED_20000 &&
+	    priv->port_state.link_speed != SPEED_40000 &&
+	    priv->port_state.link_speed != SPEED_56000)
 		return priv->port_state.link_speed;
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 454d9fe..d0cecbd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -479,8 +479,8 @@
 	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
 
-	if (priv->port_up)
-		napi_schedule(&cq->napi);
+	if (likely(priv->port_up))
+		napi_schedule_irqoff(&cq->napi);
 	else
 		mlx4_en_arm_cq(priv, cq);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 2e88a23..d6dba77 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -139,7 +139,10 @@
 		[10] = "TCP/IP offloads/flow-steering for VXLAN support",
 		[11] = "MAD DEMUX (Secure-Host) support",
 		[12] = "Large cache line (>64B) CQE stride support",
-		[13] = "Large cache line (>64B) EQE stride support"
+		[13] = "Large cache line (>64B) EQE stride support",
+		[14] = "Ethernet protocol control support",
+		[15] = "Ethernet Backplane autoneg support",
+		[16] = "CONFIG DEV support"
 	};
 	int i;
 
@@ -560,6 +563,7 @@
 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE	0x7a
+#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET	0x7a
 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
@@ -571,8 +575,10 @@
 #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
+#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
+#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
 #define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
 #define QUERY_DEV_CAP_VXLAN			0x9e
 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0
@@ -737,15 +743,22 @@
 	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
 	dev_cap->max_rq_desc_sz = size;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+	if (field & (1 << 5))
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
 	if (field & (1 << 6))
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
 	if (field & (1 << 7))
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
-
 	MLX4_GET(dev_cap->bmme_flags, outbox,
 		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
+	if (field & 0x20)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
 	MLX4_GET(dev_cap->reserved_lkey, outbox,
 		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
+	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
+	if (field32 & (1 << 0))
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
 	if (field & 1<<6)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -1841,14 +1854,18 @@
 
 struct mlx4_config_dev {
 	__be32	update_flags;
-	__be32	rsdv1[3];
+	__be32	rsvd1[3];
 	__be16	vxlan_udp_dport;
 	__be16	rsvd2;
+	__be32	rsvd3[27];
+	__be16	rsvd4;
+	u8	rsvd5;
+	u8	rx_checksum_val;
 };
 
 #define MLX4_VXLAN_UDP_DPORT (1 << 0)
 
-static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
+static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
 {
 	int err;
 	struct mlx4_cmd_mailbox *mailbox;
@@ -1866,6 +1883,77 @@
 	return err;
 }
 
+static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
+{
+	int err;
+	struct mlx4_cmd_mailbox *mailbox;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
+			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+	if (!err)
+		memcpy(config_dev, mailbox->buf, sizeof(*config_dev));
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+/* Conversion between the HW values and the actual functionality.
+ * The value is given by the array index,
+ * and the functionality is determined by the flags.
+ */
+static const u8 config_dev_csum_flags[] = {
+	[0] =	0,
+	[1] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
+	[2] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP	|
+		MLX4_RX_CSUM_MODE_L4,
+	[3] =	MLX4_RX_CSUM_MODE_L4			|
+		MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP	|
+		MLX4_RX_CSUM_MODE_MULTI_VLAN
+};
+
+int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
+			      struct mlx4_config_dev_params *params)
+{
+	struct mlx4_config_dev config_dev;
+	int err;
+	u8 csum_mask;
+
+#define CONFIG_DEV_RX_CSUM_MODE_MASK			0x7
+#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET	0
+#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET	4
+
+	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
+		return -ENOTSUPP;
+
+	err = mlx4_CONFIG_DEV_get(dev, &config_dev);
+	if (err)
+		return err;
+
+	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
+			CONFIG_DEV_RX_CSUM_MODE_MASK;
+
+	if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
+		return -EINVAL;
+	params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];
+
+	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
+			CONFIG_DEV_RX_CSUM_MODE_MASK;
+
+	if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
+		return -EINVAL;
+	params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];
+
+	params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
+
 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
 {
 	struct mlx4_config_dev config_dev;
@@ -1874,7 +1962,7 @@
 	config_dev.update_flags    = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
 	config_dev.vxlan_udp_dport = udp_port;
 
-	return mlx4_CONFIG_DEV(dev, &config_dev);
+	return mlx4_CONFIG_DEV_set(dev, &config_dev);
 }
 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
 
@@ -2144,3 +2232,142 @@
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	return err;
 }
+
+/* Access Reg commands */
+enum mlx4_access_reg_masks {
+	MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
+	MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
+	MLX4_ACCESS_REG_LEN_MASK = 0x7ff
+};
+
+struct mlx4_access_reg {
+	__be16 constant1;
+	u8 status;
+	u8 resrvd1;
+	__be16 reg_id;
+	u8 method;
+	u8 constant2;
+	__be32 resrvd2[2];
+	__be16 len_const;
+	__be16 resrvd3;
+#define MLX4_ACCESS_REG_HEADER_SIZE (20)
+	u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
+} __attribute__((__packed__));
+
+/**
+ * mlx4_ACCESS_REG - Generic access reg command.
+ * @dev: mlx4_dev.
+ * @reg_id: register ID to access.
+ * @method: Access method Read/Write.
+ * @reg_len: register length to Read/Write in bytes.
+ * @reg_data: reg_data pointer to Read/Write From/To.
+ *
+ * Access ConnectX registers FW command.
+ * Returns 0 on success and copies outbox mlx4_access_reg data
+ * field into reg_data or a negative error code.
+ */
+static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
+			   enum mlx4_access_reg_method method,
+			   u16 reg_len, void *reg_data)
+{
+	struct mlx4_cmd_mailbox *inbox, *outbox;
+	struct mlx4_access_reg *inbuf, *outbuf;
+	int err;
+
+	inbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(inbox))
+		return PTR_ERR(inbox);
+
+	outbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(outbox)) {
+		mlx4_free_cmd_mailbox(dev, inbox);
+		return PTR_ERR(outbox);
+	}
+
+	inbuf = inbox->buf;
+	outbuf = outbox->buf;
+
+	inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
+	inbuf->constant2 = 0x1;
+	inbuf->reg_id = cpu_to_be16(reg_id);
+	inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;
+
+	reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
+	inbuf->len_const =
+		cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
+			    ((0x3) << 12));
+
+	memcpy(inbuf->reg_data, reg_data, reg_len);
+	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
+			   MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
+			   MLX4_CMD_WRAPPED);
+	if (err)
+		goto out;
+
+	if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
+		err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
+		mlx4_err(dev,
+			 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
+			 reg_id, err);
+		goto out;
+	}
+
+	memcpy(reg_data, outbuf->reg_data, reg_len);
+out:
+	mlx4_free_cmd_mailbox(dev, inbox);
+	mlx4_free_cmd_mailbox(dev, outbox);
+	return err;
+}
+
+/* ConnectX registers IDs */
+enum mlx4_reg_id {
+	MLX4_REG_ID_PTYS = 0x5004,
+};
+
+/**
+ * mlx4_ACCESS_PTYS_REG - Access the PTYS (Port Type and Speed)
+ * register
+ * @dev: mlx4_dev.
+ * @method: Access method Read/Write.
+ * @ptys_reg: PTYS register data pointer.
+ *
+ * Access the ConnectX PTYS register to read/write the port
+ * type/speed configuration.
+ * Returns 0 on success or a negative error code.
+ */
+int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
+			 enum mlx4_access_reg_method method,
+			 struct mlx4_ptys_reg *ptys_reg)
+{
+	return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
+			       method, sizeof(*ptys_reg), ptys_reg);
+}
+EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
+
+int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd)
+{
+	struct mlx4_access_reg *inbuf = inbox->buf;
+	u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
+	u16 reg_id = be16_to_cpu(inbuf->reg_id);
+
+	if (slave != mlx4_master_func_num(dev) &&
+	    method == MLX4_ACCESS_REG_WRITE)
+		return -EPERM;
+
+	if (reg_id == MLX4_REG_ID_PTYS) {
+		struct mlx4_ptys_reg *ptys_reg =
+			(struct mlx4_ptys_reg *)inbuf->reg_data;
+
+		ptys_reg->local_port =
+			mlx4_slave_convert_port(dev, slave,
+						ptys_reg->local_port);
+	}
+
+	return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
+			    0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
+			    MLX4_CMD_NATIVE);
+}
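
A minimal usage sketch for the new CONFIG_DEV retrieval helper above; the caller context (a valid struct mlx4_dev *dev) is assumed and not part of the patch:

	struct mlx4_config_dev_params params;
	int err;

	err = mlx4_config_dev_retrieval(dev, &params);
	if (!err)
		mlx4_info(dev, "port1 rx csum flags 0x%x, vxlan dport %u\n",
			  params.rx_csum_flags_port_1,
			  params.vxlan_udp_dport);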
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 90de6e1..9f82196 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -901,9 +901,12 @@
 	struct mlx4_priv *priv = mlx4_priv(mdev);
 	enum mlx4_port_type types[MLX4_MAX_PORTS];
 	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
+	static DEFINE_MUTEX(set_port_type_mutex);
 	int i;
 	int err = 0;
 
+	mutex_lock(&set_port_type_mutex);
+
 	if (!strcmp(buf, "ib\n"))
 		info->tmp_type = MLX4_PORT_TYPE_IB;
 	else if (!strcmp(buf, "eth\n"))
@@ -912,7 +915,8 @@
 		info->tmp_type = MLX4_PORT_TYPE_AUTO;
 	else {
 		mlx4_err(mdev, "%s is not a supported port type\n", buf);
-		return -EINVAL;
+		err = -EINVAL;
+		goto err_out;
 	}
 
 	mlx4_stop_sense(mdev);
@@ -958,6 +962,9 @@
 out:
 	mlx4_start_sense(mdev);
 	mutex_unlock(&priv->port_mutex);
+err_out:
+	mutex_unlock(&set_port_type_mutex);
+
 	return err ? err : count;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index de10dbb..f8fc7bd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -947,6 +947,11 @@
 			  struct mlx4_cmd_mailbox *inbox,
 			  struct mlx4_cmd_mailbox *outbox,
 			  struct mlx4_cmd_info *cmd);
+int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd);
 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
 		     struct mlx4_vhcr *vhcr,
 		     struct mlx4_cmd_mailbox *inbox,
@@ -1273,6 +1278,11 @@
 					 struct mlx4_cmd_mailbox *inbox,
 					 struct mlx4_cmd_mailbox *outbox,
 					 struct mlx4_cmd_info *cmd);
+int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd);
 
 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8fef658..ef83d12 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -421,10 +421,16 @@
 	enum mlx4_qp_state indir_state;
 };
 
+enum mlx4_en_port_flag {
+	MLX4_EN_PORT_ANC = 1<<0, /* Auto-negotiation complete */
+	MLX4_EN_PORT_ANE = 1<<1, /* Auto-negotiation enabled */
+};
+
 struct mlx4_en_port_state {
 	int link_state;
 	int link_speed;
-	int transciver;
+	int transceiver;
+	u32 flags;
 };
 
 struct mlx4_en_pkt_stats {
@@ -475,7 +481,6 @@
 	u16 frag_size;
 	u16 frag_prefix_size;
 	u16 frag_stride;
-	u16 frag_align;
 };
 
 #ifdef CONFIG_MLX4_EN_DCB
@@ -829,6 +834,13 @@
 void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
 void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
 
+#define DEV_FEATURE_CHANGED(dev, new_features, feature) \
+	((dev->features & feature) ^ (new_features & feature))
+
+int mlx4_en_reset_config(struct net_device *dev,
+			 struct hwtstamp_config ts_config,
+			 netdev_features_t new_features);
+
 /*
  * Functions for time stamping
  */
@@ -838,9 +850,6 @@
 			    u64 timestamp);
 void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
 void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
-int mlx4_en_timestamp_config(struct net_device *dev,
-			     int tx_type,
-			     int rx_filter);
 
 /* Globals
  */
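
DEV_FEATURE_CHANGED() above is a plain XOR test on a single feature bit; a sketch of its expansion with hypothetical feature masks:

	netdev_features_t cur = NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA;
	netdev_features_t wanted = NETIF_F_HIGHDMA;	/* strips CTAG_RX */

	/* (cur & F) ^ (wanted & F) is non-zero iff bit F differs: */
	bool rx_vlan_changed = (cur & NETIF_F_HW_VLAN_CTAG_RX) ^
			       (wanted & NETIF_F_HW_VLAN_CTAG_RX); /* true */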
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 94eeb2c..30eb1ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1311,3 +1311,159 @@
 	return 0;
 }
 EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
+
+/* Cable Module Info */
+#define MODULE_INFO_MAX_READ 48
+
+#define I2C_ADDR_LOW  0x50
+#define I2C_ADDR_HIGH 0x51
+#define I2C_PAGE_SIZE 256
+
+/* Module Info Data */
+struct mlx4_cable_info {
+	u8	i2c_addr;
+	u8	page_num;
+	__be16	dev_mem_address;
+	__be16	reserved1;
+	__be16	size;
+	__be32	reserved2[2];
+	u8	data[MODULE_INFO_MAX_READ];
+};
+
+enum cable_info_err {
+	 CABLE_INF_INV_PORT      = 0x1,
+	 CABLE_INF_OP_NOSUP      = 0x2,
+	 CABLE_INF_NOT_CONN      = 0x3,
+	 CABLE_INF_NO_EEPRM      = 0x4,
+	 CABLE_INF_PAGE_ERR      = 0x5,
+	 CABLE_INF_INV_ADDR      = 0x6,
+	 CABLE_INF_I2C_ADDR      = 0x7,
+	 CABLE_INF_QSFP_VIO      = 0x8,
+	 CABLE_INF_I2C_BUSY      = 0x9,
+};
+
+#define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
+
+static inline const char *cable_info_mad_err_str(u16 mad_status)
+{
+	u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
+
+	switch (err) {
+	case CABLE_INF_INV_PORT:
+		return "invalid port selected";
+	case CABLE_INF_OP_NOSUP:
+		return "operation not supported for this port (the port is of type CX4 or internal)";
+	case CABLE_INF_NOT_CONN:
+		return "cable is not connected";
+	case CABLE_INF_NO_EEPRM:
+		return "the connected cable has no EPROM (passive copper cable)";
+	case CABLE_INF_PAGE_ERR:
+		return "page number is greater than 15";
+	case CABLE_INF_INV_ADDR:
+		return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
+	case CABLE_INF_I2C_ADDR:
+		return "invalid I2C slave address";
+	case CABLE_INF_QSFP_VIO:
+		return "at least one cable violates the QSFP specification and ignores the modsel signal";
+	case CABLE_INF_I2C_BUSY:
+		return "I2C bus is constantly busy";
+	}
+	return "Unknown Error";
+}
+
+/**
+ * mlx4_get_module_info - Read cable module EEPROM data
+ * @dev: mlx4_dev.
+ * @port: port number.
+ * @offset: byte offset in the EEPROM to start reading from.
+ * @size: number of bytes to read.
+ * @data: output buffer to put the requested data into.
+ *
+ * Reads cable module EEPROM data and stores it in the buffer
+ * pointed to by @data.
+ * Returns the number of bytes read on success or a negative
+ * error code.
+ */
+int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
+			 u16 offset, u16 size, u8 *data)
+{
+	struct mlx4_cmd_mailbox *inbox, *outbox;
+	struct mlx4_mad_ifc *inmad, *outmad;
+	struct mlx4_cable_info *cable_info;
+	u16 i2c_addr;
+	int ret;
+
+	if (size > MODULE_INFO_MAX_READ)
+		size = MODULE_INFO_MAX_READ;
+
+	inbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(inbox))
+		return PTR_ERR(inbox);
+
+	outbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(outbox)) {
+		mlx4_free_cmd_mailbox(dev, inbox);
+		return PTR_ERR(outbox);
+	}
+
+	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
+	outmad = (struct mlx4_mad_ifc *)(outbox->buf);
+
+	inmad->method = 0x1; /* Get */
+	inmad->class_version = 0x1;
+	inmad->mgmt_class = 0x1;
+	inmad->base_version = 0x1;
+	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
+
+	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
+		/* Cross-page reads are not allowed;
+		 * read only up to offset 256 in the low page.
+		 */
+		size -= offset + size - I2C_PAGE_SIZE;
+
+	i2c_addr = I2C_ADDR_LOW;
+	if (offset >= I2C_PAGE_SIZE) {
+		/* Reset offset to high page */
+		i2c_addr = I2C_ADDR_HIGH;
+		offset -= I2C_PAGE_SIZE;
+	}
+
+	cable_info = (struct mlx4_cable_info *)inmad->data;
+	cable_info->dev_mem_address = cpu_to_be16(offset);
+	cable_info->page_num = 0;
+	cable_info->i2c_addr = i2c_addr;
+	cable_info->size = cpu_to_be16(size);
+
+	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+			   MLX4_CMD_NATIVE);
+	if (ret)
+		goto out;
+
+	if (be16_to_cpu(outmad->status)) {
+		/* Mad returned with bad status */
+		ret = be16_to_cpu(outmad->status);
+		mlx4_warn(dev,
+			  "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
+			  0xFF60, port, i2c_addr, offset, size,
+			  ret, cable_info_mad_err_str(ret));
+
+		if (i2c_addr == I2C_ADDR_HIGH &&
+		    MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
+			/* Some SFP cables do not support i2c slave
+			 * address 0x51 (high page), abort silently.
+			 */
+			ret = 0;
+		else
+			ret = -ret;
+		goto out;
+	}
+	cable_info = (struct mlx4_cable_info *)outmad->data;
+	memcpy(data, cable_info->data, size);
+	ret = size;
+out:
+	mlx4_free_cmd_mailbox(dev, inbox);
+	mlx4_free_cmd_mailbox(dev, outbox);
+	return ret;
+}
+EXPORT_SYMBOL(mlx4_get_module_info);
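
A short worked example of the page-clipping arithmetic in mlx4_get_module_info() above, with hypothetical offsets:

	/* Flat EEPROM read request: offset 250, size 16. */
	u16 offset = 250, size = 16;

	/* Reads may not cross the 256-byte low page, so the size is
	 * clipped: 250 + 16 = 266 > 256, hence size -= 10, size = 6.
	 */
	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
		size -= offset + size - I2C_PAGE_SIZE;

	/* A follow-up read at offset 256 then selects the high I2C
	 * address: i2c_addr = I2C_ADDR_HIGH, offset -= 256 -> 0.
	 */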
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 5d2498d..d718ca0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2872,6 +2872,23 @@
 	return err;
 }
 
+int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd)
+{
+	int err;
+	u8 get = vhcr->op_modifier;
+
+	if (get != 1)
+		return -EPERM;
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+	return err;
+}
+
 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
 			      int len, struct res_mtt **res)
 {
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 9e4ddbb..66c2d50 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -326,13 +326,9 @@
 	    macintosh_config->ident == MAC_MODEL_P588 ||
 	    macintosh_config->ident == MAC_MODEL_P575 ||
 	    macintosh_config->ident == MAC_MODEL_C610) {
-		unsigned long flags;
 		int card_present;
 
-		local_irq_save(flags);
 		card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
-		local_irq_restore(flags);
-
 		if (!card_present) {
 			printk("none.\n");
 			return -ENODEV;
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 040b137..32497f0 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -6,10 +6,10 @@
 
 /* The header prepended to received packets. */
 struct rx_header {
-    ushort pad;			/* Pad. */
-    ushort rx_count;
-    ushort rx_status;		/* Unknown bit assignments :-<.  */
-    ushort cur_addr;		/* Apparently the current buffer address(?) */
+	ushort pad;		/* Pad. */
+	ushort rx_count;
+	ushort rx_status;	/* Unknown bit assignments :-<.  */
+	ushort cur_addr;	/* Apparently the current buffer address(?) */
 };
 
 #define PAR_DATA	0
@@ -29,22 +29,25 @@
 #define RdAddr	0xC0
 #define HNib	0x10
 
-enum page0_regs
-{
-    /* The first six registers hold the ethernet physical station address. */
-    PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
-    TxCNT0 = 6, TxCNT1 = 7,		/* The transmit byte count. */
-    TxSTAT = 8, RxSTAT = 9,		/* Tx and Rx status. */
-    ISR = 10, IMR = 11,			/* Interrupt status and mask. */
-    CMR1 = 12,				/* Command register 1. */
-    CMR2 = 13,				/* Command register 2. */
-    MODSEL = 14,			/* Mode select register. */
-    MAR = 14,				/* Memory address register (?). */
-    CMR2_h = 0x1d, };
+enum page0_regs {
+	/* The first six registers hold
+	 * the ethernet physical station address.
+	 */
+	PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
+	TxCNT0 = 6, TxCNT1 = 7,		/* The transmit byte count. */
+	TxSTAT = 8, RxSTAT = 9,		/* Tx and Rx status. */
+	ISR = 10, IMR = 11,		/* Interrupt status and mask. */
+	CMR1 = 12,			/* Command register 1. */
+	CMR2 = 13,			/* Command register 2. */
+	MODSEL = 14,		/* Mode select register. */
+	MAR = 14,			/* Memory address register (?). */
+	CMR2_h = 0x1d,
+};
 
-enum eepage_regs
-{ PROM_CMD = 6, PROM_DATA = 7 };	/* Note that PROM_CMD is in the "high" bits. */
-
+enum eepage_regs {
+	PROM_CMD = 6,
+	PROM_DATA = 7	/* Note that PROM_CMD is in the "high" bits. */
+};
 
 #define ISR_TxOK	0x01
 #define ISR_RxOK	0x04
@@ -72,141 +75,146 @@
 #define CMR2h_Normal	2	/* Accept physical and broadcast address. */
 #define CMR2h_PROMISC	3	/* Promiscuous mode. */
 
-/* An inline function used below: it differs from inb() by explicitly return an unsigned
-   char, saving a truncation. */
+/* An inline function used below: it differs from inb() by explicitly
+ * return an unsigned char, saving a truncation.
+ */
 static inline unsigned char inbyte(unsigned short port)
 {
-    unsigned char _v;
-    __asm__ __volatile__ ("inb %w1,%b0" :"=a" (_v):"d" (port));
-    return _v;
+	unsigned char _v;
+
+	__asm__ __volatile__ ("inb %w1,%b0" : "=a" (_v) : "d" (port));
+	return _v;
 }
 
 /* Read register OFFSET.
-   This command should always be terminated with read_end(). */
+ * This command should always be terminated with read_end().
+ */
 static inline unsigned char read_nibble(short port, unsigned char offset)
 {
-    unsigned char retval;
-    outb(EOC+offset, port + PAR_DATA);
-    outb(RdAddr+offset, port + PAR_DATA);
-    inbyte(port + PAR_STATUS);		/* Settling time delay */
-    retval = inbyte(port + PAR_STATUS);
-    outb(EOC+offset, port + PAR_DATA);
+	unsigned char retval;
 
-    return retval;
+	outb(EOC+offset, port + PAR_DATA);
+	outb(RdAddr+offset, port + PAR_DATA);
+	inbyte(port + PAR_STATUS);	/* Settling time delay */
+	retval = inbyte(port + PAR_STATUS);
+	outb(EOC+offset, port + PAR_DATA);
+
+	return retval;
 }
 
 /* Functions for bulk data read.  The interrupt line is always disabled. */
 /* Get a byte using read mode 0, reading data from the control lines. */
 static inline unsigned char read_byte_mode0(short ioaddr)
 {
-    unsigned char low_nib;
+	unsigned char low_nib;
 
-    outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
-    inbyte(ioaddr + PAR_STATUS);
-    low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
-    outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
-    inbyte(ioaddr + PAR_STATUS);	/* Settling time delay -- needed!  */
-    inbyte(ioaddr + PAR_STATUS);	/* Settling time delay -- needed!  */
-    return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+	outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+	inbyte(ioaddr + PAR_STATUS);
+	low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+	outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+	inbyte(ioaddr + PAR_STATUS);	/* Settling time delay -- needed!  */
+	inbyte(ioaddr + PAR_STATUS);	/* Settling time delay -- needed!  */
+	return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
 }
 
 /* The same as read_byte_mode0(), but does multiple inb()s for stability. */
 static inline unsigned char read_byte_mode2(short ioaddr)
 {
-    unsigned char low_nib;
+	unsigned char low_nib;
 
-    outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
-    inbyte(ioaddr + PAR_STATUS);
-    low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
-    outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
-    inbyte(ioaddr + PAR_STATUS);	/* Settling time delay -- needed!  */
-    return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+	outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+	inbyte(ioaddr + PAR_STATUS);
+	low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+	outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+	inbyte(ioaddr + PAR_STATUS);	/* Settling time delay -- needed!  */
+	return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
 }
 
 /* Read a byte through the data register. */
 static inline unsigned char read_byte_mode4(short ioaddr)
 {
-    unsigned char low_nib;
+	unsigned char low_nib;
 
-    outb(RdAddr | MAR, ioaddr + PAR_DATA);
-    low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
-    outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
-    return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+	outb(RdAddr | MAR, ioaddr + PAR_DATA);
+	low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+	outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+	return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
 }
 
 /* Read a byte through the data register, double reading to allow settling. */
 static inline unsigned char read_byte_mode6(short ioaddr)
 {
-    unsigned char low_nib;
+	unsigned char low_nib;
 
-    outb(RdAddr | MAR, ioaddr + PAR_DATA);
-    inbyte(ioaddr + PAR_STATUS);
-    low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
-    outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
-    inbyte(ioaddr + PAR_STATUS);
-    return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+	outb(RdAddr | MAR, ioaddr + PAR_DATA);
+	inbyte(ioaddr + PAR_STATUS);
+	low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+	outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+	inbyte(ioaddr + PAR_STATUS);
+	return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
 }
 
 static inline void
 write_reg(short port, unsigned char reg, unsigned char value)
 {
-    unsigned char outval;
-    outb(EOC | reg, port + PAR_DATA);
-    outval = WrAddr | reg;
-    outb(outval, port + PAR_DATA);
-    outb(outval, port + PAR_DATA);	/* Double write for PS/2. */
+	unsigned char outval;
 
-    outval &= 0xf0;
-    outval |= value;
-    outb(outval, port + PAR_DATA);
-    outval &= 0x1f;
-    outb(outval, port + PAR_DATA);
-    outb(outval, port + PAR_DATA);
+	outb(EOC | reg, port + PAR_DATA);
+	outval = WrAddr | reg;
+	outb(outval, port + PAR_DATA);
+	outb(outval, port + PAR_DATA);	/* Double write for PS/2. */
 
-    outb(EOC | outval, port + PAR_DATA);
+	outval &= 0xf0;
+	outval |= value;
+	outb(outval, port + PAR_DATA);
+	outval &= 0x1f;
+	outb(outval, port + PAR_DATA);
+	outb(outval, port + PAR_DATA);
+
+	outb(EOC | outval, port + PAR_DATA);
 }
 
 static inline void
 write_reg_high(short port, unsigned char reg, unsigned char value)
 {
-    unsigned char outval = EOC | HNib | reg;
+	unsigned char outval = EOC | HNib | reg;
 
-    outb(outval, port + PAR_DATA);
-    outval &= WrAddr | HNib | 0x0f;
-    outb(outval, port + PAR_DATA);
-    outb(outval, port + PAR_DATA);	/* Double write for PS/2. */
+	outb(outval, port + PAR_DATA);
+	outval &= WrAddr | HNib | 0x0f;
+	outb(outval, port + PAR_DATA);
+	outb(outval, port + PAR_DATA);	/* Double write for PS/2. */
 
-    outval = WrAddr | HNib | value;
-    outb(outval, port + PAR_DATA);
-    outval &= HNib | 0x0f;		/* HNib | value */
-    outb(outval, port + PAR_DATA);
-    outb(outval, port + PAR_DATA);
+	outval = WrAddr | HNib | value;
+	outb(outval, port + PAR_DATA);
+	outval &= HNib | 0x0f;		/* HNib | value */
+	outb(outval, port + PAR_DATA);
+	outb(outval, port + PAR_DATA);
 
-    outb(EOC | HNib | outval, port + PAR_DATA);
+	outb(EOC | HNib | outval, port + PAR_DATA);
 }
 
 /* Write a byte out using nibble mode.  The low nibble is written first. */
 static inline void
 write_reg_byte(short port, unsigned char reg, unsigned char value)
 {
-    unsigned char outval;
-    outb(EOC | reg, port + PAR_DATA); 	/* Reset the address register. */
-    outval = WrAddr | reg;
-    outb(outval, port + PAR_DATA);
-    outb(outval, port + PAR_DATA);	/* Double write for PS/2. */
+	unsigned char outval;
 
-    outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
-    outb(value & 0x0f, port + PAR_DATA);
-    value >>= 4;
-    outb(value, port + PAR_DATA);
-    outb(0x10 | value, port + PAR_DATA);
-    outb(0x10 | value, port + PAR_DATA);
+	outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */
+	outval = WrAddr | reg;
+	outb(outval, port + PAR_DATA);
+	outb(outval, port + PAR_DATA);	/* Double write for PS/2. */
 
-    outb(EOC  | value, port + PAR_DATA); 	/* Reset the address register. */
+	outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
+	outb(value & 0x0f, port + PAR_DATA);
+	value >>= 4;
+	outb(value, port + PAR_DATA);
+	outb(0x10 | value, port + PAR_DATA);
+	outb(0x10 | value, port + PAR_DATA);
+
+	outb(EOC | value, port + PAR_DATA); /* Reset the address register. */
 }
 
-/*
- * Bulk data writes to the packet buffer.  The interrupt line remains enabled.
+/* Bulk data writes to the packet buffer.  The interrupt line remains enabled.
  * The first, faster method uses only the dataport (data modes 0, 2 & 4).
  * The second (backup) method uses data and control regs (modes 1, 3 & 5).
  * It should only be needed when there is skew between the individual data
@@ -214,28 +222,28 @@
  */
 static inline void write_byte_mode0(short ioaddr, unsigned char value)
 {
-    outb(value & 0x0f, ioaddr + PAR_DATA);
-    outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+	outb(value & 0x0f, ioaddr + PAR_DATA);
+	outb((value>>4) | 0x10, ioaddr + PAR_DATA);
 }
 
 static inline void write_byte_mode1(short ioaddr, unsigned char value)
 {
-    outb(value & 0x0f, ioaddr + PAR_DATA);
-    outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
-    outb((value>>4) | 0x10, ioaddr + PAR_DATA);
-    outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
+	outb(value & 0x0f, ioaddr + PAR_DATA);
+	outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
+	outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+	outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
 }
 
 /* Write 16bit VALUE to the packet buffer: the same as above just doubled. */
 static inline void write_word_mode0(short ioaddr, unsigned short value)
 {
-    outb(value & 0x0f, ioaddr + PAR_DATA);
-    value >>= 4;
-    outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
-    value >>= 4;
-    outb(value & 0x0f, ioaddr + PAR_DATA);
-    value >>= 4;
-    outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+	outb(value & 0x0f, ioaddr + PAR_DATA);
+	value >>= 4;
+	outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+	value >>= 4;
+	outb(value & 0x0f, ioaddr + PAR_DATA);
+	value >>= 4;
+	outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
 }
 
 /*  EEPROM_Ctrl bits. */
@@ -248,10 +256,10 @@
 
 /* Delay between EEPROM clock transitions. */
 #define eeprom_delay(ticks) \
-do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0)
 
 /* The EEPROM commands include the always-set leading bit. */
 #define EE_WRITE_CMD(offset)	(((5 << 6) + (offset)) << 17)
-#define EE_READ(offset) 	(((6 << 6) + (offset)) << 17)
+#define EE_READ(offset)		(((6 << 6) + (offset)) << 17)
 #define EE_ERASE(offset)	(((7 << 6) + (offset)) << 17)
 #define EE_CMD_SIZE	27	/* The command+address+data size. */
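
The byte-read helpers above all follow the same nibble protocol: the low
nibble of each byte arrives in STATUS bits 3-6, and the high nibble needs a
one-bit left shift into bits 4-7. A minimal, self-contained sketch of that
arithmetic (helper and test values hypothetical, assuming the same bit
layout as read_byte_mode0()):

	#include <stdio.h>

	/* Stand-alone model of read_byte_mode0()'s nibble maths:
	 * lo_status/hi_status are raw STATUS-port reads taken after
	 * the Ctrl_LNibRead and Ctrl_HNibRead control writes. */
	static unsigned char combine_nibbles(unsigned char lo_status,
					     unsigned char hi_status)
	{
		unsigned char low_nib = (lo_status >> 3) & 0x0f;

		return low_nib | ((hi_status << 1) & 0xf0);
	}

	int main(void)
	{
		/* 0x5a as it would appear on the status lines. */
		printf("%#x\n", combine_nibbles(0x5a << 3, 0x5a >> 1));
		return 0;
	}
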
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 60e9c2c..dbe8606 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2769,10 +2769,6 @@
 
 	/* get base addr */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (unlikely(res == NULL)) {
-		dev_err(&pdev->dev, "invalid resource\n");
-		return -EINVAL;
-	}
 
 	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
 	if (!ndev)
@@ -2781,8 +2777,6 @@
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
-	/* The sh Ether-specific entries in the device structure. */
-	ndev->base_addr = res->start;
 	devno = pdev->id;
 	if (devno < 0)
 		devno = 0;
@@ -2806,6 +2800,8 @@
 		goto out_release;
 	}
 
+	ndev->base_addr = res->start;
+
 	spin_lock_init(&mdp->lock);
 	mdp->pdev = pdev;
 
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index b147d46..7fd6e27 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -90,9 +90,6 @@
 
 	/* Get memory resource */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		goto err_out;
-
 	addr = devm_ioremap_resource(dev, res);
 	if (IS_ERR(addr))
 		return PTR_ERR(addr);
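
Both resource-check removals above (sh_eth and sxgbe) lean on the same
fact: devm_ioremap_resource() validates the resource itself, logging
"invalid resource" and returning ERR_PTR(-EINVAL) when handed NULL, so the
explicit check after platform_get_resource() is redundant once the region
is mapped through it. A hedged sketch of the resulting probe pattern
(function name hypothetical):

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		/* No NULL check needed; devm_ioremap_resource()
		 * rejects a NULL resource for us. */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* ... use base; the mapping is released automatically
		 * when the device is unbound ... */
		return 0;
	}
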
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index a77f05c..fbb6cfa 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3689,6 +3689,11 @@
 	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
 	.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
 	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+	.sriov_init = efx_ef10_sriov_init,
+	.sriov_fini = efx_ef10_sriov_fini,
+	.sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed,
+	.sriov_wanted = efx_ef10_sriov_wanted,
+	.sriov_reset = efx_ef10_sriov_reset,
 
 	.revision = EFX_REV_HUNT_A0,
 	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b2cc590..b49d048 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1314,7 +1314,7 @@
 	/* If RSS is requested for the PF *and* VFs then we can't write RSS
 	 * table entries that are inaccessible to VFs
 	 */
-	if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+	if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
 	    count > efx_vf_size(efx)) {
 		netif_warn(efx, probe, efx->net_dev,
 			   "Reducing number of RSS channels from %u to %u for "
@@ -1426,7 +1426,9 @@
 	}
 
 	/* RSS might be usable on VFs even if it is disabled on the PF */
-	efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
+	efx->rss_spread = ((efx->n_rx_channels > 1 ||
+			    !efx->type->sriov_wanted(efx)) ?
 			   efx->n_rx_channels : efx_vf_size(efx));
 
 	return 0;
@@ -2166,7 +2168,7 @@
 	}
 
 	ether_addr_copy(net_dev->dev_addr, new_addr);
-	efx_sriov_mac_address_changed(efx);
+	efx->type->sriov_mac_address_changed(efx);
 
 	/* Reconfigure the MAC */
 	mutex_lock(&efx->mac_lock);
@@ -2210,10 +2212,10 @@
 	.ndo_set_rx_mode	= efx_set_rx_mode,
 	.ndo_set_features	= efx_set_features,
 #ifdef CONFIG_SFC_SRIOV
-	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
-	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
-	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
-	.ndo_get_vf_config	= efx_sriov_get_vf_config,
+	.ndo_set_vf_mac		= efx_siena_sriov_set_vf_mac,
+	.ndo_set_vf_vlan	= efx_siena_sriov_set_vf_vlan,
+	.ndo_set_vf_spoofchk	= efx_siena_sriov_set_vf_spoofchk,
+	.ndo_get_vf_config	= efx_siena_sriov_get_vf_config,
 #endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = efx_netpoll,
@@ -2433,7 +2435,7 @@
 	if (rc)
 		goto fail;
 	efx_restore_filters(efx);
-	efx_sriov_reset(efx);
+	efx->type->sriov_reset(efx);
 
 	mutex_unlock(&efx->mac_lock);
 
@@ -2826,7 +2828,7 @@
 	efx_disable_interrupts(efx);
 	rtnl_unlock();
 
-	efx_sriov_fini(efx);
+	efx->type->sriov_fini(efx);
 	efx_unregister_netdev(efx);
 
 	efx_mtd_remove(efx);
@@ -3023,7 +3025,7 @@
 	if (rc)
 		goto fail4;
 
-	rc = efx_sriov_init(efx);
+	rc = efx->type->sriov_init(efx);
 	if (rc)
 		netif_err(efx, probe, efx->net_dev,
 			  "SR-IOV can't be enabled rc %d\n", rc);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 1570375..f166c8e 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2766,6 +2766,11 @@
 	.mtd_write = falcon_mtd_write,
 	.mtd_sync = falcon_mtd_sync,
 #endif
+	.sriov_init = efx_falcon_sriov_init,
+	.sriov_fini = efx_falcon_sriov_fini,
+	.sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
+	.sriov_wanted = efx_falcon_sriov_wanted,
+	.sriov_reset = efx_falcon_sriov_reset,
 
 	.revision = EFX_REV_FALCON_A1,
 	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
@@ -2862,6 +2867,11 @@
 	.mtd_write = falcon_mtd_write,
 	.mtd_sync = falcon_mtd_sync,
 #endif
+	.sriov_init = efx_falcon_sriov_init,
+	.sriov_fini = efx_falcon_sriov_fini,
+	.sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
+	.sriov_wanted = efx_falcon_sriov_wanted,
+	.sriov_reset = efx_falcon_sriov_reset,
 
 	.revision = EFX_REV_FALCON_B0,
 	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 6859437..7597532 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -226,6 +226,9 @@
 				    struct efx_special_buffer *buffer,
 				    unsigned int len)
 {
+#ifdef CONFIG_SFC_SRIOV
+	struct siena_nic_data *nic_data = efx->nic_data;
+#endif
 	len = ALIGN(len, EFX_BUF_SIZE);
 
 	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
@@ -237,8 +240,8 @@
 	buffer->index = efx->next_buffer_table;
 	efx->next_buffer_table += buffer->entries;
 #ifdef CONFIG_SFC_SRIOV
-	BUG_ON(efx_sriov_enabled(efx) &&
-	       efx->vf_buftbl_base < efx->next_buffer_table);
+	BUG_ON(efx_siena_sriov_enabled(efx) &&
+	       nic_data->vf_buftbl_base < efx->next_buffer_table);
 #endif
 
 	netif_dbg(efx, probe, efx->net_dev,
@@ -667,7 +670,7 @@
 		 * the firmware (though we will still have to poll for
 		 * completion). If that fails, fall back to the old scheme.
 		 */
-		if (efx_sriov_enabled(efx)) {
+		if (efx_siena_sriov_enabled(efx)) {
 			rc = efx_mcdi_flush_rxqs(efx);
 			if (!rc)
 				goto wait;
@@ -1195,13 +1198,13 @@
 		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
 			   channel->channel, ev_sub_data);
 		efx_farch_handle_tx_flush_done(efx, event);
-		efx_sriov_tx_flush_done(efx, event);
+		efx_siena_sriov_tx_flush_done(efx, event);
 		break;
 	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
 		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
 			   channel->channel, ev_sub_data);
 		efx_farch_handle_rx_flush_done(efx, event);
-		efx_sriov_rx_flush_done(efx, event);
+		efx_siena_sriov_rx_flush_done(efx, event);
 		break;
 	case FSE_AZ_EVQ_INIT_DONE_EV:
 		netif_dbg(efx, hw, efx->net_dev,
@@ -1240,7 +1243,7 @@
 				  ev_sub_data);
 			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
 		} else
-			efx_sriov_desc_fetch_err(efx, ev_sub_data);
+			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
 		break;
 	case FSE_BZ_TX_DSC_ERROR_EV:
 		if (ev_sub_data < EFX_VI_BASE) {
@@ -1250,7 +1253,7 @@
 				  ev_sub_data);
 			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
 		} else
-			efx_sriov_desc_fetch_err(efx, ev_sub_data);
+			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
 		break;
 	default:
 		netif_vdbg(efx, hw, efx->net_dev,
@@ -1315,7 +1318,7 @@
 			efx_farch_handle_driver_event(channel, &event);
 			break;
 		case FSE_CZ_EV_CODE_USER_EV:
-			efx_sriov_event(channel, &event);
+			efx_siena_sriov_event(channel, &event);
 			break;
 		case FSE_CZ_EV_CODE_MCDI_EV:
 			efx_mcdi_process_event(channel, &event);
@@ -1668,6 +1671,10 @@
 {
 	unsigned vi_count, buftbl_min;
 
+#ifdef CONFIG_SFC_SRIOV
+	struct siena_nic_data *nic_data = efx->nic_data;
+#endif
+
 	/* Account for the buffer table entries backing the datapath channels
 	 * and the descriptor caches for those channels.
 	 */
@@ -1678,10 +1685,10 @@
 	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
 
 #ifdef CONFIG_SFC_SRIOV
-	if (efx_sriov_wanted(efx)) {
+	if (efx->type->sriov_wanted(efx)) {
 		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
 
-		efx->vf_buftbl_base = buftbl_min;
+		nic_data->vf_buftbl_base = buftbl_min;
 
 		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
 		vi_count = max(vi_count, EFX_VI_BASE);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 5239cf9..d37928f 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1035,7 +1035,7 @@
 		/* MAC stats are gathered lazily.  We can ignore this. */
 		break;
 	case MCDI_EVENT_CODE_FLR:
-		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
+		efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
 		break;
 	case MCDI_EVENT_CODE_PTP_RX:
 	case MCDI_EVENT_CODE_PTP_FAULT:
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9ede320..325dd94 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -913,13 +913,6 @@
  * @vf_count: Number of VFs intended to be enabled.
  * @vf_init_count: Number of VFs that have been fully initialised.
  * @vi_scale: log2 number of vnics per VF.
- * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
- * @vfdi_status: Common VFDI status page to be dmad to VF address space.
- * @local_addr_list: List of local addresses. Protected by %local_lock.
- * @local_page_list: List of DMA addressable pages used to broadcast
- *	%local_addr_list. Protected by %local_lock.
- * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
- * @peer_work: Work item to broadcast peer addresses to VMs.
  * @ptp_data: PTP state data
  * @vpd_sn: Serial number read from VPD
  * @monitor_work: Hardware monitor workitem
@@ -1060,17 +1053,10 @@
 	wait_queue_head_t flush_wq;
 
 #ifdef CONFIG_SFC_SRIOV
-	struct efx_channel *vfdi_channel;
 	struct efx_vf *vf;
 	unsigned vf_count;
 	unsigned vf_init_count;
 	unsigned vi_scale;
-	unsigned vf_buftbl_base;
-	struct efx_buffer vfdi_status;
-	struct list_head local_addr_list;
-	struct list_head local_page_list;
-	struct mutex local_lock;
-	struct work_struct peer_work;
 #endif
 
 	struct efx_ptp_data *ptp_data;
@@ -1344,6 +1330,11 @@
 	int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
 	int (*ptp_set_ts_config)(struct efx_nic *efx,
 				 struct hwtstamp_config *init);
+	int (*sriov_init)(struct efx_nic *efx);
+	void (*sriov_fini)(struct efx_nic *efx);
+	void (*sriov_mac_address_changed)(struct efx_nic *efx);
+	bool (*sriov_wanted)(struct efx_nic *efx);
+	void (*sriov_reset)(struct efx_nic *efx);
 
 	int revision;
 	unsigned int txd_ptr_tbl_base;
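
The five sriov_* members added here are the hinge of this series: generic
code stops calling a single efx_sriov_*() implementation and instead
dispatches through the per-NIC-type method table, as the efx.c hunks above
already show:

	/* before: one implementation linked for every NIC generation */
	rc = efx_sriov_init(efx);

	/* after: Falcon, Siena and EF10 each install their own handler
	 * (or a trivial stub), so callers need no generation checks */
	rc = efx->type->sriov_init(efx);
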
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index f77cce0..93d10cbb 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -378,12 +378,30 @@
 
 /**
  * struct siena_nic_data - Siena NIC state
+ * @efx: Pointer back to main interface structure
  * @wol_filter_id: Wake-on-LAN packet filter id
  * @stats: Hardware statistics
+ * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
+ * @vfdi_status: Common VFDI status page to be DMAed to VF address space.
+ * @local_addr_list: List of local addresses. Protected by %local_lock.
+ * @local_page_list: List of DMA addressable pages used to broadcast
+ *	%local_addr_list. Protected by %local_lock.
+ * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
+ * @peer_work: Work item to broadcast peer addresses to VMs.
  */
 struct siena_nic_data {
+	struct efx_nic *efx;
 	int wol_filter_id;
 	u64 stats[SIENA_STAT_COUNT];
+#ifdef CONFIG_SFC_SRIOV
+	struct efx_channel *vfdi_channel;
+	unsigned vf_buftbl_base;
+	struct efx_buffer vfdi_status;
+	struct list_head local_addr_list;
+	struct list_head local_page_list;
+	struct mutex local_lock;
+	struct work_struct peer_work;
+#endif
 };
 
 enum {
@@ -522,62 +540,88 @@
 
 #ifdef CONFIG_SFC_SRIOV
 
-static inline bool efx_sriov_wanted(struct efx_nic *efx)
+/* SIENA */
+static inline bool efx_siena_sriov_wanted(struct efx_nic *efx)
 {
 	return efx->vf_count != 0;
 }
-static inline bool efx_sriov_enabled(struct efx_nic *efx)
+
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
 {
 	return efx->vf_init_count != 0;
 }
+
 static inline unsigned int efx_vf_size(struct efx_nic *efx)
 {
 	return 1 << efx->vi_scale;
 }
 
 int efx_init_sriov(void);
-void efx_sriov_probe(struct efx_nic *efx);
-int efx_sriov_init(struct efx_nic *efx);
-void efx_sriov_mac_address_changed(struct efx_nic *efx);
-void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
-void efx_sriov_reset(struct efx_nic *efx);
-void efx_sriov_fini(struct efx_nic *efx);
+void efx_siena_sriov_probe(struct efx_nic *efx);
+int efx_siena_sriov_init(struct efx_nic *efx);
+void efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
+void efx_siena_sriov_reset(struct efx_nic *efx);
+void efx_siena_sriov_fini(struct efx_nic *efx);
 void efx_fini_sriov(void);
 
+/* EF10 */
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
+
 #else
 
-static inline bool efx_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline bool efx_sriov_enabled(struct efx_nic *efx) { return false; }
+/* SIENA */
+static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; }
 static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
-
 static inline int efx_init_sriov(void) { return 0; }
-static inline void efx_sriov_probe(struct efx_nic *efx) {}
-static inline int efx_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_sriov_tx_flush_done(struct efx_nic *efx,
-					   efx_qword_t *event) {}
-static inline void efx_sriov_rx_flush_done(struct efx_nic *efx,
-					   efx_qword_t *event) {}
-static inline void efx_sriov_event(struct efx_channel *channel,
-				   efx_qword_t *event) {}
-static inline void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) {}
-static inline void efx_sriov_flr(struct efx_nic *efx, unsigned flr) {}
-static inline void efx_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_sriov_fini(struct efx_nic *efx) {}
+static inline void efx_siena_sriov_probe(struct efx_nic *efx) {}
+static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx,
+						 efx_qword_t *event) {}
+static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx,
+						 efx_qword_t *event) {}
+static inline void efx_siena_sriov_event(struct efx_channel *channel,
+					 efx_qword_t *event) {}
+static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx,
+						  unsigned dmaq) {}
+static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {}
+static inline void efx_siena_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_siena_sriov_fini(struct efx_nic *efx) {}
 static inline void efx_fini_sriov(void) {}
 
+/* EF10 */
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
+
 #endif
 
-int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
-int efx_sriov_get_vf_config(struct net_device *dev, int vf,
-			    struct ifla_vf_info *ivf);
-int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
-			      bool spoofchk);
+/* FALCON */
+static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {}
+
+int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf,
+				u16 vlan, u8 qos);
+int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf,
+				  struct ifla_vf_info *ivf);
+int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+				    bool spoofchk);
 
 struct ethtool_ts_info;
 int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
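
The EF10 and Falcon entries deliberately resolve to always-failing stubs
(false or -EOPNOTSUPP): neither NIC supports SR-IOV through this path, and
installing a stub keeps every efx->type->sriov_*() call site unconditional.
A reduced model of the scheme (names hypothetical):

	#include <linux/errno.h>

	struct efx_nic;		/* opaque in this sketch */

	struct example_nic_type {
		int (*sriov_init)(struct efx_nic *efx);
	};

	/* NICs without SR-IOV still fill the slot, so callers never
	 * test for a NULL hook or hide the call behind an #ifdef. */
	static int example_stub_sriov_init(struct efx_nic *efx)
	{
		return -EOPNOTSUPP;
	}

	static const struct example_nic_type example_falcon_a1 = {
		.sriov_init = example_stub_sriov_init,
	};
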
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index ae69685..3583f02 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -251,6 +251,7 @@
 	nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
 	if (!nic_data)
 		return -ENOMEM;
+	nic_data->efx = efx;
 	efx->nic_data = nic_data;
 
 	if (efx_farch_fpga_ver(efx) != 0) {
@@ -306,7 +307,7 @@
 	if (rc)
 		goto fail5;
 
-	efx_sriov_probe(efx);
+	efx_siena_sriov_probe(efx);
 	efx_ptp_defer_probe_with_channel(efx);
 
 	return 0;
@@ -996,6 +997,11 @@
 #endif
 	.ptp_write_host_time = siena_ptp_write_host_time,
 	.ptp_set_ts_config = siena_ptp_set_ts_config,
+	.sriov_init = efx_siena_sriov_init,
+	.sriov_fini = efx_siena_sriov_fini,
+	.sriov_mac_address_changed = efx_siena_sriov_mac_address_changed,
+	.sriov_wanted = efx_siena_sriov_wanted,
+	.sriov_reset = efx_siena_sriov_reset,
 
 	.revision = EFX_REV_SIENA_A0,
 	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 43d2e64..a8bbbad 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -66,7 +66,7 @@
  * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
  *	@peer_page_addrs and @peer_page_count from simultaneous
  *	updates by the VM and consumption by
- *	efx_sriov_update_vf_addr()
+ *	efx_siena_sriov_update_vf_addr()
  * @peer_page_addrs: Pointer to an array of guest pages for local addresses.
  * @peer_page_count: Number of entries in @peer_page_addrs.
  * @evq0_addrs: Array of guest pages backing evq0.
@@ -194,8 +194,8 @@
 	return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
 }
 
-static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
-			 unsigned *vi_scale_out, unsigned *vf_total_out)
+static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
+			       unsigned *vi_scale_out, unsigned *vf_total_out)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
 	MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
@@ -227,18 +227,20 @@
 	return 0;
 }
 
-static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
+static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled)
 {
+	struct siena_nic_data *nic_data = efx->nic_data;
 	efx_oword_t reg;
 
 	EFX_POPULATE_OWORD_2(reg,
 			     FRF_CZ_USREV_DIS, enabled ? 0 : 1,
-			     FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel);
+			     FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel);
 	efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
 }
 
-static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
-			    unsigned int count)
+static int efx_siena_sriov_memcpy(struct efx_nic *efx,
+				  struct efx_memcpy_req *req,
+				  unsigned int count)
 {
 	MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1);
 	MCDI_DECLARE_STRUCT_PTR(record);
@@ -297,7 +299,7 @@
 /* The TX filter is entirely controlled by this driver, and is modified
  * underneath the feet of the VF
  */
-static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	struct efx_filter_spec filter;
@@ -341,7 +343,7 @@
 }
 
 /* The RX filter is managed here on behalf of the VF driver */
-static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
 	struct efx_filter_spec filter;
@@ -380,22 +382,26 @@
 	}
 }
 
-static void __efx_sriov_update_vf_addr(struct efx_vf *vf)
+static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
 {
-	efx_sriov_reset_tx_filter(vf);
-	efx_sriov_reset_rx_filter(vf);
-	queue_work(vfdi_workqueue, &vf->efx->peer_work);
+	struct efx_nic *efx = vf->efx;
+	struct siena_nic_data *nic_data = efx->nic_data;
+
+	efx_siena_sriov_reset_tx_filter(vf);
+	efx_siena_sriov_reset_rx_filter(vf);
+	queue_work(vfdi_workqueue, &nic_data->peer_work);
 }
 
 /* Push the peer list to this VF. The caller must hold status_lock to interlock
  * with VFDI requests, and they must be serialised against manipulation of
  * local_page_list, either by acquiring local_lock or by running from
- * efx_sriov_peer_work()
+ * efx_siena_sriov_peer_work()
  */
-static void __efx_sriov_push_vf_status(struct efx_vf *vf)
+static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
-	struct vfdi_status *status = efx->vfdi_status.addr;
+	struct siena_nic_data *nic_data = efx->nic_data;
+	struct vfdi_status *status = nic_data->vfdi_status.addr;
 	struct efx_memcpy_req copy[4];
 	struct efx_endpoint_page *epp;
 	unsigned int pos, count;
@@ -421,7 +427,7 @@
 	 */
 	data_offset = offsetof(struct vfdi_status, version);
 	copy[1].from_rid = efx->pci_dev->devfn;
-	copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset;
+	copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset;
 	copy[1].to_rid = vf->pci_rid;
 	copy[1].to_addr = vf->status_addr + data_offset;
 	copy[1].length =  status->length - data_offset;
@@ -429,7 +435,7 @@
 	/* Copy the peer pages */
 	pos = 2;
 	count = 0;
-	list_for_each_entry(epp, &efx->local_page_list, link) {
+	list_for_each_entry(epp, &nic_data->local_page_list, link) {
 		if (count == vf->peer_page_count) {
 			/* The VF driver will know they need to provide more
 			 * pages because peer_addr_count is too large.
@@ -444,7 +450,7 @@
 		copy[pos].length = EFX_PAGE_SIZE;
 
 		if (++pos == ARRAY_SIZE(copy)) {
-			efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
+			efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
 			pos = 0;
 		}
 		++count;
@@ -456,7 +462,7 @@
 	copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
 						       generation_end);
 	copy[pos].length = sizeof(status->generation_end);
-	efx_sriov_memcpy(efx, copy, pos + 1);
+	efx_siena_sriov_memcpy(efx, copy, pos + 1);
 
 	/* Notify the guest */
 	EFX_POPULATE_QWORD_3(event,
@@ -469,8 +475,8 @@
 				 &event);
 }
 
-static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
-			   u64 *addr, unsigned count)
+static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset,
+				 u64 *addr, unsigned count)
 {
 	efx_qword_t buf;
 	unsigned pos;
@@ -539,7 +545,7 @@
 		return VFDI_RC_EINVAL;
 	}
 
-	efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);
+	efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);
 
 	EFX_POPULATE_OWORD_3(reg,
 			     FRF_CZ_TIMER_Q_EN, 1,
@@ -584,7 +590,7 @@
 	}
 	if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
 		++vf->rxq_count;
-	efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
+	efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
 
 	label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
 	EFX_POPULATE_OWORD_6(reg,
@@ -628,7 +634,7 @@
 	if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
 		++vf->txq_count;
 	mutex_unlock(&vf->txq_lock);
-	efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
+	efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
 
 	eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;
 
@@ -742,8 +748,8 @@
 		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
 				 vf_offset + index);
 	}
-	efx_sriov_bufs(efx, vf->buftbl_base, NULL,
-		       EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
+	efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL,
+			     EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
 	efx_vfdi_flush_clear(vf);
 
 	vf->evq0_count = 0;
@@ -754,6 +760,7 @@
 static int efx_vfdi_insert_filter(struct efx_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
+	struct siena_nic_data *nic_data = efx->nic_data;
 	struct vfdi_req *req = vf->buf.addr;
 	unsigned vf_rxq = req->u.mac_filter.rxq;
 	unsigned flags;
@@ -776,17 +783,20 @@
 	vf->rx_filter_qid = vf_rxq;
 	vf->rx_filtering = true;
 
-	efx_sriov_reset_rx_filter(vf);
-	queue_work(vfdi_workqueue, &efx->peer_work);
+	efx_siena_sriov_reset_rx_filter(vf);
+	queue_work(vfdi_workqueue, &nic_data->peer_work);
 
 	return VFDI_RC_SUCCESS;
 }
 
 static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
 {
+	struct efx_nic *efx = vf->efx;
+	struct siena_nic_data *nic_data = efx->nic_data;
+
 	vf->rx_filtering = false;
-	efx_sriov_reset_rx_filter(vf);
-	queue_work(vfdi_workqueue, &vf->efx->peer_work);
+	efx_siena_sriov_reset_rx_filter(vf);
+	queue_work(vfdi_workqueue, &nic_data->peer_work);
 
 	return VFDI_RC_SUCCESS;
 }
@@ -794,6 +804,7 @@
 static int efx_vfdi_set_status_page(struct efx_vf *vf)
 {
 	struct efx_nic *efx = vf->efx;
+	struct siena_nic_data *nic_data = efx->nic_data;
 	struct vfdi_req *req = vf->buf.addr;
 	u64 page_count = req->u.set_status_page.peer_page_count;
 	u64 max_page_count =
@@ -809,7 +820,7 @@
 		return VFDI_RC_EINVAL;
 	}
 
-	mutex_lock(&efx->local_lock);
+	mutex_lock(&nic_data->local_lock);
 	mutex_lock(&vf->status_lock);
 	vf->status_addr = req->u.set_status_page.dma_addr;
 
@@ -828,9 +839,9 @@
 		}
 	}
 
-	__efx_sriov_push_vf_status(vf);
+	__efx_siena_sriov_push_vf_status(vf);
 	mutex_unlock(&vf->status_lock);
-	mutex_unlock(&efx->local_lock);
+	mutex_unlock(&nic_data->local_lock);
 
 	return VFDI_RC_SUCCESS;
 }
@@ -857,7 +868,7 @@
 	[VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
 };
 
-static void efx_sriov_vfdi(struct work_struct *work)
+static void efx_siena_sriov_vfdi(struct work_struct *work)
 {
 	struct efx_vf *vf = container_of(work, struct efx_vf, req);
 	struct efx_nic *efx = vf->efx;
@@ -872,7 +883,7 @@
 	copy[0].to_rid = efx->pci_dev->devfn;
 	copy[0].to_addr = vf->buf.dma_addr;
 	copy[0].length = EFX_PAGE_SIZE;
-	rc = efx_sriov_memcpy(efx, copy, 1);
+	rc = efx_siena_sriov_memcpy(efx, copy, 1);
 	if (rc) {
 		/* If we can't get the request, we can't reply to the caller */
 		if (net_ratelimit())
@@ -916,7 +927,7 @@
 	copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
 	copy[1].length = sizeof(req->op);
 
-	(void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
+	(void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
 }
 
 
@@ -925,7 +936,8 @@
  * event ring in guest memory with VFDI reset events, then (re-initialise) the
  * event queue to raise an interrupt. The guest driver will then recover.
  */
-static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
+static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
+				     struct efx_buffer *buffer)
 {
 	struct efx_nic *efx = vf->efx;
 	struct efx_memcpy_req copy_req[4];
@@ -961,7 +973,7 @@
 			copy_req[k].to_addr = vf->evq0_addrs[pos + k];
 			copy_req[k].length = EFX_PAGE_SIZE;
 		}
-		rc = efx_sriov_memcpy(efx, copy_req, count);
+		rc = efx_siena_sriov_memcpy(efx, copy_req, count);
 		if (rc) {
 			if (net_ratelimit())
 				netif_err(efx, hw, efx->net_dev,
@@ -974,7 +986,7 @@
 	/* Reinitialise, arm and trigger evq0 */
 	abs_evq = abs_index(vf, 0);
 	buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
-	efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);
+	efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);
 
 	EFX_POPULATE_OWORD_3(reg,
 			     FRF_CZ_TIMER_Q_EN, 1,
@@ -992,19 +1004,19 @@
 	mutex_unlock(&vf->status_lock);
 }
 
-static void efx_sriov_reset_vf_work(struct work_struct *work)
+static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
 {
 	struct efx_vf *vf = container_of(work, struct efx_vf, req);
 	struct efx_nic *efx = vf->efx;
 	struct efx_buffer buf;
 
 	if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
-		efx_sriov_reset_vf(vf, &buf);
+		efx_siena_sriov_reset_vf(vf, &buf);
 		efx_nic_free_buffer(efx, &buf);
 	}
 }
 
-static void efx_sriov_handle_no_channel(struct efx_nic *efx)
+static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx)
 {
 	netif_err(efx, drv, efx->net_dev,
 		  "ERROR: IOV requires MSI-X and 1 additional interrupt"
@@ -1012,35 +1024,38 @@
 	efx->vf_count = 0;
 }
 
-static int efx_sriov_probe_channel(struct efx_channel *channel)
+static int efx_siena_sriov_probe_channel(struct efx_channel *channel)
 {
-	channel->efx->vfdi_channel = channel;
+	struct siena_nic_data *nic_data = channel->efx->nic_data;
+	nic_data->vfdi_channel = channel;
+
 	return 0;
 }
 
 static void
-efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+efx_siena_sriov_get_channel_name(struct efx_channel *channel,
+				 char *buf, size_t len)
 {
 	snprintf(buf, len, "%s-iov", channel->efx->name);
 }
 
-static const struct efx_channel_type efx_sriov_channel_type = {
-	.handle_no_channel	= efx_sriov_handle_no_channel,
-	.pre_probe		= efx_sriov_probe_channel,
+static const struct efx_channel_type efx_siena_sriov_channel_type = {
+	.handle_no_channel	= efx_siena_sriov_handle_no_channel,
+	.pre_probe		= efx_siena_sriov_probe_channel,
 	.post_remove		= efx_channel_dummy_op_void,
-	.get_name		= efx_sriov_get_channel_name,
+	.get_name		= efx_siena_sriov_get_channel_name,
 	/* no copy operation; channel must not be reallocated */
 	.keep_eventq		= true,
 };
 
-void efx_sriov_probe(struct efx_nic *efx)
+void efx_siena_sriov_probe(struct efx_nic *efx)
 {
 	unsigned count;
 
 	if (!max_vfs)
 		return;
 
-	if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count))
+	if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count))
 		return;
 	if (count > 0 && count > max_vfs)
 		count = max_vfs;
@@ -1048,17 +1063,20 @@
 	/* efx_nic_dimension_resources() will reduce vf_count as appropriate */
 	efx->vf_count = count;
 
-	efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type;
+	efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type;
 }
 
 /* Copy the list of individual addresses into the vfdi_status.peers
  * array and auxiliary pages, protected by %local_lock. Drop that lock
  * and then broadcast the address list to every VF.
  */
-static void efx_sriov_peer_work(struct work_struct *data)
+static void efx_siena_sriov_peer_work(struct work_struct *data)
 {
-	struct efx_nic *efx = container_of(data, struct efx_nic, peer_work);
-	struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
+	struct siena_nic_data *nic_data = container_of(data,
+						       struct siena_nic_data,
+						       peer_work);
+	struct efx_nic *efx = nic_data->efx;
+	struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
 	struct efx_vf *vf;
 	struct efx_local_addr *local_addr;
 	struct vfdi_endpoint *peer;
@@ -1068,11 +1086,11 @@
 	unsigned int peer_count;
 	unsigned int pos;
 
-	mutex_lock(&efx->local_lock);
+	mutex_lock(&nic_data->local_lock);
 
 	/* Move the existing peer pages off %local_page_list */
 	INIT_LIST_HEAD(&pages);
-	list_splice_tail_init(&efx->local_page_list, &pages);
+	list_splice_tail_init(&nic_data->local_page_list, &pages);
 
 	/* Populate the VF addresses starting from entry 1 (entry 0 is
 	 * the PF address)
@@ -1094,7 +1112,7 @@
 	}
 
 	/* Fill the remaining addresses */
-	list_for_each_entry(local_addr, &efx->local_addr_list, link) {
+	list_for_each_entry(local_addr, &nic_data->local_addr_list, link) {
 		ether_addr_copy(peer->mac_addr, local_addr->addr);
 		peer->tci = 0;
 		++peer;
@@ -1117,13 +1135,13 @@
 				list_del(&epp->link);
 			}
 
-			list_add_tail(&epp->link, &efx->local_page_list);
+			list_add_tail(&epp->link, &nic_data->local_page_list);
 			peer = (struct vfdi_endpoint *)epp->ptr;
 			peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
 		}
 	}
 	vfdi_status->peer_count = peer_count;
-	mutex_unlock(&efx->local_lock);
+	mutex_unlock(&nic_data->local_lock);
 
 	/* Free any now unused endpoint pages */
 	while (!list_empty(&pages)) {
@@ -1141,25 +1159,26 @@
 
 		mutex_lock(&vf->status_lock);
 		if (vf->status_addr)
-			__efx_sriov_push_vf_status(vf);
+			__efx_siena_sriov_push_vf_status(vf);
 		mutex_unlock(&vf->status_lock);
 	}
 }
 
-static void efx_sriov_free_local(struct efx_nic *efx)
+static void efx_siena_sriov_free_local(struct efx_nic *efx)
 {
+	struct siena_nic_data *nic_data = efx->nic_data;
 	struct efx_local_addr *local_addr;
 	struct efx_endpoint_page *epp;
 
-	while (!list_empty(&efx->local_addr_list)) {
-		local_addr = list_first_entry(&efx->local_addr_list,
+	while (!list_empty(&nic_data->local_addr_list)) {
+		local_addr = list_first_entry(&nic_data->local_addr_list,
 					      struct efx_local_addr, link);
 		list_del(&local_addr->link);
 		kfree(local_addr);
 	}
 
-	while (!list_empty(&efx->local_page_list)) {
-		epp = list_first_entry(&efx->local_page_list,
+	while (!list_empty(&nic_data->local_page_list)) {
+		epp = list_first_entry(&nic_data->local_page_list,
 				       struct efx_endpoint_page, link);
 		list_del(&epp->link);
 		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
@@ -1168,7 +1187,7 @@
 	}
 }
 
-static int efx_sriov_vf_alloc(struct efx_nic *efx)
+static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
 {
 	unsigned index;
 	struct efx_vf *vf;
@@ -1185,8 +1204,8 @@
 		vf->rx_filter_id = -1;
 		vf->tx_filter_mode = VF_TX_FILTER_AUTO;
 		vf->tx_filter_id = -1;
-		INIT_WORK(&vf->req, efx_sriov_vfdi);
-		INIT_WORK(&vf->reset_work, efx_sriov_reset_vf_work);
+		INIT_WORK(&vf->req, efx_siena_sriov_vfdi);
+		INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work);
 		init_waitqueue_head(&vf->flush_waitq);
 		mutex_init(&vf->status_lock);
 		mutex_init(&vf->txq_lock);
@@ -1195,7 +1214,7 @@
 	return 0;
 }
 
-static void efx_sriov_vfs_fini(struct efx_nic *efx)
+static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
 {
 	struct efx_vf *vf;
 	unsigned int pos;
@@ -1212,9 +1231,10 @@
 	}
 }
 
-static int efx_sriov_vfs_init(struct efx_nic *efx)
+static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
 {
 	struct pci_dev *pci_dev = efx->pci_dev;
+	struct siena_nic_data *nic_data = efx->nic_data;
 	unsigned index, devfn, sriov, buftbl_base;
 	u16 offset, stride;
 	struct efx_vf *vf;
@@ -1227,7 +1247,7 @@
 	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
 	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);
 
-	buftbl_base = efx->vf_buftbl_base;
+	buftbl_base = nic_data->vf_buftbl_base;
 	devfn = pci_dev->devfn + offset;
 	for (index = 0; index < efx->vf_count; ++index) {
 		vf = efx->vf + index;
@@ -1253,13 +1273,14 @@
 	return 0;
 
 fail:
-	efx_sriov_vfs_fini(efx);
+	efx_siena_sriov_vfs_fini(efx);
 	return rc;
 }
 
-int efx_sriov_init(struct efx_nic *efx)
+int efx_siena_sriov_init(struct efx_nic *efx)
 {
 	struct net_device *net_dev = efx->net_dev;
+	struct siena_nic_data *nic_data = efx->nic_data;
 	struct vfdi_status *vfdi_status;
 	int rc;
 
@@ -1271,15 +1292,15 @@
 	if (efx->vf_count == 0)
 		return 0;
 
-	rc = efx_sriov_cmd(efx, true, NULL, NULL);
+	rc = efx_siena_sriov_cmd(efx, true, NULL, NULL);
 	if (rc)
 		goto fail_cmd;
 
-	rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status),
-				  GFP_KERNEL);
+	rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status,
+				  sizeof(*vfdi_status), GFP_KERNEL);
 	if (rc)
 		goto fail_status;
-	vfdi_status = efx->vfdi_status.addr;
+	vfdi_status = nic_data->vfdi_status.addr;
 	memset(vfdi_status, 0, sizeof(*vfdi_status));
 	vfdi_status->version = 1;
 	vfdi_status->length = sizeof(*vfdi_status);
@@ -1289,16 +1310,16 @@
 	vfdi_status->peer_count = 1 + efx->vf_count;
 	vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;
 
-	rc = efx_sriov_vf_alloc(efx);
+	rc = efx_siena_sriov_vf_alloc(efx);
 	if (rc)
 		goto fail_alloc;
 
-	mutex_init(&efx->local_lock);
-	INIT_WORK(&efx->peer_work, efx_sriov_peer_work);
-	INIT_LIST_HEAD(&efx->local_addr_list);
-	INIT_LIST_HEAD(&efx->local_page_list);
+	mutex_init(&nic_data->local_lock);
+	INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work);
+	INIT_LIST_HEAD(&nic_data->local_addr_list);
+	INIT_LIST_HEAD(&nic_data->local_page_list);
 
-	rc = efx_sriov_vfs_init(efx);
+	rc = efx_siena_sriov_vfs_init(efx);
 	if (rc)
 		goto fail_vfs;
 
@@ -1307,7 +1328,7 @@
 	efx->vf_init_count = efx->vf_count;
 	rtnl_unlock();
 
-	efx_sriov_usrev(efx, true);
+	efx_siena_sriov_usrev(efx, true);
 
 	/* At this point we must be ready to accept VFDI requests */
 
@@ -1321,34 +1342,35 @@
 	return 0;
 
 fail_pci:
-	efx_sriov_usrev(efx, false);
+	efx_siena_sriov_usrev(efx, false);
 	rtnl_lock();
 	efx->vf_init_count = 0;
 	rtnl_unlock();
-	efx_sriov_vfs_fini(efx);
+	efx_siena_sriov_vfs_fini(efx);
 fail_vfs:
-	cancel_work_sync(&efx->peer_work);
-	efx_sriov_free_local(efx);
+	cancel_work_sync(&nic_data->peer_work);
+	efx_siena_sriov_free_local(efx);
 	kfree(efx->vf);
 fail_alloc:
-	efx_nic_free_buffer(efx, &efx->vfdi_status);
+	efx_nic_free_buffer(efx, &nic_data->vfdi_status);
 fail_status:
-	efx_sriov_cmd(efx, false, NULL, NULL);
+	efx_siena_sriov_cmd(efx, false, NULL, NULL);
 fail_cmd:
 	return rc;
 }
 
-void efx_sriov_fini(struct efx_nic *efx)
+void efx_siena_sriov_fini(struct efx_nic *efx)
 {
 	struct efx_vf *vf;
 	unsigned int pos;
+	struct siena_nic_data *nic_data = efx->nic_data;
 
 	if (efx->vf_init_count == 0)
 		return;
 
 	/* Disable all interfaces to reconfiguration */
-	BUG_ON(efx->vfdi_channel->enabled);
-	efx_sriov_usrev(efx, false);
+	BUG_ON(nic_data->vfdi_channel->enabled);
+	efx_siena_sriov_usrev(efx, false);
 	rtnl_lock();
 	efx->vf_init_count = 0;
 	rtnl_unlock();
@@ -1359,19 +1381,19 @@
 		cancel_work_sync(&vf->req);
 		cancel_work_sync(&vf->reset_work);
 	}
-	cancel_work_sync(&efx->peer_work);
+	cancel_work_sync(&nic_data->peer_work);
 
 	pci_disable_sriov(efx->pci_dev);
 
 	/* Tear down back-end state */
-	efx_sriov_vfs_fini(efx);
-	efx_sriov_free_local(efx);
+	efx_siena_sriov_vfs_fini(efx);
+	efx_siena_sriov_free_local(efx);
 	kfree(efx->vf);
-	efx_nic_free_buffer(efx, &efx->vfdi_status);
-	efx_sriov_cmd(efx, false, NULL, NULL);
+	efx_nic_free_buffer(efx, &nic_data->vfdi_status);
+	efx_siena_sriov_cmd(efx, false, NULL, NULL);
 }
 
-void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event)
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
 {
 	struct efx_nic *efx = channel->efx;
 	struct efx_vf *vf;
@@ -1428,7 +1450,7 @@
 	vf->req_seqno = seq + 1;
 }
 
-void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
 {
 	struct efx_vf *vf;
 
@@ -1445,18 +1467,19 @@
 	vf->evq0_count = 0;
 }
 
-void efx_sriov_mac_address_changed(struct efx_nic *efx)
+void efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
 {
-	struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
+	struct siena_nic_data *nic_data = efx->nic_data;
+	struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
 
 	if (!efx->vf_init_count)
 		return;
 	ether_addr_copy(vfdi_status->peers[0].mac_addr,
 			efx->net_dev->dev_addr);
-	queue_work(vfdi_workqueue, &efx->peer_work);
+	queue_work(vfdi_workqueue, &nic_data->peer_work);
 }
 
-void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 {
 	struct efx_vf *vf;
 	unsigned queue, qid;
@@ -1475,7 +1498,7 @@
 		wake_up(&vf->flush_waitq);
 }
 
-void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 {
 	struct efx_vf *vf;
 	unsigned ev_failed, queue, qid;
@@ -1500,7 +1523,7 @@
 }
 
 /* Called from napi. Schedule the reset work item */
-void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
 {
 	struct efx_vf *vf;
 	unsigned int rel;
@@ -1516,7 +1539,7 @@
 }
 
 /* Reset all VFs */
-void efx_sriov_reset(struct efx_nic *efx)
+void efx_siena_sriov_reset(struct efx_nic *efx)
 {
 	unsigned int vf_i;
 	struct efx_buffer buf;
@@ -1527,15 +1550,15 @@
 	if (efx->vf_init_count == 0)
 		return;
 
-	efx_sriov_usrev(efx, true);
-	(void)efx_sriov_cmd(efx, true, NULL, NULL);
+	efx_siena_sriov_usrev(efx, true);
+	(void)efx_siena_sriov_cmd(efx, true, NULL, NULL);
 
 	if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
 		return;
 
 	for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
 		vf = efx->vf + vf_i;
-		efx_sriov_reset_vf(vf, &buf);
+		efx_siena_sriov_reset_vf(vf, &buf);
 	}
 
 	efx_nic_free_buffer(efx, &buf);
@@ -1543,8 +1566,8 @@
 
 int efx_init_sriov(void)
 {
-	/* A single threaded workqueue is sufficient. efx_sriov_vfdi() and
-	 * efx_sriov_peer_work() spend almost all their time sleeping for
+	/* A single threaded workqueue is sufficient. efx_siena_sriov_vfdi() and
+	 * efx_siena_sriov_peer_work() spend almost all their time sleeping for
 	 * MCDI to complete anyway
 	 */
 	vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
@@ -1559,7 +1582,7 @@
 	destroy_workqueue(vfdi_workqueue);
 }
 
-int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_vf *vf;
@@ -1570,14 +1593,14 @@
 
 	mutex_lock(&vf->status_lock);
 	ether_addr_copy(vf->addr.mac_addr, mac);
-	__efx_sriov_update_vf_addr(vf);
+	__efx_siena_sriov_update_vf_addr(vf);
 	mutex_unlock(&vf->status_lock);
 
 	return 0;
 }
 
-int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
-			  u16 vlan, u8 qos)
+int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
+				u16 vlan, u8 qos)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_vf *vf;
@@ -1590,14 +1613,14 @@
 	mutex_lock(&vf->status_lock);
 	tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
 	vf->addr.tci = htons(tci);
-	__efx_sriov_update_vf_addr(vf);
+	__efx_siena_sriov_update_vf_addr(vf);
 	mutex_unlock(&vf->status_lock);
 
 	return 0;
 }
 
-int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
-			      bool spoofchk)
+int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+				    bool spoofchk)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_vf *vf;
@@ -1620,8 +1643,8 @@
 	return rc;
 }
 
-int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
-			    struct ifla_vf_info *ivi)
+int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+				  struct ifla_vf_info *ivi)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_vf *vf;
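
With the SR-IOV state moved into struct siena_nic_data, work items such as
peer_work can no longer recover struct efx_nic directly; the handler now
maps the embedded work_struct back to its owner with container_of() and
follows the new efx back-pointer, as efx_siena_sriov_peer_work() does
above. A condensed sketch of that pattern (assuming the struct layout added
to nic.h earlier in this series):

	#include <linux/workqueue.h>

	static void example_peer_work(struct work_struct *data)
	{
		/* container_of() turns the embedded member back into
		 * the enclosing siena_nic_data ... */
		struct siena_nic_data *nic_data =
			container_of(data, struct siena_nic_data,
				     peer_work);
		/* ... and the back-pointer restores the device handle
		 * the old code got by embedding peer_work in efx. */
		struct efx_nic *efx = nic_data->efx;

		(void)efx;	/* broadcast the peer list as before */
	}
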
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index b02d4a3..33b85ba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -18,56 +18,14 @@
 	depends on STMMAC_ETH
 	default y
 	---help---
-	  This selects the platform specific bus support for
-	  the stmmac device driver. This is the driver used
-	  on many embedded STM platforms based on ARM and SuperH
-	  processors.
+	  This selects the platform specific bus support for the stmmac driver.
+	  This is the driver used on several SoCs:
+	  STi, Allwinner, Amlogic Meson, Altera SOCFPGA.
+
 	  If you have a controller with this interface, say Y or M here.
 
 	  If unsure, say N.
 
-config DWMAC_MESON
-	bool "Amlogic Meson dwmac support"
-	depends on STMMAC_PLATFORM && ARCH_MESON
-	help
-	  Support for Ethernet controller on Amlogic Meson SoCs.
-
-	  This selects the Amlogic Meson SoC glue layer support for
-	  the stmmac device driver. This driver is used for Meson6 and
-	  Meson8 SoCs.
-
-config DWMAC_SOCFPGA
-	bool "SOCFPGA dwmac support"
-	depends on STMMAC_PLATFORM && MFD_SYSCON && (ARCH_SOCFPGA || COMPILE_TEST)
-	help
-	  Support for ethernet controller on Altera SOCFPGA
-
-	  This selects the Altera SOCFPGA SoC glue layer support
-	  for the stmmac device driver. This driver is used for
-	  arria5 and cyclone5 FPGA SoCs.
-
-config DWMAC_SUNXI
-	bool "Allwinner GMAC support"
-	depends on STMMAC_PLATFORM && ARCH_SUNXI
-	default y
-	---help---
-	  Support for Allwinner A20/A31 GMAC ethernet controllers.
-
-	  This selects Allwinner SoC glue layer support for the
-	  stmmac device driver. This driver is used for A20/A31
-	  GMAC 	  ethernet controller.
-
-config DWMAC_STI
-	bool "STi GMAC support"
-	depends on STMMAC_PLATFORM && ARCH_STI
-	default y
-	---help---
-	  Support for ethernet controller on STi SOCs.
-
-	  This selects STi SoC glue layer support for the stmmac
-	  device driver. This driver is used on for the STi series
-	  SOCs GMAC ethernet controller.
-
 config STMMAC_PCI
 	bool "STMMAC PCI bus support"
 	depends on STMMAC_ETH && PCI
@@ -79,22 +37,4 @@
 	  D1215994A VIRTEX FPGA board.
 
 	  If unsure, say N.
-
-config STMMAC_DEBUG_FS
-	bool "Enable monitoring via sysFS "
-	default n
-	depends on STMMAC_ETH && DEBUG_FS
-	---help---
-	  The stmmac entry in /sys reports DMA TX/RX rings
-	  or (if supported) the HW cap register.
-
-config STMMAC_DA
-	bool "STMMAC DMA arbitration scheme"
-	default n
-	---help---
-	  Selecting this option, rx has priority over Tx (only for Giga
-	  Ethernet device).
-	  By default, the DMA arbitration scheme is based on Round-robin
-	  (rx:tx priority is 1:1).
-
 endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 0533d0b..034da70 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,10 +1,8 @@
 obj-$(CONFIG_STMMAC_ETH) += stmmac.o
-stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
 stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
-stmmac-$(CONFIG_DWMAC_MESON) += dwmac-meson.o
-stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
-stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
-stmmac-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o
+stmmac-$(CONFIG_STMMAC_PLATFORM) +=	stmmac_platform.o dwmac-meson.o \
+					dwmac-sunxi.o dwmac-sti.o \
+					dwmac-socfpga.o
 stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o	\
 	      chain_mode.o dwmac_lib.o dwmac1000_core.o  dwmac1000_dma.o \
 	      dwmac100_core.o dwmac100_dma.o enh_desc.o  norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 593e6c4..12a174e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -341,6 +341,9 @@
 	int (*get_rx_timestamp_status) (void *desc, u32 ats);
 };
 
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
+
 struct stmmac_dma_ops {
 	/* DMA core initialization */
 	int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
@@ -410,6 +413,8 @@
 	 u64(*get_systime) (void __iomem *ioaddr);
 };
 
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+
 struct mac_link {
 	int port;
 	int duplex;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 0c2058a..59d92e8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -70,10 +70,6 @@
 	if (mb)
 		value |= DMA_BUS_MODE_MB;
 
-#ifdef CONFIG_STMMAC_DA
-	value |= DMA_BUS_MODE_DA;	/* Rx has priority over tx */
-#endif
-
 	if (atds)
 		value |= DMA_BUS_MODE_ATDS;
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c3c4065..709798b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -122,9 +122,7 @@
 int stmmac_mdio_register(struct net_device *ndev);
 int stmmac_mdio_reset(struct mii_bus *mii);
 void stmmac_set_ethtool_ops(struct net_device *netdev);
-extern const struct stmmac_desc_ops enh_desc_ops;
-extern const struct stmmac_desc_ops ndesc_ops;
-extern const struct stmmac_hwtimestamp stmmac_ptp;
+
 int stmmac_ptp_register(struct stmmac_priv *priv);
 void stmmac_ptp_unregister(struct stmmac_priv *priv);
 int stmmac_resume(struct net_device *ndev);
@@ -137,20 +135,13 @@
 bool stmmac_eee_init(struct stmmac_priv *priv);
 
 #ifdef CONFIG_STMMAC_PLATFORM
-#ifdef CONFIG_DWMAC_MESON
 extern const struct stmmac_of_data meson6_dwmac_data;
-#endif
-#ifdef CONFIG_DWMAC_SUNXI
 extern const struct stmmac_of_data sun7i_gmac_data;
-#endif
-#ifdef CONFIG_DWMAC_STI
 extern const struct stmmac_of_data stih4xx_dwmac_data;
 extern const struct stmmac_of_data stid127_dwmac_data;
-#endif
-#ifdef CONFIG_DWMAC_SOCFPGA
 extern const struct stmmac_of_data socfpga_gmac_data;
-#endif
 extern struct platform_driver stmmac_pltfr_driver;
+
 static inline int stmmac_register_platform(void)
 {
 	int err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 18c46bb..fa598b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -44,10 +44,10 @@
 #include <linux/slab.h>
 #include <linux/prefetch.h>
 #include <linux/pinctrl/consumer.h>
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
-#endif /* CONFIG_STMMAC_DEBUG_FS */
+#endif /* CONFIG_DEBUG_FS */
 #include <linux/net_tstamp.h>
 #include "stmmac_ptp.h"
 #include "stmmac.h"
@@ -116,7 +116,7 @@
 
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
 static int stmmac_init_fs(struct net_device *dev);
 static void stmmac_exit_fs(void);
 #endif
@@ -1688,7 +1688,7 @@
 	if (ret && ret != -EOPNOTSUPP)
 		pr_warn("%s: failed PTP initialisation\n", __func__);
 
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
 	ret = stmmac_init_fs(dev);
 	if (ret < 0)
 		pr_warn("%s: failed debugFS registration\n", __func__);
@@ -1870,7 +1870,7 @@
 
 	netif_carrier_off(dev);
 
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
 	stmmac_exit_fs();
 #endif
 
@@ -2457,7 +2457,7 @@
 	return ret;
 }
 
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
 static struct dentry *stmmac_fs_dir;
 static struct dentry *stmmac_rings_status;
 static struct dentry *stmmac_dma_cap;
@@ -2642,7 +2642,7 @@
 	debugfs_remove(stmmac_dma_cap);
 	debugfs_remove(stmmac_fs_dir);
 }
-#endif /* CONFIG_STMMAC_DEBUG_FS */
+#endif /* CONFIG_DEBUG_FS */
 
 static const struct net_device_ops stmmac_netdev_ops = {
 	.ndo_open = stmmac_open,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index e17a970..5084699 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -71,64 +71,49 @@
 static int stmmac_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *id)
 {
-	int ret = 0;
-	void __iomem *addr = NULL;
-	struct stmmac_priv *priv = NULL;
+	struct stmmac_priv *priv;
 	int i;
+	int ret;
 
 	/* Enable pci device */
-	ret = pci_enable_device(pdev);
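+	/* pcim_* resources are device-managed and released automatically
+	 * on probe failure or driver detach, so no explicit unwinding
+	 * is needed on the error paths. */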
+	ret = pcim_enable_device(pdev);
 	if (ret) {
-		pr_err("%s : ERROR: failed to enable %s device\n", __func__,
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
+			__func__);
 		return ret;
 	}
-	if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
-		pr_err("%s: ERROR: failed to get PCI region\n", __func__);
-		ret = -ENODEV;
-		goto err_out_req_reg_failed;
-	}
 
 	/* Get the base address of device */
-	for (i = 0; i <= 5; i++) {
+	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
 		if (pci_resource_len(pdev, i) == 0)
 			continue;
-		addr = pci_iomap(pdev, i, 0);
-		if (addr == NULL) {
-			pr_err("%s: ERROR: cannot map register memory aborting",
-			       __func__);
-			ret = -EIO;
-			goto err_out_map_failed;
-		}
+		ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
+		if (ret)
+			return ret;
 		break;
 	}
+
 	pci_set_master(pdev);
 
 	stmmac_default_data();
 
-	priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
+	priv = stmmac_dvr_probe(&pdev->dev, &plat_dat,
+				pcim_iomap_table(pdev)[i]);
 	if (IS_ERR(priv)) {
-		pr_err("%s: main driver probe failed", __func__);
-		ret = PTR_ERR(priv);
-		goto err_out;
+		dev_err(&pdev->dev, "%s: main driver probe failed\n", __func__);
+		return PTR_ERR(priv);
 	}
 	priv->dev->irq = pdev->irq;
 	priv->wol_irq = pdev->irq;
 
 	pci_set_drvdata(pdev, priv->dev);
 
-	pr_debug("STMMAC platform driver registration completed");
+	dev_dbg(&pdev->dev, "STMMAC PCI driver registration completed\n");
 
 	return 0;
-
-err_out:
-	pci_clear_master(pdev);
-err_out_map_failed:
-	pci_release_regions(pdev);
-err_out_req_reg_failed:
-	pci_disable_device(pdev);
-
-	return ret;
 }
 
 /**
@@ -141,39 +123,30 @@
 static void stmmac_pci_remove(struct pci_dev *pdev)
 {
 	struct net_device *ndev = pci_get_drvdata(pdev);
-	struct stmmac_priv *priv = netdev_priv(ndev);
 
 	stmmac_dvr_remove(ndev);
-
-	pci_iounmap(pdev, priv->ioaddr);
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
 }
 
-#ifdef CONFIG_PM
-static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int stmmac_pci_suspend(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *ndev = pci_get_drvdata(pdev);
-	int ret;
 
-	ret = stmmac_suspend(ndev);
-	pci_save_state(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
-	return ret;
+	return stmmac_suspend(ndev);
 }
 
-static int stmmac_pci_resume(struct pci_dev *pdev)
+static int stmmac_pci_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *ndev = pci_get_drvdata(pdev);
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-
 	return stmmac_resume(ndev);
 }
 #endif
 
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
+
 #define STMMAC_VENDOR_ID 0x700
 #define STMMAC_DEVICE_ID 0x1108
 
@@ -190,10 +163,9 @@
 	.id_table = stmmac_id_table,
 	.probe = stmmac_pci_probe,
 	.remove = stmmac_pci_remove,
-#ifdef CONFIG_PM
-	.suspend = stmmac_pci_suspend,
-	.resume = stmmac_pci_resume,
-#endif
+	.driver         = {
+		.pm     = &stmmac_pm_ops,
+	},
 };
 
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index db56fa7..f4fe854 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -30,22 +30,14 @@
 #include "stmmac.h"
 
 static const struct of_device_id stmmac_dt_ids[] = {
-#ifdef CONFIG_DWMAC_MESON
+	/* SoC specific glue layers should come before generic bindings */
 	{ .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
-#endif
-#ifdef CONFIG_DWMAC_SUNXI
 	{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
-#endif
-#ifdef CONFIG_DWMAC_STI
 	{ .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
 	{ .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
 	{ .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
 	{ .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
-#endif
-#ifdef CONFIG_DWMAC_SOCFPGA
 	{ .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
-#endif
-	/* SoC specific glue layers should come before generic bindings */
 	{ .compatible = "st,spear600-gmac"},
 	{ .compatible = "snps,dwmac-3.610"},
 	{ .compatible = "snps,dwmac-3.70a"},
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 3652afd..5c5fb59 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -40,6 +40,8 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
+#define	VNET_MAX_TXQS		16
+
 /* Heuristic for the number of times to exponentially backoff and
  * retry sending an LDC trigger when EAGAIN is encountered
  */
@@ -311,9 +313,7 @@
 
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += len;
-
-	netif_rx(skb);
-
+	napi_gro_receive(&port->napi, skb);
 	return 0;
 
 out_free_skb:
@@ -430,6 +430,7 @@
 	struct vio_driver_state *vio = &port->vio;
 	int err;
 
+	BUG_ON(desc == NULL);
 	if (IS_ERR(desc))
 		return PTR_ERR(desc);
 
@@ -456,10 +457,11 @@
 }
 
 static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
-			u32 start, u32 end)
+			u32 start, u32 end, int *npkts, int budget)
 {
 	struct vio_driver_state *vio = &port->vio;
 	int ack_start = -1, ack_end = -1;
+	bool send_ack = true;
 
 	end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
 
@@ -471,6 +473,7 @@
 			return err;
 		if (err != 0)
 			break;
+		(*npkts)++;
 		if (ack_start == -1)
 			ack_start = start;
 		ack_end = start;
@@ -482,13 +485,29 @@
 				return err;
 			ack_start = -1;
 		}
+		if ((*npkts) >= budget) {
+			send_ack = false;
+			break;
+		}
 	}
 	if (unlikely(ack_start == -1))
 		ack_start = ack_end = prev_idx(start, dr);
-	return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
+	if (send_ack) {
+		port->napi_resume = false;
+		return vnet_send_ack(port, dr, ack_start, ack_end,
+				     VIO_DRING_STOPPED);
+	} else {
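+		/* Out of budget in mid-dring: hold off the ACK and
+		 * remember where to resume on the next NAPI poll.
+		 */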
+		port->napi_resume = true;
+		port->napi_stop_idx = ack_end;
+		return 1;
+	}
 }
 
-static int vnet_rx(struct vnet_port *port, void *msgbuf)
+static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
+		   int budget)
 {
 	struct vio_dring_data *pkt = msgbuf;
 	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
@@ -505,11 +521,13 @@
 		return 0;
 	}
 
-	dr->rcv_nxt++;
+	if (!port->napi_resume)
+		dr->rcv_nxt++;
 
 	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */
 
-	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
+	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
+			    npkts, budget);
 }
 
 static int idx_is_pending(struct vio_dring_state *dr, u32 end)
@@ -535,6 +553,8 @@
 	struct vnet *vp;
 	u32 end;
 	struct vio_net_desc *desc;
+	struct netdev_queue *txq;
+
 	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
 		return 0;
 
@@ -542,9 +562,12 @@
 	if (unlikely(!idx_is_pending(dr, end)))
 		return 0;
 
+	vp = port->vp;
+	dev = vp->dev;
 	/* sync for race conditions with vnet_start_xmit() and tell xmit it
 	 * is time to send a trigger.
 	 */
+	netif_tx_lock(dev);
 	dr->cons = next_idx(end, dr);
 	desc = vio_dring_entry(dr, dr->cons);
 	if (desc->hdr.state == VIO_DESC_READY && port->start_cons) {
@@ -559,11 +582,10 @@
 	} else {
 		port->start_cons = true;
 	}
+	netif_tx_unlock(dev);
 
-
-	vp = port->vp;
-	dev = vp->dev;
-	if (unlikely(netif_queue_stopped(dev) &&
+	txq = netdev_get_tx_queue(dev, port->q_index);
+	if (unlikely(netif_tx_queue_stopped(txq) &&
 		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
 		return 1;
 
@@ -591,58 +613,62 @@
 	return 0;
 }
 
-static void maybe_tx_wakeup(unsigned long param)
+/* Got back a STOPPED LDC message on port. If the queue is stopped,
+ * wake it up so that we'll send out another START message at the
+ * next TX.
+ */
+static void maybe_tx_wakeup(struct vnet_port *port)
 {
-	struct vnet *vp = (struct vnet *)param;
-	struct net_device *dev = vp->dev;
+	struct netdev_queue *txq;
 
-	netif_tx_lock(dev);
-	if (likely(netif_queue_stopped(dev))) {
-		struct vnet_port *port;
-		int wake = 1;
+	txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
+	__netif_tx_lock(txq, smp_processor_id());
+	if (likely(netif_tx_queue_stopped(txq))) {
+		struct vio_dring_state *dr;
 
-		list_for_each_entry(port, &vp->port_list, list) {
-			struct vio_dring_state *dr;
-
-			dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-			if (vnet_tx_dring_avail(dr) <
-			    VNET_TX_WAKEUP_THRESH(dr)) {
-				wake = 0;
-				break;
-			}
-		}
-		if (wake)
-			netif_wake_queue(dev);
+		dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+		netif_tx_wake_queue(txq);
 	}
-	netif_tx_unlock(dev);
+	__netif_tx_unlock(txq);
 }
 
-static void vnet_event(void *arg, int event)
+static inline bool port_is_up(struct vnet_port *vnet)
 {
-	struct vnet_port *port = arg;
+	struct vio_driver_state *vio = &vnet->vio;
+
+	return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
+static int vnet_event_napi(struct vnet_port *port, int budget)
+{
 	struct vio_driver_state *vio = &port->vio;
-	unsigned long flags;
 	int tx_wakeup, err;
+	int npkts = 0;
+	int event = (port->rx_event & LDC_EVENT_RESET);
 
-	spin_lock_irqsave(&vio->lock, flags);
-
+ldc_ctrl:
 	if (unlikely(event == LDC_EVENT_RESET ||
 		     event == LDC_EVENT_UP)) {
 		vio_link_state_change(vio, event);
-		spin_unlock_irqrestore(&vio->lock, flags);
 
 		if (event == LDC_EVENT_RESET) {
 			port->rmtu = 0;
 			vio_port_up(vio);
 		}
-		return;
+		port->rx_event = 0;
+		return 0;
 	}
+	/* We may have multiple LDC events in rx_event. Unroll send_events() */
+	event = (port->rx_event & LDC_EVENT_UP);
+	port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP);
+	if (event == LDC_EVENT_UP)
+		goto ldc_ctrl;
+	event = port->rx_event;
+	if (!(event & LDC_EVENT_DATA_READY))
+		return 0;
 
-	if (unlikely(event != LDC_EVENT_DATA_READY)) {
-		pr_warn("Unexpected LDC event %d\n", event);
-		spin_unlock_irqrestore(&vio->lock, flags);
-		return;
-	}
+	/* We don't expect any bits other than RESET, UP and DATA_READY */
+	BUG_ON(event != LDC_EVENT_DATA_READY);
 
 	tx_wakeup = err = 0;
 	while (1) {
@@ -651,6 +677,24 @@
 			u64 raw[8];
 		} msgbuf;
 
+		if (port->napi_resume) {
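+			/* A previous NAPI pass ran out of budget mid-dring;
+			 * synthesize a VIO_DRING_DATA descriptor to resume
+			 * RX from the saved stop index instead of reading
+			 * the LDC channel. */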
+			struct vio_dring_data *pkt =
+				(struct vio_dring_data *)&msgbuf;
+			struct vio_dring_state *dr =
+				&port->vio.drings[VIO_DRIVER_RX_RING];
+
+			pkt->tag.type = VIO_TYPE_DATA;
+			pkt->tag.stype = VIO_SUBTYPE_INFO;
+			pkt->tag.stype_env = VIO_DRING_DATA;
+			pkt->seq = dr->rcv_nxt;
+			pkt->start_idx = next_idx(port->napi_stop_idx, dr);
+			pkt->end_idx = -1;
+			goto napi_resume;
+		}
 		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
 		if (unlikely(err < 0)) {
 			if (err == -ECONNRESET)
@@ -667,10 +707,22 @@
 		err = vio_validate_sid(vio, &msgbuf.tag);
 		if (err < 0)
 			break;
-
+napi_resume:
 		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
 			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
-				err = vnet_rx(port, &msgbuf);
+				if (!port_is_up(port)) {
+					/* failures like handshake_failure()
+					 * may have cleaned up dring, but
+					 * NAPI polling may bring us here.
+					 */
+					err = -ECONNRESET;
+					break;
+				}
+				err = vnet_rx(port, &msgbuf, &npkts, budget);
+				if (npkts >= budget)
+					break;
+				if (npkts == 0)
+					break;
 			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
 				err = vnet_ack(port, &msgbuf);
 				if (err > 0)
@@ -691,15 +743,33 @@
 		if (err == -ECONNRESET)
 			break;
 	}
-	spin_unlock(&vio->lock);
-	/* Kick off a tasklet to wake the queue.  We cannot call
-	 * maybe_tx_wakeup directly here because we could deadlock on
-	 * netif_tx_lock() with dev_watchdog()
-	 */
 	if (unlikely(tx_wakeup && err != -ECONNRESET))
-		tasklet_schedule(&port->vp->vnet_tx_wakeup);
+		maybe_tx_wakeup(port);
+	return npkts;
+}
 
-	local_irq_restore(flags);
+static int vnet_poll(struct napi_struct *napi, int budget)
+{
+	struct vnet_port *port = container_of(napi, struct vnet_port, napi);
+	struct vio_driver_state *vio = &port->vio;
+	int processed = vnet_event_napi(port, budget);
+
+	if (processed < budget) {
+		napi_complete(napi);
+		port->rx_event &= ~LDC_EVENT_DATA_READY;
+		vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
+	}
+	return processed;
+}
+
+static void vnet_event(void *arg, int event)
+{
+	struct vnet_port *port = arg;
+	struct vio_driver_state *vio = &port->vio;
+
+	port->rx_event |= event;
+	vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
+	napi_schedule(&port->napi);
 }
 
 static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
@@ -746,26 +817,19 @@
 	return err;
 }
 
-static inline bool port_is_up(struct vnet_port *vnet)
-{
-	struct vio_driver_state *vio = &vnet->vio;
-
-	return !!(vio->hs_state & VIO_HS_COMPLETE);
-}
-
 struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
 {
 	unsigned int hash = vnet_hashfn(skb->data);
 	struct hlist_head *hp = &vp->port_hash[hash];
 	struct vnet_port *port;
 
-	hlist_for_each_entry(port, hp, hash) {
+	hlist_for_each_entry_rcu(port, hp, hash) {
 		if (!port_is_up(port))
 			continue;
 		if (ether_addr_equal(port->raddr, skb->data))
 			return port;
 	}
-	list_for_each_entry(port, &vp->port_list, list) {
+	list_for_each_entry_rcu(port, &vp->port_list, list) {
 		if (!port->switch_port)
 			continue;
 		if (!port_is_up(port))
@@ -775,18 +839,6 @@
 	return NULL;
 }
 
-struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
-{
-	struct vnet_port *ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&vp->lock, flags);
-	ret = __tx_port_find(vp, skb);
-	spin_unlock_irqrestore(&vp->lock, flags);
-
-	return ret;
-}
-
 static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
 					  unsigned *pending)
 {
@@ -847,11 +899,10 @@
 	struct vnet_port *port = (struct vnet_port *)port0;
 	struct sk_buff *freeskbs;
 	unsigned pending;
-	unsigned long flags;
 
-	spin_lock_irqsave(&port->vio.lock, flags);
+	netif_tx_lock(port->vp->dev);
 	freeskbs = vnet_clean_tx_ring(port, &pending);
-	spin_unlock_irqrestore(&port->vio.lock, flags);
+	netif_tx_unlock(port->vp->dev);
 
 	vnet_free_skbs(freeskbs);
 
@@ -898,28 +949,42 @@
 	return skb;
 }
 
+static u16
+vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
+		  void *accel_priv, select_queue_fallback_t fallback)
+{
+	struct vnet *vp = netdev_priv(dev);
+	struct vnet_port *port = __tx_port_find(vp, skb);
+
+	/* __tx_port_find() may return NULL; fall back to queue 0 */
+	if (!port)
+		return 0;
+	return port->q_index;
+}
+
 static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct vnet *vp = netdev_priv(dev);
-	struct vnet_port *port = tx_port_find(vp, skb);
+	struct vnet_port *port = NULL;
 	struct vio_dring_state *dr;
 	struct vio_net_desc *d;
-	unsigned long flags;
 	unsigned int len;
 	struct sk_buff *freeskbs = NULL;
 	int i, err, txi;
 	void *start = NULL;
 	int nlen = 0;
 	unsigned pending = 0;
-
-	if (unlikely(!port))
-		goto out_dropped;
+	struct netdev_queue *txq;
 
 	skb = vnet_skb_shape(skb, &start, &nlen);
-
 	if (unlikely(!skb))
 		goto out_dropped;
 
+	rcu_read_lock();
+	port = __tx_port_find(vp, skb);
+	if (unlikely(!port))
+		goto out_dropped;
+
 	if (skb->len > port->rmtu) {
 		unsigned long localmtu = port->rmtu - ETH_HLEN;
 
@@ -937,6 +999,7 @@
 			fl4.saddr = ip_hdr(skb)->saddr;
 
 			rt = ip_route_output_key(dev_net(dev), &fl4);
+			rcu_read_unlock();
 			if (!IS_ERR(rt)) {
 				skb_dst_set(skb, &rt->dst);
 				icmp_send(skb, ICMP_DEST_UNREACH,
@@ -951,18 +1014,18 @@
 		goto out_dropped;
 	}
 
-	spin_lock_irqsave(&port->vio.lock, flags);
-
 	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+	i = skb_get_queue_mapping(skb);
+	txq = netdev_get_tx_queue(dev, i);
 	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
-		if (!netif_queue_stopped(dev)) {
-			netif_stop_queue(dev);
+		if (!netif_tx_queue_stopped(txq)) {
+			netif_tx_stop_queue(txq);
 
 			/* This is a hard error, log it. */
 			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 			dev->stats.tx_errors++;
 		}
-		spin_unlock_irqrestore(&port->vio.lock, flags);
+		rcu_read_unlock();
 		return NETDEV_TX_BUSY;
 	}
 
@@ -986,7 +1049,7 @@
 			     (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
 	if (err < 0) {
 		netdev_info(dev, "tx buffer map error %d\n", err);
-		goto out_dropped_unlock;
+		goto out_dropped;
 	}
 	port->tx_bufs[txi].ncookies = err;
 
@@ -1039,7 +1102,7 @@
 		netdev_info(dev, "TX trigger error %d\n", err);
 		d->hdr.state = VIO_DESC_FREE;
 		dev->stats.tx_carrier_errors++;
-		goto out_dropped_unlock;
+		goto out_dropped;
 	}
 
 ldc_start_done:
@@ -1050,31 +1113,29 @@
 
 	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
 	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
-			netif_wake_queue(dev);
+			netif_tx_wake_queue(txq);
 	}
 
-	spin_unlock_irqrestore(&port->vio.lock, flags);
+	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
+	rcu_read_unlock();
 
 	vnet_free_skbs(freeskbs);
 
-	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
-
 	return NETDEV_TX_OK;
 
-out_dropped_unlock:
-	spin_unlock_irqrestore(&port->vio.lock, flags);
-
 out_dropped:
-	if (skb)
-		dev_kfree_skb(skb);
-	vnet_free_skbs(freeskbs);
 	if (pending)
 		(void)mod_timer(&port->clean_timer,
 				jiffies + VNET_CLEAN_TIMEOUT);
 	else if (port)
 		del_timer(&port->clean_timer);
+	if (port)
+		rcu_read_unlock();
+	if (skb)
+		dev_kfree_skb(skb);
+	vnet_free_skbs(freeskbs);
 	dev->stats.tx_dropped++;
 	return NETDEV_TX_OK;
 }
@@ -1087,14 +1148,14 @@
 static int vnet_open(struct net_device *dev)
 {
 	netif_carrier_on(dev);
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 
 	return 0;
 }
 
 static int vnet_close(struct net_device *dev)
 {
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	netif_carrier_off(dev);
 
 	return 0;
@@ -1204,18 +1265,17 @@
 {
 	struct vnet *vp = netdev_priv(dev);
 	struct vnet_port *port;
-	unsigned long flags;
 
-	spin_lock_irqsave(&vp->lock, flags);
-	if (!list_empty(&vp->port_list)) {
-		port = list_entry(vp->port_list.next, struct vnet_port, list);
+	rcu_read_lock();
+	list_for_each_entry_rcu(port, &vp->port_list, list) {
 
 		if (port->switch_port) {
 			__update_mc_list(vp, dev);
 			__send_mc_list(vp, port);
+			break;
 		}
 	}
-	spin_unlock_irqrestore(&vp->lock, flags);
+	rcu_read_unlock();
 }
 
 static int vnet_change_mtu(struct net_device *dev, int new_mtu)
@@ -1342,6 +1402,21 @@
 	return err;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vnet_poll_controller(struct net_device *dev)
+{
+	struct vnet *vp = netdev_priv(dev);
+	struct vnet_port *port;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vp->lock, flags);
+	if (!list_empty(&vp->port_list)) {
+		port = list_entry(vp->port_list.next, struct vnet_port, list);
+		napi_schedule(&port->napi);
+	}
+	spin_unlock_irqrestore(&vp->lock, flags);
+}
+#endif
 static LIST_HEAD(vnet_list);
 static DEFINE_MUTEX(vnet_list_mutex);
 
@@ -1354,6 +1429,10 @@
 	.ndo_tx_timeout		= vnet_tx_timeout,
 	.ndo_change_mtu		= vnet_change_mtu,
 	.ndo_start_xmit		= vnet_start_xmit,
+	.ndo_select_queue	= vnet_select_queue,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= vnet_poll_controller,
+#endif
 };
 
 static struct vnet *vnet_new(const u64 *local_mac)
@@ -1362,7 +1441,7 @@
 	struct vnet *vp;
 	int err, i;
 
-	dev = alloc_etherdev(sizeof(*vp));
+	dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1);
 	if (!dev)
 		return ERR_PTR(-ENOMEM);
 	dev->needed_headroom = VNET_PACKET_SKIP + 8;
@@ -1374,7 +1453,6 @@
 	vp = netdev_priv(dev);
 
 	spin_lock_init(&vp->lock);
-	tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp);
 	vp->dev = dev;
 
 	INIT_LIST_HEAD(&vp->port_list);
@@ -1434,7 +1512,6 @@
 		vp = list_first_entry(&vnet_list, struct vnet, list);
 		list_del(&vp->list);
 		dev = vp->dev;
-		tasklet_kill(&vp->vnet_tx_wakeup);
 		/* vio_unregister_driver() should have cleaned up port_list */
 		BUG_ON(!list_empty(&vp->port_list));
 		unregister_netdev(dev);
@@ -1489,6 +1566,26 @@
 
 const char *remote_macaddr_prop = "remote-mac-address";
 
+static void
+vnet_port_add_txq(struct vnet_port *port)
+{
+	struct vnet *vp = port->vp;
+	int n;
+
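+	/* Ports take TX queues in round-robin order, modulo VNET_MAX_TXQS. */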
+	n = vp->nports++;
+	n = n & (VNET_MAX_TXQS - 1);
+	port->q_index = n;
+	netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
+}
+
+static void
+vnet_port_rm_txq(struct vnet_port *port)
+{
+	port->vp->nports--;
+	netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
+}
+
 static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 {
 	struct mdesc_handle *hp;
@@ -1536,6 +1632,8 @@
 	if (err)
 		goto err_out_free_port;
 
+	netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT);
+
 	err = vnet_port_alloc_tx_bufs(port);
 	if (err)
 		goto err_out_free_ldc;
@@ -1550,10 +1648,12 @@
 
 	spin_lock_irqsave(&vp->lock, flags);
 	if (switch_port)
-		list_add(&port->list, &vp->port_list);
+		list_add_rcu(&port->list, &vp->port_list);
 	else
-		list_add_tail(&port->list, &vp->port_list);
-	hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
+		list_add_tail_rcu(&port->list, &vp->port_list);
+	hlist_add_head_rcu(&port->hash,
+			   &vp->port_hash[vnet_hashfn(port->raddr)]);
+	vnet_port_add_txq(port);
 	spin_unlock_irqrestore(&vp->lock, flags);
 
 	dev_set_drvdata(&vdev->dev, port);
@@ -1564,6 +1664,7 @@
 	setup_timer(&port->clean_timer, vnet_clean_timer_expire,
 		    (unsigned long)port);
 
+	napi_enable(&port->napi);
 	vio_port_up(&port->vio);
 
 	mdesc_release(hp);
@@ -1571,6 +1672,7 @@
 	return 0;
 
 err_out_free_ldc:
+	netif_napi_del(&port->napi);
 	vio_ldc_free(&port->vio);
 
 err_out_free_port:
@@ -1586,17 +1688,18 @@
 	struct vnet_port *port = dev_get_drvdata(&vdev->dev);
 
 	if (port) {
-		struct vnet *vp = port->vp;
-		unsigned long flags;
 
 		del_timer_sync(&port->vio.timer);
+
+		napi_disable(&port->napi);
+
+		list_del_rcu(&port->list);
+		hlist_del_rcu(&port->hash);
+
+		synchronize_rcu();
 		del_timer_sync(&port->clean_timer);
-
-		spin_lock_irqsave(&vp->lock, flags);
-		list_del(&port->list);
-		hlist_del(&port->hash);
-		spin_unlock_irqrestore(&vp->lock, flags);
-
+		vnet_port_rm_txq(port);
+		netif_napi_del(&port->napi);
 		vnet_port_free_tx_bufs(port);
 		vio_ldc_free(&port->vio);
 
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
index c911045..cd5d343 100644
--- a/drivers/net/ethernet/sun/sunvnet.h
+++ b/drivers/net/ethernet/sun/sunvnet.h
@@ -56,6 +56,12 @@
 	struct timer_list	clean_timer;
 
 	u64			rmtu;
+
+	struct napi_struct	napi;
+	u32			napi_stop_idx;
+	bool			napi_resume;
+	int			rx_event;
+	u16			q_index;
 };
 
 static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
@@ -97,7 +103,7 @@
 	struct list_head	list;
 	u64			local_mac;
 
-	struct tasklet_struct	vnet_tx_wakeup;
+	int			nports;
 };
 
 #endif /* _SUNVNET_H */
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 0f56b1c..70a930a 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -638,14 +638,12 @@
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem)
-		return -ENXIO;
-	mem_size = resource_size(mem);
-
 	priv->base = devm_ioremap_resource(&pdev->dev, mem);
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
 
+	mem_size = resource_size(mem);
+
 	spin_lock_init(&priv->reg_lock);
 	priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
 	if (priv->indirect) {
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index f961f14..7974b7d 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -558,14 +558,12 @@
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem)
-		return -ENXIO;
-	mem_size = resource_size(mem);
-
 	priv->base = devm_ioremap_resource(&pdev->dev, mem);
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
 
+	mem_size = resource_size(mem);
+
 	spin_lock_init(&priv->reg_lock);
 	priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
 	if (priv->indirect) {
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index c3c4051..daca0de 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -675,8 +675,7 @@
 	kfree(xbuff);
 	kfree(rbuff);
 
-	if (dev)
-		free_netdev(dev);
+	free_netdev(dev);
 
 out:
 	return err;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 78ec33f..3295e4e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -193,7 +193,9 @@
 	struct flow_keys flow;
 	int data_len;
 
-	if (!skb_flow_dissect(skb, &flow) || flow.n_proto != htons(ETH_P_IP))
+	if (!skb_flow_dissect(skb, &flow) ||
+	    !(flow.n_proto == htons(ETH_P_IP) ||
+	      flow.n_proto == htons(ETH_P_IPV6)))
 		return false;
 
 	if (flow.ip_proto == IPPROTO_TCP)
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2b86f0b..ccce6f2 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -728,7 +728,8 @@
 	rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
 	rssp->flag = 0;
 	rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
-			 NDIS_HASH_TCP_IPV4;
+			 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
+			 NDIS_HASH_TCP_IPV6;
 	rssp->indirect_tabsize = 4*ITAB_NUM;
 	rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
 	rssp->hashkey_size = HASH_KEYLEN;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 75472cf7..b4b0f80 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -26,7 +26,7 @@
 
 config AMD_XGBE_PHY
 	tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
-	depends on OF
+	depends on OF && HAS_IOMEM
 	---help---
 	  Currently supports the AMD 10GbE PHY
 
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index c456559..37b9f3f 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -992,7 +992,8 @@
 	if (ret & MDIO_CTRL1_RESET)
 		return -ETIMEDOUT;
 
-	return 0;
+	/* Make sure the XPCS and SerDes are in compatible states */
+	return amd_xgbe_phy_xgmii_mode(phydev);
 }
 
 static int amd_xgbe_phy_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 225c033..bb4d780 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -54,6 +54,7 @@
 #define MII_M1145_PHY_EXT_CR		0x14
 #define MII_M1145_RGMII_RX_DELAY	0x0080
 #define MII_M1145_RGMII_TX_DELAY	0x0002
 
 #define MII_M1145_HWCFG_MODE_SGMII_NO_CLK	0x4
 #define MII_M1145_HWCFG_MODE_MASK		0xf
+#define MII_M1145_HWCFG_FIBER_COPPER_AUTO	0x8000
@@ -123,6 +126,9 @@
 
 #define MII_M1116R_CONTROL_REG_MAC	21
 
+#define MII_88E3016_PHY_SPEC_CTRL	0x10
+#define MII_88E3016_DISABLE_SCRAMBLER	0x0200
+#define MII_88E3016_AUTO_MDIX_CROSSOVER	0x0030
 
 MODULE_DESCRIPTION("Marvell PHY driver");
 MODULE_AUTHOR("Andy Fleming");
@@ -439,6 +445,25 @@
 	return 0;
 }
 
+static int m88e3016_config_init(struct phy_device *phydev)
+{
+	int reg;
+
+	/* Enable Scrambler and Auto-Crossover */
+	reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL);
+	if (reg < 0)
+		return reg;
+
+	reg &= ~MII_88E3016_DISABLE_SCRAMBLER;
+	reg |= MII_88E3016_AUTO_MDIX_CROSSOVER;
+
+	reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg);
+	if (reg < 0)
+		return reg;
+
+	return 0;
+}
+
 static int m88e1111_config_init(struct phy_device *phydev)
 {
 	int err;
@@ -625,6 +650,7 @@
 static int m88e1145_config_init(struct phy_device *phydev)
 {
 	int err;
+	int temp;
 
 	/* Take care of errata E0 & E1 */
 	err = phy_write(phydev, 0x1d, 0x001b);
@@ -682,7 +708,7 @@
 	}
 
 	if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
-		int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
+		temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
 		if (temp < 0)
 			return temp;
 
@@ -789,6 +815,12 @@
 	return 0;
 }
 
+static int marvell_aneg_done(struct phy_device *phydev)
+{
+	int retval = phy_read(phydev, MII_M1011_PHY_STATUS);
+	return (retval < 0) ? retval : (retval & MII_M1011_PHY_STATUS_RESOLVED);
+}
+
 static int m88e1121_did_interrupt(struct phy_device *phydev)
 {
 	int imask;
@@ -1069,6 +1101,23 @@
 		.suspend = &genphy_suspend,
 		.driver = { .owner = THIS_MODULE },
 	},
+	{
+		.phy_id = MARVELL_PHY_ID_88E3016,
+		.phy_id_mask = MARVELL_PHY_ID_MASK,
+		.name = "Marvell 88E3016",
+		.features = PHY_BASIC_FEATURES,
+		.flags = PHY_HAS_INTERRUPT,
+		.config_aneg = &genphy_config_aneg,
+		.config_init = &m88e3016_config_init,
+		.aneg_done = &marvell_aneg_done,
+		.read_status = &marvell_read_status,
+		.ack_interrupt = &marvell_ack_interrupt,
+		.config_intr = &marvell_config_intr,
+		.did_interrupt = &m88e1121_did_interrupt,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
+		.driver = { .owner = THIS_MODULE },
+	},
 };
 
 static int __init marvell_init(void)
@@ -1098,6 +1147,7 @@
 	{ MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
+	{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
 	{ }
 };
 
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index eab57fc..4653015 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -353,7 +353,9 @@
 
 static int ks8995_remove(struct spi_device *spi)
 {
-	sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+	struct ks8995_switch *ks = spi_get_drvdata(spi);
+
+	sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr);
 
 	return 0;
 }
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 6c9c16d..443cbbf 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -981,7 +981,7 @@
 
 	if (skb) {
 		total_len = min_t(size_t, total_len, skb->len);
-		error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
+		error = skb_copy_datagram_msg(skb, 0, m, total_len);
 		if (error == 0) {
 			consume_skb(skb);
 			return total_len;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c6554c7..66b139a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -461,11 +461,7 @@
 
 /* Define these values to match your device */
 #define VENDOR_ID_REALTEK		0x0bda
-#define PRODUCT_ID_RTL8152		0x8152
-#define PRODUCT_ID_RTL8153		0x8153
-
 #define VENDOR_ID_SAMSUNG		0x04e8
-#define PRODUCT_ID_SAMSUNG		0xa101
 
 #define MCU_TYPE_PLA			0x0100
 #define MCU_TYPE_USB			0x0000
@@ -486,7 +482,7 @@
 	__le64	rx_broadcast;
 	__le32	rx_multicast;
 	__le16	tx_aborted;
-	__le16	tx_underun;
+	__le16	tx_underrun;
 };
 
 struct rx_desc {
@@ -690,6 +686,9 @@
 		}
 	}
 
+	if (ret == -ENODEV)
+		set_bit(RTL8152_UNPLUG, &tp->flags);
+
 	return ret;
 }
 
@@ -757,6 +756,9 @@
 	}
 
 error1:
+	if (ret == -ENODEV)
+		set_bit(RTL8152_UNPLUG, &tp->flags);
+
 	return ret;
 }
 
@@ -1083,6 +1085,7 @@
 
 	result = r8152_submit_rx(tp, agg, GFP_ATOMIC);
 	if (result == -ENODEV) {
+		set_bit(RTL8152_UNPLUG, &tp->flags);
 		netif_device_detach(tp->netdev);
 	} else if (result) {
 		spin_lock(&tp->rx_lock);
@@ -1190,11 +1193,13 @@
 
 resubmit:
 	res = usb_submit_urb(urb, GFP_ATOMIC);
-	if (res == -ENODEV)
+	if (res == -ENODEV) {
+		set_bit(RTL8152_UNPLUG, &tp->flags);
 		netif_device_detach(tp->netdev);
-	else if (res)
+	} else if (res) {
 		netif_err(tp, intr, tp->netdev,
 			  "can't resubmit intr, status %d\n", res);
+	}
 }
 
 static inline void *rx_agg_align(void *data)
@@ -1758,6 +1763,7 @@
 			struct net_device *netdev = tp->netdev;
 
 			if (res == -ENODEV) {
+				set_bit(RTL8152_UNPLUG, &tp->flags);
 				netif_device_detach(netdev);
 			} else {
 				struct net_device_stats *stats = &netdev->stats;
@@ -2933,6 +2939,8 @@
 		netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
 			   res);
 		free_all_mem(tp);
+	} else {
+		tasklet_enable(&tp->tl);
 	}
 
 	mutex_unlock(&tp->control);
@@ -2948,6 +2956,7 @@
 	struct r8152 *tp = netdev_priv(netdev);
 	int res = 0;
 
+	tasklet_disable(&tp->tl);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
@@ -2965,9 +2974,7 @@
 		 */
 		rtl_runtime_suspend_enable(tp, false);
 
-		tasklet_disable(&tp->tl);
 		tp->rtl_ops.down(tp);
-		tasklet_enable(&tp->tl);
 
 		mutex_unlock(&tp->control);
 
@@ -3427,7 +3434,7 @@
 	data[9] = le64_to_cpu(tally.rx_broadcast);
 	data[10] = le32_to_cpu(tally.rx_multicast);
 	data[11] = le16_to_cpu(tally.tx_aborted);
-	data[12] = le16_to_cpu(tally.tx_underun);
+	data[12] = le16_to_cpu(tally.tx_underrun);
 }
 
 static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -3565,11 +3572,33 @@
 	return ret;
 }
 
+static int rtl8152_nway_reset(struct net_device *dev)
+{
+	struct r8152 *tp = netdev_priv(dev);
+	int ret;
+
+	ret = usb_autopm_get_interface(tp->intf);
+	if (ret < 0)
+		goto out;
+
+	mutex_lock(&tp->control);
+
+	ret = mii_nway_restart(&tp->mii);
+
+	mutex_unlock(&tp->control);
+
+	usb_autopm_put_interface(tp->intf);
+
+out:
+	return ret;
+}
+
 static struct ethtool_ops ops = {
 	.get_drvinfo = rtl8152_get_drvinfo,
 	.get_settings = rtl8152_get_settings,
 	.set_settings = rtl8152_set_settings,
 	.get_link = ethtool_op_get_link,
+	.nway_reset = rtl8152_nway_reset,
 	.get_msglevel = rtl8152_get_msglevel,
 	.set_msglevel = rtl8152_set_msglevel,
 	.get_wol = rtl8152_get_wol,
@@ -3709,66 +3738,43 @@
 	r8153_power_cut_en(tp, false);
 }
 
-static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
+static int rtl_ops_init(struct r8152 *tp)
 {
 	struct rtl_ops *ops = &tp->rtl_ops;
-	int ret = -ENODEV;
+	int ret = 0;
 
-	switch (id->idVendor) {
-	case VENDOR_ID_REALTEK:
-		switch (id->idProduct) {
-		case PRODUCT_ID_RTL8152:
-			ops->init		= r8152b_init;
-			ops->enable		= rtl8152_enable;
-			ops->disable		= rtl8152_disable;
-			ops->up			= rtl8152_up;
-			ops->down		= rtl8152_down;
-			ops->unload		= rtl8152_unload;
-			ops->eee_get		= r8152_get_eee;
-			ops->eee_set		= r8152_set_eee;
-			ret = 0;
-			break;
-		case PRODUCT_ID_RTL8153:
-			ops->init		= r8153_init;
-			ops->enable		= rtl8153_enable;
-			ops->disable		= rtl8153_disable;
-			ops->up			= rtl8153_up;
-			ops->down		= rtl8153_down;
-			ops->unload		= rtl8153_unload;
-			ops->eee_get		= r8153_get_eee;
-			ops->eee_set		= r8153_set_eee;
-			ret = 0;
-			break;
-		default:
-			break;
-		}
+	switch (tp->version) {
+	case RTL_VER_01:
+	case RTL_VER_02:
+		ops->init		= r8152b_init;
+		ops->enable		= rtl8152_enable;
+		ops->disable		= rtl8152_disable;
+		ops->up			= rtl8152_up;
+		ops->down		= rtl8152_down;
+		ops->unload		= rtl8152_unload;
+		ops->eee_get		= r8152_get_eee;
+		ops->eee_set		= r8152_set_eee;
 		break;
 
-	case VENDOR_ID_SAMSUNG:
-		switch (id->idProduct) {
-		case PRODUCT_ID_SAMSUNG:
-			ops->init		= r8153_init;
-			ops->enable		= rtl8153_enable;
-			ops->disable		= rtl8153_disable;
-			ops->up			= rtl8153_up;
-			ops->down		= rtl8153_down;
-			ops->unload		= rtl8153_unload;
-			ops->eee_get		= r8153_get_eee;
-			ops->eee_set		= r8153_set_eee;
-			ret = 0;
-			break;
-		default:
-			break;
-		}
+	case RTL_VER_03:
+	case RTL_VER_04:
+	case RTL_VER_05:
+		ops->init		= r8153_init;
+		ops->enable		= rtl8153_enable;
+		ops->disable		= rtl8153_disable;
+		ops->up			= rtl8153_up;
+		ops->down		= rtl8153_down;
+		ops->unload		= rtl8153_unload;
+		ops->eee_get		= r8153_get_eee;
+		ops->eee_set		= r8153_set_eee;
 		break;
 
 	default:
+		ret = -ENODEV;
+		netif_err(tp, probe, tp->netdev, "Unknown Device\n");
 		break;
 	}
 
-	if (ret)
-		netif_err(tp, probe, tp->netdev, "Unknown Device\n");
-
 	return ret;
 }
 
@@ -3800,7 +3806,8 @@
 	tp->netdev = netdev;
 	tp->intf = intf;
 
-	ret = rtl_ops_init(tp, id);
+	r8152b_get_version(tp);
+	ret = rtl_ops_init(tp);
 	if (ret)
 		goto out;
 
@@ -3833,11 +3840,9 @@
 	tp->mii.phy_id_mask = 0x3f;
 	tp->mii.reg_num_mask = 0x1f;
 	tp->mii.phy_id = R8152_PHY_ID;
-	tp->mii.supports_gmii = 0;
 
 	intf->needs_remote_wakeup = 1;
 
-	r8152b_get_version(tp);
 	tp->rtl_ops.init(tp);
 	set_ethernet_addr(tp);
 
@@ -3855,12 +3860,16 @@
 	else
 		device_set_wakeup_enable(&udev->dev, false);
 
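+	/* The tasklet stays disabled until rtl8152_open() enables it. */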
+	tasklet_disable(&tp->tl);
+
 	netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
 
 	return 0;
 
 out1:
 	usb_set_intfdata(intf, NULL);
+	tasklet_kill(&tp->tl);
 out:
 	free_netdev(netdev);
 	return ret;
@@ -3886,9 +3894,9 @@
 
 /* table of devices that work with this driver */
 static struct usb_device_id rtl8152_table[] = {
-	{USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
-	{USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
-	{USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
+	{USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
+	{USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
+	{USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
 	{}
 };
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ca30982..0ab4114 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2233,6 +2233,9 @@
 	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
 	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
 	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
+	[IFLA_VXLAN_UDP_CSUM]	= { .type = NLA_U8 },
+	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
+	[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
 };
 
 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 895fe84..a6a32d3 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -235,10 +235,10 @@
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
-		napi_disable(&queue->napi);
 		disable_irq(queue->tx_irq);
 		if (queue->tx_irq != queue->rx_irq)
 			disable_irq(queue->rx_irq);
+		napi_disable(&queue->napi);
 		del_timer_sync(&queue->credit_timeout);
 	}
 }
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6563f07..4a509f7 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -82,6 +82,15 @@
 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
 module_param(fatal_skb_slots, uint, 0444);
 
+/* The amount to copy out of the first guest Tx slot into the skb's
+ * linear area.  If the first slot has more data, it will be mapped
+ * and put into the first frag.
+ *
+ * This is sized to avoid pulling headers from the frags for most
+ * TCP/IP packets.
+ */
+#define XEN_NETBACK_TX_COPY_LEN 128
+
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 			       u8 status);
 
@@ -125,13 +135,6 @@
 			    pending_tx_info[0]);
 }
 
-/* This is a miniumum size for the linear area to avoid lots of
- * calls to __pskb_pull_tail() as we set up checksum offsets. The
- * value 128 was chosen as it covers all IPv4 and most likely
- * IPv6 headers.
- */
-#define PKT_PROT_LEN 128
-
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
 	return (u16)frag->page_offset;
@@ -1446,9 +1449,9 @@
 		index = pending_index(queue->pending_cons);
 		pending_idx = queue->pending_ring[index];
 
-		data_len = (txreq.size > PKT_PROT_LEN &&
+		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
 			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
-			PKT_PROT_LEN : txreq.size;
+			XEN_NETBACK_TX_COPY_LEN : txreq.size;
 
 		skb = xenvif_alloc_skb(data_len);
 		if (unlikely(skb == NULL)) {
@@ -1550,7 +1553,7 @@
 		unsigned int len;
 
 		BUG_ON(i >= MAX_SKB_FRAGS);
-		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
+		page = alloc_page(GFP_ATOMIC);
 		if (!page) {
 			int j;
 			skb->truesize += skb->data_len;
@@ -1653,11 +1656,6 @@
 			}
 		}
 
-		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
-			int target = min_t(int, skb->len, PKT_PROT_LEN);
-			__pskb_pull_tail(skb, target - skb_headlen(skb));
-		}
-
 		skb->dev      = queue->vif->dev;
 		skb->protocol = eth_type_trans(skb, skb->dev);
 		skb_reset_network_header(skb);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cca8713..88a70f5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -77,7 +77,9 @@
 
 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
-#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
+
+/* Minimum number of Rx slots (includes slot for GSO metadata). */
+#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
 
 /* Queue name is interface name with "-qNNN" appended */
 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
@@ -137,13 +139,6 @@
 	struct xen_netif_rx_front_ring rx;
 	int rx_ring_ref;
 
-	/* Receive-ring batched refills. */
-#define RX_MIN_TARGET 8
-#define RX_DFL_MIN_TARGET 64
-#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
-	unsigned rx_min_target, rx_max_target, rx_target;
-	struct sk_buff_head rx_batch;
-
 	struct timer_list rx_refill_timer;
 
 	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
@@ -251,7 +246,7 @@
 static int netfront_tx_slot_available(struct netfront_queue *queue)
 {
 	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
-		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
+		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
 }
 
 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
@@ -265,77 +260,54 @@
 		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
 }
 
-static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
+static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 {
-	unsigned short id;
 	struct sk_buff *skb;
 	struct page *page;
-	int i, batch_target, notify;
+
+	skb = __netdev_alloc_skb(queue->info->netdev,
+				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
+				 GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!skb))
+		return NULL;
+
+	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+	if (!page) {
+		kfree_skb(skb);
+		return NULL;
+	}
+	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
+
+	/* Align the IP header to a 16-byte boundary */
+	skb_reserve(skb, NET_IP_ALIGN);
+	skb->dev = queue->info->netdev;
+
+	return skb;
+}
+
+static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
+{
 	RING_IDX req_prod = queue->rx.req_prod_pvt;
-	grant_ref_t ref;
-	unsigned long pfn;
-	void *vaddr;
-	struct xen_netif_rx_request *req;
+	int notify;
 
 	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 		return;
 
-	/*
-	 * Allocate skbuffs greedily, even though we batch updates to the
-	 * receive ring. This creates a less bursty demand on the memory
-	 * allocator, so should reduce the chance of failed allocation requests
-	 * both for ourself and for other kernel subsystems.
-	 */
-	batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons);
-	for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) {
-		skb = __netdev_alloc_skb(queue->info->netdev,
-					 RX_COPY_THRESHOLD + NET_IP_ALIGN,
-					 GFP_ATOMIC | __GFP_NOWARN);
-		if (unlikely(!skb))
-			goto no_skb;
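+	/* Refill until the ring is full or skb allocation fails. */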
+	for (req_prod = queue->rx.req_prod_pvt;
+	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
+	     req_prod++) {
+		struct sk_buff *skb;
+		unsigned short id;
+		grant_ref_t ref;
+		unsigned long pfn;
+		struct xen_netif_rx_request *req;
 
-		/* Align ip header to a 16 bytes boundary */
-		skb_reserve(skb, NET_IP_ALIGN);
-
-		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
-		if (!page) {
-			kfree_skb(skb);
-no_skb:
-			/* Could not allocate any skbuffs. Try again later. */
-			mod_timer(&queue->rx_refill_timer,
-				  jiffies + (HZ/10));
-
-			/* Any skbuffs queued for refill? Force them out. */
-			if (i != 0)
-				goto refill;
-			break;
-		}
-
-		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
-		__skb_queue_tail(&queue->rx_batch, skb);
-	}
-
-	/* Is the batch large enough to be worthwhile? */
-	if (i < (queue->rx_target/2)) {
-		if (req_prod > queue->rx.sring->req_prod)
-			goto push;
-		return;
-	}
-
-	/* Adjust our fill target if we risked running out of buffers. */
-	if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) &&
-	    ((queue->rx_target *= 2) > queue->rx_max_target))
-		queue->rx_target = queue->rx_max_target;
-
- refill:
-	for (i = 0; ; i++) {
-		skb = __skb_dequeue(&queue->rx_batch);
-		if (skb == NULL)
+		skb = xennet_alloc_one_rx_buffer(queue);
+		if (!skb)
 			break;
 
-		skb->dev = queue->info->netdev;
-
-		id = xennet_rxidx(req_prod + i);
+		id = xennet_rxidx(req_prod);
 
 		BUG_ON(queue->rx_skbs[id]);
 		queue->rx_skbs[id] = skb;
@@ -345,9 +318,8 @@
 		queue->grant_rx_ref[id] = ref;
 
 		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
-		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 
-		req = RING_GET_REQUEST(&queue->rx, req_prod + i);
+		req = RING_GET_REQUEST(&queue->rx, req_prod);
 		gnttab_grant_foreign_access_ref(ref,
 						queue->info->xbdev->otherend_id,
 						pfn_to_mfn(pfn),
@@ -357,11 +329,16 @@
 		req->gref = ref;
 	}
 
+	queue->rx.req_prod_pvt = req_prod;
+
+	/* Not enough requests? Try again later. */
+	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
+		return;
+	}
+
 	wmb();		/* barrier so backend sees requests */
 
-	/* Above is a suitable barrier to ensure backend will see requests. */
-	queue->rx.req_prod_pvt = req_prod + i;
- push:
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 	if (notify)
 		notify_remote_via_irq(queue->rx_irq);
@@ -1070,13 +1047,6 @@
 
 	work_done -= handle_incoming_queue(queue, &rxq);
 
-	/* If we get a callback with very few responses, reduce fill target. */
-	/* NB. Note exponential increase, linear decrease. */
-	if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) >
-	     ((3*queue->rx_target) / 4)) &&
-	    (--queue->rx_target < queue->rx_min_target))
-		queue->rx_target = queue->rx_min_target;
-
 	xennet_alloc_rx_buffers(queue);
 
 	if (work_done < budget) {
@@ -1643,11 +1613,6 @@
 	spin_lock_init(&queue->tx_lock);
 	spin_lock_init(&queue->rx_lock);
 
-	skb_queue_head_init(&queue->rx_batch);
-	queue->rx_target     = RX_DFL_MIN_TARGET;
-	queue->rx_min_target = RX_DFL_MIN_TARGET;
-	queue->rx_max_target = RX_MAX_TARGET;
-
 	init_timer(&queue->rx_refill_timer);
 	queue->rx_refill_timer.data = (unsigned long)queue;
 	queue->rx_refill_timer.function = rx_refill_timeout;
@@ -1670,7 +1635,7 @@
 	}
 
 	/* A grant for every tx ring slot */
-	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
+	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
 					  &queue->gref_tx_head) < 0) {
 		pr_alert("can't alloc tx grant refs\n");
 		err = -ENOMEM;
@@ -1678,7 +1643,7 @@
 	}
 
 	/* A grant for every rx ring slot */
-	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
+	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
 					  &queue->gref_rx_head) < 0) {
 		pr_alert("can't alloc rx grant refs\n");
 		err = -ENOMEM;
@@ -2146,30 +2111,18 @@
 };
 
 #ifdef CONFIG_SYSFS
-static ssize_t show_rxbuf_min(struct device *dev,
-			      struct device_attribute *attr, char *buf)
+static ssize_t show_rxbuf(struct device *dev,
+			  struct device_attribute *attr, char *buf)
 {
-	struct net_device *netdev = to_net_dev(dev);
-	struct netfront_info *info = netdev_priv(netdev);
-	unsigned int num_queues = netdev->real_num_tx_queues;
-
-	if (num_queues)
-		return sprintf(buf, "%u\n", info->queues[0].rx_min_target);
-	else
-		return sprintf(buf, "%u\n", RX_MIN_TARGET);
+	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
 }
 
-static ssize_t store_rxbuf_min(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t len)
+static ssize_t store_rxbuf(struct device *dev,
+			   struct device_attribute *attr,
+			   const char *buf, size_t len)
 {
-	struct net_device *netdev = to_net_dev(dev);
-	struct netfront_info *np = netdev_priv(netdev);
-	unsigned int num_queues = netdev->real_num_tx_queues;
 	char *endp;
 	unsigned long target;
-	unsigned int i;
-	struct netfront_queue *queue;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -2178,97 +2131,15 @@
 	if (endp == buf)
 		return -EBADMSG;
 
-	if (target < RX_MIN_TARGET)
-		target = RX_MIN_TARGET;
-	if (target > RX_MAX_TARGET)
-		target = RX_MAX_TARGET;
+	/* rxbuf_min and rxbuf_max are no longer configurable. */
 
-	for (i = 0; i < num_queues; ++i) {
-		queue = &np->queues[i];
-		spin_lock_bh(&queue->rx_lock);
-		if (target > queue->rx_max_target)
-			queue->rx_max_target = target;
-		queue->rx_min_target = target;
-		if (target > queue->rx_target)
-			queue->rx_target = target;
-
-		xennet_alloc_rx_buffers(queue);
-
-		spin_unlock_bh(&queue->rx_lock);
-	}
 	return len;
 }
 
-static ssize_t show_rxbuf_max(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	struct net_device *netdev = to_net_dev(dev);
-	struct netfront_info *info = netdev_priv(netdev);
-	unsigned int num_queues = netdev->real_num_tx_queues;
-
-	if (num_queues)
-		return sprintf(buf, "%u\n", info->queues[0].rx_max_target);
-	else
-		return sprintf(buf, "%u\n", RX_MAX_TARGET);
-}
-
-static ssize_t store_rxbuf_max(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t len)
-{
-	struct net_device *netdev = to_net_dev(dev);
-	struct netfront_info *np = netdev_priv(netdev);
-	unsigned int num_queues = netdev->real_num_tx_queues;
-	char *endp;
-	unsigned long target;
-	unsigned int i = 0;
-	struct netfront_queue *queue = NULL;
-
-	if (!capable(CAP_NET_ADMIN))
-		return -EPERM;
-
-	target = simple_strtoul(buf, &endp, 0);
-	if (endp == buf)
-		return -EBADMSG;
-
-	if (target < RX_MIN_TARGET)
-		target = RX_MIN_TARGET;
-	if (target > RX_MAX_TARGET)
-		target = RX_MAX_TARGET;
-
-	for (i = 0; i < num_queues; ++i) {
-		queue = &np->queues[i];
-		spin_lock_bh(&queue->rx_lock);
-		if (target < queue->rx_min_target)
-			queue->rx_min_target = target;
-		queue->rx_max_target = target;
-		if (target < queue->rx_target)
-			queue->rx_target = target;
-
-		xennet_alloc_rx_buffers(queue);
-
-		spin_unlock_bh(&queue->rx_lock);
-	}
-	return len;
-}
-
-static ssize_t show_rxbuf_cur(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	struct net_device *netdev = to_net_dev(dev);
-	struct netfront_info *info = netdev_priv(netdev);
-	unsigned int num_queues = netdev->real_num_tx_queues;
-
-	if (num_queues)
-		return sprintf(buf, "%u\n", info->queues[0].rx_target);
-	else
-		return sprintf(buf, "0\n");
-}
-
 static struct device_attribute xennet_attrs[] = {
-	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
-	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
-	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
+	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
+	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
+	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL),
 };
 
 static int xennet_sysfs_addif(struct net_device *netdev)
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 8b3f559..f1b5111 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -71,7 +71,7 @@
 config QETH
 	def_tristate y
 	prompt "Gigabit Ethernet device support"
-	depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
+	depends on CCW && NETDEVICES && IP_MULTICAST && QDIO && ETHERNET
 	help
 	  This driver supports the IBM System z OSA Express adapters
 	  in QDIO mode (all media types), HiperSockets interfaces and z/VM
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 6bcfbbb..47773c4 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -44,8 +44,8 @@
 		return -ENODEV;
 	}
 
-	rc = sscanf(buf, "%u", &bs1);
-	if (rc != 1)
+	rc = kstrtouint(buf, 0, &bs1);
+	if (rc)
 		goto einval;
 	if (bs1 > CTCM_BUFSIZE_LIMIT)
 					goto einval;
@@ -151,8 +151,8 @@
 
 	if (!priv)
 		return -ENODEV;
-	rc = sscanf(buf, "%d", &value);
-	if ((rc != 1) ||
+	rc = kstrtoint(buf, 0, &value);
+	if (rc ||
 	    !((value == CTCM_PROTO_S390)  ||
 	      (value == CTCM_PROTO_LINUX) ||
 	      (value == CTCM_PROTO_MPC) ||
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 0a7d87c..92190aa 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1943,15 +1943,16 @@
 lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
         struct lcs_card *card;
-	int value, rc;
+	int rc;
+	s16 value;
 
 	card = dev_get_drvdata(dev);
 
         if (!card)
                 return 0;
 
-	rc = sscanf(buf, "%d", &value);
-	if (rc != 1)
+	rc = kstrtos16(buf, 0, &value);
+	if (rc)
 		return -EINVAL;
         /* TODO: sanity checks */
         card->portno = value;
@@ -2007,8 +2008,8 @@
         if (!card)
                 return 0;
 
-	rc = sscanf(buf, "%u", &value);
-	if (rc != 1)
+	rc = kstrtouint(buf, 0, &value);
+	if (rc)
 		return -EINVAL;
         /* TODO: sanity checks */
         card->lancmd_timeout = value;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e7646ce..7a8bb9f 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -380,11 +380,6 @@
 #define QETH_HDR_EXT_CSUM_TRANSP_REQ  0x20
 #define QETH_HDR_EXT_UDP	      0x40 /*bit off for TCP*/
 
-static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
-{
-	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
-}
-
 enum qeth_qdio_buffer_states {
 	/*
 	 * inbound: read out by driver; owned by hardware in order to be filled
@@ -843,13 +838,6 @@
 /*some helper functions*/
 #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
 
-static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
-{
-	struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
-		dev_get_drvdata(&cdev->dev))->dev);
-	return card;
-}
-
 static inline int qeth_get_micros(void)
 {
 	return (int) (get_tod_clock() >> 12);
@@ -894,7 +882,6 @@
 int qeth_realloc_buffer_pool(struct qeth_card *, int);
 int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
 void qeth_core_free_discipline(struct qeth_card *);
-void qeth_buffer_reclaim_work(struct work_struct *);
 
 /* exports for qeth discipline device drivers */
 extern struct qeth_card_list_struct qeth_core_card_list;
@@ -913,7 +900,6 @@
 void qeth_print_status_message(struct qeth_card *);
 int qeth_init_qdio_queues(struct qeth_card *);
 int qeth_send_startlan(struct qeth_card *);
-int qeth_send_stoplan(struct qeth_card *);
 int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
 		  int (*reply_cb)
 		  (struct qeth_card *, struct qeth_reply *, unsigned long),
@@ -954,8 +940,6 @@
 int qeth_query_oat_command(struct qeth_card *, char __user *);
 int qeth_query_switch_attributes(struct qeth_card *card,
 				  struct qeth_switch_info *sw_info);
-int qeth_query_card_info(struct qeth_card *card,
-	struct carrier_info *carrier_info);
 int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
 	int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
 	void *reply_param);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index fd22c81..f407e37 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -718,6 +718,13 @@
 	return 0;
 }
 
+static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
+		dev_get_drvdata(&cdev->dev))->dev);
+	return card;
+}
+
 static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
 		__u32 len)
 {
@@ -1431,6 +1438,7 @@
 	}
 }
 
+static void qeth_buffer_reclaim_work(struct work_struct *);
 static int qeth_setup_card(struct qeth_card *card)
 {
 
@@ -3232,7 +3240,7 @@
 }
 EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
 
-void qeth_buffer_reclaim_work(struct work_struct *work)
+static void qeth_buffer_reclaim_work(struct work_struct *work)
 {
 	struct qeth_card *card = container_of(work, struct qeth_card,
 		buffer_reclaim_work.work);
@@ -4126,7 +4134,7 @@
 
 	qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
 	if (cmd->hdr.return_code) {
-		QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
 		setparms->data.mode = SET_PROMISC_MODE_OFF;
 	}
 	card->info.promisc_mode = setparms->data.mode;
@@ -4493,13 +4501,13 @@
 	snmp = &cmd->data.setadapterparms.data.snmp;
 
 	if (cmd->hdr.return_code) {
-		QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
 		return 0;
 	}
 	if (cmd->data.setadapterparms.hdr.return_code) {
 		cmd->hdr.return_code =
 			cmd->data.setadapterparms.hdr.return_code;
-		QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
 		return 0;
 	}
 	data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
@@ -4717,7 +4725,7 @@
 	return 0;
 }
 
-int qeth_query_card_info(struct qeth_card *card,
+static int qeth_query_card_info(struct qeth_card *card,
 				struct carrier_info *carrier_info)
 {
 	struct qeth_cmd_buffer *iob;
@@ -4730,7 +4738,6 @@
 	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
 					(void *)carrier_info);
 }
-EXPORT_SYMBOL_GPL(qeth_query_card_info);
 
 static inline int qeth_get_qdio_q_format(struct qeth_card *card)
 {
@@ -5113,6 +5120,11 @@
 	return 0;
 }
 
+static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
+{
+	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
+}
+
 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
 		struct qeth_qdio_buffer *qethbuffer,
 		struct qdio_buffer_element **__element, int *__offset,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index c2679bf..d02cd1a 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1512,7 +1512,7 @@
 
 	QETH_CARD_TEXT(card, 2, "brstchng");
 	if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
-		QETH_CARD_TEXT_(card, 2, "BPsz%.8d", qports->entry_length);
+		QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
 		return;
 	}
 	extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries;
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 29c1c00..551a4b4 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -42,10 +42,6 @@
 };
 
 
-void qeth_l3_ipaddr4_to_string(const __u8 *, char *);
-int qeth_l3_string_to_ipaddr4(const char *, __u8 *);
-void qeth_l3_ipaddr6_to_string(const __u8 *, char *);
-int qeth_l3_string_to_ipaddr6(const char *, __u8 *);
 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
 int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
 int qeth_l3_create_device_attributes(struct device *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index afebb97..625227ad 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -55,12 +55,12 @@
 	return 1;
 }
 
-void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
+static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
 {
 	sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
 }
 
-int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
+static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
 {
 	int count = 0, rc = 0;
 	unsigned int in[4];
@@ -78,12 +78,12 @@
 	return 0;
 }
 
-void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
+static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
 {
 	sprintf(buf, "%pI6", addr);
 }
 
-int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
+static int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
 {
 	const char *end, *end_tmp, *start;
 	__u16 *in;
@@ -2502,7 +2502,7 @@
 			rc = -EFAULT;
 			goto free_and_out;
 		}
-		QETH_CARD_TEXT_(card, 4, "qacts");
+		QETH_CARD_TEXT(card, 4, "qacts");
 	}
 free_and_out:
 	kfree(qinfo.udata);
diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h
index b631284..3c82760 100644
--- a/include/asm-generic/hash.h
+++ b/include/asm-generic/hash.h
@@ -1,9 +1,41 @@
 #ifndef __ASM_GENERIC_HASH_H
 #define __ASM_GENERIC_HASH_H
 
-struct fast_hash_ops;
-static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
+#include <linux/jhash.h>
+
+/**
+ *	arch_fast_hash - Calculates a hash over a given buffer that can have
+ *			 arbitrary size. This function will eventually use an
+ *			 architecture-optimized hashing implementation if
+ *			 available, and trades off distribution for speed.
+ *
+ *	@data: buffer to hash
+ *	@len: length of buffer in bytes
+ *	@seed: start seed
+ *
+ *	Returns 32bit hash.
+ */
+static inline u32 arch_fast_hash(const void *data, u32 len, u32 seed)
 {
+	return jhash(data, len, seed);
+}
+
+/**
+ *	arch_fast_hash2 - Calculates a hash over a given buffer that has a
+ *			  size that is of a multiple of 32bit words. This
+ *			  function will eventually use an architecture-
+ *			  optimized hashing implementation if available,
+ *			  and trades off distribution for speed.
+ *
+ *	@data: buffer to hash (must be 32bit padded)
+ *	@len: number of 32bit words
+ *	@seed: start seed
+ *
+ *	Returns 32bit hash.
+ */
+static inline u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
+{
+	return jhash2(data, len, seed);
 }
 
 #endif /* __ASM_GENERIC_HASH_H */
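
With fast_hash_ops removed, arch_fast_hash() and arch_fast_hash2() become
header-only inlines that fall back to jhash()/jhash2() until an architecture
supplies an optimized version. A minimal sketch of a caller, assuming an
invented flow_key struct (not part of this patch):

#include <asm/hash.h>	/* resolves to asm-generic/hash.h unless the arch overrides it */

struct flow_key {
	u32 src_ip, dst_ip;
	u16 src_port, dst_port;
};

static u32 flow_hash(const struct flow_key *key, u32 seed)
{
	/* byte-oriented variant: len is in bytes */
	return arch_fast_hash(key, sizeof(*key), seed);
}

static u32 flow_hash_words(const u32 *key_words, u32 seed)
{
	/* word-oriented variant: len is in 32-bit words and the
	 * buffer must be a multiple of 4 bytes long
	 */
	return arch_fast_hash2(key_words, sizeof(struct flow_key) / 4, seed);
}
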
diff --git a/include/linux/hash.h b/include/linux/hash.h
index d0494c3..6e8fb02 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -84,38 +84,4 @@
 	return (u32)val;
 }
 
-struct fast_hash_ops {
-	u32 (*hash)(const void *data, u32 len, u32 seed);
-	u32 (*hash2)(const u32 *data, u32 len, u32 seed);
-};
-
-/**
- *	arch_fast_hash - Caclulates a hash over a given buffer that can have
- *			 arbitrary size. This function will eventually use an
- *			 architecture-optimized hashing implementation if
- *			 available, and trades off distribution for speed.
- *
- *	@data: buffer to hash
- *	@len: length of buffer in bytes
- *	@seed: start seed
- *
- *	Returns 32bit hash.
- */
-extern u32 arch_fast_hash(const void *data, u32 len, u32 seed);
-
-/**
- *	arch_fast_hash2 - Caclulates a hash over a given buffer that has a
- *			  size that is of a multiple of 32bit words. This
- *			  function will eventually use an architecture-
- *			  optimized hashing implementation if available,
- *			  and trades off distribution for speed.
- *
- *	@data: buffer to hash (must be 32bit padded)
- *	@len: number of 32bit words
- *	@seed: start seed
- *
- *	Returns 32bit hash.
- */
-extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed);
-
 #endif /* _LINUX_HASH_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ff56053..c694e7b 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -42,6 +42,7 @@
 	__s32		accept_ra_from_local;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 	__s32		optimistic_dad;
+	__s32		use_optimistic;
 #endif
 #ifdef CONFIG_IPV6_MROUTE
 	__s32		mc_forwarding;
@@ -316,14 +317,4 @@
 #define tcp_twsk_ipv6only(__sk)		0
 #define inet_v6_ipv6only(__sk)		0
 #endif /* IS_ENABLED(CONFIG_IPV6) */
-
-#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif)	\
-	(((__sk)->sk_portpair == (__ports))			&&	\
-	 ((__sk)->sk_family == AF_INET6)			&&	\
-	 ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr))		&&	\
-	 ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr))	&&	\
-	 (!(__sk)->sk_bound_dev_if	||				\
-	   ((__sk)->sk_bound_dev_if == (__dif))) 		&&	\
-	 net_eq(sock_net(__sk), (__net)))
-
 #endif /* _IPV6_H */
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 8e9a029..e6982ac 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -16,6 +16,7 @@
 #define MARVELL_PHY_ID_88E1318S		0x01410e90
 #define MARVELL_PHY_ID_88E1116R		0x01410e40
 #define MARVELL_PHY_ID_88E1510		0x01410dd0
+#define MARVELL_PHY_ID_88E3016		0x01410e60
 
 /* struct phy_device dev_flags definitions */
 #define MARVELL_PHY_M1145_FLAGS_RESISTANCE	0x00000001
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 379c026..64d2594 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -67,6 +67,8 @@
 	MLX4_CMD_MAP_ICM_AUX	 = 0xffc,
 	MLX4_CMD_UNMAP_ICM_AUX	 = 0xffb,
 	MLX4_CMD_SET_ICM_SIZE	 = 0xffd,
+	MLX4_CMD_ACCESS_REG	 = 0x3b,
+
 	/*master notify fw on finish for slave's flr*/
 	MLX4_CMD_INFORM_FLR_DONE = 0x5b,
 	MLX4_CMD_GET_OP_REQ      = 0x59,
@@ -197,6 +199,33 @@
 	MLX4_CMD_NATIVE
 };
 
+/*
+ * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP -
+ * The receive checksum value is reported in the CQE also for non-TCP/UDP packets.
+ *
+ * MLX4_RX_CSUM_MODE_L4 -
+ * The L4_CSUM bit in the CQE, which indicates whether or not the L4
+ * checksum was validated correctly, is supported.
+ *
+ * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP -
+ * The IP_OK field in the CQE is supported also for non-TCP/UDP IP packets.
+ *
+ * MLX4_RX_CSUM_MODE_MULTI_VLAN -
+ * Receive checksum offload is supported for packets with more than 2 VLAN headers.
+ */
+enum mlx4_rx_csum_mode {
+	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP		= 1UL << 0,
+	MLX4_RX_CSUM_MODE_L4				= 1UL << 1,
+	MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP		= 1UL << 2,
+	MLX4_RX_CSUM_MODE_MULTI_VLAN			= 1UL << 3
+};
+
+struct mlx4_config_dev_params {
+	u16	vxlan_udp_dport;
+	u8	rx_csum_flags_port_1;
+	u8	rx_csum_flags_port_2;
+};
+
 struct mlx4_dev;
 
 struct mlx4_cmd_mailbox {
@@ -248,6 +277,8 @@
 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
 int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
 int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
+int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
+			      struct mlx4_config_dev_params *params);
 /*
  * mlx4_get_slave_default_vlan -
  * return true if VST ( default vlan)
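
The per-port rx_csum_flags retrieved through mlx4_config_dev_retrieval() are
meant to be tested against the mlx4_rx_csum_mode bits declared above; a hedged
sketch of a consumer (error handling elided, helper name invented):

#include <linux/mlx4/cmd.h>

static bool port1_reports_l4_csum(struct mlx4_dev *dev)
{
	struct mlx4_config_dev_params params;

	if (mlx4_config_dev_retrieval(dev, &params))
		return false;

	/* the per-port flag bytes carry MLX4_RX_CSUM_MODE_* bits */
	return params.rx_csum_flags_port_1 & MLX4_RX_CSUM_MODE_L4;
}
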
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 37e4404..5cc5eac 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -186,7 +186,10 @@
 	MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS	= 1LL <<  10,
 	MLX4_DEV_CAP_FLAG2_MAD_DEMUX		= 1LL <<  11,
 	MLX4_DEV_CAP_FLAG2_CQE_STRIDE		= 1LL <<  12,
-	MLX4_DEV_CAP_FLAG2_EQE_STRIDE		= 1LL <<  13
+	MLX4_DEV_CAP_FLAG2_EQE_STRIDE		= 1LL <<  13,
+	MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL        = 1LL <<  14,
+	MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP	= 1LL <<  15,
+	MLX4_DEV_CAP_FLAG2_CONFIG_DEV		= 1LL <<  16
 };
 
 enum {
@@ -379,6 +382,13 @@
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
 			     MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
 
+enum mlx4_module_id {
+	MLX4_MODULE_ID_SFP              = 0x3,
+	MLX4_MODULE_ID_QSFP             = 0xC,
+	MLX4_MODULE_ID_QSFP_PLUS        = 0xD,
+	MLX4_MODULE_ID_QSFP28           = 0x11,
+};
+
 static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 {
 	return (major << 32) | (minor << 16) | subminor;
@@ -799,6 +809,26 @@
 	u64			si_guid;
 };
 
+#define MAD_IFC_DATA_SZ 192
+/* MAD IFC Mailbox */
+struct mlx4_mad_ifc {
+	u8	base_version;
+	u8	mgmt_class;
+	u8	class_version;
+	u8	method;
+	__be16	status;
+	__be16	class_specific;
+	__be64	tid;
+	__be16	attr_id;
+	__be16	resv;
+	__be32	attr_mod;
+	__be64	mkey;
+	__be16	dr_slid;
+	__be16	dr_dlid;
+	u8	reserved[28];
+	u8	data[MAD_IFC_DATA_SZ];
+} __packed;
+
 #define mlx4_foreach_port(port, dev, type)				\
 	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
 		if ((type) == (dev)->caps.port_mask[(port)])
@@ -1283,10 +1313,50 @@
 			    u64 iova, u64 size, int npages,
 			    int page_shift, struct mlx4_mpt_entry *mpt_entry);
 
+int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
+			 u16 offset, u16 size, u8 *data);
+
 /* Returns true if running in low memory profile (kdump kernel) */
 static inline bool mlx4_low_memory_profile(void)
 {
 	return is_kdump_kernel();
 }
 
+/* ACCESS REG commands */
+enum mlx4_access_reg_method {
+	MLX4_ACCESS_REG_QUERY = 0x1,
+	MLX4_ACCESS_REG_WRITE = 0x2,
+};
+
+/* ACCESS PTYS Reg command */
+enum mlx4_ptys_proto {
+	MLX4_PTYS_IB = 1<<0,
+	MLX4_PTYS_EN = 1<<2,
+};
+
+struct mlx4_ptys_reg {
+	u8 resrvd1;
+	u8 local_port;
+	u8 resrvd2;
+	u8 proto_mask;
+	__be32 resrvd3[2];
+	__be32 eth_proto_cap;
+	__be16 ib_width_cap;
+	__be16 ib_speed_cap;
+	__be32 resrvd4;
+	__be32 eth_proto_admin;
+	__be16 ib_width_admin;
+	__be16 ib_speed_admin;
+	__be32 resrvd5;
+	__be32 eth_proto_oper;
+	__be16 ib_width_oper;
+	__be16 ib_speed_oper;
+	__be32 resrvd6;
+	__be32 eth_proto_lp_adv;
+} __packed;
+
+int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
+			 enum mlx4_access_reg_method method,
+			 struct mlx4_ptys_reg *ptys_reg);
+
 #endif /* MLX4_DEVICE_H */
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index dcfdecb..8e30685 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -47,9 +47,9 @@
 	NETIF_F_GSO_SIT_BIT,		/* ... SIT tunnel with TSO */
 	NETIF_F_GSO_UDP_TUNNEL_BIT,	/* ... UDP TUNNEL with TSO */
 	NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
-	NETIF_F_GSO_MPLS_BIT,		/* ... MPLS segmentation */
+	NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
 	/**/NETIF_F_GSO_LAST =		/* last bit, see GSO_MASK */
-		NETIF_F_GSO_MPLS_BIT,
+		NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
 
 	NETIF_F_FCOE_CRC_BIT,		/* FCoE CRC32 */
 	NETIF_F_SCTP_CSUM_BIT,		/* SCTP checksum offload */
@@ -118,7 +118,7 @@
 #define NETIF_F_GSO_SIT		__NETIF_F(GSO_SIT)
 #define NETIF_F_GSO_UDP_TUNNEL	__NETIF_F(GSO_UDP_TUNNEL)
 #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
-#define NETIF_F_GSO_MPLS	__NETIF_F(GSO_MPLS)
+#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX	__NETIF_F(HW_VLAN_STAG_RX)
 #define NETIF_F_HW_VLAN_STAG_TX	__NETIF_F(HW_VLAN_STAG_TX)
@@ -181,7 +181,6 @@
 				 NETIF_F_GSO_IPIP |			\
 				 NETIF_F_GSO_SIT |			\
 				 NETIF_F_GSO_UDP_TUNNEL |		\
-				 NETIF_F_GSO_UDP_TUNNEL_CSUM |		\
-				 NETIF_F_GSO_MPLS)
+				 NETIF_F_GSO_UDP_TUNNEL_CSUM)
 
 #endif	/* _LINUX_NETDEV_FEATURES_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 74fd5d3..90ac959 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -386,6 +386,7 @@
 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
 
 void __napi_schedule(struct napi_struct *n);
+void __napi_schedule_irqoff(struct napi_struct *n);
 
 static inline bool napi_disable_pending(struct napi_struct *n)
 {
@@ -420,6 +421,18 @@
 		__napi_schedule(n);
 }
 
+/**
+ *	napi_schedule_irqoff - schedule NAPI poll
+ *	@n: napi context
+ *
+ * Variant of napi_schedule(), assuming hard irqs are masked.
+ */
+static inline void napi_schedule_irqoff(struct napi_struct *n)
+{
+	if (napi_schedule_prep(n))
+		__napi_schedule_irqoff(n);
+}
+
 /* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
 static inline bool napi_reschedule(struct napi_struct *napi)
 {
@@ -2316,10 +2329,7 @@
  * Incoming packets are placed on per-cpu queues
  */
 struct softnet_data {
-	struct Qdisc		*output_queue;
-	struct Qdisc		**output_queue_tailp;
 	struct list_head	poll_list;
-	struct sk_buff		*completion_queue;
 	struct sk_buff_head	process_queue;
 
 	/* stats */
@@ -2327,10 +2337,17 @@
 	unsigned int		time_squeeze;
 	unsigned int		cpu_collision;
 	unsigned int		received_rps;
-
 #ifdef CONFIG_RPS
 	struct softnet_data	*rps_ipi_list;
+#endif
+#ifdef CONFIG_NET_FLOW_LIMIT
+	struct sd_flow_limit __rcu *flow_limit;
+#endif
+	struct Qdisc		*output_queue;
+	struct Qdisc		**output_queue_tailp;
+	struct sk_buff		*completion_queue;
 
+#ifdef CONFIG_RPS
 	/* Elements below can be accessed between CPUs for RPS */
 	struct call_single_data	csd ____cacheline_aligned_in_smp;
 	struct softnet_data	*rps_ipi_next;
@@ -2342,9 +2359,6 @@
 	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
 
-#ifdef CONFIG_NET_FLOW_LIMIT
-	struct sd_flow_limit __rcu *flow_limit;
-#endif
 };
 
 static inline void input_queue_head_incr(struct softnet_data *sd)
@@ -3569,7 +3583,7 @@
 	BUILD_BUG_ON(SKB_GSO_SIT     != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
-	BUILD_BUG_ON(SKB_GSO_MPLS    != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
 
 	return (features & feature) == feature;
 }
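
__napi_schedule_irqoff() and its napi_schedule_irqoff() wrapper are for
callers that already run with hard interrupts masked, typically a device
interrupt handler, letting them skip the local_irq_save()/restore() pair in
__napi_schedule(). A hedged sketch of the intended call site (driver and
struct names are invented):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_rx_ring {
	struct napi_struct napi;
	/* ... rx descriptors, stats, ... */
};

static irqreturn_t foo_msix_handler(int irq, void *data)
{
	struct foo_rx_ring *ring = data;

	/* MSI-X handlers run with hard irqs masked, so the cheaper
	 * irqoff variant is safe here
	 */
	napi_schedule_irqoff(&ring->napi);
	return IRQ_HANDLED;
}
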
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h
index 18d75e7..e1ab6e8 100644
--- a/include/linux/pxa168_eth.h
+++ b/include/linux/pxa168_eth.h
@@ -4,6 +4,8 @@
 #ifndef __LINUX_PXA168_ETH_H
 #define __LINUX_PXA168_ETH_H
 
+#include <linux/phy.h>
+
 struct pxa168_eth_platform_data {
 	int	port_number;
 	int	phy_addr;
@@ -13,6 +15,7 @@
 	 */
 	int	speed;		/* 0, SPEED_10, SPEED_100 */
 	int	duplex;		/* DUPLEX_HALF or DUPLEX_FULL */
+	phy_interface_t intf;
 
 	/*
 	 * Override default RX/TX queue sizes if nonzero.
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6c8b6f6..53f4f6c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -20,6 +20,8 @@
 #include <linux/time.h>
 #include <linux/bug.h>
 #include <linux/cache.h>
+#include <linux/rbtree.h>
+#include <linux/socket.h>
 
 #include <linux/atomic.h>
 #include <asm/types.h>
@@ -370,8 +372,7 @@
 
 	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
 
-	SKB_GSO_MPLS = 1 << 12,
-
+	SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
 };
 
 #if BITS_PER_LONG > 32
@@ -440,6 +441,7 @@
  *	@next: Next buffer in list
  *	@prev: Previous buffer in list
  *	@tstamp: Time we arrived/left
+ *	@rbnode: RB tree node, alternative to next/prev for netem/tcp
  *	@sk: Socket we are owned by
  *	@dev: Device we arrived on/are leaving by
  *	@cb: Control buffer. Free for use by every layer. Put private vars here
@@ -504,15 +506,19 @@
  */
 
 struct sk_buff {
-	/* These two members must be first. */
-	struct sk_buff		*next;
-	struct sk_buff		*prev;
-
 	union {
-		ktime_t		tstamp;
-		struct skb_mstamp skb_mstamp;
-	};
+		struct {
+			/* These two members must be first. */
+			struct sk_buff		*next;
+			struct sk_buff		*prev;
 
+			union {
+				ktime_t		tstamp;
+				struct skb_mstamp skb_mstamp;
+			};
+		};
+		struct rb_node	rbnode; /* used in netem & tcp stack */
+	};
 	struct sock		*sk;
 	struct net_device	*dev;
 
@@ -597,7 +603,8 @@
 #endif
 	__u8			ipvs_property:1;
 	__u8			inner_protocol_type:1;
-	/* 4 or 6 bit hole */
+	__u8			remcsum_offload:1;
+	/* 3 or 5 bit hole */
 
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
@@ -2631,6 +2638,11 @@
 			   struct poll_table_struct *wait);
 int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
 			    struct iovec *to, int size);
+static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
+					struct msghdr *msg, int size)
+{
+	return skb_copy_datagram_iovec(from, offset, msg->msg_iov, size);
+}
 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
 				     struct iovec *iov);
 int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
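
skb_copy_datagram_msg() is a thin wrapper so recvmsg handlers can hand over
the whole struct msghdr instead of reaching into msg->msg_iov themselves; the
call-site conversions later in this patch (AppleTalk, ATM, AX.25, Bluetooth,
CAIF, ...) are all the same one-line change:

/* before: every caller extracted the iovec by hand */
err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);

/* after: the extraction is hidden behind the helper */
err = skb_copy_datagram_msg(skb, offset, msg, copied);
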
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index c2dee7d..f566b85 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -204,10 +204,10 @@
 
 	u16	urg_data;	/* Saved octet of OOB data and control flags */
 	u8	ecn_flags;	/* ECN status bits.			*/
-	u8	reordering;	/* Packet reordering metric.		*/
+	u8	keepalive_probes; /* num of allowed keep alive probes	*/
+	u32	reordering;	/* Packet reordering metric.		*/
 	u32	snd_up;		/* Urgent pointer		*/
 
-	u8	keepalive_probes; /* num of allowed keep alive probes	*/
 /*
  *      Options received (usually on last packet, some only on SYN packets).
  */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index b765592..ed3c34b 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -38,6 +38,9 @@
 	struct device	*host_dev;
 	int		sw_addr;
 
+	/* set to size of eeprom if supported by the switch */
+	int		eeprom_len;
+
 	/* Device tree node pointer for this specific switch chip
 	 * used during switch setup in case additional properties
 	 * and resources needs to be used
@@ -139,6 +142,14 @@
 	 */
 	struct device		*master_dev;
 
+#ifdef CONFIG_NET_DSA_HWMON
+	/*
+	 * Hardware monitoring information
+	 */
+	char			hwmon_name[IFNAMSIZ + 8];
+	struct device		*hwmon_dev;
+#endif
+
 	/*
 	 * Slave mii_bus and devices for the individual ports.
 	 */
@@ -242,6 +253,28 @@
 			   struct ethtool_eee *e);
 	int	(*get_eee)(struct dsa_switch *ds, int port,
 			   struct ethtool_eee *e);
+
+#ifdef CONFIG_NET_DSA_HWMON
+	/* Hardware monitoring */
+	int	(*get_temp)(struct dsa_switch *ds, int *temp);
+	int	(*get_temp_limit)(struct dsa_switch *ds, int *temp);
+	int	(*set_temp_limit)(struct dsa_switch *ds, int temp);
+	int	(*get_temp_alarm)(struct dsa_switch *ds, bool *alarm);
+#endif
+
+	/* EEPROM access */
+	int	(*get_eeprom_len)(struct dsa_switch *ds);
+	int	(*get_eeprom)(struct dsa_switch *ds,
+			      struct ethtool_eeprom *eeprom, u8 *data);
+	int	(*set_eeprom)(struct dsa_switch *ds,
+			      struct ethtool_eeprom *eeprom, u8 *data);
+
+	/*
+	 * Register access.
+	 */
+	int	(*get_regs_len)(struct dsa_switch *ds, int port);
+	void	(*get_regs)(struct dsa_switch *ds, int port,
+			    struct ethtool_regs *regs, void *p);
 };
 
 void register_switch_driver(struct dsa_switch_driver *type);
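
A switch driver opts into the new hwmon, EEPROM and register-dump support by
filling the corresponding dsa_switch_driver hooks; anything left NULL simply
stays unsupported. A minimal sketch of the EEPROM-length hook (driver name and
the fixed size are illustrative):

#include <net/dsa.h>

static int foo_get_eeprom_len(struct dsa_switch *ds)
{
	/* a fixed-size part; a switch that cannot detect its EEPROM
	 * could instead be told the size via the new eeprom-length
	 * device tree property
	 */
	return 512;
}

static struct dsa_switch_driver foo_switch_driver = {
	/* ... existing setup/phy ops ... */
	.get_eeprom_len	= foo_get_eeprom_len,
};
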
diff --git a/include/net/fou.h b/include/net/fou.h
new file mode 100644
index 0000000..25b26ff
--- /dev/null
+++ b/include/net/fou.h
@@ -0,0 +1,38 @@
+#ifndef __NET_FOU_H
+#define __NET_FOU_H
+
+#include <linux/skbuff.h>
+
+#include <net/flow.h>
+#include <net/gue.h>
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+
+int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+		     u8 *protocol, struct flowi4 *fl4);
+int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+		     u8 *protocol, struct flowi4 *fl4);
+
+static size_t fou_encap_hlen(struct ip_tunnel_encap *e)
+{
+	return sizeof(struct udphdr);
+}
+
+static size_t gue_encap_hlen(struct ip_tunnel_encap *e)
+{
+	size_t len;
+	bool need_priv = false;
+
+	len = sizeof(struct udphdr) + sizeof(struct guehdr);
+
+	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
+		len += GUE_PLEN_REMCSUM;
+		need_priv = true;
+	}
+
+	len += need_priv ? GUE_LEN_PRIV : 0;
+
+	return len;
+}
+
+#endif
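
These helpers let the tunnel path reserve encapsulation headroom up front:
plain FOU costs one UDP header, while GUE adds its own header plus optional
fields. Working through gue_encap_hlen() for the remote-checksum case, using
the GUE lengths (GUE_PLEN_REMCSUM, GUE_LEN_PRIV) from include/net/gue.h,
extended just below:

/* gue_encap_hlen() with TUNNEL_ENCAP_FLAG_REMCSUM set:
 *
 *   sizeof(struct udphdr)    8
 * + sizeof(struct guehdr)    4
 * + GUE_PLEN_REMCSUM         4   (remote-checksum option field)
 * + GUE_LEN_PRIV             4   (private-flags word)
 *   ---------------------------
 *                           20   bytes of extra headroom
 */
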
diff --git a/include/net/gue.h b/include/net/gue.h
index b6c3327..3f28ec7 100644
--- a/include/net/gue.h
+++ b/include/net/gue.h
@@ -1,23 +1,116 @@
 #ifndef __NET_GUE_H
 #define __NET_GUE_H
 
+/* Definitions for the GUE header, standard and private flags, lengths
+ * of optional fields are below.
+ *
+ * Diagram of GUE header:
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Ver|C|  Hlen   | Proto/ctype   |        Standard flags       |P|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                                                               |
+ * ~                      Fields (optional)                        ~
+ * |                                                               |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |            Private flags (optional, P bit is set)             |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                                                               |
+ * ~                   Private fields (optional)                   ~
+ * |                                                               |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * C bit indicates a control message when set, a data message when unset.
+ * For a control message, proto/ctype is interpreted as a type of
+ * control message. For data messages, proto/ctype is the IP protocol
+ * of the next header.
+ *
+ * P bit indicates private flags field is present. The private flags
+ * may refer to options placed after this field.
+ */
+
 struct guehdr {
 	union {
 		struct {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
-			__u8	hlen:4,
-			version:4;
+			__u8	hlen:5,
+				control:1,
+				version:2;
 #elif defined (__BIG_ENDIAN_BITFIELD)
-			__u8	version:4,
-				hlen:4;
+			__u8	version:2,
+				control:1,
+				hlen:5;
 #else
 #error  "Please fix <asm/byteorder.h>"
 #endif
-			__u8    next_hdr;
+			__u8    proto_ctype;
 			__u16   flags;
 		};
 		__u32 word;
 	};
 };
 
+/* Standard flags in GUE header */
+
+#define GUE_FLAG_PRIV	htons(1<<0)	/* Private flags are in options */
+#define GUE_LEN_PRIV	4
+
+#define GUE_FLAGS_ALL	(GUE_FLAG_PRIV)
+
+/* Private flags in the private option extension */
+
+#define GUE_PFLAG_REMCSUM	htonl(1 << 31)
+#define GUE_PLEN_REMCSUM	4
+
+#define GUE_PFLAGS_ALL	(GUE_PFLAG_REMCSUM)
+
+/* Functions to compute options length corresponding to flags.
+ * If we ever have a lot of flags, this can potentially be
+ * converted to a more optimized algorithm (table lookup
+ * for instance).
+ */
+static inline size_t guehdr_flags_len(__be16 flags)
+{
+	return ((flags & GUE_FLAG_PRIV) ? GUE_LEN_PRIV : 0);
+}
+
+static inline size_t guehdr_priv_flags_len(__be32 flags)
+{
+	return 0;
+}
+
+/* Validate standard and private flags. Returns non-zero (meaning invalid)
+ * if there are unknown standard or private flags, or if the options length
+ * for the flags exceeds the options length specified in hlen of the GUE header.
+ */
+static inline int validate_gue_flags(struct guehdr *guehdr,
+				     size_t optlen)
+{
+	size_t len;
+	__be32 flags = guehdr->flags;
+
+	if (flags & ~GUE_FLAGS_ALL)
+		return 1;
+
+	len = guehdr_flags_len(flags);
+	if (len > optlen)
+		return 1;
+
+	if (flags & GUE_FLAG_PRIV) {
+		/* Private flags are the last four bytes accounted for
+		 * in guehdr_flags_len
+		 */
+		flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV);
+
+		if (flags & ~GUE_PFLAGS_ALL)
+			return 1;
+
+		len += guehdr_priv_flags_len(flags);
+		if (len > optlen)
+			return 1;
+	}
+
+	return 0;
+}
+
 #endif
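
On receive, a GUE consumer derives the option length from hlen (which counts
32-bit words) and rejects any header carrying flags it does not understand.
A hedged sketch of that check, with the skb pulling elided:

#include <linux/errno.h>
#include <net/gue.h>

static int gue_check_hdr(struct guehdr *guehdr)
{
	/* hlen counts 32-bit words of optional fields */
	size_t optlen = guehdr->hlen << 2;

	if (guehdr->version != 0)
		return -EINVAL;

	/* non-zero means an unknown flag, or options that overrun
	 * the space hlen describes
	 */
	if (validate_gue_flags(guehdr, optlen))
		return -EINVAL;

	return 0;
}
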
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index d1d2728..9201afe0 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -99,4 +99,14 @@
 			  const struct in6_addr *daddr, const __be16 dport,
 			  const int dif);
 #endif /* IS_ENABLED(CONFIG_IPV6) */
+
+#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif)	\
+	(((__sk)->sk_portpair == (__ports))			&&	\
+	 ((__sk)->sk_family == AF_INET6)			&&	\
+	 ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr))		&&	\
+	 ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr))	&&	\
+	 (!(__sk)->sk_bound_dev_if	||				\
+	   ((__sk)->sk_bound_dev_if == (__dif))) 		&&	\
+	 net_eq(sock_net(__sk), (__net)))
+
 #endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index a5593da..9326c41 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -65,7 +65,8 @@
 void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
 int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
 		const struct in6_addr *raddr);
-int ip6_tnl_xmit_ctl(struct ip6_tnl *t);
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
+		     const struct in6_addr *raddr);
 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
 __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
 			     const struct in6_addr *raddr);
diff --git a/include/net/ipx.h b/include/net/ipx.h
index 0143180..320f47b 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -42,6 +42,9 @@
 	struct ipx_address	ipx_source __packed;
 };
 
+/* From af_ipx.c */
+extern int sysctl_ipx_pprop_broadcasting;
+
 static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb)
 {
 	return (struct ipxhdr *)skb_transport_header(skb);
diff --git a/include/net/mpls.h b/include/net/mpls.h
new file mode 100644
index 0000000..5b3b5ad
--- /dev/null
+++ b/include/net/mpls.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _NET_MPLS_H
+#define _NET_MPLS_H 1
+
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#define MPLS_HLEN 4
+
+static inline bool eth_p_mpls(__be16 eth_type)
+{
+	return eth_type == htons(ETH_P_MPLS_UC) ||
+		eth_type == htons(ETH_P_MPLS_MC);
+}
+
+/*
+ * For non-MPLS skbs this will correspond to the network header.
+ * For MPLS skbs it will be before the network_header as the MPLS
+ * label stack lies between the end of the mac header and the network
+ * header. That is, for MPLS skbs the end of the mac header
+ * is the top of the MPLS label stack.
+ */
+static inline unsigned char *skb_mpls_header(struct sk_buff *skb)
+{
+	return skb_mac_header(skb) + skb->mac_len;
+}
+#endif
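
Because skb->mac_len grows as labels are pushed, skb_mpls_header() always
lands on the top of the label stack, whatever its depth. A short sketch of
reading the topmost 4-byte (MPLS_HLEN) label stack entry with it:

#include <net/mpls.h>

static __be32 mpls_top_lse(struct sk_buff *skb)
{
	/* the first MPLS_HLEN bytes past the mac header */
	return *(__be32 *)skb_mpls_header(skb);
}
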
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index f60558d..dedfb18 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -69,7 +69,7 @@
 	struct net *net;
 #endif
 	struct net_device *dev;
-	struct neigh_parms *next;
+	struct list_head list;
 	int	(*neigh_setup)(struct neighbour *);
 	void	(*neigh_cleanup)(struct neighbour *);
 	struct neigh_table *tbl;
@@ -203,6 +203,7 @@
 	void			(*proxy_redo)(struct sk_buff *skb);
 	char			*id;
 	struct neigh_parms	parms;
+	struct list_head	parms_list;
 	int			gc_interval;
 	int			gc_thresh1;
 	int			gc_thresh2;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 7b903e1..6415835 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -1185,4 +1185,14 @@
 #define nla_for_each_nested(pos, nla, rem) \
 	nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem)
 
+/**
+ * nla_is_last - Test if attribute is last in stream
+ * @nla: attribute to test
+ * @rem: bytes remaining in stream
+ */
+static inline bool nla_is_last(const struct nlattr *nla, int rem)
+{
+	return nla->nla_len == rem;
+}
+
 #endif
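
nla_is_last() is true exactly when the attribute's own length equals the
bytes still remaining, i.e. nothing follows it in the stream. A hedged sketch
of using it inside a nested-attribute walk (the in-place optimisation in the
comment is only an example of why a caller might care):

#include <net/netlink.h>

static void walk_nested(const struct nlattr *attr)
{
	const struct nlattr *a;
	int rem;

	nla_for_each_nested(a, attr, rem) {
		if (nla_is_last(a, rem)) {
			/* nothing follows: the attribute could be
			 * consumed in place instead of being copied
			 */
		}
	}
}
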
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 4ff3f67..806e3b5 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1116,7 +1116,6 @@
 sctp_scope_t sctp_scope(const union sctp_addr *);
 int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope);
 int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
-int sctp_addr_is_valid(const union sctp_addr *addr);
 int sctp_is_ep_boundall(struct sock *sk);
 
 
diff --git a/include/net/sock.h b/include/net/sock.h
index 7db3db1..6767d75 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2280,9 +2280,6 @@
  *	Enable debug/info messages
  */
 extern int net_msg_warn;
-#define NETDEBUG(fmt, args...) \
-	do { if (net_msg_warn) printk(fmt,##args); } while (0)
-
 #define LIMIT_NETDEBUG(fmt, args...) \
 	do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)
 
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4062b4f..f50f29faf 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -55,9 +55,9 @@
 #define MAX_TCP_HEADER	(128 + MAX_HEADER)
 #define MAX_TCP_OPTION_SPACE 40
 
-/* 
+/*
  * Never offer a window over 32767 without using window scaling. Some
- * poor stacks do signed 16bit maths! 
+ * poor stacks do signed 16bit maths!
  */
 #define MAX_TCP_WINDOW		32767U
 
@@ -70,9 +70,6 @@
 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
 #define TCP_FASTRETRANS_THRESH 3
 
-/* Maximal reordering. */
-#define TCP_MAX_REORDERING	127
-
 /* Maximal number of ACKs sent quickly to accelerate slow-start. */
 #define TCP_MAX_QUICKACKS	16U
 
@@ -167,7 +164,7 @@
 /*
  *	TCP option
  */
- 
+
 #define TCPOPT_NOP		1	/* Padding */
 #define TCPOPT_EOL		0	/* End of options */
 #define TCPOPT_MSS		2	/* Segment size negotiating */
@@ -252,6 +249,7 @@
 extern int sysctl_tcp_max_orphans;
 extern int sysctl_tcp_fack;
 extern int sysctl_tcp_reordering;
+extern int sysctl_tcp_max_reordering;
 extern int sysctl_tcp_dsack;
 extern long sysctl_tcp_mem[3];
 extern int sysctl_tcp_wmem[3];
@@ -492,17 +490,16 @@
 			      u16 *mssp);
 __u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
 			      __u16 *mss);
-#endif
-
 __u32 cookie_init_timestamp(struct request_sock *req);
-bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net,
-			    bool *ecn_ok);
+bool cookie_timestamp_decode(struct tcp_options_received *opt);
+bool cookie_ecn_ok(const struct tcp_options_received *opt,
+		   const struct net *net, const struct dst_entry *dst);
 
 /* From net/ipv6/syncookies.c */
 int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
 		      u32 cookie);
 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
-#ifdef CONFIG_SYN_COOKIES
+
 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 			      const struct tcphdr *th, u16 *mssp);
 __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
@@ -1104,16 +1101,16 @@
 		space - (space>>sysctl_tcp_adv_win_scale);
 }
 
-/* Note: caller must be prepared to deal with negative returns */ 
+/* Note: caller must be prepared to deal with negative returns */
 static inline int tcp_space(const struct sock *sk)
 {
 	return tcp_win_from_space(sk->sk_rcvbuf -
 				  atomic_read(&sk->sk_rmem_alloc));
-} 
+}
 
 static inline int tcp_full_space(const struct sock *sk)
 {
-	return tcp_win_from_space(sk->sk_rcvbuf); 
+	return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
 static inline void tcp_openreq_init(struct request_sock *req,
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 99b4305..eb2095b 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1213,6 +1213,10 @@
 #define SUPPORTED_40000baseCR4_Full	(1 << 24)
 #define SUPPORTED_40000baseSR4_Full	(1 << 25)
 #define SUPPORTED_40000baseLR4_Full	(1 << 26)
+#define SUPPORTED_56000baseKR4_Full	(1 << 27)
+#define SUPPORTED_56000baseCR4_Full	(1 << 28)
+#define SUPPORTED_56000baseSR4_Full	(1 << 29)
+#define SUPPORTED_56000baseLR4_Full	(1 << 30)
 
 #define ADVERTISED_10baseT_Half		(1 << 0)
 #define ADVERTISED_10baseT_Full		(1 << 1)
@@ -1241,6 +1245,10 @@
 #define ADVERTISED_40000baseCR4_Full	(1 << 24)
 #define ADVERTISED_40000baseSR4_Full	(1 << 25)
 #define ADVERTISED_40000baseLR4_Full	(1 << 26)
+#define ADVERTISED_56000baseKR4_Full	(1 << 27)
+#define ADVERTISED_56000baseCR4_Full	(1 << 28)
+#define ADVERTISED_56000baseSR4_Full	(1 << 29)
+#define ADVERTISED_56000baseLR4_Full	(1 << 30)
 
 /* The following are all involved in forcing a particular link
  * mode for the device for setting things.  When getting the
@@ -1248,12 +1256,16 @@
  * it was forced up into this mode or autonegotiated.
  */
 
-/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */
+/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */
 #define SPEED_10		10
 #define SPEED_100		100
 #define SPEED_1000		1000
 #define SPEED_2500		2500
 #define SPEED_10000		10000
+#define SPEED_20000		20000
+#define SPEED_40000		40000
+#define SPEED_56000		56000
+
 #define SPEED_UNKNOWN		-1
 
 /* Duplex, half or full. */
@@ -1343,6 +1355,10 @@
 #define ETH_MODULE_SFF_8079_LEN		256
 #define ETH_MODULE_SFF_8472		0x2
 #define ETH_MODULE_SFF_8472_LEN		512
+#define ETH_MODULE_SFF_8636		0x3
+#define ETH_MODULE_SFF_8636_LEN		256
+#define ETH_MODULE_SFF_8436		0x4
+#define ETH_MODULE_SFF_8436_LEN		256
 
 /* Reset flags */
 /* The reset() operation must clear the flags for the components which
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 0bdb77e..7072d83 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -243,6 +243,7 @@
 	IFLA_BRPORT_FAST_LEAVE,	/* multicast fast leave    */
 	IFLA_BRPORT_LEARNING,	/* mac learning */
 	IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */
+	IFLA_BRPORT_PROXYARP,	/* proxy ARP */
 	__IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 280d9e0..bd3cc11 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -69,6 +69,7 @@
 
 #define TUNNEL_ENCAP_FLAG_CSUM		(1<<0)
 #define TUNNEL_ENCAP_FLAG_CSUM6		(1<<1)
+#define TUNNEL_ENCAP_FLAG_REMCSUM	(1<<2)
 
 /* SIT-mode i_flags */
 #define	SIT_ISATAP	0x0001
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index efa2666..e863d08 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -164,6 +164,7 @@
 	DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
 	DEVCONF_SUPPRESS_FRAG_NDISC,
 	DEVCONF_ACCEPT_RA_FROM_LOCAL,
+	DEVCONF_USE_OPTIMISTIC,
 	DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 435eabc..26c36c4 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -293,6 +293,9 @@
 	OVS_KEY_ATTR_DP_HASH,      /* u32 hash value. Value 0 indicates the hash
 				   is not computed by the datapath. */
 	OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */
+	OVS_KEY_ATTR_MPLS,      /* array of struct ovs_key_mpls.
+				 * The implementation may restrict
+				 * the accepted length of the array. */
 
 #ifdef __KERNEL__
 	OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ovs_tunnel_info */
@@ -340,6 +343,10 @@
 	__u8	 eth_dst[ETH_ALEN];
 };
 
+struct ovs_key_mpls {
+	__be32 mpls_lse;
+};
+
 struct ovs_key_ipv4 {
 	__be32 ipv4_src;
 	__be32 ipv4_dst;
@@ -393,9 +400,9 @@
 };
 
 struct ovs_key_nd {
-	__u32 nd_target[4];
-	__u8  nd_sll[ETH_ALEN];
-	__u8  nd_tll[ETH_ALEN];
+	__be32	nd_target[4];
+	__u8	nd_sll[ETH_ALEN];
+	__u8	nd_tll[ETH_ALEN];
 };
 
 /**
@@ -484,6 +491,19 @@
 #define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
 
 /**
+ * struct ovs_action_push_mpls - %OVS_ACTION_ATTR_PUSH_MPLS action argument.
+ * @mpls_lse: MPLS label stack entry to push.
+ * @mpls_ethertype: Ethertype to set in the encapsulating ethernet frame.
+ *
+ * The only values @mpls_ethertype should ever be given are %ETH_P_MPLS_UC and
+ * %ETH_P_MPLS_MC, indicating MPLS unicast or multicast. Others are rejected.
+ */
+struct ovs_action_push_mpls {
+	__be32 mpls_lse;
+	__be16 mpls_ethertype; /* Either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC */
+};
+
+/**
  * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument.
  * @vlan_tpid: Tag protocol identifier (TPID) to push.
  * @vlan_tci: Tag control identifier (TCI) to push.  The CFI bit must be set
@@ -534,6 +554,15 @@
  * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
  * @OVS_ACTION_ATTR_SAMPLE: Probabilitically executes actions, as specified in
  * the nested %OVS_SAMPLE_ATTR_* attributes.
+ * @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the
+ * top of the packet's MPLS label stack.  Set the ethertype of the
+ * encapsulating frame to either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC to
+ * indicate the new packet contents.
+ * @OVS_ACTION_ATTR_POP_MPLS: Pop an MPLS label stack entry off of the
+ * packet's MPLS label stack.  Set the encapsulating frame's ethertype to
+ * indicate the new packet contents. This could potentially still be
+ * %ETH_P_MPLS if the resulting MPLS label stack is not empty.  If there
+ * is no MPLS label stack, as determined by ethertype, no action is taken.
  *
  * Only a single header can be set with a single %OVS_ACTION_ATTR_SET.  Not all
  * fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -550,6 +579,9 @@
 	OVS_ACTION_ATTR_SAMPLE,       /* Nested OVS_SAMPLE_ATTR_*. */
 	OVS_ACTION_ATTR_RECIRC,       /* u32 recirc_id. */
 	OVS_ACTION_ATTR_HASH,	      /* struct ovs_action_hash. */
+	OVS_ACTION_ATTR_PUSH_MPLS,    /* struct ovs_action_push_mpls. */
+	OVS_ACTION_ATTR_POP_MPLS,     /* __be16 ethertype. */
+
 	__OVS_ACTION_ATTR_MAX
 };
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9f81818..b6a1f7c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -153,22 +153,19 @@
 
 enum bpf_stack_slot_type {
 	STACK_INVALID,    /* nothing was stored in this stack slot */
-	STACK_SPILL,      /* 1st byte of register spilled into stack */
-	STACK_SPILL_PART, /* other 7 bytes of register spill */
+	STACK_SPILL,      /* register spilled into stack */
 	STACK_MISC	  /* BPF program wrote some data into this slot */
 };
 
-struct bpf_stack_slot {
-	enum bpf_stack_slot_type stype;
-	struct reg_state reg_st;
-};
+#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
 
 /* state of the program:
  * type of all registers and stack info
  */
 struct verifier_state {
 	struct reg_state regs[MAX_BPF_REG];
-	struct bpf_stack_slot stack[MAX_BPF_STACK];
+	u8 stack_slot_type[MAX_BPF_STACK];
+	struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
 };
 
 /* linked list of verifier states used to prune search */
@@ -259,10 +256,10 @@
 				env->cur_state.regs[i].map_ptr->key_size,
 				env->cur_state.regs[i].map_ptr->value_size);
 	}
-	for (i = 0; i < MAX_BPF_STACK; i++) {
-		if (env->cur_state.stack[i].stype == STACK_SPILL)
+	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
+		if (env->cur_state.stack_slot_type[i] == STACK_SPILL)
 			verbose(" fp%d=%s", -MAX_BPF_STACK + i,
-				reg_type_str[env->cur_state.stack[i].reg_st.type]);
+				reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]);
 	}
 	verbose("\n");
 }
@@ -539,8 +536,10 @@
 static int check_stack_write(struct verifier_state *state, int off, int size,
 			     int value_regno)
 {
-	struct bpf_stack_slot *slot;
 	int i;
+	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
+	 * so it's aligned access and [off, off + size) are within stack limits
+	 */
 
 	if (value_regno >= 0 &&
 	    (state->regs[value_regno].type == PTR_TO_MAP_VALUE ||
@@ -548,30 +547,24 @@
 	     state->regs[value_regno].type == PTR_TO_CTX)) {
 
 		/* register containing pointer is being spilled into stack */
-		if (size != 8) {
+		if (size != BPF_REG_SIZE) {
 			verbose("invalid size of register spill\n");
 			return -EACCES;
 		}
 
-		slot = &state->stack[MAX_BPF_STACK + off];
-		slot->stype = STACK_SPILL;
 		/* save register state */
-		slot->reg_st = state->regs[value_regno];
-		for (i = 1; i < 8; i++) {
-			slot = &state->stack[MAX_BPF_STACK + off + i];
-			slot->stype = STACK_SPILL_PART;
-			slot->reg_st.type = UNKNOWN_VALUE;
-			slot->reg_st.map_ptr = NULL;
-		}
-	} else {
+		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
+			state->regs[value_regno];
 
+		for (i = 0; i < BPF_REG_SIZE; i++)
+			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
+	} else {
 		/* regular write of data into stack */
-		for (i = 0; i < size; i++) {
-			slot = &state->stack[MAX_BPF_STACK + off + i];
-			slot->stype = STACK_MISC;
-			slot->reg_st.type = UNKNOWN_VALUE;
-			slot->reg_st.map_ptr = NULL;
-		}
+		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
+			(struct reg_state) {};
+
+		for (i = 0; i < size; i++)
+			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
 	}
 	return 0;
 }
@@ -579,19 +572,18 @@
 static int check_stack_read(struct verifier_state *state, int off, int size,
 			    int value_regno)
 {
+	u8 *slot_type;
 	int i;
-	struct bpf_stack_slot *slot;
 
-	slot = &state->stack[MAX_BPF_STACK + off];
+	slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];
 
-	if (slot->stype == STACK_SPILL) {
-		if (size != 8) {
+	if (slot_type[0] == STACK_SPILL) {
+		if (size != BPF_REG_SIZE) {
 			verbose("invalid size of register spill\n");
 			return -EACCES;
 		}
-		for (i = 1; i < 8; i++) {
-			if (state->stack[MAX_BPF_STACK + off + i].stype !=
-			    STACK_SPILL_PART) {
+		for (i = 1; i < BPF_REG_SIZE; i++) {
+			if (slot_type[i] != STACK_SPILL) {
 				verbose("corrupted spill memory\n");
 				return -EACCES;
 			}
@@ -599,12 +591,12 @@
 
 		if (value_regno >= 0)
 			/* restore register state from stack */
-			state->regs[value_regno] = slot->reg_st;
+			state->regs[value_regno] =
+				state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
 		return 0;
 	} else {
 		for (i = 0; i < size; i++) {
-			if (state->stack[MAX_BPF_STACK + off + i].stype !=
-			    STACK_MISC) {
+			if (slot_type[i] != STACK_MISC) {
 				verbose("invalid read from stack off %d+%d size %d\n",
 					off, i, size);
 				return -EACCES;
@@ -747,7 +739,7 @@
 	}
 
 	for (i = 0; i < access_size; i++) {
-		if (state->stack[MAX_BPF_STACK + off + i].stype != STACK_MISC) {
+		if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
 			verbose("invalid indirect read from stack off %d+%d size %d\n",
 				off, i, access_size);
 			return -EACCES;
@@ -1417,12 +1409,33 @@
 	}
 
 	for (i = 0; i < MAX_BPF_STACK; i++) {
-		if (memcmp(&old->stack[i], &cur->stack[i],
-			   sizeof(old->stack[0])) != 0) {
-			if (old->stack[i].stype == STACK_INVALID)
-				continue;
+		if (old->stack_slot_type[i] == STACK_INVALID)
+			continue;
+		if (old->stack_slot_type[i] != cur->stack_slot_type[i])
+			/* Ex: old explored (safe) state has STACK_SPILL in
+			 * this stack slot, but current has STACK_MISC ->
+			 * these verifier states are not equivalent,
+			 * return false to continue verification of this path
+			 */
 			return false;
-		}
+		if (i % BPF_REG_SIZE)
+			continue;
+		if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
+			   &cur->spilled_regs[i / BPF_REG_SIZE],
+			   sizeof(old->spilled_regs[0])))
+			/* when explored and current stack slot types are
+			 * the same, check that stored pointer types
+			 * are the same as well.
+			 * Ex: explored safe path could have stored
+			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
+			 * but current path has stored:
+			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
+			 * such verifier states are not equivalent.
+			 * return false to continue verification of this path
+			 */
+			return false;
+		else
+			continue;
 	}
 	return true;
 }
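
The refactor splits the old per-byte struct bpf_stack_slot array into two
parallel views: one type byte per stack byte, plus one reg_state per 8-byte
slot. A spill marks all BPF_REG_SIZE bytes STACK_SPILL and stores the register
at index (MAX_BPF_STACK + off) / BPF_REG_SIZE; the verifier has already
ensured off is negative, 8-byte aligned and within the stack, so the index
always names a valid slot. A tiny standalone check of the arithmetic
(MAX_BPF_STACK is 512 in this tree):

#include <assert.h>

#define MAX_BPF_STACK	512
#define BPF_REG_SIZE	8

int main(void)
{
	int off = -8;	/* topmost 8-byte spill slot */

	/* first byte marked STACK_SPILL */
	assert(MAX_BPF_STACK + off == 504);
	/* matching entry in spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE] */
	assert((MAX_BPF_STACK + off) / BPF_REG_SIZE == 63);
	return 0;
}
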
diff --git a/lib/Makefile b/lib/Makefile
index 7512dc9..04e53dd 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o hash.o rhashtable.o
+	 percpu-refcount.o percpu_ida.o rhashtable.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
diff --git a/lib/hash.c b/lib/hash.c
deleted file mode 100644
index fea973f..0000000
--- a/lib/hash.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* General purpose hashing library
- *
- * That's a start of a kernel hashing library, which can be extended
- * with further algorithms in future. arch_fast_hash{2,}() will
- * eventually resolve to an architecture optimized implementation.
- *
- * Copyright 2013 Francesco Fusco <ffusco@redhat.com>
- * Copyright 2013 Daniel Borkmann <dborkman@redhat.com>
- * Copyright 2013 Thomas Graf <tgraf@redhat.com>
- * Licensed under the GNU General Public License, version 2.0 (GPLv2)
- */
-
-#include <linux/jhash.h>
-#include <linux/hash.h>
-#include <linux/cache.h>
-
-static struct fast_hash_ops arch_hash_ops __read_mostly = {
-	.hash  = jhash,
-	.hash2 = jhash2,
-};
-
-u32 arch_fast_hash(const void *data, u32 len, u32 seed)
-{
-	return arch_hash_ops.hash(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash);
-
-u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
-{
-	return arch_hash_ops.hash2(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash2);
-
-static int __init hashlib_init(void)
-{
-	setup_arch_fast_hash(&arch_hash_ops);
-	return 0;
-}
-early_initcall(hashlib_init);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 23e070b..3f167d2 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1756,6 +1756,49 @@
 		{ },
 		{ { 0, 1 } }
 	},
+	{
+		"nmap reduced",
+		.u.insns_int = {
+			BPF_MOV64_REG(R6, R1),
+			BPF_LD_ABS(BPF_H, 12),
+			BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28),
+			BPF_LD_ABS(BPF_H, 12),
+			BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26),
+			BPF_MOV32_IMM(R0, 18),
+			BPF_STX_MEM(BPF_W, R10, R0, -64),
+			BPF_LDX_MEM(BPF_W, R7, R10, -64),
+			BPF_LD_IND(BPF_W, R7, 14),
+			BPF_STX_MEM(BPF_W, R10, R0, -60),
+			BPF_MOV32_IMM(R0, 280971478),
+			BPF_STX_MEM(BPF_W, R10, R0, -56),
+			BPF_LDX_MEM(BPF_W, R7, R10, -56),
+			BPF_LDX_MEM(BPF_W, R0, R10, -60),
+			BPF_ALU32_REG(BPF_SUB, R0, R7),
+			BPF_JMP_IMM(BPF_JNE, R0, 0, 15),
+			BPF_LD_ABS(BPF_H, 12),
+			BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13),
+			BPF_MOV32_IMM(R0, 22),
+			BPF_STX_MEM(BPF_W, R10, R0, -56),
+			BPF_LDX_MEM(BPF_W, R7, R10, -56),
+			BPF_LD_IND(BPF_H, R7, 14),
+			BPF_STX_MEM(BPF_W, R10, R0, -52),
+			BPF_MOV32_IMM(R0, 17366),
+			BPF_STX_MEM(BPF_W, R10, R0, -48),
+			BPF_LDX_MEM(BPF_W, R7, R10, -48),
+			BPF_LDX_MEM(BPF_W, R0, R10, -52),
+			BPF_ALU32_REG(BPF_SUB, R0, R7),
+			BPF_JMP_IMM(BPF_JNE, R0, 0, 2),
+			BPF_MOV32_IMM(R0, 256),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
+		  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		  0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
+		{ { 38, 256 } }
+	},
 };
 
 static struct net_device dev;
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index c00897f..425942d 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1758,7 +1758,7 @@
 		copied = size;
 		msg->msg_flags |= MSG_TRUNC;
 	}
-	err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, offset, msg, copied);
 
 	if (!err && msg->msg_name) {
 		DECLARE_SOCKADDR(struct sockaddr_at *, sat, msg->msg_name);
diff --git a/net/atm/common.c b/net/atm/common.c
index 6a76515..9cd1cca 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -554,7 +554,7 @@
 		msg->msg_flags |= MSG_TRUNC;
 	}
 
-	error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	error = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (error)
 		return error;
 	sock_recv_ts_and_drops(msg, sk, skb);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index c35c3f4..f4f835e 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1634,7 +1634,7 @@
 		msg->msg_flags |= MSG_TRUNC;
 	}
 
-	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	skb_copy_datagram_msg(skb, 0, msg, copied);
 
 	if (msg->msg_name) {
 		ax25_digi digi;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 339c74a..0a7cc56 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -237,7 +237,7 @@
 	}
 
 	skb_reset_transport_header(skb);
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err == 0) {
 		sock_recv_ts_and_drops(msg, sk, skb);
 
@@ -328,7 +328,7 @@
 		}
 
 		chunk = min_t(unsigned int, skb->len, size);
-		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
+		if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
 			skb_queue_head(&sk->sk_receive_queue, skb);
 			if (!copied)
 				copied = -EFAULT;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 115f149..29e1ec7 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -878,7 +878,7 @@
 	}
 
 	skb_reset_transport_header(skb);
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 
 	switch (hci_pi(sk)->channel) {
 	case HCI_CHANNEL_RAW:
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 44cb786..f96933a 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -184,6 +184,11 @@
 		/* Do not flood unicast traffic to ports that turn it off */
 		if (unicast && !(p->flags & BR_FLOOD))
 			continue;
+
+		/* Do not flood to ports that enable proxy ARP */
+		if (p->flags & BR_PROXYARP)
+			continue;
+
 		prev = maybe_deliver(prev, p, skb, __packet_hook);
 		if (IS_ERR(prev))
 			goto out;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 6fd5522..1f1de71 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -16,6 +16,8 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/netfilter_bridge.h>
+#include <linux/neighbour.h>
+#include <net/arp.h>
 #include <linux/export.h>
 #include <linux/rculist.h>
 #include "br_private.h"
@@ -57,6 +59,60 @@
 		       netif_receive_skb);
 }
 
+static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
+			    u16 vid)
+{
+	struct net_device *dev = br->dev;
+	struct neighbour *n;
+	struct arphdr *parp;
+	u8 *arpptr, *sha;
+	__be32 sip, tip;
+
+	if (dev->flags & IFF_NOARP)
+		return;
+
+	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
+		dev->stats.tx_dropped++;
+		return;
+	}
+	parp = arp_hdr(skb);
+
+	if (parp->ar_pro != htons(ETH_P_IP) ||
+	    parp->ar_op != htons(ARPOP_REQUEST) ||
+	    parp->ar_hln != dev->addr_len ||
+	    parp->ar_pln != 4)
+		return;
+
+	arpptr = (u8 *)parp + sizeof(struct arphdr);
+	sha = arpptr;
+	arpptr += dev->addr_len;	/* sha */
+	memcpy(&sip, arpptr, sizeof(sip));
+	arpptr += sizeof(sip);
+	arpptr += dev->addr_len;	/* tha */
+	memcpy(&tip, arpptr, sizeof(tip));
+
+	if (ipv4_is_loopback(tip) ||
+	    ipv4_is_multicast(tip))
+		return;
+
+	n = neigh_lookup(&arp_tbl, &tip, dev);
+	if (n) {
+		struct net_bridge_fdb_entry *f;
+
+		if (!(n->nud_state & NUD_VALID)) {
+			neigh_release(n);
+			return;
+		}
+
+		f = __br_fdb_get(br, n->ha, vid);
+		if (f)
+			arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip,
+				 sha, n->ha, sha);
+
+		neigh_release(n);
+	}
+}
+
 /* note: already called with rcu_read_lock */
 int br_handle_frame_finish(struct sk_buff *skb)
 {
@@ -98,6 +154,10 @@
 	dst = NULL;
 
 	if (is_broadcast_ether_addr(dest)) {
+		if (p->flags & BR_PROXYARP &&
+		    skb->protocol == htons(ETH_P_ARP))
+			br_do_proxy_arp(skb, br, vid);
+
 		skb2 = skb;
 		unicast = false;
 	} else if (is_multicast_ether_addr(dest)) {
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 2ff9706..86c239b 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -60,7 +60,8 @@
 	    nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) ||
 	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
 	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
-	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)))
+	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) ||
+	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)))
 		return -EMSGSIZE;
 
 	return 0;
@@ -332,6 +333,7 @@
 	br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
 	br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
 	br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
+	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
 
 	if (tb[IFLA_BRPORT_COST]) {
 		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 4d783d0..8f3f081 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -172,6 +172,7 @@
 #define BR_FLOOD		0x00000040
 #define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
 #define BR_PROMISC		0x00000080
+#define BR_PROXYARP		0x00000100
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 	struct bridge_mcast_own_query	ip4_own_query;
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index e561cd5..2de5d91 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -170,6 +170,7 @@
 BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK);
 BRPORT_ATTR_FLAG(learning, BR_LEARNING);
 BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD);
+BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP);
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -213,6 +214,7 @@
 	&brport_attr_multicast_router,
 	&brport_attr_multicast_fast_leave,
 #endif
+	&brport_attr_proxyarp,
 	NULL
 };
 
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 43f750e..fbcd156 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -293,7 +293,7 @@
 		copylen = len;
 	}
 
-	ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
+	ret = skb_copy_datagram_msg(skb, 0, m, copylen);
 	if (ret)
 		goto out_free;
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 945bbd0..70bb609 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -118,6 +118,7 @@
 #include <linux/if_vlan.h>
 #include <linux/ip.h>
 #include <net/ip.h>
+#include <net/mpls.h>
 #include <linux/ipv6.h>
 #include <linux/in.h>
 #include <linux/jhash.h>
@@ -2530,7 +2531,7 @@
 					   netdev_features_t features,
 					   __be16 type)
 {
-	if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
+	if (eth_p_mpls(type))
 		features &= skb->dev->mpls_features;
 
 	return features;
@@ -4316,20 +4317,28 @@
 		local_irq_enable();
 }
 
+static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+	return sd->rps_ipi_list != NULL;
+#else
+	return false;
+#endif
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
 
-#ifdef CONFIG_RPS
 	/* Check if we have pending IPIs; it's better to send them now,
 	 * rather than waiting for net_rx_action() to end.
 	 */
-	if (sd->rps_ipi_list) {
+	if (sd_has_rps_ipi_waiting(sd)) {
 		local_irq_disable();
 		net_rps_action_and_irq_enable(sd);
 	}
-#endif
+
 	napi->weight = weight_p;
 	local_irq_disable();
 	while (1) {
@@ -4356,7 +4365,6 @@
 			 * We can use a plain write instead of clear_bit(),
 			 * and we don't need an smp_mb() memory barrier.
 			 */
-			list_del(&napi->poll_list);
 			napi->state = 0;
 			rps_unlock(sd);
 
@@ -4376,7 +4384,8 @@
  * __napi_schedule - schedule for receive
  * @n: entry to schedule
  *
- * The entry's receive function will be scheduled to run
+ * The entry's receive function will be scheduled to run.
+ * Consider using __napi_schedule_irqoff() if hard irqs are masked.
  */
 void __napi_schedule(struct napi_struct *n)
 {
@@ -4388,12 +4397,24 @@
 }
 EXPORT_SYMBOL(__napi_schedule);
 
+/**
+ * __napi_schedule_irqoff - schedule for receive
+ * @n: entry to schedule
+ *
+ * Variant of __napi_schedule() assuming hard irqs are masked
+ */
+void __napi_schedule_irqoff(struct napi_struct *n)
+{
+	____napi_schedule(this_cpu_ptr(&softnet_data), n);
+}
+EXPORT_SYMBOL(__napi_schedule_irqoff);
+
 void __napi_complete(struct napi_struct *n)
 {
 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
 	BUG_ON(n->gro_list);
 
-	list_del(&n->poll_list);
+	list_del_init(&n->poll_list);
 	smp_mb__before_atomic();
 	clear_bit(NAPI_STATE_SCHED, &n->state);
 }
@@ -4411,9 +4432,15 @@
 		return;
 
 	napi_gro_flush(n, false);
-	local_irq_save(flags);
-	__napi_complete(n);
-	local_irq_restore(flags);
+
+	if (likely(list_empty(&n->poll_list))) {
+		WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
+	} else {
+		/* If n->poll_list is not empty, we need to mask irqs */
+		local_irq_save(flags);
+		__napi_complete(n);
+		local_irq_restore(flags);
+	}
 }
 EXPORT_SYMBOL(napi_complete);
 
@@ -4507,29 +4534,28 @@
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	unsigned long time_limit = jiffies + 2;
 	int budget = netdev_budget;
+	LIST_HEAD(list);
+	LIST_HEAD(repoll);
 	void *have;
 
 	local_irq_disable();
+	list_splice_init(&sd->poll_list, &list);
+	local_irq_enable();
 
-	while (!list_empty(&sd->poll_list)) {
+	while (!list_empty(&list)) {
 		struct napi_struct *n;
 		int work, weight;
 
-		/* If softirq window is exhuasted then punt.
+		/* If softirq window is exhausted then punt.
 		 * Allow this to run for 2 jiffies, which allows
 		 * an average latency of 1.5/HZ.
 		 */
 		if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
 			goto softnet_break;
 
-		local_irq_enable();
 
-		/* Even though interrupts have been re-enabled, this
-		 * access is safe because interrupts can only add new
-		 * entries to the tail of this list, and only ->poll()
-		 * calls can remove this head entry from the list.
-		 */
-		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
+		n = list_first_entry(&list, struct napi_struct, poll_list);
+		list_del_init(&n->poll_list);
 
 		have = netpoll_poll_lock(n);
 
@@ -4551,8 +4577,6 @@
 
 		budget -= work;
 
-		local_irq_disable();
-
 		/* Drivers must not modify the NAPI state if they
 		 * consume the entire weight.  In such cases this code
 		 * still "owns" the NAPI instance and therefore can
@@ -4560,32 +4584,40 @@
 		 */
 		if (unlikely(work == weight)) {
 			if (unlikely(napi_disable_pending(n))) {
-				local_irq_enable();
 				napi_complete(n);
-				local_irq_disable();
 			} else {
 				if (n->gro_list) {
 					/* flush too old packets
 					 * If HZ < 1000, flush all packets.
 					 */
-					local_irq_enable();
 					napi_gro_flush(n, HZ >= 1000);
-					local_irq_disable();
 				}
-				list_move_tail(&n->poll_list, &sd->poll_list);
+				list_add_tail(&n->poll_list, &repoll);
 			}
 		}
 
 		netpoll_poll_unlock(have);
 	}
+
+	if (!sd_has_rps_ipi_waiting(sd) &&
+	    list_empty(&list) &&
+	    list_empty(&repoll))
+		return;
 out:
+	local_irq_disable();
+
+	list_splice_tail_init(&sd->poll_list, &list);
+	list_splice_tail(&repoll, &list);
+	list_splice(&list, &sd->poll_list);
+	if (!list_empty(&sd->poll_list))
+		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+
 	net_rps_action_and_irq_enable(sd);
 
 	return;
 
 softnet_break:
 	sd->time_squeeze++;
-	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 	goto out;
 }
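
A minimal sketch of where the new variant is meant to be used, assuming the
napi_schedule_irqoff() inline wrapper added elsewhere in this series; the foo
driver and its helpers are hypothetical:

	struct foo_priv {
		struct napi_struct napi;
		/* ... device state ... */
	};

	static irqreturn_t foo_interrupt(int irq, void *data)
	{
		struct foo_priv *priv = data;

		foo_mask_device_irqs(priv);	/* hypothetical helper */
		/* Hard irqs are already masked inside an irq handler, so
		 * the local_irq_save()/restore() pair that plain
		 * napi_schedule() performs would be pure overhead here.
		 */
		napi_schedule_irqoff(&priv->napi);
		return IRQ_HANDLED;
	}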
 
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 06dfb29..b0f84f5 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -84,7 +84,6 @@
 	[NETIF_F_GSO_IPIP_BIT] =	 "tx-ipip-segmentation",
 	[NETIF_F_GSO_SIT_BIT] =		 "tx-sit-segmentation",
 	[NETIF_F_GSO_UDP_TUNNEL_BIT] =	 "tx-udp_tnl-segmentation",
-	[NETIF_F_GSO_MPLS_BIT] =	 "tx-mpls-segmentation",
 
 	[NETIF_F_FCOE_CRC_BIT] =         "tx-checksum-fcoe-crc",
 	[NETIF_F_SCTP_CSUM_BIT] =        "tx-checksum-sctp",
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ef31fef..edd0411 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -773,7 +773,7 @@
 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 		struct neigh_parms *p;
 		tbl->last_rand = jiffies;
-		for (p = &tbl->parms; p; p = p->next)
+		list_for_each_entry(p, &tbl->parms_list, list)
 			p->reachable_time =
 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 	}
@@ -1446,7 +1446,7 @@
 {
 	struct neigh_parms *p;
 
-	for (p = &tbl->parms; p; p = p->next) {
+	list_for_each_entry(p, &tbl->parms_list, list) {
 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
 			return p;
@@ -1481,8 +1481,7 @@
 		}
 
 		write_lock_bh(&tbl->lock);
-		p->next		= tbl->parms.next;
-		tbl->parms.next = p;
+		list_add(&p->list, &tbl->parms.list);
 		write_unlock_bh(&tbl->lock);
 
 		neigh_parms_data_state_cleanall(p);
@@ -1501,24 +1500,15 @@
 
 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
 {
-	struct neigh_parms **p;
-
 	if (!parms || parms == &tbl->parms)
 		return;
 	write_lock_bh(&tbl->lock);
-	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
-		if (*p == parms) {
-			*p = parms->next;
-			parms->dead = 1;
-			write_unlock_bh(&tbl->lock);
-			if (parms->dev)
-				dev_put(parms->dev);
-			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
-			return;
-		}
-	}
+	list_del(&parms->list);
+	parms->dead = 1;
 	write_unlock_bh(&tbl->lock);
-	neigh_dbg(1, "%s: not found\n", __func__);
+	if (parms->dev)
+		dev_put(parms->dev);
+	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
 }
 EXPORT_SYMBOL(neigh_parms_release);
 
@@ -1535,6 +1525,8 @@
 	unsigned long now = jiffies;
 	unsigned long phsize;
 
+	INIT_LIST_HEAD(&tbl->parms_list);
+	list_add(&tbl->parms.list, &tbl->parms_list);
 	write_pnet(&tbl->parms.net, &init_net);
 	atomic_set(&tbl->parms.refcnt, 1);
 	tbl->parms.reachable_time =
@@ -2154,7 +2146,9 @@
 				       NLM_F_MULTI) <= 0)
 			break;
 
-		for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
+		nidx = 0;
+		p = list_next_entry(&tbl->parms, list);
+		list_for_each_entry_from(p, &tbl->parms_list, list) {
 			if (!net_eq(neigh_parms_net(p), net))
 				continue;
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c16615b..7001896 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3013,7 +3013,7 @@
 		if (nskb->len == len + doffset)
 			goto perform_csum_check;
 
-		if (!sg) {
+		if (!sg && !nskb->remcsum_offload) {
 			nskb->ip_summed = CHECKSUM_NONE;
 			nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
 							    skb_put(nskb, len),
@@ -3085,7 +3085,7 @@
 		nskb->truesize += nskb->data_len;
 
 perform_csum_check:
-		if (!csum) {
+		if (!csum && !nskb->remcsum_offload) {
 			nskb->csum = skb_checksum(nskb, doffset,
 						  nskb->len - doffset, 0);
 			nskb->ip_summed = CHECKSUM_NONE;
@@ -3099,6 +3099,16 @@
 	 * (see validate_xmit_skb_list() for example)
 	 */
 	segs->prev = tail;
+
+	/* The following permits correct backpressure for protocols
+	 * using skb_set_owner_w().
+	 * The idea is to transfer ownership from head_skb to the last segment.
+	 */
+	if (head_skb->destructor == sock_wfree) {
+		swap(tail->truesize, head_skb->truesize);
+		swap(tail->destructor, head_skb->destructor);
+		swap(tail->sk, head_skb->sk);
+	}
 	return segs;
 
 err:
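
The destructor/truesize swap above is easy to misread, so a worked note on the
accounting it preserves (sizes invented for illustration):

	/* Suppose head_skb was charged truesize = 64KB against
	 * sk->sk_wmem_alloc via skb_set_owner_w().  After the swap, the
	 * last segment carries that 64KB and the sock_wfree destructor,
	 * while head_skb keeps the segment's small truesize.  The socket
	 * is therefore only credited back when the final segment is
	 * freed, i.e. once the whole GSO train has actually left the
	 * queue; that is the backpressure.
	 */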
diff --git a/net/core/sock.c b/net/core/sock.c
index 15e0c67..ac56dd0 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2457,7 +2457,7 @@
 		msg->msg_flags |= MSG_TRUNC;
 		copied = len;
 	}
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err)
 		goto out_free_skb;
 
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 5ab6627..8e6ae94 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -896,7 +896,7 @@
 		else if (len < skb->len)
 			msg->msg_flags |= MSG_TRUNC;
 
-		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
+		if (skb_copy_datagram_msg(skb, 0, msg, len)) {
 			/* Exception. Bailout! */
 			len = -EFAULT;
 			break;
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index a585fd6..5f8ac40 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -11,6 +11,17 @@
 
 if NET_DSA
 
+config NET_DSA_HWMON
+	bool "Distributed Switch Architecture HWMON support"
+	default y
+	depends on HWMON && !(NET_DSA=y && HWMON=m)
+	---help---
+	  Say Y if you want to expose thermal sensor data on switches supported
+	  by the Distributed Switch Architecture.
+
+	  Some of those switches contain thermal sensors. Their readings
+	  are made available via the hwmon sysfs interface.
+
 # tagging formats
 config NET_DSA_TAG_BRCM
 	bool
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 6317b41..dd646a8 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -9,6 +9,9 @@
  * (at your option) any later version.
  */
 
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
 #include <linux/list.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -17,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
+#include <linux/sysfs.h>
 #include "dsa_priv.h"
 
 char dsa_driver_version[] = "0.1";
@@ -71,6 +75,104 @@
 	return ret;
 }
 
+/* hwmon support ************************************************************/
+
+#ifdef CONFIG_NET_DSA_HWMON
+
+static ssize_t temp1_input_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct dsa_switch *ds = dev_get_drvdata(dev);
+	int temp, ret;
+
+	ret = ds->drv->get_temp(ds, &temp);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%d\n", temp * 1000);
+}
+static DEVICE_ATTR_RO(temp1_input);
+
+static ssize_t temp1_max_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct dsa_switch *ds = dev_get_drvdata(dev);
+	int temp, ret;
+
+	ret = ds->drv->get_temp_limit(ds, &temp);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%d\n", temp * 1000);
+}
+
+static ssize_t temp1_max_store(struct device *dev,
+			       struct device_attribute *attr, const char *buf,
+			       size_t count)
+{
+	struct dsa_switch *ds = dev_get_drvdata(dev);
+	int temp, ret;
+
+	ret = kstrtoint(buf, 0, &temp);
+	if (ret < 0)
+		return ret;
+
+	ret = ds->drv->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000));
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+static DEVICE_ATTR(temp1_max, S_IRUGO, temp1_max_show, temp1_max_store);
+
+static ssize_t temp1_max_alarm_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct dsa_switch *ds = dev_get_drvdata(dev);
+	bool alarm;
+	int ret;
+
+	ret = ds->drv->get_temp_alarm(ds, &alarm);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%d\n", alarm);
+}
+static DEVICE_ATTR_RO(temp1_max_alarm);
+
+static struct attribute *dsa_hwmon_attrs[] = {
+	&dev_attr_temp1_input.attr,	/* 0 */
+	&dev_attr_temp1_max.attr,	/* 1 */
+	&dev_attr_temp1_max_alarm.attr,	/* 2 */
+	NULL
+};
+
+static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj,
+				       struct attribute *attr, int index)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct dsa_switch *ds = dev_get_drvdata(dev);
+	struct dsa_switch_driver *drv = ds->drv;
+	umode_t mode = attr->mode;
+
+	if (index == 1) {
+		if (!drv->get_temp_limit)
+			mode = 0;
+		else if (drv->set_temp_limit)
+			mode |= S_IWUSR;
+	} else if (index == 2 && !drv->get_temp_alarm) {
+		mode = 0;
+	}
+	return mode;
+}
+
+static const struct attribute_group dsa_hwmon_group = {
+	.attrs = dsa_hwmon_attrs,
+	.is_visible = dsa_hwmon_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(dsa_hwmon);
+
+#endif /* CONFIG_NET_DSA_HWMON */
 
 /* basic switch operations **************************************************/
 static struct dsa_switch *
@@ -228,6 +330,31 @@
 		ds->ports[i] = slave_dev;
 	}
 
+#ifdef CONFIG_NET_DSA_HWMON
+	/* If the switch provides a temperature sensor, register it with
+	 * the hardware monitoring subsystem.
+	 * Treat any registration error as non-fatal and ignore it.
+	 */
+	if (drv->get_temp) {
+		const char *netname = netdev_name(dst->master_netdev);
+		char hname[IFNAMSIZ + 1];
+		int i, j;
+
+		/* Create valid hwmon 'name' attribute */
+		for (i = j = 0; i < IFNAMSIZ && netname[i]; i++) {
+			if (isalnum(netname[i]))
+				hname[j++] = netname[i];
+		}
+		hname[j] = '\0';
+		scnprintf(ds->hwmon_name, sizeof(ds->hwmon_name), "%s_dsa%d",
+			  hname, index);
+		ds->hwmon_dev = hwmon_device_register_with_groups(NULL,
+					ds->hwmon_name, ds, dsa_hwmon_groups);
+		if (IS_ERR(ds->hwmon_dev))
+			ds->hwmon_dev = NULL;
+	}
+#endif /* CONFIG_NET_DSA_HWMON */
+
 	return ds;
 
 out_free:
@@ -239,6 +366,10 @@
 
 static void dsa_switch_destroy(struct dsa_switch *ds)
 {
+#ifdef CONFIG_NET_DSA_HWMON
+	if (ds->hwmon_dev)
+		hwmon_device_unregister(ds->hwmon_dev);
+#endif
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -447,6 +578,7 @@
 	const char *port_name;
 	int chip_index, port_index;
 	const unsigned int *sw_addr, *port_reg;
+	u32 eeprom_len;
 	int ret;
 
 	mdio = of_parse_phandle(np, "dsa,mii-bus", 0);
@@ -498,6 +630,9 @@
 		if (cd->sw_addr > PHY_MAX_ADDR)
 			continue;
 
+		if (!of_property_read_u32(np, "eeprom-length", &eeprom_len))
+			cd->eeprom_len = eeprom_len;
+
 		for_each_available_child_of_node(child, port) {
 			port_reg = of_get_property(port, "reg", NULL);
 			if (!port_reg)
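
The isalnum() filter above exists because the hwmon core rejects device names
containing characters such as '-' or whitespace; two made-up examples of the
resulting names:

	/* master netdev "eth0",   switch index 0  ->  "eth0_dsa0"
	 * master netdev "br-lan", switch index 1  ->  "brlan_dsa1"
	 *                              ('-' dropped by the filter)
	 */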
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index ab03e00..0ea466d 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -249,6 +249,27 @@
 	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
 }
 
+static int dsa_slave_get_regs_len(struct net_device *dev)
+{
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+
+	if (ds->drv->get_regs_len)
+		return ds->drv->get_regs_len(ds, p->port);
+
+	return -EOPNOTSUPP;
+}
+
+static void
+dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
+{
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+
+	if (ds->drv->get_regs)
+		ds->drv->get_regs(ds, p->port, regs, _p);
+}
+
 static int dsa_slave_nway_reset(struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
@@ -271,6 +292,44 @@
 	return -EOPNOTSUPP;
 }
 
+static int dsa_slave_get_eeprom_len(struct net_device *dev)
+{
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+
+	if (ds->pd->eeprom_len)
+		return ds->pd->eeprom_len;
+
+	if (ds->drv->get_eeprom_len)
+		return ds->drv->get_eeprom_len(ds);
+
+	return 0;
+}
+
+static int dsa_slave_get_eeprom(struct net_device *dev,
+				struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+
+	if (ds->drv->get_eeprom)
+		return ds->drv->get_eeprom(ds, eeprom, data);
+
+	return -EOPNOTSUPP;
+}
+
+static int dsa_slave_set_eeprom(struct net_device *dev,
+				struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+
+	if (ds->drv->set_eeprom)
+		return ds->drv->set_eeprom(ds, eeprom, data);
+
+	return -EOPNOTSUPP;
+}
+
 static void dsa_slave_get_strings(struct net_device *dev,
 				  uint32_t stringset, uint8_t *data)
 {
@@ -385,8 +444,13 @@
 	.get_settings		= dsa_slave_get_settings,
 	.set_settings		= dsa_slave_set_settings,
 	.get_drvinfo		= dsa_slave_get_drvinfo,
+	.get_regs_len		= dsa_slave_get_regs_len,
+	.get_regs		= dsa_slave_get_regs,
 	.nway_reset		= dsa_slave_nway_reset,
 	.get_link		= dsa_slave_get_link,
+	.get_eeprom_len		= dsa_slave_get_eeprom_len,
+	.get_eeprom		= dsa_slave_get_eeprom,
+	.set_eeprom		= dsa_slave_set_eeprom,
 	.get_strings		= dsa_slave_get_strings,
 	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
 	.get_sset_count		= dsa_slave_get_sset_count,
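
For completeness, a user-space sketch of how the new EEPROM hooks are reached:
the standard ETHTOOL_GEEPROM ioctl lands in dsa_slave_get_eeprom(). The
interface name "sw0p1" and the 256-byte size are made up for the example.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_eeprom *ee;
		struct ifreq ifr;
		unsigned int len = 256;		/* assumed EEPROM size */
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		ee = calloc(1, sizeof(*ee) + len);
		if (fd < 0 || !ee)
			return 1;

		ee->cmd = ETHTOOL_GEEPROM;
		ee->len = len;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "sw0p1", sizeof(ifr.ifr_name) - 1);
		ifr.ifr_data = (char *)ee;

		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("ETHTOOL_GEEPROM");
			return 1;
		}
		printf("read %u EEPROM bytes\n", ee->len);
		return 0;
	}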
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index ce90c8b..2dab270 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -63,8 +63,6 @@
 		dsa_header[3] = 0x00;
 	}
 
-	skb->protocol = htons(ETH_P_DSA);
-
 	skb->dev = p->parent->dst->master_netdev;
 	dev_queue_xmit(skb);
 
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 94fcce7..9aeda59 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -76,8 +76,6 @@
 		edsa_header[7] = 0x00;
 	}
 
-	skb->protocol = htons(ETH_P_EDSA);
-
 	skb->dev = p->parent->dst->master_netdev;
 	dev_queue_xmit(skb);
 
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 115fdca..e268f9d 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -57,8 +57,6 @@
 	trailer[2] = 0x10;
 	trailer[3] = 0x00;
 
-	nskb->protocol = htons(ETH_P_TRAILER);
-
 	nskb->dev = p->parent->dst->master_netdev;
 	dev_queue_xmit(nskb);
 
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index ef2ad8a..fc9193e 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -324,7 +324,7 @@
 	}
 
 	/* FIXME: skip headers if necessary ?! */
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err)
 		goto done;
 
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 9d1f648..73a4d53 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -195,7 +195,7 @@
 		copied = len;
 	}
 
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err)
 		goto done;
 
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index e682b48..bd29016 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -322,6 +322,15 @@
 	  network mechanisms and optimizations for UDP (such as ECMP
 	  and RSS) can be leveraged to provide better service.
 
+config NET_FOU_IP_TUNNELS
+	bool "IP: FOU encapsulation of IP tunnels"
+	depends on NET_IPIP || NET_IPGRE || IPV6_SIT
+	select NET_FOU
+	---help---
+	  Allow configuration of FOU or GUE encapsulation for IP tunnels.
+	  When this option is enabled, IP tunnels can be configured to use
+	  FOU or GUE encapsulation.
+
 config GENEVE
 	tristate "Generic Network Virtualization Encapsulation (Geneve)"
 	depends on INET
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8b7fe5b..3a096bb 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1222,7 +1222,7 @@
 		       SKB_GSO_TCPV6 |
 		       SKB_GSO_UDP_TUNNEL |
 		       SKB_GSO_UDP_TUNNEL_CSUM |
-		       SKB_GSO_MPLS |
+		       SKB_GSO_TUNNEL_REMCSUM |
 		       0)))
 		goto out;
 
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 4715f25..5160c71 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -50,7 +50,7 @@
 #include <net/netlabel.h>
 #include <net/cipso_ipv4.h>
 #include <linux/atomic.h>
-#include <asm/bug.h>
+#include <linux/bug.h>
 #include <asm/unaligned.h>
 
 /* List of available DOI definitions */
@@ -72,6 +72,7 @@
 	u32 size;
 	struct list_head list;
 };
+
 struct cipso_v4_map_cache_entry {
 	u32 hash;
 	unsigned char *key;
@@ -82,7 +83,8 @@
 	u32 activity;
 	struct list_head list;
 };
-static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL;
+
+static struct cipso_v4_map_cache_bkt *cipso_v4_cache;
 
 /* Restricted bitmap (tag #1) flags */
 int cipso_v4_rbm_optfmt = 0;
@@ -539,7 +541,7 @@
 
 /**
  * cipso_v4_doi_free - Frees a DOI definition
- * @entry: the entry's RCU field
+ * @doi_def: the DOI definition
  *
  * Description:
  * This function frees all of the memory associated with a DOI definition.
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 360b565..60173d4 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -392,8 +392,10 @@
 	if (elen <= 0)
 		goto out;
 
-	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+	err = skb_cow_data(skb, 0, &trailer);
+	if (err < 0)
 		goto out;
+
 	nfrags = err;
 
 	assoclen = sizeof(*esph);
@@ -601,12 +603,12 @@
 		BUG_ON(!aalg_desc);
 
 		err = -EINVAL;
-		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
+		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
 		    crypto_aead_authsize(aead)) {
-			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
-				 x->aalg->alg_name,
-				 crypto_aead_authsize(aead),
-				 aalg_desc->uinfo.auth.icv_fullbits/8);
+			pr_info("ESP: %s digestsize %u != %hu\n",
+				x->aalg->alg_name,
+				crypto_aead_authsize(aead),
+				aalg_desc->uinfo.auth.icv_fullbits / 8);
 			goto free_key;
 		}
 
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 32e7892..740ae09 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -38,21 +38,17 @@
 	return sk->sk_user_data;
 }
 
-static int fou_udp_encap_recv_deliver(struct sk_buff *skb,
-				      u8 protocol, size_t len)
+static void fou_recv_pull(struct sk_buff *skb, size_t len)
 {
 	struct iphdr *iph = ip_hdr(skb);
 
 	/* Remove 'len' bytes from the packet (UDP header and
-	 * FOU header if present), modify the protocol to the one
-	 * we found, and then call rcv_encap.
+	 * FOU header if present).
 	 */
 	iph->tot_len = htons(ntohs(iph->tot_len) - len);
 	__skb_pull(skb, len);
 	skb_postpull_rcsum(skb, udp_hdr(skb), len);
 	skb_reset_transport_header(skb);
-
-	return -protocol;
 }
 
 static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
@@ -62,16 +58,78 @@
 	if (!fou)
 		return 1;
 
-	return fou_udp_encap_recv_deliver(skb, fou->protocol,
-					  sizeof(struct udphdr));
+	fou_recv_pull(skb, sizeof(struct udphdr));
+
+	return -fou->protocol;
+}
+
+static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
+				  void *data, int hdrlen, u8 ipproto)
+{
+	__be16 *pd = data;
+	u16 start = ntohs(pd[0]);
+	u16 offset = ntohs(pd[1]);
+	u16 poffset = 0;
+	u16 plen;
+	__wsum csum, delta;
+	__sum16 *psum;
+
+	if (skb->remcsum_offload) {
+		/* Already processed in GRO path */
+		skb->remcsum_offload = 0;
+		return guehdr;
+	}
+
+	if (start > skb->len - hdrlen ||
+	    offset > skb->len - hdrlen - sizeof(u16))
+		return NULL;
+
+	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
+		__skb_checksum_complete(skb);
+
+	plen = hdrlen + offset + sizeof(u16);
+	if (!pskb_may_pull(skb, plen))
+		return NULL;
+	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
+
+	if (ipproto == IPPROTO_IP && sizeof(struct iphdr) < plen) {
+		struct iphdr *ip = (struct iphdr *)(skb->data + hdrlen);
+
+		/* If next header happens to be IP we can skip that for the
+		 * checksum calculation since the IP header checksum is zero
+		 * if correct.
+		 */
+		poffset = ip->ihl * 4;
+	}
+
+	csum = csum_sub(skb->csum, skb_checksum(skb, poffset + hdrlen,
+						start - poffset - hdrlen, 0));
+
+	/* Set derived checksum in packet */
+	psum = (__sum16 *)(skb->data + hdrlen + offset);
+	delta = csum_sub(csum_fold(csum), *psum);
+	*psum = csum_fold(csum);
+
+	/* Adjust skb->csum since we changed the packet */
+	skb->csum = csum_add(skb->csum, delta);
+
+	return guehdr;
+}
+
+static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
+{
+	/* No support yet */
+	kfree_skb(skb);
+	return 0;
 }
 
 static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct fou *fou = fou_from_sock(sk);
-	size_t len;
+	size_t len, optlen, hdrlen;
 	struct guehdr *guehdr;
-	struct udphdr *uh;
+	void *data;
+	u16 doffset = 0;
 
 	if (!fou)
 		return 1;
@@ -80,25 +138,61 @@
 	if (!pskb_may_pull(skb, len))
 		goto drop;
 
-	uh = udp_hdr(skb);
-	guehdr = (struct guehdr *)&uh[1];
+	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
 
-	len += guehdr->hlen << 2;
+	optlen = guehdr->hlen << 2;
+	len += optlen;
+
 	if (!pskb_may_pull(skb, len))
 		goto drop;
 
-	uh = udp_hdr(skb);
-	guehdr = (struct guehdr *)&uh[1];
+	/* guehdr may change after pull */
+	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
 
-	if (guehdr->version != 0)
+	hdrlen = sizeof(struct guehdr) + optlen;
+
+	if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
 		goto drop;
 
-	if (guehdr->flags) {
-		/* No support yet */
-		goto drop;
+	ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
+
+	/* Pull UDP header now, skb->data points to guehdr */
+	__skb_pull(skb, sizeof(struct udphdr));
+
+	/* Pull csum through the guehdr now. This can be used if
+	 * there is a remote checksum offload.
+	 */
+	skb_postpull_rcsum(skb, udp_hdr(skb), len);
+
+	data = &guehdr[1];
+
+	if (guehdr->flags & GUE_FLAG_PRIV) {
+		__be32 flags = *(__be32 *)(data + doffset);
+
+		doffset += GUE_LEN_PRIV;
+
+		if (flags & GUE_PFLAG_REMCSUM) {
+			guehdr = gue_remcsum(skb, guehdr, data + doffset,
+					     hdrlen, guehdr->proto_ctype);
+			if (!guehdr)
+				goto drop;
+
+			data = &guehdr[1];
+
+			doffset += GUE_PLEN_REMCSUM;
+		}
 	}
 
-	return fou_udp_encap_recv_deliver(skb, guehdr->next_hdr, len);
+	if (unlikely(guehdr->control))
+		return gue_control_message(skb, guehdr);
+
+	__skb_pull(skb, hdrlen);
+	skb_reset_transport_header(skb);
+
+	return -guehdr->proto_ctype;
+
 drop:
 	kfree_skb(skb);
 	return 0;
@@ -147,6 +241,66 @@
 	return err;
 }
 
+static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
+				      struct guehdr *guehdr, void *data,
+				      size_t hdrlen, u8 ipproto)
+{
+	__be16 *pd = data;
+	u16 start = ntohs(pd[0]);
+	u16 offset = ntohs(pd[1]);
+	u16 poffset = 0;
+	u16 plen;
+	void *ptr;
+	__wsum csum, delta;
+	__sum16 *psum;
+
+	if (skb->remcsum_offload)
+		return guehdr;
+
+	if (start > skb_gro_len(skb) - hdrlen ||
+	    offset > skb_gro_len(skb) - hdrlen - sizeof(u16) ||
+	    !NAPI_GRO_CB(skb)->csum_valid || skb->remcsum_offload)
+		return NULL;
+
+	plen = hdrlen + offset + sizeof(u16);
+
+	/* Pull checksum that will be written */
+	if (skb_gro_header_hard(skb, off + plen)) {
+		guehdr = skb_gro_header_slow(skb, off + plen, off);
+		if (!guehdr)
+			return NULL;
+	}
+
+	ptr = (void *)guehdr + hdrlen;
+
+	if (ipproto == IPPROTO_IP &&
+	    (hdrlen + sizeof(struct iphdr) < plen)) {
+		struct iphdr *ip = (struct iphdr *)(ptr + hdrlen);
+
+		/* If next header happens to be IP we can skip
+		 * that for the checksum calculation since the
+		 * IP header checksum is zero if correct.
+		 */
+		poffset = ip->ihl * 4;
+	}
+
+	csum = csum_sub(NAPI_GRO_CB(skb)->csum,
+			csum_partial(ptr + poffset, start - poffset, 0));
+
+	/* Set derived checksum in packet */
+	psum = (__sum16 *)(ptr + offset);
+	delta = csum_sub(csum_fold(csum), *psum);
+	*psum = csum_fold(csum);
+
+	/* Adjust skb->csum since we changed the packet */
+	skb->csum = csum_add(skb->csum, delta);
+	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+
+	skb->remcsum_offload = 1;
+
+	return guehdr;
+}
+
 static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 					struct sk_buff *skb)
 {
@@ -154,38 +308,64 @@
 	const struct net_offload *ops;
 	struct sk_buff **pp = NULL;
 	struct sk_buff *p;
-	u8 proto;
 	struct guehdr *guehdr;
-	unsigned int hlen, guehlen;
-	unsigned int off;
+	size_t len, optlen, hdrlen, off;
+	void *data;
+	u16 doffset = 0;
 	int flush = 1;
 
 	off = skb_gro_offset(skb);
-	hlen = off + sizeof(*guehdr);
+	len = off + sizeof(*guehdr);
+
 	guehdr = skb_gro_header_fast(skb, off);
-	if (skb_gro_header_hard(skb, hlen)) {
-		guehdr = skb_gro_header_slow(skb, hlen, off);
+	if (skb_gro_header_hard(skb, len)) {
+		guehdr = skb_gro_header_slow(skb, len, off);
 		if (unlikely(!guehdr))
 			goto out;
 	}
 
-	proto = guehdr->next_hdr;
+	optlen = guehdr->hlen << 2;
+	len += optlen;
 
-	rcu_read_lock();
-	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
-	ops = rcu_dereference(offloads[proto]);
-	if (WARN_ON(!ops || !ops->callbacks.gro_receive))
-		goto out_unlock;
-
-	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
-
-	hlen = off + guehlen;
-	if (skb_gro_header_hard(skb, hlen)) {
-		guehdr = skb_gro_header_slow(skb, hlen, off);
+	if (skb_gro_header_hard(skb, len)) {
+		guehdr = skb_gro_header_slow(skb, len, off);
 		if (unlikely(!guehdr))
-			goto out_unlock;
+			goto out;
 	}
 
+	if (unlikely(guehdr->control) || guehdr->version != 0 ||
+	    validate_gue_flags(guehdr, optlen))
+		goto out;
+
+	hdrlen = sizeof(*guehdr) + optlen;
+
+	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr;
+	 * this is needed if there is a remote checksum offload.
+	 */
+	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);
+
+	data = &guehdr[1];
+
+	if (guehdr->flags & GUE_FLAG_PRIV) {
+		__be32 flags = *(__be32 *)(data + doffset);
+
+		doffset += GUE_LEN_PRIV;
+
+		if (flags & GUE_PFLAG_REMCSUM) {
+			guehdr = gue_gro_remcsum(skb, off, guehdr,
+						 data + doffset, hdrlen,
+						 guehdr->proto_ctype);
+			if (!guehdr)
+				goto out;
+
+			data = &guehdr[1];
+
+			doffset += GUE_PLEN_REMCSUM;
+		}
+	}
+
+	skb_gro_pull(skb, hdrlen);
+
 	flush = 0;
 
 	for (p = *head; p; p = p->next) {
@@ -197,7 +377,7 @@
 		guehdr2 = (struct guehdr *)(p->data + off);
 
 		/* Compare base GUE header to be equal (covers
-		 * hlen, version, next_hdr, and flags.
+		 * hlen, version, proto_ctype, and flags.
 		 */
 		if (guehdr->word != guehdr2->word) {
 			NAPI_GRO_CB(p)->same_flow = 0;
@@ -212,10 +392,11 @@
 		}
 	}
 
-	skb_gro_pull(skb, guehlen);
-
-	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
-	skb_gro_postpull_rcsum(skb, guehdr, guehlen);
+	rcu_read_lock();
+	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
+	if (WARN_ON(!ops || !ops->callbacks.gro_receive))
+		goto out_unlock;
 
 	pp = ops->callbacks.gro_receive(head, skb);
 
@@ -236,7 +417,7 @@
 	u8 proto;
 	int err = -ENOENT;
 
-	proto = guehdr->next_hdr;
+	proto = guehdr->proto_ctype;
 
 	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
 
@@ -487,6 +668,125 @@
 	},
 };
 
+static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
+			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
+{
+	struct udphdr *uh;
+
+	skb_push(skb, sizeof(struct udphdr));
+	skb_reset_transport_header(skb);
+
+	uh = udp_hdr(skb);
+
+	uh->dest = e->dport;
+	uh->source = sport;
+	uh->len = htons(skb->len);
+	uh->check = 0;
+	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
+		     fl4->saddr, fl4->daddr, skb->len);
+
+	*protocol = IPPROTO_UDP;
+}
+
+int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+		     u8 *protocol, struct flowi4 *fl4)
+{
+	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
+	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+	__be16 sport;
+
+	skb = iptunnel_handle_offloads(skb, csum, type);
+
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
+					       skb, 0, 0, false);
+	fou_build_udp(skb, e, fl4, protocol, sport);
+
+	return 0;
+}
+EXPORT_SYMBOL(fou_build_header);
+
+int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+		     u8 *protocol, struct flowi4 *fl4)
+{
+	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
+	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+	struct guehdr *guehdr;
+	size_t hdrlen, optlen = 0;
+	__be16 sport;
+	void *data;
+	bool need_priv = false;
+
+	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
+	    skb->ip_summed == CHECKSUM_PARTIAL) {
+		csum = false;
+		optlen += GUE_PLEN_REMCSUM;
+		type |= SKB_GSO_TUNNEL_REMCSUM;
+		need_priv = true;
+	}
+
+	optlen += need_priv ? GUE_LEN_PRIV : 0;
+
+	skb = iptunnel_handle_offloads(skb, csum, type);
+
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	/* Get source port (based on flow hash) before skb_push */
+	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
+					       skb, 0, 0, false);
+
+	hdrlen = sizeof(struct guehdr) + optlen;
+
+	skb_push(skb, hdrlen);
+
+	guehdr = (struct guehdr *)skb->data;
+
+	guehdr->control = 0;
+	guehdr->version = 0;
+	guehdr->hlen = optlen >> 2;
+	guehdr->flags = 0;
+	guehdr->proto_ctype = *protocol;
+
+	data = &guehdr[1];
+
+	if (need_priv) {
+		__be32 *flags = data;
+
+		guehdr->flags |= GUE_FLAG_PRIV;
+		*flags = 0;
+		data += GUE_LEN_PRIV;
+
+		if (type & SKB_GSO_TUNNEL_REMCSUM) {
+			u16 csum_start = skb_checksum_start_offset(skb);
+			__be16 *pd = data;
+
+			if (csum_start < hdrlen)
+				return -EINVAL;
+
+			csum_start -= hdrlen;
+			pd[0] = htons(csum_start);
+			pd[1] = htons(csum_start + skb->csum_offset);
+
+			if (!skb_is_gso(skb)) {
+				skb->ip_summed = CHECKSUM_NONE;
+				skb->encapsulation = 0;
+			}
+
+			*flags |= GUE_PFLAG_REMCSUM;
+			data += GUE_PLEN_REMCSUM;
+		}
+
+	}
+
+	fou_build_udp(skb, e, fl4, protocol, sport);
+
+	return 0;
+}
+EXPORT_SYMBOL(gue_build_header);
+
 static int __init fou_init(void)
 {
 	int ret;
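
For readers without include/net/gue.h in front of them, the header that
gue_build_header() fills in looks roughly like this (little-endian bitfield
ordering shown; the in-tree definition also carries the big-endian variant):

	struct guehdr {
		union {
			struct {
				__u8	hlen:5,		/* option words beyond the 4-byte base */
					control:1,	/* control vs. data message */
					version:2;	/* must be 0 */
				__u8	proto_ctype;	/* IP proto, or ctype for control */
				__be16	flags;		/* GUE_FLAG_PRIV etc. */
			};
			__be32	word;	/* whole header, used for the GRO compare */
		};
	};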
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index dedb21e..31802af 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -104,7 +104,7 @@
 	memcpy(geneveh->options, options, options_len);
 }
 
-/* Transmit a fully formated Geneve frame.
+/* Transmit a fully formatted Geneve frame.
  *
  * When calling this function, skb->data should point
  * to the geneve header, which is fully formed.
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index fb70e3e..666cf36 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -112,17 +112,17 @@
 #ifdef CONFIG_IP_MULTICAST
 /* Parameter names and values are taken from igmp-v2-06 draft */
 
-#define IGMP_V1_Router_Present_Timeout		(400*HZ)
-#define IGMP_V2_Router_Present_Timeout		(400*HZ)
-#define IGMP_V2_Unsolicited_Report_Interval	(10*HZ)
-#define IGMP_V3_Unsolicited_Report_Interval	(1*HZ)
-#define IGMP_Query_Response_Interval		(10*HZ)
-#define IGMP_Query_Robustness_Variable		2
+#define IGMP_V1_ROUTER_PRESENT_TIMEOUT		(400*HZ)
+#define IGMP_V2_ROUTER_PRESENT_TIMEOUT		(400*HZ)
+#define IGMP_V2_UNSOLICITED_REPORT_INTERVAL	(10*HZ)
+#define IGMP_V3_UNSOLICITED_REPORT_INTERVAL	(1*HZ)
+#define IGMP_QUERY_RESPONSE_INTERVAL		(10*HZ)
+#define IGMP_QUERY_ROBUSTNESS_VARIABLE		2
 
 
-#define IGMP_Initial_Report_Delay		(1)
+#define IGMP_INITIAL_REPORT_DELAY		(1)
 
-/* IGMP_Initial_Report_Delay is not from IGMP specs!
+/* IGMP_INITIAL_REPORT_DELAY is not from IGMP specs!
  * IGMP specs require reporting membership immediately after
  * joining a group, but we delay the first report by a
  * small interval. It seems more natural and still does not
@@ -318,9 +318,7 @@
 	return scount;
 }
 
-#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
-
-static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
+static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
 {
 	struct sk_buff *skb;
 	struct rtable *rt;
@@ -330,6 +328,7 @@
 	struct flowi4 fl4;
 	int hlen = LL_RESERVED_SPACE(dev);
 	int tlen = dev->needed_tailroom;
+	unsigned int size = mtu;
 
 	while (1) {
 		skb = alloc_skb(size + hlen + tlen,
@@ -341,7 +340,6 @@
 			return NULL;
 	}
 	skb->priority = TC_PRIO_CONTROL;
-	igmp_skb_size(skb) = size;
 
 	rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
 				   0, 0,
@@ -354,6 +352,8 @@
 	skb_dst_set(skb, &rt->dst);
 	skb->dev = dev;
 
+	skb->reserved_tailroom = skb_end_offset(skb) -
+				 min(mtu, skb_end_offset(skb));
 	skb_reserve(skb, hlen);
 
 	skb_reset_network_header(skb);
@@ -423,8 +423,7 @@
 	return skb;
 }
 
-#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
-	skb_tailroom(skb)) : 0)
+#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
 
 static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
 	int type, int gdeleted, int sdeleted)
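
The new AVAILABLE() definition works because igmpv3_newpack() above primes
skb->reserved_tailroom; a worked example with assumed sizes:

	/* Assume alloc_skb() gave us skb_end_offset() == 2048 for an
	 * mtu of 1500:
	 *
	 *	reserved_tailroom = 2048 - min(1500, 2048) = 548
	 *
	 * skb_availroom() subtracts that reserve from the tailroom, so
	 * add_grec() can never grow a report past one MTU even though
	 * the skb itself has more room.
	 */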
@@ -879,15 +878,15 @@
 		if (ih->code == 0) {
 			/* Alas, old v1 router presents here. */
 
-			max_delay = IGMP_Query_Response_Interval;
+			max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
 			in_dev->mr_v1_seen = jiffies +
-				IGMP_V1_Router_Present_Timeout;
+				IGMP_V1_ROUTER_PRESENT_TIMEOUT;
 			group = 0;
 		} else {
 			/* v2 router present */
 			max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
 			in_dev->mr_v2_seen = jiffies +
-				IGMP_V2_Router_Present_Timeout;
+				IGMP_V2_ROUTER_PRESENT_TIMEOUT;
 		}
 		/* cancel the interface change timer */
 		in_dev->mr_ifc_count = 0;
@@ -899,7 +898,7 @@
 		return true;	/* ignore bogus packet; freed by caller */
 	} else if (IGMP_V1_SEEN(in_dev)) {
 		/* This is a v3 query with v1 queriers present */
-		max_delay = IGMP_Query_Response_Interval;
+		max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
 		group = 0;
 	} else if (IGMP_V2_SEEN(in_dev)) {
 		/* this is a v3 query with v2 queriers present;
@@ -1218,7 +1217,7 @@
 		return;
 	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
 		spin_lock_bh(&im->lock);
-		igmp_start_timer(im, IGMP_Initial_Report_Delay);
+		igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
 		spin_unlock_bh(&im->lock);
 		return;
 	}
@@ -1541,7 +1540,7 @@
 int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS;
 int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF;
 #ifdef CONFIG_IP_MULTICAST
-int sysctl_igmp_qrv __read_mostly = IGMP_Query_Robustness_Variable;
+int sysctl_igmp_qrv __read_mostly = IGMP_QUERY_ROBUSTNESS_VARIABLE;
 #endif
 
 static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
@@ -2687,11 +2686,7 @@
 	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
 
 	if (v == SEQ_START_TOKEN) {
-		seq_printf(seq,
-			   "%3s %6s "
-			   "%10s %10s %6s %6s\n", "Idx",
-			   "Device", "MCA",
-			   "SRC", "INC", "EXC");
+		seq_puts(seq, "Idx Device        MCA        SRC    INC    EXC\n");
 	} else {
 		seq_printf(seq,
 			   "%3d %6.6s 0x%08x "
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 2811cc1..4d964da 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -80,7 +80,7 @@
 	struct inet_peer *peer;
 };
 
-static inline u8 ip4_frag_ecn(u8 tos)
+static u8 ip4_frag_ecn(u8 tos)
 {
 	return 1 << (tos & INET_ECN_MASK);
 }
@@ -148,7 +148,7 @@
 		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
 }
 
-static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
+static void ip4_frag_free(struct inet_frag_queue *q)
 {
 	struct ipq *qp;
 
@@ -160,7 +160,7 @@
 
 /* Destruction primitives. */
 
-static __inline__ void ipq_put(struct ipq *ipq)
+static void ipq_put(struct ipq *ipq)
 {
 	inet_frag_put(&ipq->q, &ip4_frags);
 }
@@ -236,7 +236,7 @@
 /* Find the correct entry in the "incomplete datagrams" queue for
  * this IP datagram, and create a new one if nothing is found.
  */
-static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
+static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
 {
 	struct inet_frag_queue *q;
 	struct ip4_create_arg arg;
@@ -256,7 +256,7 @@
 }
 
 /* Is the fragment too far ahead to be part of ipq? */
-static inline int ip_frag_too_far(struct ipq *qp)
+static int ip_frag_too_far(struct ipq *qp)
 {
 	struct inet_peer *peer = qp->peer;
 	unsigned int max = sysctl_ipfrag_max_dist;
@@ -795,16 +795,16 @@
 	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
 }
 #else
-static inline int ip4_frags_ns_ctl_register(struct net *net)
+static int ip4_frags_ns_ctl_register(struct net *net)
 {
 	return 0;
 }
 
-static inline void ip4_frags_ns_ctl_unregister(struct net *net)
+static void ip4_frags_ns_ctl_unregister(struct net *net)
 {
 }
 
-static inline void __init ip4_frags_ctl_register(void)
+static void __init ip4_frags_ctl_register(void)
 {
 }
 #endif
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 12055fd..ac84912 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -789,7 +789,7 @@
 	    nla_put_u16(skb, IFLA_GRE_ENCAP_DPORT,
 			t->encap.dport) ||
 	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
-			t->encap.dport))
+			t->encap.flags))
 		goto nla_put_failure;
 
 	return 0;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index bc6471d..4a929ad 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -662,12 +662,10 @@
 		if (len < left)	{
 			len &= ~7;
 		}
-		/*
-		 *	Allocate buffer.
-		 */
 
-		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
-			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
+		/* Allocate buffer */
+		skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC);
+		if (!skb2) {
 			err = -ENOMEM;
 			goto fail;
 		}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c373a9a..21894df 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -424,7 +424,7 @@
 		msg->msg_flags |= MSG_TRUNC;
 		copied = len;
 	}
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err)
 		goto out_free_skb;
 
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 0bb8e14..c3587e1 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -56,7 +56,10 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/udp.h>
-#include <net/gue.h>
+
+#if IS_ENABLED(CONFIG_NET_FOU)
+#include <net/fou.h>
+#endif
 
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
@@ -494,10 +497,12 @@
 	switch (e->type) {
 	case TUNNEL_ENCAP_NONE:
 		return 0;
+#if IS_ENABLED(CONFIG_NET_FOU)
 	case TUNNEL_ENCAP_FOU:
-		return sizeof(struct udphdr);
+		return fou_encap_hlen(e);
 	case TUNNEL_ENCAP_GUE:
-		return sizeof(struct udphdr) + sizeof(struct guehdr);
+		return gue_encap_hlen(e);
+#endif
 	default:
 		return -EINVAL;
 	}
@@ -526,60 +531,18 @@
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);
 
-static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
-			    size_t hdr_len, u8 *protocol, struct flowi4 *fl4)
-{
-	struct udphdr *uh;
-	__be16 sport;
-	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
-	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
-
-	skb = iptunnel_handle_offloads(skb, csum, type);
-
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
-
-	/* Get length and hash before making space in skb */
-
-	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
-					       skb, 0, 0, false);
-
-	skb_push(skb, hdr_len);
-
-	skb_reset_transport_header(skb);
-	uh = udp_hdr(skb);
-
-	if (e->type == TUNNEL_ENCAP_GUE) {
-		struct guehdr *guehdr = (struct guehdr *)&uh[1];
-
-		guehdr->version = 0;
-		guehdr->hlen = 0;
-		guehdr->flags = 0;
-		guehdr->next_hdr = *protocol;
-	}
-
-	uh->dest = e->dport;
-	uh->source = sport;
-	uh->len = htons(skb->len);
-	uh->check = 0;
-	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
-		     fl4->saddr, fl4->daddr, skb->len);
-
-	*protocol = IPPROTO_UDP;
-
-	return 0;
-}
-
 int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
 		    u8 *protocol, struct flowi4 *fl4)
 {
 	switch (t->encap.type) {
 	case TUNNEL_ENCAP_NONE:
 		return 0;
+#if IS_ENABLED(CONFIG_NET_FOU)
 	case TUNNEL_ENCAP_FOU:
+		return fou_build_header(skb, &t->encap, protocol, fl4);
 	case TUNNEL_ENCAP_GUE:
-		return fou_build_header(skb, &t->encap, t->encap_hlen,
-					protocol, fl4);
+		return gue_build_header(skb, &t->encap, protocol, fl4);
+#endif
 	default:
 		return -EINVAL;
 	}
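
A worked headroom example for the encap dispatch above, with sizes taken from
the GUE build path added earlier in this series:

	/* GUE with remote checksum offload enabled:
	 *
	 *	sizeof(struct udphdr)	 8
	 *	sizeof(struct guehdr)	 4
	 *	GUE_LEN_PRIV		 4	(private flags word)
	 *	GUE_PLEN_REMCSUM	 4	(csum start/offset pair)
	 *				--
	 *				20 bytes
	 *
	 * which is what gue_encap_hlen() must return so that
	 * t->encap_hlen, and with it the tunnel headroom, is sized
	 * correctly.
	 */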
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 648fa14..7fa18bc 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -115,7 +115,7 @@
  */
 int ic_set_manually __initdata = 0;		/* IPconfig parameters set manually */
 
-static int ic_enable __initdata = 0;		/* IP config enabled? */
+static int ic_enable __initdata;		/* IP config enabled? */
 
 /* Protocol choice */
 int ic_proto_enabled __initdata = 0
@@ -130,7 +130,7 @@
 #endif
 			;
 
-static int ic_host_name_set __initdata = 0;	/* Host name set by us? */
+static int ic_host_name_set __initdata;	/* Host name set by us? */
 
 __be32 ic_myaddr = NONE;		/* My IP address */
 static __be32 ic_netmask = NONE;	/* Netmask for local subnet */
@@ -160,17 +160,17 @@
 static char user_dev_name[IFNAMSIZ] __initdata = { 0, };
 
 /* Protocols supported by available interfaces */
-static int ic_proto_have_if __initdata = 0;
+static int ic_proto_have_if __initdata;
 
 /* MTU for boot device */
-static int ic_dev_mtu __initdata = 0;
+static int ic_dev_mtu __initdata;
 
 #ifdef IPCONFIG_DYNAMIC
 static DEFINE_SPINLOCK(ic_recv_lock);
-static volatile int ic_got_reply __initdata = 0;    /* Proto(s) that replied */
+static volatile int ic_got_reply __initdata;    /* Proto(s) that replied */
 #endif
 #ifdef IPCONFIG_DHCP
-static int ic_dhcp_msgtype __initdata = 0;	/* DHCP msg type received */
+static int ic_dhcp_msgtype __initdata;	/* DHCP msg type received */
 #endif
 
 
@@ -186,8 +186,8 @@
 	__be32 xid;
 };
 
-static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */
-static struct net_device *ic_dev __initdata = NULL;	/* Selected device */
+static struct ic_device *ic_first_dev __initdata;	/* List of open devices */
+static struct net_device *ic_dev __initdata;		/* Selected device */
 
 static bool __init ic_is_init_dev(struct net_device *dev)
 {
@@ -498,7 +498,7 @@
 	struct arphdr *rarp;
 	unsigned char *rarp_ptr;
 	__be32 sip, tip;
-	unsigned char *sha, *tha;		/* s for "source", t for "target" */
+	unsigned char *tha;		/* t for "target" */
 	struct ic_device *d;
 
 	if (!net_eq(dev_net(dev), &init_net))
@@ -549,7 +549,6 @@
 		goto drop_unlock;	/* should never happen */
 
 	/* Extract variable-width fields */
-	sha = rarp_ptr;
 	rarp_ptr += dev->addr_len;
 	memcpy(&sip, rarp_ptr, 4);
 	rarp_ptr += 4;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 37096d6..40403114 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -465,7 +465,7 @@
 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
 			tunnel->encap.dport) ||
 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
-			tunnel->encap.dport))
+			tunnel->encap.flags))
 		goto nla_put_failure;
 
 	return 0;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 57f7c98..736236c 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -875,7 +875,7 @@
 	}
 
 	/* Don't bother checking the checksum */
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err)
 		goto done;
 
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8e3eb39..f0d4eb8 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -296,12 +296,12 @@
 	int j;
 
 	if (count) {
-		seq_printf(seq, "\nIcmpMsg:");
+		seq_puts(seq, "\nIcmpMsg:");
 		for (j = 0; j < count; ++j)
 			seq_printf(seq, " %sType%u",
 				type[j] & 0x100 ? "Out" : "In",
 				type[j] & 0xff);
-		seq_printf(seq, "\nIcmpMsg:");
+		seq_puts(seq, "\nIcmpMsg:");
 		for (j = 0; j < count; ++j)
 			seq_printf(seq, " %lu", vals[j]);
 	}
@@ -342,7 +342,7 @@
 	seq_puts(seq, "\nIcmp: InMsgs InErrors InCsumErrors");
 	for (i = 0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " In%s", icmpmibmap[i].name);
-	seq_printf(seq, " OutMsgs OutErrors");
+	seq_puts(seq, " OutMsgs OutErrors");
 	for (i = 0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " Out%s", icmpmibmap[i].name);
 	seq_printf(seq, "\nIcmp: %lu %lu %lu",
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 739db31..ee8fa4b 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -718,7 +718,7 @@
 		copied = len;
 	}
 
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err)
 		goto done;
 
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 32b98d0..45fe60c 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -19,10 +19,6 @@
 #include <net/tcp.h>
 #include <net/route.h>
 
-/* Timestamps: lowest bits store TCP options */
-#define TSBITS 6
-#define TSMASK (((__u32)1 << TSBITS) - 1)
-
 extern int sysctl_tcp_syncookies;
 
 static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly;
@@ -30,6 +26,30 @@
 #define COOKIEBITS 24	/* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
 
+/* TCP Timestamp: the 6 lowest bits of the timestamp sent in the cookie
+ * SYN-ACK store TCP options:
+ *
+ * MSB                               LSB
+ * | 31 ...   6 |  5  |  4   | 3 2 1 0 |
+ * |  Timestamp | ECN | SACK | WScale  |
+ *
+ * When we receive a valid cookie-ACK, we look at the echoed tsval (if
+ * any) to figure out which TCP options we should use for the rebuilt
+ * connection.
+ *
+ * A WScale setting of '0xf' (which is an invalid scaling value)
+ * means that the original SYN did not include the TCP window scaling option.
+ */
+#define TS_OPT_WSCALE_MASK	0xf
+#define TS_OPT_SACK		BIT(4)
+#define TS_OPT_ECN		BIT(5)
+/* There is no TS_OPT_TIMESTAMP:
+ * if the ACK contains a timestamp option, we already know it was
+ * requested/supported by the syn/synack exchange.
+ */
+#define TSBITS	6
+#define TSMASK	(((__u32)1 << TSBITS) - 1)
+
 static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
 		      ipv4_cookie_scratch);
 
@@ -67,9 +87,11 @@
 
 	ireq = inet_rsk(req);
 
-	options = ireq->wscale_ok ? ireq->snd_wscale : 0xf;
-	options |= ireq->sack_ok << 4;
-	options |= ireq->ecn_ok << 5;
+	options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
+	if (ireq->sack_ok)
+		options |= TS_OPT_SACK;
+	if (ireq->ecn_ok)
+		options |= TS_OPT_ECN;
 
 	ts = ts_now & ~TSMASK;
 	ts |= options;
@@ -219,16 +241,13 @@
  * additional tcp options in the timestamp.
  * This extracts these options from the timestamp echo.
  *
- * The lowest 4 bits store snd_wscale.
- * next 2 bits indicate SACK and ECN support.
- *
- * return false if we decode an option that should not be.
+ * Return false if we decode a TCP option that is disabled
+ * on the host.
  */
-bool cookie_check_timestamp(struct tcp_options_received *tcp_opt,
-			struct net *net, bool *ecn_ok)
+bool cookie_timestamp_decode(struct tcp_options_received *tcp_opt)
 {
 	/* echoed timestamp, lowest bits contain options */
-	u32 options = tcp_opt->rcv_tsecr & TSMASK;
+	u32 options = tcp_opt->rcv_tsecr;
 
 	if (!tcp_opt->saw_tstamp)  {
 		tcp_clear_options(tcp_opt);
@@ -238,22 +257,35 @@
 	if (!sysctl_tcp_timestamps)
 		return false;
 
-	tcp_opt->sack_ok = (options & (1 << 4)) ? TCP_SACK_SEEN : 0;
-	*ecn_ok = (options >> 5) & 1;
-	if (*ecn_ok && !net->ipv4.sysctl_tcp_ecn)
-		return false;
+	tcp_opt->sack_ok = (options & TS_OPT_SACK) ? TCP_SACK_SEEN : 0;
 
 	if (tcp_opt->sack_ok && !sysctl_tcp_sack)
 		return false;
 
-	if ((options & 0xf) == 0xf)
+	if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK)
 		return true; /* no window scaling */
 
 	tcp_opt->wscale_ok = 1;
-	tcp_opt->snd_wscale = options & 0xf;
+	tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK;
+
 	return sysctl_tcp_window_scaling != 0;
 }
-EXPORT_SYMBOL(cookie_check_timestamp);
+EXPORT_SYMBOL(cookie_timestamp_decode);
+
+bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,
+		   const struct net *net, const struct dst_entry *dst)
+{
+	bool ecn_ok = tcp_opt->rcv_tsecr & TS_OPT_ECN;
+
+	if (!ecn_ok)
+		return false;
+
+	if (net->ipv4.sysctl_tcp_ecn)
+		return true;
+
+	return dst_feature(dst, RTAX_FEATURE_ECN);
+}
+EXPORT_SYMBOL(cookie_ecn_ok);
 
 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 {
@@ -269,14 +301,16 @@
 	int mss;
 	struct rtable *rt;
 	__u8 rcv_wscale;
-	bool ecn_ok = false;
 	struct flowi4 fl4;
 
 	if (!sysctl_tcp_syncookies || !th->ack || th->rst)
 		goto out;
 
-	if (tcp_synq_no_recent_overflow(sk) ||
-	    (mss = __cookie_v4_check(ip_hdr(skb), th, cookie)) == 0) {
+	if (tcp_synq_no_recent_overflow(sk))
+		goto out;
+
+	mss = __cookie_v4_check(ip_hdr(skb), th, cookie);
+	if (mss == 0) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 		goto out;
 	}
@@ -287,7 +321,7 @@
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
 	tcp_parse_options(skb, &tcp_opt, 0, NULL);
 
-	if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
+	if (!cookie_timestamp_decode(&tcp_opt))
 		goto out;
 
 	ret = NULL;
@@ -305,7 +339,6 @@
 	ireq->ir_loc_addr	= ip_hdr(skb)->daddr;
 	ireq->ir_rmt_addr	= ip_hdr(skb)->saddr;
 	ireq->ir_mark		= inet_request_mark(sk, skb);
-	ireq->ecn_ok		= ecn_ok;
 	ireq->snd_wscale	= tcp_opt.snd_wscale;
 	ireq->sack_ok		= tcp_opt.sack_ok;
 	ireq->wscale_ok		= tcp_opt.wscale_ok;
@@ -354,6 +387,7 @@
 				  dst_metric(&rt->dst, RTAX_INITRWND));
 
 	ireq->rcv_wscale  = rcv_wscale;
+	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);
 
 	ret = get_cookie_sock(sk, skb, req, &rt->dst);
 	/* ip_queue_xmit() depends on our flow being setup
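
A worked example of the cookie option encoding above (values chosen
arbitrarily):

	/* snd_wscale = 7, SACK on, ECN off:
	 *
	 *	options = 7 | TS_OPT_SACK          = 0x17
	 *	ts      = (ts_now & ~TSMASK) | 0x17
	 *
	 * cookie_timestamp_decode() later masks the echoed tsval with
	 * TS_OPT_WSCALE_MASK (0xf) to recover the window scale and
	 * tests TS_OPT_SACK / TS_OPT_ECN for the other two options.
	 */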
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b3c53c8..e0ee384 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -496,6 +496,13 @@
 		.proc_handler	= proc_dointvec
 	},
 	{
+		.procname	= "tcp_max_reordering",
+		.data		= &sysctl_tcp_max_reordering,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{
 		.procname	= "tcp_dsack",
 		.data		= &sysctl_tcp_dsack,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 39ec0c3..c239f47 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1377,7 +1377,7 @@
 	/* XXX -- need to support SO_PEEK_OFF */
 
 	skb_queue_walk(&sk->sk_write_queue, skb) {
-		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
+		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
 		if (err)
 			break;
 
@@ -1833,8 +1833,7 @@
 		}
 
 		if (!(flags & MSG_TRUNC)) {
-			err = skb_copy_datagram_iovec(skb, offset,
-						      msg->msg_iov, used);
+			err = skb_copy_datagram_msg(skb, offset, msg, used);
 			if (err) {
 				/* Exception. Bailout! */
 				if (!copied)
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index b1c5970..27ead0d 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -1,5 +1,5 @@
 /*
- * Plugable TCP congestion control support and newReno
+ * Pluggable TCP congestion control support and newReno
  * congestion control.
  * Based on ideas from I/O scheduler support and Web100.
  *
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 88fa2d1..5f979c7 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -81,6 +81,7 @@
 int sysctl_tcp_sack __read_mostly = 1;
 int sysctl_tcp_fack __read_mostly = 1;
 int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
+int sysctl_tcp_max_reordering __read_mostly = 300;
 EXPORT_SYMBOL(sysctl_tcp_reordering);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
@@ -833,7 +834,7 @@
 	if (metric > tp->reordering) {
 		int mib_idx;
 
-		tp->reordering = min(TCP_MAX_REORDERING, metric);
+		tp->reordering = min(sysctl_tcp_max_reordering, metric);
 
 		/* This exciting event is worth to be remembered. 8) */
 		if (ts)
@@ -5030,7 +5031,7 @@
 	/* step 3: check security and precedence [ignored] */
 
 	/* step 4: Check for a SYN
-	 * RFC 5691 4.2 : Send a challenge ack
+	 * RFC 5961 4.2 : Send a challenge ack
 	 */
 	if (th->syn) {
 syn_challenge:
@@ -5867,7 +5868,7 @@
  * If we receive a SYN packet with these bits set, it means a
  * network is playing bad games with TOS bits. In order to
  * avoid possible false congestion notifications, we disable
- * TCP ECN negociation.
+ * TCP ECN negotiation.
  *
  * Exception: tcp_ca wants ECN. This is required for DCTCP
  * congestion control; it requires setting ECT on all packets,
@@ -5877,20 +5878,22 @@
  */
 static void tcp_ecn_create_request(struct request_sock *req,
 				   const struct sk_buff *skb,
-				   const struct sock *listen_sk)
+				   const struct sock *listen_sk,
+				   const struct dst_entry *dst)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	const struct net *net = sock_net(listen_sk);
 	bool th_ecn = th->ece && th->cwr;
-	bool ect, need_ecn;
+	bool ect, need_ecn, ecn_ok;
 
 	if (!th_ecn)
 		return;
 
 	ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
 	need_ecn = tcp_ca_needs_ecn(listen_sk);
+	ecn_ok = net->ipv4.sysctl_tcp_ecn || dst_feature(dst, RTAX_FEATURE_ECN);
 
-	if (!ect && !need_ecn && net->ipv4.sysctl_tcp_ecn)
+	if (!ect && !need_ecn && ecn_ok)
 		inet_rsk(req)->ecn_ok = 1;
 	else if (ect && need_ecn)
 		inet_rsk(req)->ecn_ok = 1;
@@ -5955,13 +5958,7 @@
 	if (security_inet_conn_request(sk, skb, req))
 		goto drop_and_free;
 
-	if (!want_cookie || tmp_opt.tstamp_ok)
-		tcp_ecn_create_request(req, skb, sk);
-
-	if (want_cookie) {
-		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
-		req->cookie_ts = tmp_opt.tstamp_ok;
-	} else if (!isn) {
+	if (!want_cookie && !isn) {
 		/* VJ's idea. We save last timestamp seen
 		 * from the destination in peer table, when entering
 		 * state TIME-WAIT, and check against it before
@@ -6009,6 +6006,15 @@
 			goto drop_and_free;
 	}
 
+	tcp_ecn_create_request(req, skb, sk, dst);
+
+	if (want_cookie) {
+		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
+		req->cookie_ts = tmp_opt.tstamp_ok;
+		if (!tmp_opt.tstamp_ok)
+			inet_rsk(req)->ecn_ok = 0;
+	}
+
 	tcp_rsk(req)->snt_isn = isn;
 	tcp_openreq_init_rwin(req, sk, dst);
 	fastopen = !want_cookie &&
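
With this reordering, ECN state for syncookie connections is decided only once the route is known: tcp_ecn_create_request() now also consults the per-route RTAX_FEATURE_ECN bit, and the cookie path clears ecn_ok when no timestamp is available, since the cookie encodes the ECN bit inside the timestamp. The cookie_ecn_ok() calls added to both syncookie files are presumably along these lines; a hedged sketch, assuming the TS_OPT_ECN timestamp bit used by the syncookie code:

	/* Sketch of cookie_ecn_ok() (an assumption, not a verbatim quote):
	 * honour ECN on a cookie-built request only if the client echoed
	 * the ECN bit encoded in the timestamp, and either the global
	 * sysctl or the per-route RTAX_FEATURE_ECN metric enables ECN.
	 */
	bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,
			   const struct net *net, const struct dst_entry *dst)
	{
		if (!(tcp_opt->rcv_tsecr & TS_OPT_ECN))
			return false;

		return net->ipv4.sysctl_tcp_ecn ||
		       dst_feature(dst, RTAX_FEATURE_ECN);
	}
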
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 5b90f2f..9d7930b 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -94,9 +94,9 @@
 			       SKB_GSO_GRE_CSUM |
 			       SKB_GSO_IPIP |
 			       SKB_GSO_SIT |
-			       SKB_GSO_MPLS |
 			       SKB_GSO_UDP_TUNNEL |
 			       SKB_GSO_UDP_TUNNEL_CSUM |
+			       SKB_GSO_TUNNEL_REMCSUM |
 			       0) ||
 			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
 			goto out;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a3d453b..0b88158 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -333,10 +333,19 @@
 static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
+		       tcp_ca_needs_ecn(sk);
+
+	if (!use_ecn) {
+		const struct dst_entry *dst = __sk_dst_get(sk);
+
+		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
+			use_ecn = true;
+	}
 
 	tp->ecn_flags = 0;
-	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
-	    tcp_ca_needs_ecn(sk)) {
+
+	if (use_ecn) {
 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
 		tp->ecn_flags = TCP_ECN_OK;
 		if (tcp_ca_needs_ecn(sk))
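
The SYN side now mirrors the request side: ECE|CWR is set on an outgoing SYN when the sysctl allows it, the congestion control demands it, or the route carries RTAX_FEATURE_ECN. dst_feature() itself is just a bitmask test on the route metrics, presumably:

	/* Sketch of dst_feature() (include/net/dst.h): RTAX_FEATURES is a
	 * plain bitmask metric, so per-route ECN is a single bit in it.
	 */
	static inline u32 dst_feature(const struct dst_entry *dst, u32 feature)
	{
		return dst_metric(dst, RTAX_FEATURES) & feature;
	}

Recent iproute2 can set the bit per destination, e.g. `ip route change 192.0.2.0/24 dev eth0 features ecn` (illustrative addresses).
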
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cd0db54..df19027 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1281,8 +1281,8 @@
 	}
 
 	if (skb_csum_unnecessary(skb))
-		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-					      msg->msg_iov, copied);
+		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
+					    msg, copied);
 	else {
 		err = skb_copy_and_csum_datagram_iovec(skb,
 						       sizeof(struct udphdr),
@@ -1777,14 +1777,13 @@
 		if (ret > 0)
 			return -ret;
 		return 0;
-	} else {
-		if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
-			return __udp4_lib_mcast_deliver(net, skb, uh,
-					saddr, daddr, udptable);
-
-		sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
 	}
 
+	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
+		return __udp4_lib_mcast_deliver(net, skb, uh,
+				saddr, daddr, udptable);
+
+	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
 	if (sk != NULL) {
 		int ret;
 
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6480cea..d3e537e 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -29,7 +29,7 @@
 	netdev_features_t features,
 	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
 					     netdev_features_t features),
-	__be16 new_protocol)
+	__be16 new_protocol, bool is_ipv6)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	u16 mac_offset = skb->mac_header;
@@ -39,7 +39,10 @@
 	netdev_features_t enc_features;
 	int udp_offset, outer_hlen;
 	unsigned int oldlen;
-	bool need_csum;
+	bool need_csum = !!(skb_shinfo(skb)->gso_type &
+			    SKB_GSO_UDP_TUNNEL_CSUM);
+	bool remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
+	bool offload_csum = false, dont_encap = (need_csum || remcsum);
 
 	oldlen = (u16)~skb->len;
 
@@ -52,10 +55,13 @@
 	skb_set_network_header(skb, skb_inner_network_offset(skb));
 	skb->mac_len = skb_inner_network_offset(skb);
 	skb->protocol = new_protocol;
+	skb->encap_hdr_csum = need_csum;
+	skb->remcsum_offload = remcsum;
 
-	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
-	if (need_csum)
-		skb->encap_hdr_csum = 1;
+	/* Try to offload checksum if possible */
+	offload_csum = !!(need_csum &&
+			  (skb->dev->features &
+			   (is_ipv6 ? NETIF_F_V6_CSUM : NETIF_F_V4_CSUM)));
 
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & features;
@@ -72,11 +78,21 @@
 	do {
 		struct udphdr *uh;
 		int len;
+		__be32 delta;
 
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
+		if (dont_encap) {
+			skb->encapsulation = 0;
+			skb->ip_summed = CHECKSUM_NONE;
+		} else {
+			/* Only set up inner headers if we might be offloading
+			 * inner checksum.
+			 */
+			skb_reset_inner_headers(skb);
+			skb->encapsulation = 1;
+		}
 
 		skb->mac_len = mac_len;
+		skb->protocol = protocol;
 
 		skb_push(skb, outer_hlen);
 		skb_reset_mac_header(skb);
@@ -86,19 +102,36 @@
 		uh = udp_hdr(skb);
 		uh->len = htons(len);
 
-		if (need_csum) {
-			__be32 delta = htonl(oldlen + len);
+		if (!need_csum)
+			continue;
 
-			uh->check = ~csum_fold((__force __wsum)
-					       ((__force u32)uh->check +
-						(__force u32)delta));
+		delta = htonl(oldlen + len);
+
+		uh->check = ~csum_fold((__force __wsum)
+				       ((__force u32)uh->check +
+					(__force u32)delta));
+		if (offload_csum) {
+			skb->ip_summed = CHECKSUM_PARTIAL;
+			skb->csum_start = skb_transport_header(skb) - skb->head;
+			skb->csum_offset = offsetof(struct udphdr, check);
+		} else if (remcsum) {
+			/* Need to calculate the checksum from scratch;
+			 * inner checksums are never offloaded when doing
+			 * remote checksum offload.
+			 */
+
+			skb->csum = skb_checksum(skb, udp_offset,
+						 skb->len - udp_offset,
+						 0);
+			uh->check = csum_fold(skb->csum);
+			if (uh->check == 0)
+				uh->check = CSUM_MANGLED_0;
+		} else {
 			uh->check = gso_make_checksum(skb, ~uh->check);
 
 			if (uh->check == 0)
 				uh->check = CSUM_MANGLED_0;
 		}
-
-		skb->protocol = protocol;
 	} while ((skb = skb->next));
 out:
 	return segs;
@@ -134,7 +167,7 @@
 	}
 
 	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
-					protocol);
+					protocol, is_ipv6);
 
 out_unlock:
 	rcu_read_unlock();
@@ -172,9 +205,9 @@
 		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
 				      SKB_GSO_UDP_TUNNEL |
 				      SKB_GSO_UDP_TUNNEL_CSUM |
+				      SKB_GSO_TUNNEL_REMCSUM |
 				      SKB_GSO_IPIP |
-				      SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
-				      SKB_GSO_MPLS) ||
+				      SKB_GSO_GRE | SKB_GSO_GRE_CSUM) ||
 			     !(type & (SKB_GSO_UDP))))
 			goto out;
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 0169ccf..06e8978 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1170,6 +1170,9 @@
 	IPV6_SADDR_RULE_PRIVACY,
 	IPV6_SADDR_RULE_ORCHID,
 	IPV6_SADDR_RULE_PREFIX,
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+	IPV6_SADDR_RULE_NOT_OPTIMISTIC,
+#endif
 	IPV6_SADDR_RULE_MAX
 };
 
@@ -1197,6 +1200,15 @@
 	return 0;
 }
 
+static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev)
+{
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+	return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic;
+#else
+	return false;
+#endif
+}
+
 static int ipv6_get_saddr_eval(struct net *net,
 			       struct ipv6_saddr_score *score,
 			       struct ipv6_saddr_dst *dst,
@@ -1257,10 +1269,16 @@
 		score->scopedist = ret;
 		break;
 	case IPV6_SADDR_RULE_PREFERRED:
+	    {
 		/* Rule 3: Avoid deprecated and optimistic addresses */
+		u8 avoid = IFA_F_DEPRECATED;
+
+		if (!ipv6_use_optimistic_addr(score->ifa->idev))
+			avoid |= IFA_F_OPTIMISTIC;
 		ret = ipv6_saddr_preferred(score->addr_type) ||
-		      !(score->ifa->flags & (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC));
+		      !(score->ifa->flags & avoid);
 		break;
+	    }
 #ifdef CONFIG_IPV6_MIP6
 	case IPV6_SADDR_RULE_HOA:
 	    {
@@ -1306,6 +1324,14 @@
 			ret = score->ifa->prefix_len;
 		score->matchlen = ret;
 		break;
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+	case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
+		/* Optimistic addresses still have lower precedence than other
+		 * preferred addresses.
+		 */
+		ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
+		break;
+#endif
 	default:
 		ret = 0;
 	}
@@ -2315,8 +2341,8 @@
 			else
 				stored_lft = 0;
 			if (!update_lft && !create && stored_lft) {
-				const u32 minimum_lft = min(
-					stored_lft, (u32)MIN_VALID_LIFETIME);
+				const u32 minimum_lft = min_t(u32,
+					stored_lft, MIN_VALID_LIFETIME);
 				valid_lft = max(valid_lft, minimum_lft);
 
 				/* RFC4862 Section 5.5.3e:
@@ -3222,8 +3248,15 @@
 	 * Optimistic nodes can start receiving
 	 * Frames right away
 	 */
-	if (ifp->flags & IFA_F_OPTIMISTIC)
+	if (ifp->flags & IFA_F_OPTIMISTIC) {
 		ip6_ins_rt(ifp->rt);
+		if (ipv6_use_optimistic_addr(idev)) {
+			/* Because optimistic nodes can use this address,
+			 * notify listeners. If DAD fails, RTM_DELADDR is sent.
+			 */
+			ipv6_ifa_notify(RTM_NEWADDR, ifp);
+		}
+	}
 
 	addrconf_dad_kick(ifp);
 out:
@@ -4330,6 +4363,7 @@
 	array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 	array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
+	array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
 #endif
 #ifdef CONFIG_IPV6_MROUTE
 	array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
@@ -5156,6 +5190,14 @@
 			.proc_handler   = proc_dointvec,
 
 		},
+		{
+			.procname       = "use_optimistic",
+			.data           = &ipv6_devconf.use_optimistic,
+			.maxlen         = sizeof(int),
+			.mode           = 0644,
+			.proc_handler   = proc_dointvec,
+
+		},
 #endif
 #ifdef CONFIG_IPV6_MROUTE
 		{
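
Taken together, the addrconf hunks above add a use_optimistic knob next to optimistic_dad: when both are set, rule 3 of RFC 6724 source selection stops penalizing IFA_F_OPTIMISTIC addresses, the new IPV6_SADDR_RULE_NOT_OPTIMISTIC tie-breaker still ranks them below fully preferred ones, and RTM_NEWADDR is announced as soon as DAD starts. The knob appears under the usual per-device conf tree; enabling it for all interfaces from userspace might look like:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Per-device files also exist, e.g.
		 * /proc/sys/net/ipv6/conf/eth0/use_optimistic (needs root).
		 */
		int fd = open("/proc/sys/net/ipv6/conf/all/use_optimistic",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return 1;
		}
		return close(fd);
	}
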
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 2cdc383..5c6996e 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -351,7 +351,7 @@
 		msg->msg_flags |= MSG_TRUNC;
 		copied = len;
 	}
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err)
 		goto out_free_skb;
 
@@ -445,7 +445,7 @@
 		msg->msg_flags |= MSG_TRUNC;
 		copied = len;
 	}
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err)
 		goto out_free_skb;
 
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 83fc3a3..d21d7b2 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -544,12 +544,12 @@
 		BUG_ON(!aalg_desc);
 
 		err = -EINVAL;
-		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
+		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
 		    crypto_aead_authsize(aead)) {
-			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
-				 x->aalg->alg_name,
-				 crypto_aead_authsize(aead),
-				 aalg_desc->uinfo.auth.icv_fullbits/8);
+			pr_info("ESP: %s digestsize %u != %hu\n",
+				x->aalg->alg_name,
+				crypto_aead_authsize(aead),
+				aalg_desc->uinfo.auth.icv_fullbits / 8);
 			goto free_key;
 		}
 
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index bfde361..601d896 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -47,7 +47,7 @@
 #include <net/xfrm.h>
 #endif
 
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 /*
  *	Parsing tlv encoded headers.
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 97ae700..62c1037 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -1009,4 +1009,3 @@
 	return table;
 }
 #endif
-
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 3dd7d4e..7221021 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -769,10 +769,9 @@
 static int ip6fl_seq_show(struct seq_file *seq, void *v)
 {
 	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
-	if (v == SEQ_START_TOKEN)
-		seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
-			   "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
-	else {
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Label S Owner  Users  Linger Expires  Dst                              Opt\n");
+	} else {
 		struct ip6_flowlabel *fl = v;
 		seq_printf(seq,
 			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4564e1f..f6e2533 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -902,7 +902,7 @@
 	struct net_device_stats *stats = &t->dev->stats;
 	int ret;
 
-	if (!ip6_tnl_xmit_ctl(t))
+	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
 		goto tx_err;
 
 	switch (skb->protocol) {
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index a071563..fd76ce9 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -78,7 +78,7 @@
 		       SKB_GSO_SIT |
 		       SKB_GSO_UDP_TUNNEL |
 		       SKB_GSO_UDP_TUNNEL_CSUM |
-		       SKB_GSO_MPLS |
+		       SKB_GSO_TUNNEL_REMCSUM |
 		       SKB_GSO_TCPV6 |
 		       0)))
 		goto out;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 8e950c2..916d2a1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -747,13 +747,11 @@
 		if (len < left)	{
 			len &= ~7;
 		}
-		/*
-		 *	Allocate buffer.
-		 */
 
-		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
-				      hroom + troom, GFP_ATOMIC)) == NULL) {
-			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
+		/* Allocate buffer */
+		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
+				 hroom + troom, GFP_ATOMIC);
+		if (!frag) {
 			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 				      IPSTATS_MIB_FRAGFAILS);
 			err = -ENOMEM;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9cb94cf..e2b6cfb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -183,6 +183,7 @@
 	unsigned int hash = HASH(remote, local);
 	struct ip6_tnl *t;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+	struct in6_addr any;
 
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
@@ -190,6 +191,22 @@
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
+
+	memset(&any, 0, sizeof(any));
+	hash = HASH(&any, local);
+	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+		if (ipv6_addr_equal(local, &t->parms.laddr) &&
+		    (t->dev->flags & IFF_UP))
+			return t;
+	}
+
+	hash = HASH(remote, &any);
+	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+		    (t->dev->flags & IFF_UP))
+			return t;
+	}
+
 	t = rcu_dereference(ip6n->tnls_wc[0]);
 	if (t && (t->dev->flags & IFF_UP))
 		return t;
@@ -474,6 +491,7 @@
 	int rel_msg = 0;
 	u8 rel_type = ICMPV6_DEST_UNREACH;
 	u8 rel_code = ICMPV6_ADDR_UNREACH;
+	u8 tproto;
 	__u32 rel_info = 0;
 	__u16 len;
 	int err = -ENOENT;
@@ -487,7 +505,8 @@
 					&ipv6h->saddr)) == NULL)
 		goto out;
 
-	if (t->parms.proto != ipproto && t->parms.proto != 0)
+	tproto = ACCESS_ONCE(t->parms.proto);
+	if (tproto != ipproto && tproto != 0)
 		goto out;
 
 	err = 0;
@@ -788,6 +807,7 @@
 {
 	struct ip6_tnl *t;
 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	u8 tproto;
 	int err;
 
 	rcu_read_lock();
@@ -796,7 +816,8 @@
 					&ipv6h->daddr)) != NULL) {
 		struct pcpu_sw_netstats *tstats;
 
-		if (t->parms.proto != ipproto && t->parms.proto != 0) {
+		tproto = ACCESS_ONCE(t->parms.proto);
+		if (tproto != ipproto && tproto != 0) {
 			rcu_read_unlock();
 			goto discard;
 		}
@@ -902,24 +923,28 @@
 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
 }
 
-int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
+		     const struct in6_addr *laddr,
+		     const struct in6_addr *raddr)
 {
 	struct __ip6_tnl_parm *p = &t->parms;
 	int ret = 0;
 	struct net *net = t->net;
 
-	if (p->flags & IP6_TNL_F_CAP_XMIT) {
+	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
+	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
+	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
 		struct net_device *ldev = NULL;
 
 		rcu_read_lock();
 		if (p->link)
 			ldev = dev_get_by_index_rcu(net, p->link);
 
-		if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
+		if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
 			pr_warn("%s xmit: Local address not yet configured!\n",
 				p->name);
-		else if (!ipv6_addr_is_multicast(&p->raddr) &&
-			 unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
+		else if (!ipv6_addr_is_multicast(raddr) &&
+			 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
 			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
 				p->name);
 		else
@@ -968,8 +993,34 @@
 	u8 proto;
 	int err = -1;
 
-	if (!fl6->flowi6_mark)
+	/* NBMA tunnel */
+	if (ipv6_addr_any(&t->parms.raddr)) {
+		struct in6_addr *addr6;
+		struct neighbour *neigh;
+		int addr_type;
+
+		if (!skb_dst(skb))
+			goto tx_err_link_failure;
+
+		neigh = dst_neigh_lookup(skb_dst(skb),
+					 &ipv6_hdr(skb)->daddr);
+		if (!neigh)
+			goto tx_err_link_failure;
+
+		addr6 = (struct in6_addr *)&neigh->primary_key;
+		addr_type = ipv6_addr_type(addr6);
+
+		if (addr_type == IPV6_ADDR_ANY)
+			addr6 = &ipv6_hdr(skb)->daddr;
+
+		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+		neigh_release(neigh);
+	} else if (!fl6->flowi6_mark)
 		dst = ip6_tnl_dst_check(t);
+
+	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
+		goto tx_err_link_failure;
+
 	if (!dst) {
 		ndst = ip6_route_output(net, NULL, fl6);
 
@@ -1075,10 +1126,11 @@
 	struct flowi6 fl6;
 	__u8 dsfield;
 	__u32 mtu;
+	u8 tproto;
 	int err;
 
-	if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) ||
-	    !ip6_tnl_xmit_ctl(t))
+	tproto = ACCESS_ONCE(t->parms.proto);
+	if (tproto != IPPROTO_IPIP && tproto != 0)
 		return -1;
 
 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
@@ -1117,10 +1169,12 @@
 	struct flowi6 fl6;
 	__u8 dsfield;
 	__u32 mtu;
+	u8 tproto;
 	int err;
 
-	if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
-	    !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
+	tproto = ACCESS_ONCE(t->parms.proto);
+	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
+	    ip6_tnl_addr_conflict(t, ipv6h))
 		return -1;
 
 	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
@@ -1282,6 +1336,14 @@
 	return err;
 }
 
+static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+{
+	/* For the default tnl0 device, only the proto may be changed */
+	t->parms.proto = p->proto;
+	netdev_state_change(t->dev);
+	return 0;
+}
+
 static void
 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
 {
@@ -1381,7 +1443,7 @@
 			break;
 		ip6_tnl_parm_from_user(&p1, &p);
 		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
-		if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
+		if (cmd == SIOCCHGTUNNEL) {
 			if (t != NULL) {
 				if (t->dev != dev) {
 					err = -EEXIST;
@@ -1389,8 +1451,10 @@
 				}
 			} else
 				t = netdev_priv(dev);
-
-			err = ip6_tnl_update(t, &p1);
+			if (dev == ip6n->fb_tnl_dev)
+				err = ip6_tnl0_update(t, &p1);
+			else
+				err = ip6_tnl_update(t, &p1);
 		}
 		if (t) {
 			err = 0;
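
Two independent things are happening in this file: tunnel lookup gains two wildcard passes (any-remote and any-local) so NBMA-style tunnels with an unset endpoint can still match, and t->parms.proto is now read through ACCESS_ONCE() because the new ip6_tnl0_update() lets SIOCCHGTUNNEL change the fallback device's proto while lockless receivers are running. The read-once pattern, sketched with a hypothetical helper name:

	/* Sketch (hypothetical helper): without ACCESS_ONCE() the compiler
	 * may reload t->parms.proto between the two comparisons, so a
	 * concurrent ioctl could make the checks disagree with each other.
	 */
	static bool tnl_proto_matches(const struct ip6_tnl *t, u8 ipproto)
	{
		u8 tproto = ACCESS_ONCE(t->parms.proto);

		return tproto == ipproto || tproto == 0;
	}
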
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 31089d1..ec84d03 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -412,6 +412,7 @@
 	struct net_device_stats *stats = &t->dev->stats;
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *tdev;
+	struct xfrm_state *x;
 	int err = -1;
 
 	if (!dst)
@@ -425,7 +426,12 @@
 		goto tx_err_link_failure;
 	}
 
-	if (!vti6_state_check(dst->xfrm, &t->parms.raddr, &t->parms.laddr))
+	x = dst->xfrm;
+	if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr))
+		goto tx_err_link_failure;
+
+	if (!ip6_tnl_xmit_ctl(t, (const struct in6_addr *)&x->props.saddr,
+			      (const struct in6_addr *)&x->id.daddr))
 		goto tx_err_link_failure;
 
 	tdev = dst->dev;
@@ -480,7 +486,7 @@
 		ipv6h = ipv6_hdr(skb);
 
 		if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
-		    !ip6_tnl_xmit_ctl(t) || vti6_addr_conflict(t, ipv6h))
+		    vti6_addr_conflict(t, ipv6h))
 			goto tx_err;
 
 		xfrm_decode_session(skb, &fl, AF_INET6);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 0171f08..467f310 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2090,7 +2090,7 @@
 	if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
 		struct mfc6_cache *cache_proxy;
 
-		/* For an (*,G) entry, we only check that the incomming
+		/* For an (*,G) entry, we only check that the incoming
 		 * interface is part of the static tree.
 		 */
 		cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 9648de2..5ce107c 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1550,7 +1550,7 @@
 	hdr->daddr = *daddr;
 }
 
-static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
+static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
 {
 	struct net_device *dev = idev->dev;
 	struct net *net = dev_net(dev);
@@ -1561,13 +1561,13 @@
 	const struct in6_addr *saddr;
 	int hlen = LL_RESERVED_SPACE(dev);
 	int tlen = dev->needed_tailroom;
+	unsigned int size = mtu + hlen + tlen;
 	int err;
 	u8 ra[8] = { IPPROTO_ICMPV6, 0,
 		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
 		     IPV6_TLV_PADN, 0 };
 
 	/* we assume size > sizeof(ra) here */
-	size += hlen + tlen;
 	/* limit our allocations to order-0 page */
 	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
 	skb = sock_alloc_send_skb(sk, size, 1, &err);
@@ -1576,6 +1576,8 @@
 		return NULL;
 
 	skb->priority = TC_PRIO_CONTROL;
+	skb->reserved_tailroom = skb_end_offset(skb) -
+				 min(mtu, skb_end_offset(skb));
 	skb_reserve(skb, hlen);
 
 	if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
@@ -1690,8 +1692,7 @@
 	return skb;
 }
 
-#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
-	skb_tailroom(skb)) : 0)
+#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
 
 static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
 	int type, int gdeleted, int sdeleted, int crsend)
@@ -2823,11 +2824,7 @@
 	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
 
 	if (v == SEQ_START_TOKEN) {
-		seq_printf(seq,
-			   "%3s %6s "
-			   "%32s %32s %6s %6s\n", "Idx",
-			   "Device", "Multicast Address",
-			   "Source Address", "INC", "EXC");
+		seq_puts(seq, "Idx Device                Multicast Address                   Source Address    INC    EXC\n");
 	} else {
 		seq_printf(seq,
 			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 896af88..0cbcf98 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -486,11 +486,11 @@
 	}
 
 	if (skb_csum_unnecessary(skb)) {
-		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+		err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	} else if (msg->msg_flags&MSG_TRUNC) {
 		if (__skb_checksum_complete(skb))
 			goto csum_copy_err;
-		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+		err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	} else {
 		err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
 		if (err == -EINVAL)
@@ -548,7 +548,8 @@
 	if (!rp->checksum)
 		goto send;
 
-	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
+	skb = skb_peek(&sk->sk_write_queue);
+	if (!skb)
 		goto out;
 
 	offset = rp->offset;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 1a157ca..51ab096 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -69,7 +69,7 @@
 
 #define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))
 
-static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
+static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
 {
 	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
 }
@@ -178,7 +178,7 @@
 	ip6_expire_frag_queue(net, fq, &ip6_frags);
 }
 
-static __inline__ struct frag_queue *
+static struct frag_queue *
 fq_find(struct net *net, __be32 id, const struct in6_addr *src,
 	const struct in6_addr *dst, u8 ecn)
 {
@@ -684,21 +684,21 @@
 	unregister_net_sysctl_table(ip6_ctl_header);
 }
 #else
-static inline int ip6_frags_ns_sysctl_register(struct net *net)
+static int ip6_frags_ns_sysctl_register(struct net *net)
 {
 	return 0;
 }
 
-static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
+static void ip6_frags_ns_sysctl_unregister(struct net *net)
 {
 }
 
-static inline int ip6_frags_sysctl_register(void)
+static int ip6_frags_sysctl_register(void)
 {
 	return 0;
 }
 
-static inline void ip6_frags_sysctl_unregister(void)
+static void ip6_frags_sysctl_unregister(void)
 {
 }
 #endif
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a318dd89..c910831 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -772,23 +772,22 @@
 }
 #endif
 
-#define BACKTRACK(__net, saddr)			\
-do { \
-	if (rt == __net->ipv6.ip6_null_entry) {	\
-		struct fib6_node *pn; \
-		while (1) { \
-			if (fn->fn_flags & RTN_TL_ROOT) \
-				goto out; \
-			pn = fn->parent; \
-			if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
-				fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
-			else \
-				fn = pn; \
-			if (fn->fn_flags & RTN_RTINFO) \
-				goto restart; \
-		} \
-	} \
-} while (0)
+static struct fib6_node *fib6_backtrack(struct fib6_node *fn,
+					struct in6_addr *saddr)
+{
+	struct fib6_node *pn;
+
+	while (1) {
+		if (fn->fn_flags & RTN_TL_ROOT)
+			return NULL;
+		pn = fn->parent;
+		if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
+			fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
+		else
+			fn = pn;
+		if (fn->fn_flags & RTN_RTINFO)
+			return fn;
+	}
+}
 
 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
 					     struct fib6_table *table,
@@ -804,8 +803,11 @@
 	rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
 	if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
 		rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
-	BACKTRACK(net, &fl6->saddr);
-out:
+	if (rt == net->ipv6.ip6_null_entry) {
+		fn = fib6_backtrack(fn, &fl6->saddr);
+		if (fn)
+			goto restart;
+	}
 	dst_use(&rt->dst, jiffies);
 	read_unlock_bh(&table->tb6_lock);
 	return rt;
@@ -915,33 +917,48 @@
 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
 				      struct flowi6 *fl6, int flags)
 {
-	struct fib6_node *fn;
+	struct fib6_node *fn, *saved_fn;
 	struct rt6_info *rt, *nrt;
 	int strict = 0;
 	int attempts = 3;
 	int err;
-	int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
 
 	strict |= flags & RT6_LOOKUP_F_IFACE;
+	if (net->ipv6.devconf_all->forwarding == 0)
+		strict |= RT6_LOOKUP_F_REACHABLE;
 
-relookup:
+redo_fib6_lookup_lock:
 	read_lock_bh(&table->tb6_lock);
 
-restart_2:
 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
+	saved_fn = fn;
 
-restart:
-	rt = rt6_select(fn, oif, strict | reachable);
+redo_rt6_select:
+	rt = rt6_select(fn, oif, strict);
 	if (rt->rt6i_nsiblings)
-		rt = rt6_multipath_select(rt, fl6, oif, strict | reachable);
-	BACKTRACK(net, &fl6->saddr);
-	if (rt == net->ipv6.ip6_null_entry ||
-	    rt->rt6i_flags & RTF_CACHE)
-		goto out;
+		rt = rt6_multipath_select(rt, fl6, oif, strict);
+	if (rt == net->ipv6.ip6_null_entry) {
+		fn = fib6_backtrack(fn, &fl6->saddr);
+		if (fn)
+			goto redo_rt6_select;
+		else if (strict & RT6_LOOKUP_F_REACHABLE) {
+			/* also consider unreachable route */
+			strict &= ~RT6_LOOKUP_F_REACHABLE;
+			fn = saved_fn;
+			goto redo_rt6_select;
+		} else {
+			dst_hold(&rt->dst);
+			read_unlock_bh(&table->tb6_lock);
+			goto out2;
+		}
+	}
 
 	dst_hold(&rt->dst);
 	read_unlock_bh(&table->tb6_lock);
 
+	if (rt->rt6i_flags & RTF_CACHE)
+		goto out2;
+
 	if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
 		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
 	else if (!(rt->dst.flags & DST_HOST))
@@ -967,15 +984,8 @@
 	 * released someone could insert this route.  Relookup.
 	 */
 	ip6_rt_put(rt);
-	goto relookup;
+	goto redo_fib6_lookup_lock;
 
-out:
-	if (reachable) {
-		reachable = 0;
-		goto restart_2;
-	}
-	dst_hold(&rt->dst);
-	read_unlock_bh(&table->tb6_lock);
 out2:
 	rt->dst.lastuse = jiffies;
 	rt->dst.__use++;
@@ -1235,10 +1245,12 @@
 		rt = net->ipv6.ip6_null_entry;
 	else if (rt->dst.error) {
 		rt = net->ipv6.ip6_null_entry;
-		goto out;
+	} else if (rt == net->ipv6.ip6_null_entry) {
+		fn = fib6_backtrack(fn, &fl6->saddr);
+		if (fn)
+			goto restart;
 	}
-	BACKTRACK(net, &fl6->saddr);
-out:
+
 	dst_hold(&rt->dst);
 
 	read_unlock_bh(&table->tb6_lock);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a24557a..660496d 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1711,7 +1711,7 @@
 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
 			tunnel->encap.dport) ||
 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
-			tunnel->encap.dport))
+			tunnel->encap.flags))
 		goto nla_put_failure;
 
 	return 0;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 2f25cb6..7337fc7 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -166,13 +166,15 @@
 	int mss;
 	struct dst_entry *dst;
 	__u8 rcv_wscale;
-	bool ecn_ok = false;
 
 	if (!sysctl_tcp_syncookies || !th->ack || th->rst)
 		goto out;
 
-	if (tcp_synq_no_recent_overflow(sk) ||
-		(mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie)) == 0) {
+	if (tcp_synq_no_recent_overflow(sk))
+		goto out;
+
+	mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);
+	if (mss == 0) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 		goto out;
 	}
@@ -183,7 +185,7 @@
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
 	tcp_parse_options(skb, &tcp_opt, 0, NULL);
 
-	if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
+	if (!cookie_timestamp_decode(&tcp_opt))
 		goto out;
 
 	ret = NULL;
@@ -220,7 +222,6 @@
 
 	req->expires = 0UL;
 	req->num_retrans = 0;
-	ireq->ecn_ok		= ecn_ok;
 	ireq->snd_wscale	= tcp_opt.snd_wscale;
 	ireq->sack_ok		= tcp_opt.sack_ok;
 	ireq->wscale_ok		= tcp_opt.wscale_ok;
@@ -261,6 +262,7 @@
 				  dst_metric(dst, RTAX_INITRWND));
 
 	ireq->rcv_wscale = rcv_wscale;
+	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);
 
 	ret = get_cookie_sock(sk, skb, req, dst);
 out:
@@ -269,4 +271,3 @@
 	reqsk_free(req);
 	return NULL;
 }
-
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f6ba535..9b68092 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -424,8 +424,8 @@
 	}
 
 	if (skb_csum_unnecessary(skb))
-		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-					      msg->msg_iov, copied);
+		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
+					    msg, copied);
 	else {
 		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
 		if (err == -EINVAL)
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 6b8f543..b6aa8ed 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -42,11 +42,11 @@
 				      SKB_GSO_DODGY |
 				      SKB_GSO_UDP_TUNNEL |
 				      SKB_GSO_UDP_TUNNEL_CSUM |
+				      SKB_GSO_TUNNEL_REMCSUM |
 				      SKB_GSO_GRE |
 				      SKB_GSO_GRE_CSUM |
 				      SKB_GSO_IPIP |
-				      SKB_GSO_SIT |
-				      SKB_GSO_MPLS) ||
+				      SKB_GSO_SIT) ||
 			     !(type & (SKB_GSO_UDP))))
 			goto out;
 
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 91729b8..a0c7536 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -306,7 +306,7 @@
 	spin_unlock_bh(&ipx_interfaces_lock);
 }
 
-static __inline__ void __ipxitf_put(struct ipx_interface *intrfc)
+static void __ipxitf_put(struct ipx_interface *intrfc)
 {
 	if (atomic_dec_and_test(&intrfc->refcnt))
 		__ipxitf_down(intrfc);
@@ -1805,8 +1805,7 @@
 		msg->msg_flags |= MSG_TRUNC;
 	}
 
-	rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov,
-				     copied);
+	rc = skb_copy_datagram_msg(skb, sizeof(struct ipxhdr), msg, copied);
 	if (rc)
 		goto out_free;
 	if (skb->tstamp.tv64)
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index e15c16a..c1d247e 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -45,7 +45,7 @@
 	}
 
 	i = list_entry(v, struct ipx_interface, node);
-	seq_printf(seq, "%08lX   ", (unsigned long int)ntohl(i->if_netnum));
+	seq_printf(seq, "%08X   ", ntohl(i->if_netnum));
 	seq_printf(seq, "%02X%02X%02X%02X%02X%02X   ",
 			i->if_node[0], i->if_node[1], i->if_node[2],
 			i->if_node[3], i->if_node[4], i->if_node[5]);
@@ -87,10 +87,10 @@
 
 	rt = list_entry(v, struct ipx_route, node);
 
-	seq_printf(seq, "%08lX   ", (unsigned long int)ntohl(rt->ir_net));
+	seq_printf(seq, "%08X   ", ntohl(rt->ir_net));
 	if (rt->ir_routed)
-		seq_printf(seq, "%08lX     %02X%02X%02X%02X%02X%02X\n",
-			   (long unsigned int)ntohl(rt->ir_intrfc->if_netnum),
+		seq_printf(seq, "%08X     %02X%02X%02X%02X%02X%02X\n",
+			   ntohl(rt->ir_intrfc->if_netnum),
 			   rt->ir_router_node[0], rt->ir_router_node[1],
 			   rt->ir_router_node[2], rt->ir_router_node[3],
 			   rt->ir_router_node[4], rt->ir_router_node[5]);
@@ -194,19 +194,19 @@
 	s = v;
 	ipxs = ipx_sk(s);
 #ifdef CONFIG_IPX_INTERN
-	seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X  ",
-		   (unsigned long)ntohl(ipxs->intrfc->if_netnum),
+	seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X  ",
+		   ntohl(ipxs->intrfc->if_netnum),
 		   ipxs->node[0], ipxs->node[1], ipxs->node[2], ipxs->node[3],
 		   ipxs->node[4], ipxs->node[5], ntohs(ipxs->port));
 #else
-	seq_printf(seq, "%08lX:%04X  ", (unsigned long) ntohl(ipxs->intrfc->if_netnum),
+	seq_printf(seq, "%08X:%04X  ", ntohl(ipxs->intrfc->if_netnum),
 		   ntohs(ipxs->port));
 #endif	/* CONFIG_IPX_INTERN */
 	if (s->sk_state != TCP_ESTABLISHED)
 		seq_printf(seq, "%-28s", "Not_Connected");
 	else {
-		seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X  ",
-			   (unsigned long)ntohl(ipxs->dest_addr.net),
+		seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X  ",
+			   ntohl(ipxs->dest_addr.net),
 			   ipxs->dest_addr.node[0], ipxs->dest_addr.node[1],
 			   ipxs->dest_addr.node[2], ipxs->dest_addr.node[3],
 			   ipxs->dest_addr.node[4], ipxs->dest_addr.node[5],
diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c
index ad7c03d..0dafcc5 100644
--- a/net/ipx/sysctl_net_ipx.c
+++ b/net/ipx/sysctl_net_ipx.c
@@ -9,14 +9,12 @@
 #include <linux/mm.h>
 #include <linux/sysctl.h>
 #include <net/net_namespace.h>
+#include <net/ipx.h>
 
 #ifndef CONFIG_SYSCTL
 #error This file should not be compiled without CONFIG_SYSCTL defined
 #endif
 
-/* From af_ipx.c */
-extern int sysctl_ipx_pprop_broadcasting;
-
 static struct ctl_table ipx_table[] = {
 	{
 		.procname	= "ipx_pprop_broadcasting",
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 92fafd4..980bc26 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1396,7 +1396,7 @@
 		copied = size;
 		msg->msg_flags |= MSG_TRUNC;
 	}
-	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	skb_copy_datagram_msg(skb, 0, msg, copied);
 
 	skb_free_datagram(sk, skb);
 
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a089b6b..057b564 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1355,7 +1355,7 @@
 		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
 
 	cskb = skb;
-	if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
+	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
 		if (!(flags & MSG_PEEK))
 			skb_queue_head(&sk->sk_receive_queue, skb);
 		return -EFAULT;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1847ec4..e588309 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3654,7 +3654,7 @@
 	}
 
 	skb_reset_transport_header(skb);
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err)
 		goto out_free;
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 369a982..a6cc1fe 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -528,7 +528,7 @@
 		copied = len;
 	}
 
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err)
 		goto done;
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 0edb263..2177b96 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -672,7 +672,7 @@
 		copied = len;
 	}
 
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err)
 		goto done;
 
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index b704a93..c559bcd 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -208,7 +208,7 @@
 	else if (len < skb->len)
 		msg->msg_flags |= MSG_TRUNC;
 
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
+	err = skb_copy_datagram_msg(skb, 0, msg, len);
 	if (likely(err == 0))
 		err = len;
 
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 3cdaa04..fc60d9d 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -73,6 +73,7 @@
 		lapb_put(lapb);
 	}
 }
+EXPORT_SYMBOL(lapb_register);
 
 /*
  *	Add a socket to the bound sockets list.
@@ -195,6 +196,7 @@
 	write_unlock_bh(&lapb_list_lock);
 	return rc;
 }
+EXPORT_SYMBOL(lapb_unregister);
 
 int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms)
 {
@@ -227,6 +229,7 @@
 out:
 	return rc;
 }
+EXPORT_SYMBOL(lapb_getparms);
 
 int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms)
 {
@@ -262,6 +265,7 @@
 out:
 	return rc;
 }
+EXPORT_SYMBOL(lapb_setparms);
 
 int lapb_connect_request(struct net_device *dev)
 {
@@ -290,6 +294,7 @@
 out:
 	return rc;
 }
+EXPORT_SYMBOL(lapb_connect_request);
 
 int lapb_disconnect_request(struct net_device *dev)
 {
@@ -334,6 +339,7 @@
 out:
 	return rc;
 }
+EXPORT_SYMBOL(lapb_disconnect_request);
 
 int lapb_data_request(struct net_device *dev, struct sk_buff *skb)
 {
@@ -355,6 +361,7 @@
 out:
 	return rc;
 }
+EXPORT_SYMBOL(lapb_data_request);
 
 int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
 {
@@ -369,6 +376,7 @@
 
 	return rc;
 }
+EXPORT_SYMBOL(lapb_data_received);
 
 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
 {
@@ -415,15 +423,6 @@
 	return used;
 }
 
-EXPORT_SYMBOL(lapb_register);
-EXPORT_SYMBOL(lapb_unregister);
-EXPORT_SYMBOL(lapb_getparms);
-EXPORT_SYMBOL(lapb_setparms);
-EXPORT_SYMBOL(lapb_connect_request);
-EXPORT_SYMBOL(lapb_disconnect_request);
-EXPORT_SYMBOL(lapb_data_request);
-EXPORT_SYMBOL(lapb_data_received);
-
 static int __init lapb_init(void)
 {
 	return 0;
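
The lapb hunks are purely mechanical: each EXPORT_SYMBOL() moves from a block at the bottom of the file to directly below the function it exports, the form checkpatch.pl and current kernel style expect. For illustration (hypothetical function):

	int example_data_request(struct net_device *dev, struct sk_buff *skb)
	{
		return 0;
	}
	EXPORT_SYMBOL(example_data_request);	/* export next to definition */
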
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index bb9cbc1..af66266 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -819,8 +819,7 @@
 			used = len;
 
 		if (!(flags & MSG_TRUNC)) {
-			int rc = skb_copy_datagram_iovec(skb, offset,
-							 msg->msg_iov, used);
+			int rc = skb_copy_datagram_msg(skb, offset, msg, used);
 			if (rc) {
 				/* Exception. Bailout! */
 				if (!copied)
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c
index 25c31c0..6daf391 100644
--- a/net/llc/llc_if.c
+++ b/net/llc/llc_if.c
@@ -15,7 +15,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
-#include <asm/errno.h>
+#include <linux/errno.h>
 #include <net/llc_if.h>
 #include <net/llc_sap.h>
 #include <net/llc_s_ev.h>
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index e3545f2..ca27837 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -34,8 +34,7 @@
 				  SKB_GSO_TCP_ECN |
 				  SKB_GSO_GRE |
 				  SKB_GSO_GRE_CSUM |
-				  SKB_GSO_IPIP |
-				  SKB_GSO_MPLS)))
+				  SKB_GSO_IPIP)))
 		goto out;
 
 	/* Setup inner SKB. */
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f1de72d..580b794 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2401,7 +2401,7 @@
 	}
 
 	skb_reset_transport_header(data_skb);
-	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
 
 	if (msg->msg_name) {
 		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 1b06a1f..7e13f6a 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1167,7 +1167,7 @@
 		msg->msg_flags |= MSG_TRUNC;
 	}
 
-	er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	er = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (er < 0) {
 		skb_free_datagram(sk, skb);
 		release_sock(sk);
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 51f077a..83bc785 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -832,7 +832,7 @@
 	copied = min_t(unsigned int, rlen, len);
 
 	cskb = skb;
-	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
+	if (skb_copy_datagram_msg(cskb, 0, msg, copied)) {
 		if (!(flags & MSG_PEEK))
 			skb_queue_head(&sk->sk_receive_queue, skb);
 		return -EFAULT;
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 11c3544..9d7d2b7 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -269,7 +269,7 @@
 		copied = len;
 	}
 
-	rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
 
 	skb_free_datagram(sk, skb);
 
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index ba3bb82..454ce12 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -29,11 +29,12 @@
 	  If unsure, say N.
 
 config OPENVSWITCH_GRE
-	bool "Open vSwitch GRE tunneling support"
+	tristate "Open vSwitch GRE tunneling support"
+	select NET_MPLS_GSO
 	depends on INET
 	depends on OPENVSWITCH
-	depends on NET_IPGRE_DEMUX && !(OPENVSWITCH=y && NET_IPGRE_DEMUX=m)
-	default y
+	depends on NET_IPGRE_DEMUX
+	default OPENVSWITCH
 	---help---
 	  If you say Y here, then Open vSwitch will be able to create GRE
 	  vports.
@@ -43,11 +44,11 @@
 	  If unsure, say Y.
 
 config OPENVSWITCH_VXLAN
-	bool "Open vSwitch VXLAN tunneling support"
+	tristate "Open vSwitch VXLAN tunneling support"
 	depends on INET
 	depends on OPENVSWITCH
-	depends on VXLAN && !(OPENVSWITCH=y && VXLAN=m)
-	default y
+	depends on VXLAN
+	default OPENVSWITCH
 	---help---
 	  If you say Y here, then Open vSwitch will be able to create vxlan vports.
 
@@ -56,11 +57,11 @@
 	  If unsure, say Y.
 
 config OPENVSWITCH_GENEVE
-	bool "Open vSwitch Geneve tunneling support"
+	tristate "Open vSwitch Geneve tunneling support"
 	depends on INET
 	depends on OPENVSWITCH
-	depends on GENEVE && !(OPENVSWITCH=y && GENEVE=m)
-	default y
+	depends on GENEVE
+	default OPENVSWITCH
 	---help---
 	  If you say Y here, then Open vSwitch will be able to create geneve vports.
 
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 9a33a27..91b9478 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -15,14 +15,6 @@
 	vport-internal_dev.o \
 	vport-netdev.o
 
-ifneq ($(CONFIG_OPENVSWITCH_GENEVE),)
-openvswitch-y += vport-geneve.o
-endif
-
-ifneq ($(CONFIG_OPENVSWITCH_VXLAN),)
-openvswitch-y += vport-vxlan.o
-endif
-
-ifneq ($(CONFIG_OPENVSWITCH_GRE),)
-openvswitch-y += vport-gre.o
-endif
+obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
+obj-$(CONFIG_OPENVSWITCH_VXLAN)	+= vport-vxlan.o
+obj-$(CONFIG_OPENVSWITCH_GRE)	+= vport-gre.o
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 006886d..f7e5891 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -28,10 +28,12 @@
 #include <linux/in6.h>
 #include <linux/if_arp.h>
 #include <linux/if_vlan.h>
+
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/checksum.h>
 #include <net/dsfield.h>
+#include <net/mpls.h>
 #include <net/sctp/checksum.h>
 
 #include "datapath.h"
@@ -118,6 +120,92 @@
 	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 }
 
+static int push_mpls(struct sk_buff *skb,
+		     const struct ovs_action_push_mpls *mpls)
+{
+	__be32 *new_mpls_lse;
+	struct ethhdr *hdr;
+
+	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
+	if (skb->encapsulation)
+		return -ENOTSUPP;
+
+	if (skb_cow_head(skb, MPLS_HLEN) < 0)
+		return -ENOMEM;
+
+	skb_push(skb, MPLS_HLEN);
+	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
+		skb->mac_len);
+	skb_reset_mac_header(skb);
+
+	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
+	*new_mpls_lse = mpls->mpls_lse;
+
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
+							     MPLS_HLEN, 0));
+
+	hdr = eth_hdr(skb);
+	hdr->h_proto = mpls->mpls_ethertype;
+
+	skb_set_inner_protocol(skb, skb->protocol);
+	skb->protocol = mpls->mpls_ethertype;
+
+	return 0;
+}
+
+static int pop_mpls(struct sk_buff *skb, const __be16 ethertype)
+{
+	struct ethhdr *hdr;
+	int err;
+
+	err = make_writable(skb, skb->mac_len + MPLS_HLEN);
+	if (unlikely(err))
+		return err;
+
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_sub(skb->csum,
+				     csum_partial(skb_mpls_header(skb),
+						  MPLS_HLEN, 0));
+
+	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
+		skb->mac_len);
+
+	__skb_pull(skb, MPLS_HLEN);
+	skb_reset_mac_header(skb);
+
+	/* skb_mpls_header() is used to locate the ethertype
+	 * field correctly in the presence of VLAN tags.
+	 */
+	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
+	hdr->h_proto = ethertype;
+	if (eth_p_mpls(skb->protocol))
+		skb->protocol = ethertype;
+	return 0;
+}
+
+static int set_mpls(struct sk_buff *skb, const __be32 *mpls_lse)
+{
+	__be32 *stack;
+	int err;
+
+	err = make_writable(skb, skb->mac_len + MPLS_HLEN);
+	if (unlikely(err))
+		return err;
+
+	stack = (__be32 *)skb_mpls_header(skb);
+	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+		__be32 diff[] = { ~(*stack), *mpls_lse };
+
+		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+					  ~skb->csum);
+	}
+
+	*stack = *mpls_lse;
+
+	return 0;
+}
+
 /* remove VLAN header from packet and update csum accordingly. */
 static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
 {
@@ -140,10 +228,12 @@
 
 	vlan_set_encap_proto(skb, vhdr);
 	skb->mac_header += VLAN_HLEN;
+
 	if (skb_network_offset(skb) < ETH_HLEN)
 		skb_set_network_header(skb, ETH_HLEN);
-	skb_reset_mac_len(skb);
 
+	/* Update mac_len for subsequent MPLS actions */
+	skb_reset_mac_len(skb);
 	return 0;
 }
 
@@ -186,6 +276,8 @@
 
 		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
 			return -ENOMEM;
+		/* Update mac_len for subsequent MPLS actions */
+		skb->mac_len += VLAN_HLEN;
 
 		if (skb->ip_summed == CHECKSUM_COMPLETE)
 			skb->csum = csum_add(skb->csum, csum_partial(skb->data
@@ -459,21 +551,14 @@
 	return 0;
 }
 
-static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
 {
-	struct vport *vport;
+	struct vport *vport = ovs_vport_rcu(dp, out_port);
 
-	if (unlikely(!skb))
-		return -ENOMEM;
-
-	vport = ovs_vport_rcu(dp, out_port);
-	if (unlikely(!vport)) {
+	if (likely(vport))
+		ovs_vport_send(vport, skb);
+	else
 		kfree_skb(skb);
-		return -ENODEV;
-	}
-
-	ovs_vport_send(vport, skb);
-	return 0;
 }
 
 static int output_userspace(struct datapath *dp, struct sk_buff *skb,
@@ -504,11 +589,6 @@
 	return ovs_dp_upcall(dp, skb, &upcall);
 }
 
-static bool last_action(const struct nlattr *a, int rem)
-{
-	return a->nla_len == rem;
-}
-
 static int sample(struct datapath *dp, struct sk_buff *skb,
 		  struct sw_flow_key *key, const struct nlattr *attr)
 {
@@ -543,7 +623,7 @@
 	 * user space. This skb will be consumed by its caller.
 	 */
 	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
-		   last_action(a, rem)))
+		   nla_is_last(a, rem)))
 		return output_userspace(dp, skb, key, a);
 
 	skb = skb_clone(skb, GFP_ATOMIC);
@@ -617,6 +697,10 @@
 	case OVS_KEY_ATTR_SCTP:
 		err = set_sctp(skb, nla_data(nested_attr));
 		break;
+
+	case OVS_KEY_ATTR_MPLS:
+		err = set_mpls(skb, nla_data(nested_attr));
+		break;
 	}
 
 	return err;
@@ -633,7 +717,7 @@
 	if (err)
 		return err;
 
-	if (!last_action(a, rem)) {
+	if (!nla_is_last(a, rem)) {
 		/* Recirc action is not the last action
 		 * of the action list; we need to clone the skb.
 		 */
@@ -677,8 +761,12 @@
 	     a = nla_next(a, &rem)) {
 		int err = 0;
 
-		if (prev_port != -1) {
-			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
+		if (unlikely(prev_port != -1)) {
+			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);
+
+			if (out_skb)
+				do_output(dp, out_skb, prev_port);
+
 			prev_port = -1;
 		}
 
@@ -695,6 +783,14 @@
 			execute_hash(skb, key, a);
 			break;
 
+		case OVS_ACTION_ATTR_PUSH_MPLS:
+			err = push_mpls(skb, nla_data(a));
+			break;
+
+		case OVS_ACTION_ATTR_POP_MPLS:
+			err = pop_mpls(skb, nla_get_be16(a));
+			break;
+
 		case OVS_ACTION_ATTR_PUSH_VLAN:
 			err = push_vlan(skb, nla_data(a));
 			if (unlikely(err)) /* skb already freed. */
@@ -707,7 +803,7 @@
 
 		case OVS_ACTION_ATTR_RECIRC:
 			err = execute_recirc(dp, skb, key, a, rem);
-			if (last_action(a, rem)) {
+			if (nla_is_last(a, rem)) {
 				/* If this is the last action, the skb has
 				 * been consumed or freed.
 				 * Return immediately.
@@ -769,14 +865,11 @@
 
 /* Execute a list of actions against 'skb'. */
 int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
-			struct sw_flow_key *key)
+			struct sw_flow_actions *acts, struct sw_flow_key *key)
 {
 	int level = this_cpu_read(exec_actions_level);
-	struct sw_flow_actions *acts;
 	int err;
 
-	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
-
 	this_cpu_inc(exec_actions_level);
 	OVS_CB(skb)->egress_tun_info = NULL;
 	err = do_execute_actions(dp, skb, key,
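
The new MPLS actions locate the label stack entry with skb_mpls_header(), which is why both VLAN paths above now keep skb->mac_len accurate, and the local last_action() helper is replaced by the generic nla_is_last(). Both helpers are presumably one-liners along these lines:

	/* Presumed shape of the helpers backing the hunks above
	 * (include/net/mpls.h and include/net/netlink.h); sketches,
	 * not verbatim kernel source.
	 */
	static inline unsigned char *skb_mpls_header(struct sk_buff *skb)
	{
		/* The MPLS stack begins right after the L2 header. */
		return skb_mac_header(skb) + skb->mac_len;
	}

	static inline bool nla_is_last(const struct nlattr *nla, int rem)
	{
		/* True when this attribute consumes the whole remainder. */
		return nla->nla_len == rem;
	}
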
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e6d7255..014485e 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -59,6 +59,7 @@
 #include "vport-netdev.h"
 
 int ovs_net_id __read_mostly;
+EXPORT_SYMBOL(ovs_net_id);
 
 static struct genl_family dp_packet_genl_family;
 static struct genl_family dp_flow_genl_family;
@@ -130,6 +131,7 @@
 	else
 		return 1;
 }
+EXPORT_SYMBOL(lockdep_ovsl_is_held);
 #endif
 
 static struct vport *new_vport(const struct vport_parms *);
@@ -138,19 +140,30 @@
 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
 				  const struct dp_upcall_info *);
 
-/* Must be called with rcu_read_lock or ovs_mutex. */
-static struct datapath *get_dp(struct net *net, int dp_ifindex)
+/* Must be called with rcu_read_lock. */
+static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
 {
-	struct datapath *dp = NULL;
-	struct net_device *dev;
+	struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);
 
-	rcu_read_lock();
-	dev = dev_get_by_index_rcu(net, dp_ifindex);
 	if (dev) {
 		struct vport *vport = ovs_internal_dev_get_vport(dev);
 		if (vport)
-			dp = vport->dp;
+			return vport->dp;
 	}
+
+	return NULL;
+}
+
+/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
+ * returned dp pointer valid.
+ */
+static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
+{
+	struct datapath *dp;
+
+	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
+	rcu_read_lock();
+	dp = get_dp_rcu(net, dp_ifindex);
 	rcu_read_unlock();
 
 	return dp;
@@ -185,6 +198,7 @@
 {
 	struct datapath *dp = container_of(rcu, struct datapath, rcu);
 
+	ovs_flow_tbl_destroy(&dp->table);
 	free_percpu(dp->stats_percpu);
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp->ports);
@@ -243,6 +257,7 @@
 	const struct vport *p = OVS_CB(skb)->input_vport;
 	struct datapath *dp = p->dp;
 	struct sw_flow *flow;
+	struct sw_flow_actions *sf_acts;
 	struct dp_stats_percpu *stats;
 	u64 *stats_counter;
 	u32 n_mask_hit;
@@ -268,10 +283,10 @@
 		goto out;
 	}
 
-	OVS_CB(skb)->flow = flow;
+	ovs_flow_stats_update(flow, key->tp.flags, skb);
+	sf_acts = rcu_dereference(flow->sf_acts);
+	ovs_execute_actions(dp, skb, sf_acts, key);
 
-	ovs_flow_stats_update(OVS_CB(skb)->flow, key->tp.flags, skb);
-	ovs_execute_actions(dp, skb, key);
 	stats_counter = &stats->n_hit;
 
 out:
@@ -360,37 +375,12 @@
 	return err;
 }
 
-static size_t key_attr_size(void)
-{
-	return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
-		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
-		  + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
-		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
-		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
-		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
-		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
-		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
-		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
-		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_OAM */
-		  + nla_total_size(256)   /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
-		+ nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
-		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
-		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
-		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
-		+ nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
-		+ nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
-		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
-		+ nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
-		+ nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
-		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
-}
-
 static size_t upcall_msg_size(const struct nlattr *userdata,
 			      unsigned int hdrlen)
 {
 	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
 		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
-		+ nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
+		+ nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */
 
 	/* OVS_PACKET_ATTR_USERDATA */
 	if (userdata)
@@ -510,6 +500,7 @@
 	struct sw_flow_actions *acts;
 	struct sk_buff *packet;
 	struct sw_flow *flow;
+	struct sw_flow_actions *sf_acts;
 	struct datapath *dp;
 	struct ethhdr *eth;
 	struct vport *input_vport;
@@ -552,25 +543,18 @@
 	if (err)
 		goto err_flow_free;
 
-	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
-	err = PTR_ERR(acts);
-	if (IS_ERR(acts))
-		goto err_flow_free;
-
 	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
-				   &flow->key, 0, &acts);
+				   &flow->key, &acts);
 	if (err)
 		goto err_flow_free;
 
 	rcu_assign_pointer(flow->sf_acts, acts);
-
 	OVS_CB(packet)->egress_tun_info = NULL;
-	OVS_CB(packet)->flow = flow;
 	packet->priority = flow->key.phy.priority;
 	packet->mark = flow->key.phy.skb_mark;
 
 	rcu_read_lock();
-	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
 	err = -ENODEV;
 	if (!dp)
 		goto err_unlock;
@@ -583,9 +567,10 @@
 		goto err_unlock;
 
 	OVS_CB(packet)->input_vport = input_vport;
+	sf_acts = rcu_dereference(flow->sf_acts);
 
 	local_bh_disable();
-	err = ovs_execute_actions(dp, packet, &flow->key);
+	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
 	local_bh_enable();
 	rcu_read_unlock();
 
@@ -662,8 +647,8 @@
 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 {
 	return NLMSG_ALIGN(sizeof(struct ovs_header))
-		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
-		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
+		+ nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */
+		+ nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */
 		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
 		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
 		+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
@@ -671,58 +656,67 @@
 }
 
 /* Called with ovs_mutex or RCU read lock. */
-static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
-				  struct sk_buff *skb, u32 portid,
-				  u32 seq, u32 flags, u8 cmd)
+static int ovs_flow_cmd_fill_match(const struct sw_flow *flow,
+				   struct sk_buff *skb)
 {
-	const int skb_orig_len = skb->len;
-	struct nlattr *start;
-	struct ovs_flow_stats stats;
-	__be16 tcp_flags;
-	unsigned long used;
-	struct ovs_header *ovs_header;
 	struct nlattr *nla;
 	int err;
 
-	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
-	if (!ovs_header)
-		return -EMSGSIZE;
-
-	ovs_header->dp_ifindex = dp_ifindex;
-
 	/* Fill flow key. */
 	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
 	if (!nla)
-		goto nla_put_failure;
+		return -EMSGSIZE;
 
 	err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
 	if (err)
-		goto error;
+		return err;
+
 	nla_nest_end(skb, nla);
 
+	/* Fill flow mask. */
 	nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
 	if (!nla)
-		goto nla_put_failure;
+		return -EMSGSIZE;
 
 	err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
 	if (err)
-		goto error;
+		return err;
 
 	nla_nest_end(skb, nla);
+	return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
+				   struct sk_buff *skb)
+{
+	struct ovs_flow_stats stats;
+	__be16 tcp_flags;
+	unsigned long used;
 
 	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
 
 	if (used &&
 	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
-		goto nla_put_failure;
+		return -EMSGSIZE;
 
 	if (stats.n_packets &&
 	    nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
-		goto nla_put_failure;
+		return -EMSGSIZE;
 
 	if ((u8)ntohs(tcp_flags) &&
 	     nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
-		goto nla_put_failure;
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
+				     struct sk_buff *skb, int skb_orig_len)
+{
+	struct nlattr *start;
+	int err;
 
 	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
 	 * this is the first flow to be dumped into 'skb'.  This is unusual for
@@ -746,17 +740,47 @@
 			nla_nest_end(skb, start);
 		else {
 			if (skb_orig_len)
-				goto error;
+				return err;
 
 			nla_nest_cancel(skb, start);
 		}
-	} else if (skb_orig_len)
-		goto nla_put_failure;
+	} else if (skb_orig_len) {
+		return -EMSGSIZE;
+	}
+
+	return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
+				  struct sk_buff *skb, u32 portid,
+				  u32 seq, u32 flags, u8 cmd)
+{
+	const int skb_orig_len = skb->len;
+	struct ovs_header *ovs_header;
+	int err;
+
+	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
+				 flags, cmd);
+	if (!ovs_header)
+		return -EMSGSIZE;
+
+	ovs_header->dp_ifindex = dp_ifindex;
+
+	err = ovs_flow_cmd_fill_match(flow, skb);
+	if (err)
+		goto error;
+
+	err = ovs_flow_cmd_fill_stats(flow, skb);
+	if (err)
+		goto error;
+
+	err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
+	if (err)
+		goto error;
 
 	return genlmsg_end(skb, ovs_header);
 
-nla_put_failure:
-	err = -EMSGSIZE;
 error:
 	genlmsg_cancel(skb, ovs_header);
 	return err;
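
ovs_flow_cmd_fill_info() is now composed from fill_match/fill_stats/fill_actions
helpers that each emit one group of netlink attributes into the same message.
The sketch below hand-rolls the nested type-length-value layout that the
nla_nest_start()/nla_nest_end() calls produce; the 4-byte attribute alignment
real netlink applies is omitted for brevity, and all types and helpers are
illustrative, not the kernel nla_* API.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct tlv_hdr { uint16_t len; uint16_t type; };

	static unsigned char buf[256];
	static size_t used;

	/* Reserve a container header now; its length is patched later. */
	static size_t nest_start(uint16_t type)
	{
		size_t off = used;
		struct tlv_hdr h = { 0, type };

		memcpy(buf + used, &h, sizeof(h));
		used += sizeof(h);
		return off;
	}

	/* Backfill the container length once the inner attributes exist. */
	static void nest_end(size_t off)
	{
		uint16_t len = (uint16_t)(used - off);

		memcpy(buf + off, &len, sizeof(len));
	}

	static void put_u32(uint16_t type, uint32_t v)
	{
		struct tlv_hdr h = {
			(uint16_t)(sizeof(struct tlv_hdr) + sizeof(v)), type
		};

		memcpy(buf + used, &h, sizeof(h));
		used += sizeof(h);
		memcpy(buf + used, &v, sizeof(v));
		used += sizeof(v);
	}

	int main(void)
	{
		size_t key = nest_start(1);	/* stands in for OVS_FLOW_ATTR_KEY */

		put_u32(2, 42);			/* one inner attribute */
		nest_end(key);
		printf("message is %zu bytes\n", used);
		return 0;
	}
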
@@ -814,10 +838,14 @@
 
 	/* Must have key and actions. */
 	error = -EINVAL;
-	if (!a[OVS_FLOW_ATTR_KEY])
+	if (!a[OVS_FLOW_ATTR_KEY]) {
+		OVS_NLERR("Flow key attribute not present in new flow.\n");
 		goto error;
-	if (!a[OVS_FLOW_ATTR_ACTIONS])
+	}
+	if (!a[OVS_FLOW_ATTR_ACTIONS]) {
+		OVS_NLERR("Flow actions attribute not present in new flow.\n");
 		goto error;
+	}
 
 	/* Most of the time we need to allocate a new flow, do it before
 	 * locking.
@@ -838,16 +866,11 @@
 	ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
 
 	/* Validate actions. */
-	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
-	error = PTR_ERR(acts);
-	if (IS_ERR(acts))
-		goto err_kfree_flow;
-
 	error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
-				     0, &acts);
+				     &acts);
 	if (error) {
 		OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
-		goto err_kfree_acts;
+		goto err_kfree_flow;
 	}
 
 	reply = ovs_flow_cmd_alloc_info(acts, info, false);
@@ -938,6 +961,7 @@
 	return error;
 }
 
+/* Factor out action copy to avoid "-Wframe-larger-than=1024" warning. */
 static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
 						const struct sw_flow_key *key,
 						const struct sw_flow_mask *mask)
@@ -946,15 +970,10 @@
 	struct sw_flow_key masked_key;
 	int error;
 
-	acts = ovs_nla_alloc_flow_actions(nla_len(a));
-	if (IS_ERR(acts))
-		return acts;
-
 	ovs_flow_mask_key(&masked_key, key, mask);
-	error = ovs_nla_copy_actions(a, &masked_key, 0, &acts);
+	error = ovs_nla_copy_actions(a, &masked_key, &acts);
 	if (error) {
-		OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
-		kfree(acts);
+		OVS_NLERR("Actions may not be safe on all matching packets.\n");
 		return ERR_PTR(error);
 	}
 
@@ -976,8 +995,10 @@
 
 	/* Extract key. */
 	error = -EINVAL;
-	if (!a[OVS_FLOW_ATTR_KEY])
+	if (!a[OVS_FLOW_ATTR_KEY]) {
+		OVS_NLERR("Flow key attribute not present in set flow.\n");
 		goto error;
+	}
 
 	ovs_match_init(&match, &key, &mask);
 	error = ovs_nla_get_match(&match,
@@ -992,10 +1013,8 @@
 			error = PTR_ERR(acts);
 			goto error;
 		}
-	}
 
-	/* Can allocate before locking if have acts. */
-	if (acts) {
+		/* Can allocate before locking if have acts. */
 		reply = ovs_flow_cmd_alloc_info(acts, info, false);
 		if (IS_ERR(reply)) {
 			error = PTR_ERR(reply);
@@ -1179,7 +1198,7 @@
 	struct datapath *dp;
 
 	rcu_read_lock();
-	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
 	if (!dp) {
 		rcu_read_unlock();
 		return -ENODEV;
@@ -1442,7 +1461,7 @@
 err_destroy_percpu:
 	free_percpu(dp->stats_percpu);
 err_destroy_table:
-	ovs_flow_tbl_destroy(&dp->table, false);
+	ovs_flow_tbl_destroy(&dp->table);
 err_free_dp:
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
@@ -1474,8 +1493,6 @@
 	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
 
 	/* RCU destroy the flow table */
-	ovs_flow_tbl_destroy(&dp->table, true);
-
 	call_rcu(&dp->rcu, destroy_dp_rcu);
 }
 
@@ -1764,6 +1781,7 @@
 		return -ENOMEM;
 
 	ovs_lock();
+restart:
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 	err = -ENODEV;
 	if (!dp)
@@ -1795,8 +1813,11 @@
 
 	vport = new_vport(&parms);
 	err = PTR_ERR(vport);
-	if (IS_ERR(vport))
+	if (IS_ERR(vport)) {
+		if (err == -EAGAIN)
+			goto restart;
 		goto exit_unlock_free;
+	}
 
 	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
 				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
@@ -1939,7 +1960,7 @@
 	int i, j = 0;
 
 	rcu_read_lock();
-	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
 	if (!dp) {
 		rcu_read_unlock();
 		return -ENODEV;
@@ -2112,12 +2133,18 @@
 	if (err)
 		goto error_netns_exit;
 
+	err = ovs_netdev_init();
+	if (err)
+		goto error_unreg_notifier;
+
 	err = dp_register_genl();
 	if (err < 0)
-		goto error_unreg_notifier;
+		goto error_unreg_netdev;
 
 	return 0;
 
+error_unreg_netdev:
+	ovs_netdev_exit();
 error_unreg_notifier:
 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
 error_netns_exit:
@@ -2137,6 +2164,7 @@
 static void dp_cleanup(void)
 {
 	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
+	ovs_netdev_exit();
 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
 	unregister_pernet_device(&ovs_net_ops);
 	rcu_barrier();
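
The dp_init() changes slot ovs_netdev_init() into the usual goto-unwind idiom:
initialize in order, and on failure release in reverse order from labels,
mirrored by dp_cleanup(). A standalone skeleton of that shape, with fabricated
names and c_init() forced to fail so the unwind is visible:

	#include <stdio.h>

	static int a_init(void) { puts("a_init"); return 0; }
	static void a_exit(void) { puts("a_exit"); }
	static int b_init(void) { puts("b_init"); return 0; }
	static void b_exit(void) { puts("b_exit"); }
	static int c_init(void) { puts("c_init fails"); return -1; }

	static int dp_init_sketch(void)
	{
		int err;

		err = a_init();
		if (err)
			goto error;
		err = b_init();
		if (err)
			goto error_a;
		err = c_init();
		if (err)
			goto error_b;
		return 0;

	error_b:
		b_exit();	/* release in reverse acquisition order */
	error_a:
		a_exit();
	error:
		return err;
	}

	int main(void)
	{
		printf("dp_init_sketch() = %d\n", dp_init_sketch());
		return 0;
	}
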
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 9741354..1c56a80 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -94,14 +94,12 @@
 
 /**
  * struct ovs_skb_cb - OVS data in skb CB
- * @flow: The flow associated with this packet.  May be %NULL if no flow.
  * @egress_tun_key: Tunnel information about this packet on egress path.
  * NULL if the packet is not being tunneled.
  * @input_vport: The original vport packet came in on. This value is cached
  * when a packet is received by OVS.
  */
 struct ovs_skb_cb {
-	struct sw_flow		*flow;
 	struct ovs_tunnel_info  *egress_tun_info;
 	struct vport		*input_vport;
 };
@@ -194,7 +192,7 @@
 					 u8 cmd);
 
 int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
-			struct sw_flow_key *);
+			struct sw_flow_actions *acts, struct sw_flow_key *);
 
 void ovs_dp_notify_wq(struct work_struct *work);
 
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 2b78789..90a2101 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -32,6 +32,7 @@
 #include <linux/if_arp.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <linux/mpls.h>
 #include <linux/sctp.h>
 #include <linux/smp.h>
 #include <linux/tcp.h>
@@ -42,6 +43,7 @@
 #include <net/ip.h>
 #include <net/ip_tunnels.h>
 #include <net/ipv6.h>
+#include <net/mpls.h>
 #include <net/ndisc.h>
 
 #include "datapath.h"
@@ -480,6 +482,7 @@
 		return -ENOMEM;
 
 	skb_reset_network_header(skb);
+	skb_reset_mac_len(skb);
 	__skb_push(skb, skb->data - skb_mac_header(skb));
 
 	/* Network layer. */
@@ -584,6 +587,33 @@
 			memset(&key->ip, 0, sizeof(key->ip));
 			memset(&key->ipv4, 0, sizeof(key->ipv4));
 		}
+	} else if (eth_p_mpls(key->eth.type)) {
+		size_t stack_len = MPLS_HLEN;
+
+		/* In the presence of an MPLS label stack the end of the L2
+		 * header and the beginning of the L3 header differ.
+		 *
+		 * Advance network_header to the beginning of the L3
+		 * header. mac_len corresponds to the end of the L2 header.
+		 */
+		while (1) {
+			__be32 lse;
+
+			error = check_header(skb, skb->mac_len + stack_len);
+			if (unlikely(error))
+				return 0;
+
+			memcpy(&lse, skb_network_header(skb), MPLS_HLEN);
+
+			if (stack_len == MPLS_HLEN)
+				memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);
+
+			skb_set_network_header(skb, skb->mac_len + stack_len);
+			if (lse & htonl(MPLS_LS_S_MASK))
+				break;
+
+			stack_len += MPLS_HLEN;
+		}
 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
 		int nh_len;             /* IPv6 Header + Extensions */
 
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 7181331..4962bee 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -102,12 +102,17 @@
 		__be16 tci;		/* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
 		__be16 type;		/* Ethernet frame type. */
 	} eth;
-	struct {
-		u8     proto;		/* IP protocol or lower 8 bits of ARP opcode. */
-		u8     tos;		/* IP ToS. */
-		u8     ttl;		/* IP TTL/hop limit. */
-		u8     frag;		/* One of OVS_FRAG_TYPE_*. */
-	} ip;
+	union {
+		struct {
+			__be32 top_lse;	/* top label stack entry */
+		} mpls;
+		struct {
+			u8     proto;	/* IP protocol or lower 8 bits of ARP opcode. */
+			u8     tos;	/* IP ToS. */
+			u8     ttl;	/* IP TTL/hop limit. */
+			u8     frag;	/* One of OVS_FRAG_TYPE_*. */
+		} ip;
+	};
 	struct {
 		__be16 src;		/* TCP/UDP/SCTP source port. */
 		__be16 dst;		/* TCP/UDP/SCTP destination port. */
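
Folding the new mpls struct into a union with the ip metadata means an
MPLS-terminated flow key reuses the same four bytes the IP fields would
occupy, so the key does not grow. A minimal sketch of that aliasing, using
plain uint32_t in place of __be32:

	#include <stdint.h>
	#include <stdio.h>

	struct flow_l3_meta {
		union {
			struct { uint32_t top_lse; } mpls;
			struct { uint8_t proto, tos, ttl, frag; } ip;
		};
	};

	int main(void)
	{
		struct flow_l3_meta m = { .ip = { 6, 0, 64, 0 } };

		/* Both views alias the same 4 bytes. */
		printf("sizeof=%zu proto=%u raw=%08x\n", sizeof(m),
		       m.ip.proto, (unsigned)m.mpls.top_lse);
		return 0;
	}
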
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 939bcb3..ed31097 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -46,24 +46,22 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ndisc.h>
+#include <net/mpls.h>
 
 #include "flow_netlink.h"
 
-static void update_range__(struct sw_flow_match *match,
-			   size_t offset, size_t size, bool is_mask)
+static void update_range(struct sw_flow_match *match,
+			 size_t offset, size_t size, bool is_mask)
 {
-	struct sw_flow_key_range *range = NULL;
+	struct sw_flow_key_range *range;
 	size_t start = rounddown(offset, sizeof(long));
 	size_t end = roundup(offset + size, sizeof(long));
 
 	if (!is_mask)
 		range = &match->range;
-	else if (match->mask)
+	else
 		range = &match->mask->range;
 
-	if (!range)
-		return;
-
 	if (range->start == range->end) {
 		range->start = start;
 		range->end = end;
@@ -79,22 +77,20 @@
 
 #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
 	do { \
-		update_range__(match, offsetof(struct sw_flow_key, field),  \
-				     sizeof((match)->key->field), is_mask); \
-		if (is_mask) {						    \
-			if ((match)->mask)				    \
-				(match)->mask->key.field = value;	    \
-		} else {                                                    \
+		update_range(match, offsetof(struct sw_flow_key, field),    \
+			     sizeof((match)->key->field), is_mask);	    \
+		if (is_mask)						    \
+			(match)->mask->key.field = value;		    \
+		else							    \
 			(match)->key->field = value;		            \
-		}                                                           \
 	} while (0)
 
 #define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask)	    \
 	do {								    \
-		update_range__(match, offset, len, is_mask);		    \
+		update_range(match, offset, len, is_mask);		    \
 		if (is_mask)						    \
 			memcpy((u8 *)&(match)->mask->key + offset, value_p, \
-			       len);					    \
+			       len);					   \
 		else							    \
 			memcpy((u8 *)(match)->key + offset, value_p, len);  \
 	} while (0)
@@ -103,18 +99,16 @@
 	SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
 				  value_p, len, is_mask)
 
-#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \
-	do { \
-		update_range__(match, offsetof(struct sw_flow_key, field),  \
-				     sizeof((match)->key->field), is_mask); \
-		if (is_mask) {						    \
-			if ((match)->mask)				    \
-				memset((u8 *)&(match)->mask->key.field, value,\
-				       sizeof((match)->mask->key.field));   \
-		} else {                                                    \
+#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask)		    \
+	do {								    \
+		update_range(match, offsetof(struct sw_flow_key, field),    \
+			     sizeof((match)->key->field), is_mask);	    \
+		if (is_mask)						    \
+			memset((u8 *)&(match)->mask->key.field, value,      \
+			       sizeof((match)->mask->key.field));	    \
+		else							    \
 			memset((u8 *)&(match)->key->field, value,           \
 			       sizeof((match)->key->field));                \
-		}                                                           \
 	} while (0)
 
 static bool match_validate(const struct sw_flow_match *match,
@@ -134,7 +128,8 @@
 			| (1 << OVS_KEY_ATTR_ICMP)
 			| (1 << OVS_KEY_ATTR_ICMPV6)
 			| (1 << OVS_KEY_ATTR_ARP)
-			| (1 << OVS_KEY_ATTR_ND));
+			| (1 << OVS_KEY_ATTR_ND)
+			| (1 << OVS_KEY_ATTR_MPLS));
 
 	/* Always allowed mask fields. */
 	mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
@@ -149,6 +144,12 @@
 			mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
 	}
 
+	if (eth_p_mpls(match->key->eth.type)) {
+		key_expected |= 1 << OVS_KEY_ATTR_MPLS;
+		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+			mask_allowed |= 1 << OVS_KEY_ATTR_MPLS;
+	}
+
 	if (match->key->eth.type == htons(ETH_P_IP)) {
 		key_expected |= 1 << OVS_KEY_ATTR_IPV4;
 		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
@@ -244,6 +245,38 @@
 	return true;
 }
 
+size_t ovs_key_attr_size(void)
+{
+	/* Whenever adding new OVS_KEY_* fields, we should consider
+	 * updating this function.
+	 */
+	BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22);
+
+	return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
+		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
+		  + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
+		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
+		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
+		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
+		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
+		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
+		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
+		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_OAM */
+		  + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
+		+ nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
+		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
+		+ nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
+		+ nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
+		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
+		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
+		+ nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
+		+ nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
+		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
+		+ nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
+		+ nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
+		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
+}
+
 /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
 static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
 	[OVS_KEY_ATTR_ENCAP] = -1,
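
ovs_key_attr_size() sums a worst-case nla_total_size() for every attribute a
flow key can carry, so reply skbs can be sized before the message is built.
nla_total_size(payload) is the 4-byte attribute header plus the payload,
rounded up to 4-byte alignment; a worked example with local reimplementations
of the macros:

	#include <stdio.h>

	struct nla_hdr { unsigned short nla_len, nla_type; };

	#define NLA_ALIGNTO	4
	#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
	#define NLA_HDRLEN	((int)NLA_ALIGN(sizeof(struct nla_hdr)))

	static int nla_total_size(int payload)
	{
		return NLA_ALIGN(NLA_HDRLEN + payload);
	}

	int main(void)
	{
		/* A u32 attribute costs 8 bytes, a 12-byte Ethernet key 16,
		 * and even a single u8 still rounds up to 8.
		 */
		printf("u32 attr: %d bytes\n", nla_total_size(4));
		printf("12B attr: %d bytes\n", nla_total_size(12));
		printf("u8  attr: %d bytes\n", nla_total_size(1));
		return 0;
	}
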
@@ -266,6 +299,7 @@
 	[OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32),
 	[OVS_KEY_ATTR_DP_HASH] = sizeof(u32),
 	[OVS_KEY_ATTR_TUNNEL] = -1,
+	[OVS_KEY_ATTR_MPLS] = sizeof(struct ovs_key_mpls),
 };
 
 static bool is_all_zero(const u8 *fp, size_t size)
@@ -572,10 +606,13 @@
 	if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
 		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
 
-		if (is_mask)
+		if (is_mask) {
 			in_port = 0xffffffff; /* Always exact match in_port. */
-		else if (in_port >= DP_MAX_PORTS)
+		} else if (in_port >= DP_MAX_PORTS) {
+			OVS_NLERR("Port (%d) exceeds maximum allowable (%d).\n",
+				  in_port, DP_MAX_PORTS);
 			return -EINVAL;
+		}
 
 		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
 		*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
@@ -602,7 +639,6 @@
 				const struct nlattr **a, bool is_mask)
 {
 	int err;
-	u64 orig_attrs = attrs;
 
 	err = metadata_from_nlattrs(match, &attrs, a, is_mask);
 	if (err)
@@ -634,8 +670,7 @@
 
 		SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
 		attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
-	} else if (!is_mask)
-		SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
+	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
 		__be16 eth_type;
@@ -735,6 +770,16 @@
 		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
 	}
 
+	if (attrs & (1 << OVS_KEY_ATTR_MPLS)) {
+		const struct ovs_key_mpls *mpls_key;
+
+		mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
+		SW_FLOW_KEY_PUT(match, mpls.top_lse,
+				mpls_key->mpls_lse, is_mask);
+
+		attrs &= ~(1 << OVS_KEY_ATTR_MPLS);
+	}
+
 	if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
 		const struct ovs_key_tcp *tcp_key;
 
@@ -745,15 +790,9 @@
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
-		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
-			SW_FLOW_KEY_PUT(match, tp.flags,
-					nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
-					is_mask);
-		} else {
-			SW_FLOW_KEY_PUT(match, tp.flags,
-					nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
-					is_mask);
-		}
+		SW_FLOW_KEY_PUT(match, tp.flags,
+				nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
+				is_mask);
 		attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
 	}
 
@@ -812,8 +851,11 @@
 		attrs &= ~(1 << OVS_KEY_ATTR_ND);
 	}
 
-	if (attrs != 0)
+	if (attrs != 0) {
+		OVS_NLERR("Unknown key attributes (%llx).\n",
+			  (unsigned long long)attrs);
 		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -853,8 +895,8 @@
  * attribute specifies the mask field of the wildcarded flow.
  */
 int ovs_nla_get_match(struct sw_flow_match *match,
-		      const struct nlattr *key,
-		      const struct nlattr *mask)
+		      const struct nlattr *nla_key,
+		      const struct nlattr *nla_mask)
 {
 	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
 	const struct nlattr *encap;
@@ -864,7 +906,7 @@
 	bool encap_valid = false;
 	int err;
 
-	err = parse_flow_nlattrs(key, a, &key_attrs);
+	err = parse_flow_nlattrs(nla_key, a, &key_attrs);
 	if (err)
 		return err;
 
@@ -905,36 +947,43 @@
 	if (err)
 		return err;
 
-	if (match->mask && !mask) {
-		/* Create an exact match mask. We need to set to 0xff all the
-		 * 'match->mask' fields that have been touched in 'match->key'.
-		 * We cannot simply memset 'match->mask', because padding bytes
-		 * and fields not specified in 'match->key' should be left to 0.
-		 * Instead, we use a stream of netlink attributes, copied from
-		 * 'key' and set to 0xff: ovs_key_from_nlattrs() will take care
-		 * of filling 'match->mask' appropriately.
-		 */
-		newmask = kmemdup(key, nla_total_size(nla_len(key)),
-				  GFP_KERNEL);
-		if (!newmask)
-			return -ENOMEM;
+	if (match->mask) {
+		if (!nla_mask) {
+			/* Create an exact match mask. We need to set to 0xff
+			 * all the 'match->mask' fields that have been touched
+			 * in 'match->key'. We cannot simply memset
+			 * 'match->mask', because padding bytes and fields not
+			 * specified in 'match->key' should be left to 0.
+			 * Instead, we use a stream of netlink attributes,
+			 * copied from 'key' and set to 0xff.
+			 * ovs_key_from_nlattrs() will take care of filling
+			 * 'match->mask' appropriately.
+			 */
+			newmask = kmemdup(nla_key,
+					  nla_total_size(nla_len(nla_key)),
+					  GFP_KERNEL);
+			if (!newmask)
+				return -ENOMEM;
 
-		mask_set_nlattr(newmask, 0xff);
+			mask_set_nlattr(newmask, 0xff);
 
-		/* The userspace does not send tunnel attributes that are 0,
-		 * but we should not wildcard them nonetheless.
-		 */
-		if (match->key->tun_key.ipv4_dst)
-			SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, 0xff, true);
+			/* The userspace does not send tunnel attributes that
+			 * are 0, but we should not wildcard them nonetheless.
+			 */
+			if (match->key->tun_key.ipv4_dst)
+				SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
+							 0xff, true);
 
-		mask = newmask;
-	}
+			nla_mask = newmask;
+		}
 
-	if (mask) {
-		err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
+		err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs);
 		if (err)
 			goto free_newmask;
 
+		/* Always match on tci. */
+		SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
+
 		if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) {
 			__be16 eth_type = 0;
 			__be16 tci = 0;
@@ -1140,6 +1189,14 @@
 		arp_key->arp_op = htons(output->ip.proto);
 		ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
 		ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
+	} else if (eth_p_mpls(swkey->eth.type)) {
+		struct ovs_key_mpls *mpls_key;
+
+		nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key));
+		if (!nla)
+			goto nla_put_failure;
+		mpls_key = nla_data(nla);
+		mpls_key->mpls_lse = output->mpls.top_lse;
 	}
 
 	if ((swkey->eth.type == htons(ETH_P_IP) ||
@@ -1226,12 +1283,14 @@
 
 #define MAX_ACTIONS_BUFSIZE	(32 * 1024)
 
-struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size)
+static struct sw_flow_actions *nla_alloc_flow_actions(int size)
 {
 	struct sw_flow_actions *sfa;
 
-	if (size > MAX_ACTIONS_BUFSIZE)
+	if (size > MAX_ACTIONS_BUFSIZE) {
+		OVS_NLERR("Flow action size (%u bytes) exceeds maximum.\n", size);
 		return ERR_PTR(-EINVAL);
+	}
 
 	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
 	if (!sfa)
@@ -1269,7 +1328,7 @@
 		new_acts_size = MAX_ACTIONS_BUFSIZE;
 	}
 
-	acts = ovs_nla_alloc_flow_actions(new_acts_size);
+	acts = nla_alloc_flow_actions(new_acts_size);
 	if (IS_ERR(acts))
 		return (void *)acts;
 
@@ -1336,9 +1395,15 @@
 	a->nla_len = sfa->actions_len - st_offset;
 }
 
+static int __ovs_nla_copy_actions(const struct nlattr *attr,
+				  const struct sw_flow_key *key,
+				  int depth, struct sw_flow_actions **sfa,
+				  __be16 eth_type, __be16 vlan_tci);
+
 static int validate_and_copy_sample(const struct nlattr *attr,
 				    const struct sw_flow_key *key, int depth,
-				    struct sw_flow_actions **sfa)
+				    struct sw_flow_actions **sfa,
+				    __be16 eth_type, __be16 vlan_tci)
 {
 	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
 	const struct nlattr *probability, *actions;
@@ -1375,7 +1440,8 @@
 	if (st_acts < 0)
 		return st_acts;
 
-	err = ovs_nla_copy_actions(actions, key, depth + 1, sfa);
+	err = __ovs_nla_copy_actions(actions, key, depth + 1, sfa,
+				     eth_type, vlan_tci);
 	if (err)
 		return err;
 
@@ -1385,10 +1451,10 @@
 	return 0;
 }
 
-static int validate_tp_port(const struct sw_flow_key *flow_key)
+static int validate_tp_port(const struct sw_flow_key *flow_key,
+			    __be16 eth_type)
 {
-	if ((flow_key->eth.type == htons(ETH_P_IP) ||
-	     flow_key->eth.type == htons(ETH_P_IPV6)) &&
+	if ((eth_type == htons(ETH_P_IP) || eth_type == htons(ETH_P_IPV6)) &&
 	    (flow_key->tp.src || flow_key->tp.dst))
 		return 0;
 
@@ -1483,7 +1549,7 @@
 static int validate_set(const struct nlattr *a,
 			const struct sw_flow_key *flow_key,
 			struct sw_flow_actions **sfa,
-			bool *set_tun)
+			bool *set_tun, __be16 eth_type)
 {
 	const struct nlattr *ovs_key = nla_data(a);
 	int key_type = nla_type(ovs_key);
@@ -1508,6 +1574,9 @@
 		break;
 
 	case OVS_KEY_ATTR_TUNNEL:
+		if (eth_p_mpls(eth_type))
+			return -EINVAL;
+
 		*set_tun = true;
 		err = validate_and_copy_set_tun(a, sfa);
 		if (err)
@@ -1515,7 +1584,7 @@
 		break;
 
 	case OVS_KEY_ATTR_IPV4:
-		if (flow_key->eth.type != htons(ETH_P_IP))
+		if (eth_type != htons(ETH_P_IP))
 			return -EINVAL;
 
 		if (!flow_key->ip.proto)
@@ -1531,7 +1600,7 @@
 		break;
 
 	case OVS_KEY_ATTR_IPV6:
-		if (flow_key->eth.type != htons(ETH_P_IPV6))
+		if (eth_type != htons(ETH_P_IPV6))
 			return -EINVAL;
 
 		if (!flow_key->ip.proto)
@@ -1553,19 +1622,24 @@
 		if (flow_key->ip.proto != IPPROTO_TCP)
 			return -EINVAL;
 
-		return validate_tp_port(flow_key);
+		return validate_tp_port(flow_key, eth_type);
 
 	case OVS_KEY_ATTR_UDP:
 		if (flow_key->ip.proto != IPPROTO_UDP)
 			return -EINVAL;
 
-		return validate_tp_port(flow_key);
+		return validate_tp_port(flow_key, eth_type);
+
+	case OVS_KEY_ATTR_MPLS:
+		if (!eth_p_mpls(eth_type))
+			return -EINVAL;
+		break;
 
 	case OVS_KEY_ATTR_SCTP:
 		if (flow_key->ip.proto != IPPROTO_SCTP)
 			return -EINVAL;
 
-		return validate_tp_port(flow_key);
+		return validate_tp_port(flow_key, eth_type);
 
 	default:
 		return -EINVAL;
@@ -1609,12 +1683,13 @@
 	return 0;
 }
 
-int ovs_nla_copy_actions(const struct nlattr *attr,
-			 const struct sw_flow_key *key,
-			 int depth,
-			 struct sw_flow_actions **sfa)
+static int __ovs_nla_copy_actions(const struct nlattr *attr,
+				  const struct sw_flow_key *key,
+				  int depth, struct sw_flow_actions **sfa,
+				  __be16 eth_type, __be16 vlan_tci)
 {
 	const struct nlattr *a;
+	bool out_tnl_port = false;
 	int rem, err;
 
 	if (depth >= SAMPLE_ACTION_DEPTH)
@@ -1626,6 +1701,8 @@
 			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
 			[OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
 			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
+			[OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls),
+			[OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16),
 			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
 			[OVS_ACTION_ATTR_POP_VLAN] = 0,
 			[OVS_ACTION_ATTR_SET] = (u32)-1,
@@ -1655,6 +1732,8 @@
 		case OVS_ACTION_ATTR_OUTPUT:
 			if (nla_get_u32(a) >= DP_MAX_PORTS)
 				return -EINVAL;
+			out_tnl_port = false;
+
 			break;
 
 		case OVS_ACTION_ATTR_HASH: {
@@ -1671,6 +1750,7 @@
 		}
 
 		case OVS_ACTION_ATTR_POP_VLAN:
+			vlan_tci = htons(0);
 			break;
 
 		case OVS_ACTION_ATTR_PUSH_VLAN:
@@ -1679,25 +1759,73 @@
 				return -EINVAL;
 			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
 				return -EINVAL;
+			vlan_tci = vlan->vlan_tci;
 			break;
 
 		case OVS_ACTION_ATTR_RECIRC:
 			break;
 
+		case OVS_ACTION_ATTR_PUSH_MPLS: {
+			const struct ovs_action_push_mpls *mpls = nla_data(a);
+
+			/* The networking stack does not allow simultaneous
+			 * and MPLS GSO.
+			 */
+			if (out_tnl_port)
+				return -EINVAL;
+
+			if (!eth_p_mpls(mpls->mpls_ethertype))
+				return -EINVAL;
+			/* Prohibit pushing MPLS except onto a whitelist of
+			 * ethertypes for which the tag order is known.
+			 */
+			if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
+			    (eth_type != htons(ETH_P_IP) &&
+			     eth_type != htons(ETH_P_IPV6) &&
+			     eth_type != htons(ETH_P_ARP) &&
+			     eth_type != htons(ETH_P_RARP) &&
+			     !eth_p_mpls(eth_type)))
+				return -EINVAL;
+			eth_type = mpls->mpls_ethertype;
+			break;
+		}
+
+		case OVS_ACTION_ATTR_POP_MPLS:
+			if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
+			    !eth_p_mpls(eth_type))
+				return -EINVAL;
+
+			/* Disallow subsequent L2.5+ set and mpls_pop actions
+			 * as there is no check here to ensure that the new
+			 * eth_type is valid and thus set actions could
+			 * write off the end of the packet or otherwise
+			 * corrupt it.
+			 *
+			 * Support for these actions is planned using packet
+			 * recirculation.
+			 */
+			eth_type = htons(0);
+			break;
+
 		case OVS_ACTION_ATTR_SET:
-			err = validate_set(a, key, sfa, &skip_copy);
+			err = validate_set(a, key, sfa,
+					   &out_tnl_port, eth_type);
 			if (err)
 				return err;
+
+			skip_copy = out_tnl_port;
 			break;
 
 		case OVS_ACTION_ATTR_SAMPLE:
-			err = validate_and_copy_sample(a, key, depth, sfa);
+			err = validate_and_copy_sample(a, key, depth, sfa,
+						       eth_type, vlan_tci);
 			if (err)
 				return err;
 			skip_copy = true;
 			break;
 
 		default:
+			OVS_NLERR("Unknown action attribute (%d).\n", type);
 			return -EINVAL;
 		}
 		if (!skip_copy) {
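
__ovs_nla_copy_actions() now threads eth_type and vlan_tci through the action
list, so each push/pop is validated against the headers the packet will
actually carry when that action runs, and sample actions recurse with the
current state. A condensed sketch of that stateful walk; the action encoding
and rules here are simplified for illustration:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	enum act { PUSH_VLAN, POP_VLAN, PUSH_MPLS, POP_MPLS };

	#define ETH_P_IP	0x0800
	#define ETH_P_MPLS_UC	0x8847

	static bool is_mpls(uint16_t t) { return t == ETH_P_MPLS_UC; }

	static int validate(const enum act *acts, int n, uint16_t eth_type)
	{
		bool vlan_tagged = false;
		int i;

		for (i = 0; i < n; i++) {
			switch (acts[i]) {
			case PUSH_VLAN:
				vlan_tagged = true;
				break;
			case POP_VLAN:
				vlan_tagged = false;
				break;
			case PUSH_MPLS:
				/* Tag order is only known when untagged. */
				if (vlan_tagged)
					return -1;
				eth_type = ETH_P_MPLS_UC;
				break;
			case POP_MPLS:
				if (vlan_tagged || !is_mpls(eth_type))
					return -1;
				eth_type = 0;	/* inner type now unknown */
				break;
			}
		}
		return 0;
	}

	int main(void)
	{
		enum act good[] = { PUSH_MPLS, POP_MPLS };
		enum act bad[]  = { PUSH_VLAN, PUSH_MPLS };

		printf("good: %d, bad: %d\n",
		       validate(good, 2, ETH_P_IP),
		       validate(bad, 2, ETH_P_IP));
		return 0;
	}
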
@@ -1713,6 +1841,24 @@
 	return 0;
 }
 
+int ovs_nla_copy_actions(const struct nlattr *attr,
+			 const struct sw_flow_key *key,
+			 struct sw_flow_actions **sfa)
+{
+	int err;
+
+	*sfa = nla_alloc_flow_actions(nla_len(attr));
+	if (IS_ERR(*sfa))
+		return PTR_ERR(*sfa);
+
+	err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type,
+				     key->eth.tci);
+	if (err)
+		kfree(*sfa);
+
+	return err;
+}
+
 static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
 {
 	const struct nlattr *a;
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 206e45a..eb0b177 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -37,6 +37,8 @@
 
 #include "flow.h"
 
+size_t ovs_key_attr_size(void);
+
 void ovs_match_init(struct sw_flow_match *match,
 		    struct sw_flow_key *key, struct sw_flow_mask *mask);
 
@@ -49,12 +51,11 @@
 		      const struct nlattr *);
 
 int ovs_nla_copy_actions(const struct nlattr *attr,
-			 const struct sw_flow_key *key, int depth,
+			 const struct sw_flow_key *key,
 			 struct sw_flow_actions **sfa);
 int ovs_nla_put_actions(const struct nlattr *attr,
 			int len, struct sk_buff *skb);
 
-struct sw_flow_actions *ovs_nla_alloc_flow_actions(int actions_len);
 void ovs_nla_free_flow_actions(struct sw_flow_actions *);
 
 #endif /* flow_netlink.h */
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index cf2d853..90f8b40 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -250,11 +250,14 @@
 		__table_instance_destroy(ti);
 }
 
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
+/* No need for locking; this function is called from an RCU callback or
+ * the error path.
+ */
+void ovs_flow_tbl_destroy(struct flow_table *table)
 {
-	struct table_instance *ti = ovsl_dereference(table->ti);
+	struct table_instance *ti = rcu_dereference_raw(table->ti);
 
-	table_instance_destroy(ti, deferred);
+	table_instance_destroy(ti, false);
 }
 
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 5918bff..f682c8c 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -62,7 +62,7 @@
 
 int ovs_flow_tbl_init(struct flow_table *);
 int ovs_flow_tbl_count(struct flow_table *table);
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
+void ovs_flow_tbl_destroy(struct flow_table *table);
 int ovs_flow_tbl_flush(struct flow_table *flow_table);
 
 int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 106a9d8..70c9765 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -17,6 +17,7 @@
 #include <linux/rculist.h>
 #include <linux/udp.h>
 #include <linux/if_vlan.h>
+#include <linux/module.h>
 
 #include <net/geneve.h>
 #include <net/icmp.h>
@@ -28,6 +29,8 @@
 #include "datapath.h"
 #include "vport.h"
 
+static struct vport_ops ovs_geneve_vport_ops;
+
 /**
  * struct geneve_port - Keeps track of open UDP ports
  * @gs: The socket created for this port number.
@@ -225,11 +228,29 @@
 	return geneve_port->name;
 }
 
-const struct vport_ops ovs_geneve_vport_ops = {
+static struct vport_ops ovs_geneve_vport_ops = {
 	.type		= OVS_VPORT_TYPE_GENEVE,
 	.create		= geneve_tnl_create,
 	.destroy	= geneve_tnl_destroy,
 	.get_name	= geneve_get_name,
 	.get_options	= geneve_get_options,
 	.send		= geneve_tnl_send,
+	.owner          = THIS_MODULE,
 };
+
+static int __init ovs_geneve_tnl_init(void)
+{
+	return ovs_vport_ops_register(&ovs_geneve_vport_ops);
+}
+
+static void __exit ovs_geneve_tnl_exit(void)
+{
+	ovs_vport_ops_unregister(&ovs_geneve_vport_ops);
+}
+
+module_init(ovs_geneve_tnl_init);
+module_exit(ovs_geneve_tnl_exit);
+
+MODULE_DESCRIPTION("OVS: Geneve swiching port");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("vport-type-5");
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 108b82d..00270b6 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -29,6 +29,7 @@
 #include <linux/jhash.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/workqueue.h>
 #include <linux/rculist.h>
 #include <net/route.h>
@@ -45,6 +46,8 @@
 #include "datapath.h"
 #include "vport.h"
 
+static struct vport_ops ovs_gre_vport_ops;
+
 /* Returns the least-significant 32 bits of a __be64. */
 static __be32 be64_get_low32(__be64 x)
 {
@@ -281,10 +284,28 @@
 	gre_exit();
 }
 
-const struct vport_ops ovs_gre_vport_ops = {
+static struct vport_ops ovs_gre_vport_ops = {
 	.type		= OVS_VPORT_TYPE_GRE,
 	.create		= gre_create,
 	.destroy	= gre_tnl_destroy,
 	.get_name	= gre_get_name,
 	.send		= gre_tnl_send,
+	.owner		= THIS_MODULE,
 };
+
+static int __init ovs_gre_tnl_init(void)
+{
+	return ovs_vport_ops_register(&ovs_gre_vport_ops);
+}
+
+static void __exit ovs_gre_tnl_exit(void)
+{
+	ovs_vport_ops_unregister(&ovs_gre_vport_ops);
+}
+
+module_init(ovs_gre_tnl_init);
+module_exit(ovs_gre_tnl_exit);
+
+MODULE_DESCRIPTION("OVS: GRE switching port");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("vport-type-3");
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 8451612..6a55f71 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -36,6 +36,8 @@
 	struct vport *vport;
 };
 
+static struct vport_ops ovs_internal_vport_ops;
+
 static struct internal_dev *internal_dev_priv(struct net_device *netdev)
 {
 	return netdev_priv(netdev);
@@ -222,6 +224,11 @@
 	struct net_device *netdev = netdev_vport_priv(vport)->dev;
 	int len;
 
+	if (unlikely(!(netdev->flags & IFF_UP))) {
+		kfree_skb(skb);
+		return 0;
+	}
+
 	len = skb->len;
 
 	skb_dst_drop(skb);
@@ -238,7 +245,7 @@
 	return len;
 }
 
-const struct vport_ops ovs_internal_vport_ops = {
+static struct vport_ops ovs_internal_vport_ops = {
 	.type		= OVS_VPORT_TYPE_INTERNAL,
 	.create		= internal_dev_create,
 	.destroy	= internal_dev_destroy,
@@ -261,10 +268,21 @@
 
 int ovs_internal_dev_rtnl_link_register(void)
 {
-	return rtnl_link_register(&internal_dev_link_ops);
+	int err;
+
+	err = rtnl_link_register(&internal_dev_link_ops);
+	if (err < 0)
+		return err;
+
+	err = ovs_vport_ops_register(&ovs_internal_vport_ops);
+	if (err < 0)
+		rtnl_link_unregister(&internal_dev_link_ops);
+
+	return err;
 }
 
 void ovs_internal_dev_rtnl_link_unregister(void)
 {
+	ovs_vport_ops_unregister(&ovs_internal_vport_ops);
 	rtnl_link_unregister(&internal_dev_link_ops);
 }
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index d21f77d..877ee74 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -33,6 +33,8 @@
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
 
+static struct vport_ops ovs_netdev_vport_ops;
+
 /* Must be called with rcu_read_lock. */
 static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 {
@@ -224,10 +226,20 @@
 		return NULL;
 }
 
-const struct vport_ops ovs_netdev_vport_ops = {
+static struct vport_ops ovs_netdev_vport_ops = {
 	.type		= OVS_VPORT_TYPE_NETDEV,
 	.create		= netdev_create,
 	.destroy	= netdev_destroy,
 	.get_name	= ovs_netdev_get_name,
 	.send		= netdev_send,
 };
+
+int __init ovs_netdev_init(void)
+{
+	return ovs_vport_ops_register(&ovs_netdev_vport_ops);
+}
+
+void ovs_netdev_exit(void)
+{
+	ovs_vport_ops_unregister(&ovs_netdev_vport_ops);
+}
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index 8df01c11..6f7038e 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -41,4 +41,7 @@
 const char *ovs_netdev_get_name(const struct vport *);
 void ovs_netdev_detach_dev(struct vport *);
 
+int __init ovs_netdev_init(void);
+void ovs_netdev_exit(void);
+
 #endif /* vport_netdev.h */
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 2735e01..965e750 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -24,6 +24,7 @@
 #include <linux/net.h>
 #include <linux/rculist.h>
 #include <linux/udp.h>
+#include <linux/module.h>
 
 #include <net/icmp.h>
 #include <net/ip.h>
@@ -50,6 +51,8 @@
 	char name[IFNAMSIZ];
 };
 
+static struct vport_ops ovs_vxlan_vport_ops;
+
 static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
 {
 	return vport_priv(vport);
@@ -192,11 +195,29 @@
 	return vxlan_port->name;
 }
 
-const struct vport_ops ovs_vxlan_vport_ops = {
+static struct vport_ops ovs_vxlan_vport_ops = {
 	.type		= OVS_VPORT_TYPE_VXLAN,
 	.create		= vxlan_tnl_create,
 	.destroy	= vxlan_tnl_destroy,
 	.get_name	= vxlan_get_name,
 	.get_options	= vxlan_get_options,
 	.send		= vxlan_tnl_send,
+	.owner		= THIS_MODULE,
 };
+
+static int __init ovs_vxlan_tnl_init(void)
+{
+	return ovs_vport_ops_register(&ovs_vxlan_vport_ops);
+}
+
+static void __exit ovs_vxlan_tnl_exit(void)
+{
+	ovs_vport_ops_unregister(&ovs_vxlan_vport_ops);
+}
+
+module_init(ovs_vxlan_tnl_init);
+module_exit(ovs_vxlan_tnl_exit);
+
+MODULE_DESCRIPTION("OVS: VXLAN switching port");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("vport-type-4");
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6015802..8168ef0 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -28,6 +28,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/compat.h>
 #include <net/net_namespace.h>
+#include <linux/module.h>
 
 #include "datapath.h"
 #include "vport.h"
@@ -36,22 +37,7 @@
 static void ovs_vport_record_error(struct vport *,
 				   enum vport_err_type err_type);
 
-/* List of statically compiled vport implementations.  Don't forget to also
- * add yours to the list at the bottom of vport.h. */
-static const struct vport_ops *vport_ops_list[] = {
-	&ovs_netdev_vport_ops,
-	&ovs_internal_vport_ops,
-
-#ifdef CONFIG_OPENVSWITCH_GRE
-	&ovs_gre_vport_ops,
-#endif
-#ifdef CONFIG_OPENVSWITCH_VXLAN
-	&ovs_vxlan_vport_ops,
-#endif
-#ifdef CONFIG_OPENVSWITCH_GENEVE
-	&ovs_geneve_vport_ops,
-#endif
-};
+static LIST_HEAD(vport_ops_list);
 
 /* Protected by RCU read lock for reading, ovs_mutex for writing. */
 static struct hlist_head *dev_table;
@@ -88,6 +74,32 @@
 	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
 }
 
+int ovs_vport_ops_register(struct vport_ops *ops)
+{
+	int err = -EEXIST;
+	struct vport_ops *o;
+
+	ovs_lock();
+	list_for_each_entry(o, &vport_ops_list, list)
+		if (ops->type == o->type)
+			goto errout;
+
+	list_add_tail(&ops->list, &vport_ops_list);
+	err = 0;
+errout:
+	ovs_unlock();
+	return err;
+}
+EXPORT_SYMBOL(ovs_vport_ops_register);
+
+void ovs_vport_ops_unregister(struct vport_ops *ops)
+{
+	ovs_lock();
+	list_del(&ops->list);
+	ovs_unlock();
+}
+EXPORT_SYMBOL(ovs_vport_ops_unregister);
+
 /**
  *	ovs_vport_locate - find a port that has already been created
  *
@@ -153,6 +165,7 @@
 
 	return vport;
 }
+EXPORT_SYMBOL(ovs_vport_alloc);
 
 /**
  *	ovs_vport_free - uninitialize and free vport
@@ -173,6 +186,18 @@
 	free_percpu(vport->percpu_stats);
 	kfree(vport);
 }
+EXPORT_SYMBOL(ovs_vport_free);
+
+static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
+{
+	struct vport_ops *ops;
+
+	list_for_each_entry(ops, &vport_ops_list, list)
+		if (ops->type == parms->type)
+			return ops;
+
+	return NULL;
+}
 
 /**
  *	ovs_vport_add - add vport device (for kernel callers)
@@ -184,31 +209,40 @@
  */
 struct vport *ovs_vport_add(const struct vport_parms *parms)
 {
+	struct vport_ops *ops;
 	struct vport *vport;
-	int err = 0;
-	int i;
 
-	for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
-		if (vport_ops_list[i]->type == parms->type) {
-			struct hlist_head *bucket;
+	ops = ovs_vport_lookup(parms);
+	if (ops) {
+		struct hlist_head *bucket;
 
-			vport = vport_ops_list[i]->create(parms);
-			if (IS_ERR(vport)) {
-				err = PTR_ERR(vport);
-				goto out;
-			}
+		if (!try_module_get(ops->owner))
+			return ERR_PTR(-EAFNOSUPPORT);
 
-			bucket = hash_bucket(ovs_dp_get_net(vport->dp),
-					     vport->ops->get_name(vport));
-			hlist_add_head_rcu(&vport->hash_node, bucket);
+		vport = ops->create(parms);
+		if (IS_ERR(vport)) {
+			module_put(ops->owner);
 			return vport;
 		}
+
+		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
+				     vport->ops->get_name(vport));
+		hlist_add_head_rcu(&vport->hash_node, bucket);
+		return vport;
 	}
 
-	err = -EAFNOSUPPORT;
+	/* Unlock to attempt the module load; return -EAGAIN if the load
+	 * succeeds, as the port addition workflow then needs to be
+	 * restarted.
+	 */
+	ovs_unlock();
+	request_module("vport-type-%d", parms->type);
+	ovs_lock();
 
-out:
-	return ERR_PTR(err);
+	if (!ovs_vport_lookup(parms))
+		return ERR_PTR(-EAFNOSUPPORT);
+	else
+		return ERR_PTR(-EAGAIN);
 }
 
 /**
@@ -242,6 +276,8 @@
 	hlist_del_rcu(&vport->hash_node);
 
 	vport->ops->destroy(vport);
+
+	module_put(vport->ops->owner);
 }
 
 /**
@@ -457,6 +493,7 @@
 	}
 	ovs_dp_process_packet(skb, &key);
 }
+EXPORT_SYMBOL(ovs_vport_receive);
 
 /**
  *	ovs_vport_send - send a packet on a device
@@ -535,3 +572,4 @@
 
 	call_rcu(&vport->rcu, free_vport_rcu);
 }
+EXPORT_SYMBOL(ovs_vport_deferred_free);
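
With the static vport_ops_list[] array gone, implementations register
themselves on a list at module init, and a failed lookup in ovs_vport_add()
can request_module() against the "vport-type-%d" alias and restart via
-EAGAIN. A userspace sketch of that register/lookup/load-and-retry shape, with
a stub standing in for the module loader:

	#include <stddef.h>
	#include <stdio.h>

	struct vport_ops_sketch {
		int type;
		const char *name;
		struct vport_ops_sketch *next;
	};

	static struct vport_ops_sketch *ops_list;

	static int ops_register(struct vport_ops_sketch *ops)
	{
		struct vport_ops_sketch *o;

		for (o = ops_list; o; o = o->next)
			if (o->type == ops->type)
				return -1;	/* -EEXIST */
		ops->next = ops_list;
		ops_list = ops;
		return 0;
	}

	static struct vport_ops_sketch *ops_lookup(int type)
	{
		struct vport_ops_sketch *o;

		for (o = ops_list; o; o = o->next)
			if (o->type == type)
				return o;
		return NULL;
	}

	static struct vport_ops_sketch gre_ops = { 3, "gre", NULL };

	/* Stands in for request_module("vport-type-%d", type). */
	static void fake_module_load(int type)
	{
		if (type == 3)
			ops_register(&gre_ops);	/* module_init() would do this */
	}

	int main(void)
	{
		int type = 3;

		if (!ops_lookup(type)) {
			fake_module_load(type);
			/* the caller sees -EAGAIN and restarts the add */
		}
		printf("type %d -> %s\n", type,
		       ops_lookup(type) ? ops_lookup(type)->name : "unsupported");
		return 0;
	}
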
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 8942125..e41c3fa 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -161,6 +161,9 @@
 	const char *(*get_name)(const struct vport *);
 
 	int (*send)(struct vport *, struct sk_buff *);
+
+	struct module *owner;
+	struct list_head list;
 };
 
 enum vport_err_type {
@@ -209,14 +212,6 @@
 void ovs_vport_receive(struct vport *, struct sk_buff *,
 		       struct ovs_tunnel_info *);
 
-/* List of statically compiled vport implementations.  Don't forget to also
- * add yours to the list at the top of vport.c. */
-extern const struct vport_ops ovs_netdev_vport_ops;
-extern const struct vport_ops ovs_internal_vport_ops;
-extern const struct vport_ops ovs_gre_vport_ops;
-extern const struct vport_ops ovs_vxlan_vport_ops;
-extern const struct vport_ops ovs_geneve_vport_ops;
-
 static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
 {
@@ -224,4 +219,7 @@
 		skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
 }
 
+int ovs_vport_ops_register(struct vport_ops *ops);
+void ovs_vport_ops_unregister(struct vport_ops *ops);
+
 #endif /* vport.h */
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 87d20f4..4cd13d8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2953,7 +2953,7 @@
 		msg->msg_flags |= MSG_TRUNC;
 	}
 
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (err)
 		goto out_free;
 
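The skb_copy_datagram_iovec() call sites converted throughout this series all
take the same shape: hand the helper the whole msghdr and let it pick out the
iovec, so callers stop touching msg->msg_iov directly. A trivial sketch of
that wrapper shape, with fabricated types:

	#include <stdio.h>
	#include <string.h>

	struct iovec_sketch { void *base; size_t len; };
	struct msghdr_sketch { struct iovec_sketch *msg_iov; };

	/* Old-style helper: caller digs the iovec out of the msghdr. */
	static int copy_datagram_iovec(const char *skb, size_t off,
				       struct iovec_sketch *iov, size_t len)
	{
		memcpy(iov->base, skb + off, len);
		return 0;
	}

	/* New-style helper: caller hands over the whole msghdr. */
	static int copy_datagram_msg(const char *skb, size_t off,
				     struct msghdr_sketch *msg, size_t len)
	{
		return copy_datagram_iovec(skb, off, msg->msg_iov, len);
	}

	int main(void)
	{
		char payload[8];
		struct iovec_sketch iov = { payload, sizeof(payload) };
		struct msghdr_sketch msg = { &iov };

		copy_datagram_msg("datagram", 0, &msg, sizeof(payload));
		printf("%.8s\n", payload);
		return 0;
	}
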
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 290352c..0918bc2 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -150,7 +150,7 @@
 		copylen = len;
 	}
 
-	rval = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copylen);
+	rval = skb_copy_datagram_msg(skb, 0, msg, copylen);
 	if (rval) {
 		rval = -EFAULT;
 		goto out;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 70a547e..44b2123 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1296,7 +1296,7 @@
 	else
 		len = skb->len;
 
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
+	err = skb_copy_datagram_msg(skb, 0, msg, len);
 	if (!err)
 		err = (flags & MSG_TRUNC) ? skb->len : len;
 
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index a85c1a0..9b600c2 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1249,7 +1249,7 @@
 		msg->msg_flags |= MSG_TRUNC;
 	}
 
-	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	skb_copy_datagram_msg(skb, 0, msg, copied);
 
 	if (msg->msg_name) {
 		struct sockaddr_rose *srose;
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index e9aaa65..4575485 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -180,7 +180,7 @@
 		if (copy > len - copied)
 			copy = len - copied;
 
-		ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy);
+		ret = skb_copy_datagram_msg(skb, offset, msg, copy);
 
 		if (ret < 0)
 			goto copy_error;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index d6bcbd9..7fffc22 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -1,5 +1,5 @@
 /*
- * net/sched/gact.c	Generic actions
+ * net/sched/act_gact.c		Generic actions
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8a64a07..cbc8dd7 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -1,5 +1,5 @@
 /*
- * net/sched/ipt.c     iptables target interface
+ * net/sched/act_ipt.c		iptables target interface
  *
  *TODO: Add other tables. For now we only support the ipv4 table targets
  *
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index eb48306..5953517 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -1,5 +1,5 @@
 /*
- * net/sched/mirred.c	packet mirroring and redirect actions
+ * net/sched/act_mirred.c	packet mirroring and redirect actions
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 5f9bcb2..59649d5 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -1,5 +1,5 @@
 /*
- * net/sched/pedit.c	Generic packet editor
+ * net/sched/act_pedit.c	Generic packet editor
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 69791ca..9a1c42a 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -1,5 +1,5 @@
 /*
- * net/sched/police.c	Input police filter.
+ * net/sched/act_police.c	Input police filter
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 992c231..6a8d948 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -1,5 +1,5 @@
 /*
- * net/sched/simp.c	Simple example of an action
+ * net/sched/act_simple.c	Simple example of an action
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index b343319..179f1c8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -139,33 +139,20 @@
 
 /* Time stamp put into socket buffer control block
  * Only valid when skbs are in our internal t(ime)fifo queue.
+ *
+ * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
+ * and skb->next & skb->prev are scratch space for a qdisc,
+ * we save skb->tstamp value in skb->cb[] before destroying it.
  */
 struct netem_skb_cb {
 	psched_time_t	time_to_send;
 	ktime_t		tstamp_save;
 };
 
-/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp
- * to hold a rb_node structure.
- *
- * If struct sk_buff layout is changed, the following checks will complain.
- */
-static struct rb_node *netem_rb_node(struct sk_buff *skb)
-{
-	BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0);
-	BUILD_BUG_ON(offsetof(struct sk_buff, prev) !=
-		     offsetof(struct sk_buff, next) + sizeof(skb->next));
-	BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) !=
-		     offsetof(struct sk_buff, prev) + sizeof(skb->prev));
-	BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) +
-					      sizeof(skb->prev) +
-					      sizeof(skb->tstamp));
-	return (struct rb_node *)&skb->next;
-}
 
 static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
 {
-	return (struct sk_buff *)rb;
+	return container_of(rb, struct sk_buff, rbnode);
 }
 
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
@@ -403,8 +390,8 @@
 		else
 			p = &parent->rb_left;
 	}
-	rb_link_node(netem_rb_node(nskb), parent, p);
-	rb_insert_color(netem_rb_node(nskb), &q->t_root);
+	rb_link_node(&nskb->rbnode, parent, p);
+	rb_insert_color(&nskb->rbnode, &q->t_root);
 	sch->q.qlen++;
 }
 
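netem previously type-punned &skb->next into an rb_node and build-asserted the
sk_buff layout; with skb->rbnode it recovers the skb from the tree node via
container_of() instead. A self-contained sketch of the embed-and-recover
pattern, with fabricated types:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct rb_node_sketch { struct rb_node_sketch *left, *right; };

	struct fake_skb {
		long tstamp;
		struct rb_node_sketch rbnode;	/* embedded, not overlaid */
	};

	static struct fake_skb *node_to_skb(struct rb_node_sketch *rb)
	{
		return container_of(rb, struct fake_skb, rbnode);
	}

	int main(void)
	{
		struct fake_skb skb = { .tstamp = 12345 };

		printf("tstamp via node: %ld\n",
		       node_to_skb(&skb.rbnode)->tstamp);
		return 0;
	}
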
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 34229ee..0697eda 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -417,7 +417,7 @@
 
 	if (*pos == 0)
 		seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX "
-				"REM_ADDR_RTX  START\n");
+				"REM_ADDR_RTX START STATE\n");
 
 	return (void *)pos;
 }
@@ -490,14 +490,20 @@
 			 * Note: We don't have a way to tally this at the moment
 			 * so lets just leave it as zero for the moment
 			 */
-			seq_printf(seq, "0 ");
+			seq_puts(seq, "0 ");
 
 			/*
 			 * remote address start time (START).  This is also not
 			 * currently implemented, but we can record it with a
 			 * jiffies marker in a subsequent patch
 			 */
-			seq_printf(seq, "0");
+			seq_puts(seq, "0 ");
+
+			/*
+			 * The current state of this destination. I.e.
+			 * SCTP_ACTIVE, SCTP_INACTIVE, ...
+			 */
+			seq_printf(seq, "%d", tsp->state);
 
 			seq_printf(seq, "\n");
 		}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 634a2ab..2120292 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2095,7 +2095,7 @@
 	if (copied > len)
 		copied = len;
 
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 
 	event = sctp_skb2event(skb);
 
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 74745a4..ec18076 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -91,7 +91,7 @@
  * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
  *            out: set when successful non-complete reassembly, otherwise NULL
  * @*buf:     in:  the buffer to append. Always defined
- *            out: head buf after sucessful complete reassembly, otherwise NULL
+ *            out: head buf after successful complete reassembly, otherwise NULL
  * Returns 1 when reassembly complete, otherwise 0
  */
 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
@@ -311,7 +311,7 @@
  * @mtu:  max allowable size for the bundle buffer, inclusive header
  * @dnode: destination node for message. (Not always present in header)
  * Replaces buffer if successful
- * Returns true if sucess, otherwise false
+ * Returns true if success, otherwise false
  */
 bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
 {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 51bddc2..591bbfa 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1372,8 +1372,7 @@
 			sz = buf_len;
 			m->msg_flags |= MSG_TRUNC;
 		}
-		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
-					      m->msg_iov, sz);
+		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
 		if (res)
 			goto exit;
 		res = sz;
@@ -1473,8 +1472,8 @@
 		needed = (buf_len - sz_copied);
 		sz_to_copy = (sz <= needed) ? sz : needed;
 
-		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
-					      m->msg_iov, sz_to_copy);
+		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
+					    m, sz_to_copy);
 		if (res)
 			goto exit;
 
@@ -1556,7 +1555,7 @@
  * @tsk: TIPC socket
  * @msg: message
  *
- * Returns 0 (TIPC_OK) if everyting ok, -TIPC_ERR_NO_PORT otherwise
+ * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
  */
 static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
 {
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e968843..5eee625 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1825,7 +1825,7 @@
 	else if (size < skb->len - skip)
 		msg->msg_flags |= MSG_TRUNC;
 
-	err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
+	err = skb_copy_datagram_msg(skb, skip, msg, size);
 	if (err)
 		goto out_free;
 
@@ -2030,8 +2030,8 @@
 		}
 
 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
-		if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip,
-					    msg->msg_iov, chunk)) {
+		if (skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
+					  msg, chunk)) {
 			if (copied == 0)
 				copied = -EFAULT;
 			break;
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 9bb63ff..a57ddef 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1773,8 +1773,7 @@
 	}
 
 	/* Place the datagram payload in the user's iovec. */
-	err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov,
-		payload_len);
+	err = skb_copy_datagram_msg(skb, sizeof(*dg), msg, payload_len);
 	if (err)
 		goto out;
 
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5ad4418..59e785b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1335,7 +1335,7 @@
 	/* Currently, each datagram always contains a complete record */
 	msg->msg_flags |= MSG_EOR;
 
-	rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
 	if (rc)
 		goto out_free_dgram;
 
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index eb4bec0..6340274 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -602,6 +602,45 @@
 		},
 		.result = ACCEPT,
 	},
+	{
+		"jump test 5",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
@@ -630,7 +669,7 @@
 
 static int test(void)
 {
-	int prog_fd, i;
+	int prog_fd, i, pass_cnt = 0, err_cnt = 0;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		struct bpf_insn *prog = tests[i].insns;
@@ -657,21 +696,25 @@
 				printf("FAIL\nfailed to load prog '%s'\n",
 				       strerror(errno));
 				printf("%s", bpf_log_buf);
+				err_cnt++;
 				goto fail;
 			}
 		} else {
 			if (prog_fd >= 0) {
 				printf("FAIL\nunexpected success to load\n");
 				printf("%s", bpf_log_buf);
+				err_cnt++;
 				goto fail;
 			}
 			if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
 				printf("FAIL\nunexpected error message: %s",
 				       bpf_log_buf);
+				err_cnt++;
 				goto fail;
 			}
 		}
 
+		pass_cnt++;
 		printf("OK\n");
 fail:
 		if (map_fd >= 0)
@@ -679,6 +722,7 @@
 		close(prog_fd);
 
 	}
+	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
 
 	return 0;
 }