mlx_accel_core/mlx_accel_tools/mlx_ipsec: Initial accelerator modules

Issue: None
Change-Id: I29b109c49fa4d283285aa7d51d3a54c5147deabb
Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Ilan Tayari <ilant@mellanox.com>
Signed-off-by: Ariel Levkovich <lariel@mellanox.com>
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index ce80b36..2b6e463 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7651,6 +7651,16 @@
 F:	drivers/scsi/megaraid.*
 F:	drivers/scsi/megaraid/
 
+MELLANOX ETHERNET ACCELERATOR DRIVERS
+M:	Ilan Tayari <ilant@mellanox.com>
+M:	Aviad Yehezkel <aviadye@mellanox.com>
+M:	Ariel Levkovich <lariel@mellanox.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+W:	http://www.mellanox.com
+Q:	http://patchwork.ozlabs.org/project/netdev/list/
+F:	drivers/net/ethernet/mellanox/accelerator/
+
 MELLANOX ETHERNET DRIVER (mlx4_en)
 M:	Tariq Toukan <tariqt@mellanox.com>
 L:	netdev@vger.kernel.org
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 7493a83..aa44026 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,4 +1,4 @@
 obj-$(CONFIG_MLX5_INFINIBAND)	+= mlx5_ib.o
 
-mlx5_ib-y :=	main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o
+mlx5_ib-y :=	main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o rsvd_gid.o
 mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e4aecbf..b68b60f 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -179,73 +179,86 @@
 	return 0;
 }
 
-static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
-				     const struct ib_gid_attr *attr,
-				     void *mlx5_addr)
+int mlx5_ib_set_roce_gid(struct mlx5_core_dev *dev, unsigned int index,
+			 enum ib_gid_type gid_type, const union ib_gid *gid,
+			 const u8 *mac, bool vlan, u16 vlan_id)
 {
 #define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
-	char *mlx5_addr_l3_addr	= MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
+	u32  in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
+	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
+	char *mlx5_addr_l3_addr	= MLX5_ADDR_OF(roce_addr_layout, in_addr,
 					       source_l3_address);
-	void *mlx5_addr_mac	= MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
+	void *mlx5_addr_mac	= MLX5_ADDR_OF(roce_addr_layout, in_addr,
 					       source_mac_47_32);
 
-	if (!gid)
-		return;
+	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return -EINVAL;
 
-	ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr);
+	if (gid) {
+		ether_addr_copy(mlx5_addr_mac, mac);
 
-	if (is_vlan_dev(attr->ndev)) {
-		MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
-		MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev));
-	}
+		if (vlan) {
+			MLX5_SET_RA(in_addr, vlan_valid, 1);
+			MLX5_SET_RA(in_addr, vlan_id, vlan_id);
+		}
 
-	switch (attr->gid_type) {
-	case IB_GID_TYPE_IB:
-		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
-		break;
-	case IB_GID_TYPE_ROCE_UDP_ENCAP:
-		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
-		break;
+		switch (gid_type) {
+		case IB_GID_TYPE_IB:
+			MLX5_SET_RA(in_addr, roce_version, MLX5_ROCE_VERSION_1);
+			break;
+		case IB_GID_TYPE_ROCE_UDP_ENCAP:
+			MLX5_SET_RA(in_addr, roce_version, MLX5_ROCE_VERSION_2);
+			break;
 
-	default:
-		WARN_ON(true);
-	}
+		default:
+			WARN_ON(true);
+		}
 
-	if (attr->gid_type != IB_GID_TYPE_IB) {
-		if (ipv6_addr_v4mapped((void *)gid))
-			MLX5_SET_RA(mlx5_addr, roce_l3_type,
-				    MLX5_ROCE_L3_TYPE_IPV4);
+		if (gid_type != IB_GID_TYPE_IB) {
+			if (ipv6_addr_v4mapped((void *)gid))
+				MLX5_SET_RA(in_addr, roce_l3_type,
+					    MLX5_ROCE_L3_TYPE_IPV4);
+			else
+				MLX5_SET_RA(in_addr, roce_l3_type,
+					    MLX5_ROCE_L3_TYPE_IPV6);
+		}
+
+		if ((gid_type == IB_GID_TYPE_IB) ||
+		    !ipv6_addr_v4mapped((void *)gid))
+			memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
 		else
-			MLX5_SET_RA(mlx5_addr, roce_l3_type,
-				    MLX5_ROCE_L3_TYPE_IPV6);
+			memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
 	}
 
-	if ((attr->gid_type == IB_GID_TYPE_IB) ||
-	    !ipv6_addr_v4mapped((void *)gid))
-		memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
-	else
-		memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
+	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
+	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
+EXPORT_SYMBOL_GPL(mlx5_ib_set_roce_gid);
 
 static int set_roce_addr(struct ib_device *device, u8 port_num,
 			 unsigned int index,
 			 const union ib_gid *gid,
 			 const struct ib_gid_attr *attr)
 {
-	struct mlx5_ib_dev *dev = to_mdev(device);
-	u32  in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
-	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
-	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
-	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
+	u8 mac[ETH_ALEN] = {0};
+	bool vlan = false;
+	u16 vlan_id = 0;
+	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
 
-	if (ll != IB_LINK_LAYER_ETHERNET)
-		return -EINVAL;
+	if (gid) {
+		gid_type = attr->gid_type;
+		ether_addr_copy(mac, attr->ndev->dev_addr);
 
-	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
+		if (is_vlan_dev(attr->ndev)) {
+			vlan = true;
+			vlan_id = vlan_dev_vlan_id(attr->ndev);
+		}
+	}
 
-	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
-	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
-	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+	return mlx5_ib_set_roce_gid(to_mdev(device)->mdev, index,
+				    gid_type, gid, mac, vlan, vlan_id);
 }
 
 static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
@@ -268,17 +281,20 @@
 	struct ib_gid_attr attr;
 	union ib_gid gid;
 
+	/* Reserved GIDs are not in cache and not connected to a netdev */
+	if (mlx5_ib_is_gid_reserved(&dev->ib_dev, port_num, index))
+		goto out;
+
 	if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
 		return 0;
 
-	if (!attr.ndev)
-		return 0;
-
-	dev_put(attr.ndev);
+	if (attr.ndev)
+		dev_put(attr.ndev);
 
 	if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
 		return 0;
 
+out:
 	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
 }
 
@@ -773,19 +789,35 @@
 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 		       struct ib_port_attr *props)
 {
+	int ret;
+	unsigned int reserved;
+
 	switch (mlx5_get_vport_access_method(ibdev)) {
 	case MLX5_VPORT_ACCESS_METHOD_MAD:
-		return mlx5_query_mad_ifc_port(ibdev, port, props);
+		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
+		break;
 
 	case MLX5_VPORT_ACCESS_METHOD_HCA:
-		return mlx5_query_hca_port(ibdev, port, props);
+		ret = mlx5_query_hca_port(ibdev, port, props);
+		break;
 
 	case MLX5_VPORT_ACCESS_METHOD_NIC:
-		return mlx5_query_port_roce(ibdev, port, props);
+		ret = mlx5_query_port_roce(ibdev, port, props);
+		break;
 
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
 	}
+
+	if (!ret && props) {
+		reserved = to_mdev(ibdev)->reserved_gids.count;
+		if (props->gid_tbl_len > reserved) {
+			props->gid_tbl_len -= reserved;
+			pr_debug("Gid table reduced by %u reserved gids to %u\n",
+				 reserved, props->gid_tbl_len);
+		}
+	}
+	return ret;
 }
 
 static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
@@ -2127,6 +2159,11 @@
 		ibev.event = IB_EVENT_CLIENT_REREGISTER;
 		port = (u8)param;
 		break;
+
+	case MLX5_DEV_EVENT_FPGA_ERROR:
+	case MLX5_DEV_EVENT_FPGA_QP_ERROR:
+		/* Uninteresting device event */
+		return;
 	}
 
 	ibev.device	      = &ibdev->ib_dev;
@@ -2669,6 +2706,8 @@
 		goto err_dealloc;
 
 	rwlock_init(&dev->roce.netdev_lock);
+
+	mlx5_ib_reserved_gid_init(dev);
 	err = get_port_caps(dev);
 	if (err)
 		goto err_free_port;
@@ -2826,6 +2865,7 @@
 	mutex_init(&dev->cap_mask_mutex);
 	INIT_LIST_HEAD(&dev->qp_list);
 	spin_lock_init(&dev->reset_flow_resource_lock);
+	mutex_init(&dev->reserved_gids.mutex);
 
 	if (ll == IB_LINK_LAYER_ETHERNET) {
 		err = mlx5_enable_roce(dev);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 67cc741..7953d5b 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -44,6 +44,7 @@
 #include <linux/types.h>
 #include <linux/mlx5/transobj.h>
 #include <rdma/ib_user_verbs.h>
+#include <linux/mlx5_ib/driver.h>
 
 #define mlx5_ib_dbg(dev, format, arg...)				\
 pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
@@ -538,6 +539,10 @@
 	MLX5_FMR_BUSY,
 };
 
+enum {
+	MLX5_MAX_RESERVED_GIDS = 8,
+};
+
 struct mlx5_cache_ent {
 	struct list_head	head;
 	/* sync access to the cahce entry
@@ -605,6 +610,12 @@
 	struct notifier_block	nb;
 };
 
+struct mlx5_ib_reserved_gids {
+	unsigned int	count;
+	bool		used[MLX5_MAX_RESERVED_GIDS];
+	struct mutex	mutex;
+};
+
 struct mlx5_ib_dev {
 	struct ib_device		ib_dev;
 	struct mlx5_core_dev		*mdev;
@@ -636,6 +647,7 @@
 	struct list_head	qp_list;
 	/* Array with num_ports elements */
 	struct mlx5_ib_port	*port;
+	struct mlx5_ib_reserved_gids	reserved_gids;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -837,6 +849,10 @@
 						      struct ib_rwq_ind_table_init_attr *init_attr,
 						      struct ib_udata *udata);
 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int mlx5_ib_set_roce_gid(struct mlx5_core_dev *dev, unsigned int index,
+			 enum ib_gid_type gid_type, const union ib_gid *gid,
+			 const u8 *mac, bool vlan, u16 vlan_id);
+void mlx5_ib_reserved_gid_init(struct mlx5_ib_dev *dev);
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 extern struct workqueue_struct *mlx5_ib_page_fault_wq;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 9529b46..721feb4 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2167,8 +2167,10 @@
 						     attr->pkey_index);
 
 	if (ah->ah_flags & IB_AH_GRH) {
-		if (ah->grh.sgid_index >=
-		    dev->mdev->port_caps[port - 1].gid_table_len) {
+		if ((ah->grh.sgid_index >=
+		    dev->mdev->port_caps[port - 1].gid_table_len) &&
+		    !mlx5_ib_is_gid_reserved(&dev->ib_dev, port,
+					     ah->grh.sgid_index)) {
 			pr_err("sgid_index (%u) too large. max is %d\n",
 			       ah->grh.sgid_index,
 			       dev->mdev->port_caps[port - 1].gid_table_len);
diff --git a/drivers/infiniband/hw/mlx5/rsvd_gid.c b/drivers/infiniband/hw/mlx5/rsvd_gid.c
new file mode 100644
index 0000000..6013088
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/rsvd_gid.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5_ib/driver.h>
+#include "mlx5_ib.h"
+
+#define MLX5_FPGA_NUM_QPS 2
+void mlx5_ib_reserved_gid_init(struct mlx5_ib_dev *dev)
+{
+	int ix;
+	unsigned int count = 0;
+
+	if (MLX5_CAP_GEN(dev->mdev, fpga))
+		count = MLX5_FPGA_NUM_QPS;
+
+	if (count > MLX5_MAX_RESERVED_GIDS)
+		count = MLX5_MAX_RESERVED_GIDS;
+
+	pr_debug("Reserving %u GIDs\n", count);
+	dev->reserved_gids.count = count;
+	for (ix = 0; ix < count; ix++)
+		dev->reserved_gids.used[ix] = false;
+}
+
+bool mlx5_ib_is_gid_reserved(struct ib_device *ib_dev, u8 port, int index)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ib_dev);
+	int table_size = dev->mdev->port_caps[port - 1].gid_table_len;
+
+	return (index >= table_size) &&
+	       (index < table_size + dev->reserved_gids.count);
+}
+EXPORT_SYMBOL_GPL(mlx5_ib_is_gid_reserved);
+
+int mlx5_ib_reserved_gid_add(struct ib_device *ib_dev, u8 port,
+			     enum ib_gid_type gid_type, union ib_gid *gid,
+			     u8 *mac, bool vlan, u16 vlan_id, int *gid_index)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ib_dev);
+	int index = 0;
+	int ret = 0;
+	int empty_index;
+	int table_size = dev->mdev->port_caps[port - 1].gid_table_len;
+
+	mutex_lock(&dev->reserved_gids.mutex);
+	while ((index < dev->reserved_gids.count) &&
+	       dev->reserved_gids.used[index])
+		index++;
+	if (index >= dev->reserved_gids.count)
+		ret = -ENOMEM;
+	else
+		dev->reserved_gids.used[index] = true;
+	mutex_unlock(&dev->reserved_gids.mutex);
+	if (ret)
+		return ret;
+	empty_index = table_size + index;
+	pr_debug("Reserving GID %u\n", empty_index);
+	ret = mlx5_ib_set_roce_gid(dev->mdev, empty_index, gid_type, gid,
+				   mac, vlan, vlan_id);
+	if (ret) {
+		mutex_lock(&dev->reserved_gids.mutex);
+		dev->reserved_gids.used[index] = false;
+		mutex_unlock(&dev->reserved_gids.mutex);
+		return ret;
+	}
+	*gid_index = empty_index;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_ib_reserved_gid_add);
+
+void mlx5_ib_reserved_gid_del(struct ib_device *ib_dev, u8 port, int gid_index)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ib_dev);
+	int table_size = dev->mdev->port_caps[port - 1].gid_table_len;
+	int index, ret = 0;
+
+	if (!mlx5_ib_is_gid_reserved(ib_dev, port, gid_index)) {
+		pr_warn("Not a reserved GID %u\n", gid_index);
+		return;
+	}
+
+	pr_debug("Unreserving GID %u\n", gid_index);
+	ret = mlx5_ib_set_roce_gid(dev->mdev, gid_index, IB_GID_TYPE_IB,
+				   NULL, NULL, false, 0);
+	if (ret) {
+		pr_warn("Failed to delete reserved GID %u: %d\n", gid_index,
+			ret);
+		return;
+	}
+
+	index = gid_index - table_size;
+	mutex_lock(&dev->reserved_gids.mutex);
+	if (!dev->reserved_gids.used[index])
+		pr_warn("Deleting an unused reserved GID %u\n", gid_index);
+	dev->reserved_gids.used[index] = false;
+	mutex_unlock(&dev->reserved_gids.mutex);
+}
+EXPORT_SYMBOL_GPL(mlx5_ib_reserved_gid_del);
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index d547010..1b3ca6a 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -19,5 +19,8 @@
 source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
 source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
 source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
+source "drivers/net/ethernet/mellanox/accelerator/core/Kconfig"
+source "drivers/net/ethernet/mellanox/accelerator/ipsec/Kconfig"
+source "drivers/net/ethernet/mellanox/accelerator/tools/Kconfig"
 
 endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 2e2a5ec..96a5856 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -5,3 +5,6 @@
 obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_MLX5_CORE) += mlx5/core/
 obj-$(CONFIG_MLXSW_CORE) += mlxsw/
+obj-$(CONFIG_MLX_ACCEL_CORE) += accelerator/core/
+obj-$(CONFIG_MLX_ACCEL_IPSEC) += accelerator/ipsec/
+obj-$(CONFIG_MLX_ACCEL_TOOLS) += accelerator/tools/
diff --git a/drivers/net/ethernet/mellanox/accelerator/core/Kconfig b/drivers/net/ethernet/mellanox/accelerator/core/Kconfig
new file mode 100644
index 0000000..8c7be59
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/core/Kconfig
@@ -0,0 +1,12 @@
+#
+# Mellanox accelerator core driver configuration
+#
+
+config MLX_ACCEL_CORE
+	tristate "Mellanox Technologies ConnectX-4 accelerator core driver"
+	depends on MLX5_INFINIBAND && MLX5_CORE_EN && MLX5_CORE
+	default n
+	---help---
+	  Core driver for low level functionality of the ConnectX-4 accelerator
+	  by Mellanox Technologies.
+
diff --git a/drivers/net/ethernet/mellanox/accelerator/core/Makefile b/drivers/net/ethernet/mellanox/accelerator/core/Makefile
new file mode 100644
index 0000000..a5811b5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/core/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MLX_ACCEL_CORE)		+= mlx_accel_core.o
+
+mlx_accel_core-y :=	accel_core_main.o accel_core_rdma.o accel_core_sdk.o accel_core_hw.o accel_core_trans.o accel_core_xfer.o
diff --git a/drivers/net/ethernet/mellanox/accelerator/core/accel_core.h b/drivers/net/ethernet/mellanox/accelerator/core/accel_core.h
new file mode 100644
index 0000000..35f8c3b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/core/accel_core.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX_ACCEL_CORE_H__
+#define __MLX_ACCEL_CORE_H__
+
+#include "accel_core_sdk.h"
+#include <linux/in6.h>
+
+#define MLX_RECV_SIZE 2048
+#define MLX_EXIT_WRID 1
+
+struct mlx_accel_client_data {
+	struct list_head  list;
+	struct mlx_accel_core_client *client;
+	void *data;
+	bool added;
+};
+
+struct mlx_accel_client_data *
+mlx_accel_client_context_create(struct mlx_accel_core_device *device,
+				struct mlx_accel_core_client *client);
+void mlx_accel_client_context_destroy(struct mlx_accel_core_device *device,
+				      struct mlx_accel_client_data *context);
+void mlx_accel_device_teardown(struct mlx_accel_core_device *accel_device);
+
+/* RDMA */
+struct mlx_accel_core_conn *
+mlx_accel_core_rdma_conn_create(struct mlx_accel_core_device *accel_device,
+				struct mlx_accel_core_conn_init_attr
+				*conn_init_attr, bool is_shell_conn);
+void mlx_accel_core_rdma_conn_destroy(struct mlx_accel_core_conn *conn);
+
+int mlx_accel_core_rdma_post_send(struct mlx_accel_core_conn *conn,
+				  struct mlx_accel_core_dma_buf *buf);
+
+int mlx_accel_core_rdma_connect(struct mlx_accel_core_conn *conn);
+
+/* I2C */
+int mlx_accel_read_i2c(struct mlx5_core_dev *dev,
+		       size_t size, u64 addr, u8 *buf);
+int mlx_accel_write_i2c(struct mlx5_core_dev *dev,
+			size_t size, u64 addr, u8 *buf);
+
+/* fpga QP */
+#define mlx_accel_fpga_qp_device_init(accel_device) 0
+#define mlx_accel_fpga_qp_device_deinit(accel_device)
+
+#if defined(QP_SIMULATOR)
+#undef mlx_accel_fpga_qp_device_init
+#undef mlx_accel_fpga_qp_device_deinit
+int mlx_accel_fpga_qp_device_init(struct mlx_accel_core_device *accel_device);
+int mlx_accel_fpga_qp_device_deinit(struct mlx_accel_core_device *accel_device);
+#endif
+
+#endif /* __MLX_ACCEL_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/accelerator/core/accel_core_hw.c b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_hw.c
new file mode 100644
index 0000000..06d5e48
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_hw.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "accel_core.h"
+#include <linux/etherdevice.h>
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/driver.h>
+#include <rdma/ib_mad.h>
+
+int mlx_accel_read_i2c(struct mlx5_core_dev *dev,
+		       size_t size, u64 addr, u8 *buf)
+{
+	u8 actual_size;
+	size_t bytes_done = 0;
+	size_t max_size = MLX5_FPGA_ACCESS_REG_SIZE_MAX;
+	int rc;
+
+	while (bytes_done < size) {
+		actual_size = min(max_size, (size - bytes_done));
+
+		rc = mlx5_fpga_access_reg(dev, actual_size,
+					  addr + bytes_done,
+					  buf + bytes_done, false);
+		if (rc) {
+			pr_err("Failed to read FPGA crspace data for %s\n",
+			       dev_name(&dev->pdev->dev));
+			return rc;
+		}
+
+		bytes_done += actual_size;
+	}
+
+	return 0;
+}
+
+int mlx_accel_write_i2c(struct mlx5_core_dev *dev,
+			size_t size, u64 addr, u8 *buf)
+{
+	u8 actual_size;
+	size_t bytes_done = 0;
+	size_t max_size = MLX5_FPGA_ACCESS_REG_SIZE_MAX;
+	int rc;
+
+	while (bytes_done < size) {
+		actual_size = min(max_size, (size - bytes_done));
+
+		rc = mlx5_fpga_access_reg(dev, actual_size,
+					  addr + bytes_done,
+					  buf + bytes_done, true);
+		if (rc) {
+			pr_err("Failed to write FPGA crspace data for %s\n",
+			       dev_name(&dev->pdev->dev));
+			return rc;
+		}
+
+		bytes_done += actual_size;
+	}
+
+	return 0;
+}
+
+#ifdef QP_SIMULATOR
+
+static ssize_t core_conn_ip_read(struct mlx_accel_core_device *dev, char *buf)
+{
+	__be16 *sgid = (__be16 *)&dev->core_conn->fpga_qpc.remote_ip;
+
+	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+			be16_to_cpu(sgid[0]),
+			be16_to_cpu(sgid[1]),
+			be16_to_cpu(sgid[2]),
+			be16_to_cpu(sgid[3]),
+			be16_to_cpu(sgid[4]),
+			be16_to_cpu(sgid[5]),
+			be16_to_cpu(sgid[6]),
+			be16_to_cpu(sgid[7]));
+}
+
+static ssize_t core_conn_qpn_read(struct mlx_accel_core_device *dev, char *buf)
+{
+	if (dev->core_conn && dev->core_conn->qp)
+		return sprintf(buf, "%u\n", dev->core_conn->qp->qp_num);
+	return sprintf(buf, "null\n");
+}
+
+static ssize_t core_conn_fpga_ip_write(struct mlx_accel_core_device *dev,
+				       const char *buf, size_t count)
+{
+	__be16 *gid = (__be16 *)&dev->core_conn->fpga_qpc.fpga_ip;
+	int i = 0;
+
+	if (sscanf(buf, "%04hx:%04hx:%04hx:%04hx:%04hx:%04hx:%04hx:%04hx\n",
+		   &gid[0], &gid[1], &gid[2], &gid[3],
+		   &gid[4], &gid[5], &gid[6], &gid[7]) != 8)
+		return -EINVAL;
+
+	for (i = 0; i < 8; i++)
+		gid[i] = cpu_to_be16(gid[i]);
+	return count;
+}
+
+static ssize_t core_conn_fpga_qpn_write(struct mlx_accel_core_device *dev,
+					const char *buf, size_t count)
+{
+	if (sscanf(buf, "%u\n", &dev->core_conn->fpga_qpn) != 1)
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t core_conn_fpga_conn_write(struct mlx_accel_core_device *dev,
+					 const char *buf, size_t count)
+{
+	int err;
+
+	err = mlx_accel_core_rdma_connect(dev->core_conn);
+	if (err) {
+		pr_err("Failed to connect core RC QP to FPGA QP: %d\n", err);
+		return -EIO;
+	}
+	return count;
+}
+
+struct core_conn_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct mlx_accel_core_device *dev, char *buf);
+	ssize_t (*store)(struct mlx_accel_core_device *dev, const char *buf,
+			 size_t count);
+};
+
+#define CORE_CONN_ATTR_RO(_name, _show) \
+	struct core_conn_attribute core_conn_attr_ ## _name = { \
+			.attr = {.name = __stringify(_name), .mode = 0444}, \
+			.show = _show, \
+	}
+#define CORE_CONN_ATTR_WO(_name, _store) \
+	struct core_conn_attribute core_conn_attr_ ## _name = { \
+			.attr = {.name = __stringify(_name), .mode = 0222}, \
+			.store = _store, \
+	}
+#define to_dev(obj) container_of(kobj, struct mlx_accel_core_device, sim_kobj)
+#define to_attr(_attr) container_of(attr, struct core_conn_attribute, attr)
+
+static CORE_CONN_ATTR_RO(ip, core_conn_ip_read);
+static CORE_CONN_ATTR_RO(qpn, core_conn_qpn_read);
+static CORE_CONN_ATTR_WO(fpga_qpn, core_conn_fpga_qpn_write);
+static CORE_CONN_ATTR_WO(fpga_ip, core_conn_fpga_ip_write);
+static CORE_CONN_ATTR_WO(fpga_conn, core_conn_fpga_conn_write);
+
+struct attribute *core_conn_def_attrs[] = {
+		&core_conn_attr_ip.attr,
+		&core_conn_attr_qpn.attr,
+		&core_conn_attr_fpga_ip.attr,
+		&core_conn_attr_fpga_qpn.attr,
+		&core_conn_attr_fpga_conn.attr,
+		NULL,
+};
+
+static ssize_t core_conn_sysfs_show(struct kobject *kobj,
+				    struct attribute *attr, char *buf)
+{
+	struct mlx_accel_core_device *dev = to_dev(kobj);
+	struct core_conn_attribute *core_conn_attr = to_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (core_conn_attr->show)
+		ret = core_conn_attr->show(dev, buf);
+
+	return ret;
+}
+
+static ssize_t core_conn_sysfs_store(struct kobject *kobj,
+				     struct attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct mlx_accel_core_device *dev = to_dev(kobj);
+	struct core_conn_attribute *core_conn_attr = to_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (core_conn_attr->store)
+		ret = core_conn_attr->store(dev, buf, count);
+
+	return ret;
+}
+
+const struct sysfs_ops core_conn_sysfs_ops = {
+	.show  = core_conn_sysfs_show,
+	.store = core_conn_sysfs_store,
+};
+
+static struct kobj_type core_conn_sysfs_type = {
+	.sysfs_ops      = &core_conn_sysfs_ops,
+	.default_attrs  = core_conn_def_attrs,
+};
+
+int mlx_accel_fpga_qp_device_init(struct mlx_accel_core_device *accel_device)
+{
+	return kobject_init_and_add(&accel_device->sim_kobj,
+				    &core_conn_sysfs_type,
+				    mlx_accel_core_kobj(accel_device), "%s",
+				    "fpga_core_conn");
+}
+
+int mlx_accel_fpga_qp_device_deinit(struct mlx_accel_core_device *accel_device)
+{
+	kobject_put(&accel_device->sim_kobj);
+	return 0;
+}
+
+int mlx5_fpga_create_qp(struct mlx5_core_dev *dev,
+			struct mlx5_fpga_qpc *fpga_qpc, u32 *fpga_qpn)
+{
+	return 0;
+}
+
+int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
+			enum mlx5_fpga_qpc_field_select fields,
+			struct mlx5_fpga_qpc *fpga_qpc)
+{
+	return 0;
+}
+
+int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn)
+{
+	return 0;
+}
+#endif
diff --git a/drivers/net/ethernet/mellanox/accelerator/core/accel_core_main.c b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_main.c
new file mode 100644
index 0000000..b72d1ec
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_main.c
@@ -0,0 +1,717 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <rdma/ib_mad.h>
+
+#include "accel_core.h"
+#include "accel_core_trans.h"
+
+/* Workqueue for deferred handling of FPGA error events */
+static struct workqueue_struct *mlx_accel_core_workq;
+/* Monotonic source of per-device ids (see mlx_accel_device_alloc) */
+atomic_t mlx_accel_device_id = ATOMIC_INIT(0);
+/* All known accel devices and registered client drivers */
+LIST_HEAD(mlx_accel_core_devices);
+LIST_HEAD(mlx_accel_core_clients);
+/* protects access between client un/registration and device add/remove calls */
+DEFINE_MUTEX(mlx_accel_core_mutex);
+
+MODULE_AUTHOR("Ilan Tayari <ilant@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox Innova Core Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("0.1");
+
+/* Human-readable names for FPGA error event syndromes,
+ * indexed by syndrome value (see mlx_accel_error_string()).
+ */
+static const char * const mlx_accel_fpga_error_string[] = {
+	"Null Syndrome",
+	"Corrupted DDR",
+	"Flash Timeout",
+	"Internal Link Error",
+	"Watchdog HW Failure",
+	"I2C Failure",
+	"Image Changed",
+};
+
+/* Human-readable names for FPGA QP error event syndromes,
+ * indexed by syndrome value (see mlx_accel_qp_error_string()).
+ */
+static const char * const mlx_accel_fpga_qp_error_string[] = {
+	"Null Syndrome",
+	"Retry Counter Expired",
+	"RNR Expired",
+};
+
+/* Create the event workqueue; returns 0 on success, -ENOMEM otherwise */
+int mlx_accel_core_workq_init(void)
+{
+	mlx_accel_core_workq = create_workqueue("mlx_accel_core");
+
+	return mlx_accel_core_workq ? 0 : -ENOMEM;
+}
+
+/* Drain and destroy the event workqueue created by
+ * mlx_accel_core_workq_init().  Callers must guarantee no further
+ * queue_work() after this returns.
+ */
+void mlx_accel_core_workq_deinit(void)
+{
+	flush_workqueue(mlx_accel_core_workq);
+	destroy_workqueue(mlx_accel_core_workq);
+	mlx_accel_core_workq = NULL;
+}
+
+/* Invoke the client's destroy() callback (if any), unlink the context
+ * from the device's client list, and free it.
+ */
+void mlx_accel_client_context_destroy(struct mlx_accel_core_device *device,
+				      struct mlx_accel_client_data *context)
+{
+	struct mlx_accel_core_client *client = context->client;
+
+	pr_debug("Deleting client context %p of client %p\n",
+		 context, client);
+	if (client->destroy)
+		client->destroy(device);
+	list_del(&context->list);
+	kfree(context);
+}
+
+/*
+ * Allocate a per-device context for @client, link it onto the device's
+ * client_data_list, and invoke the client's create() callback (if any).
+ * Returns the new context, or NULL on allocation failure.
+ */
+struct mlx_accel_client_data *
+mlx_accel_client_context_create(struct mlx_accel_core_device *device,
+				struct mlx_accel_core_client *client)
+{
+	struct mlx_accel_client_data *context;
+
+	/* TODO: If device caps do not match client driver, then
+	 * do nothing and return
+	 */
+
+	/* kzalloc clears ->data and ->added; no OOM message needed --
+	 * the allocator already logs allocation failures.
+	 */
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return NULL;
+
+	context->client = client;
+	list_add(&context->list, &device->client_data_list);
+
+	pr_debug("Adding client context %p device %p client %p\n",
+		 context, device, client);
+
+	if (client->create)
+		client->create(device);
+	return context;
+}
+
+/*
+ * Find the accel device owning @dev.  If an IB-only accel device whose
+ * ib_dev shares @dev's PCI DMA device is found, its hw_dev is filled in
+ * here -- i.e. this "find" also adopts (see inline review comments).
+ * Returns NULL when no match exists.  Caller holds mlx_accel_core_mutex.
+ */
+static inline struct mlx_accel_core_device *
+	mlx_find_accel_dev_by_hw_dev_unlocked(struct mlx5_core_dev *dev)
+{
+	struct mlx_accel_core_device *accel_device, *tmp;
+
+	list_for_each_entry_safe(accel_device, tmp, &mlx_accel_core_devices,
+			list) {
+		if (accel_device->hw_dev == dev)
+			goto found;
+		if (!accel_device->hw_dev) {
+			if (!accel_device->ib_dev) {
+				/* [AY]: TODO: do we want to check this case */
+				/* [BP]: Yes, WARN_ON could be nice */
+				dump_stack();
+				pr_err("Found Invalid accel device\n");
+				continue;
+			}
+			if (accel_device->ib_dev->dma_device == &dev->pdev->dev) {
+				/* [BP]: Can you move this out of the
+				 * function? Currently, this isn't a "find"
+				 * function */
+				accel_device->hw_dev = dev;
+				goto found;
+			}
+		}
+	}
+
+	return NULL;
+found:
+	return accel_device;
+}
+
+/*
+ * Mirror of mlx_find_accel_dev_by_hw_dev_unlocked() keyed by IB device:
+ * finds the accel device owning @dev, adopting a HW-only accel device
+ * whose PCI device matches @dev's dma_device (side effect: sets ib_dev).
+ * Returns NULL when no match exists.  Caller holds mlx_accel_core_mutex.
+ */
+static inline struct mlx_accel_core_device *
+	mlx_find_accel_dev_by_ib_dev_unlocked(struct ib_device *dev)
+{
+	struct mlx_accel_core_device *accel_device = NULL, *tmp;
+
+	list_for_each_entry_safe(accel_device, tmp, &mlx_accel_core_devices,
+			list) {
+		if (accel_device->ib_dev == dev)
+			goto found;
+		if (!accel_device->ib_dev) {
+			if (!accel_device->hw_dev) {
+				/* [AY]: TODO: do we want to check this case */
+				/* [BP]: Yes, WARN_ON could be nice */
+				dump_stack();
+				pr_err("Found Invalid accel device\n");
+				continue;
+			}
+			if (&accel_device->hw_dev->pdev->dev == dev->dma_device) {
+				/* [BP]: Can you move this out of the
+				 * function? Currently, this isn't a "find"
+				 * function */
+				accel_device->ib_dev = dev;
+				goto found;
+			}
+		}
+	}
+
+	return NULL;
+found:
+	return accel_device;
+}
+
+/* Allocate, initialize and enlist a new accel device.
+ * Caller holds mlx_accel_core_mutex (protects the global device list).
+ */
+static struct mlx_accel_core_device *mlx_accel_device_alloc(void)
+{
+	struct mlx_accel_core_device *accel_device;
+
+	accel_device = kzalloc(sizeof(*accel_device), GFP_KERNEL);
+	if (!accel_device)
+		return NULL;
+
+	mutex_init(&accel_device->mutex);
+	accel_device->state = MLX_ACCEL_FPGA_STATUS_NONE;
+	accel_device->id = atomic_add_return(1, &mlx_accel_device_id);
+	accel_device->port = 1;
+	INIT_LIST_HEAD(&accel_device->client_data_list);
+	INIT_LIST_HEAD(&accel_device->client_connections);
+	list_add_tail(&accel_device->list, &mlx_accel_core_devices);
+
+	return accel_device;
+}
+
+/*
+ * Bring up the FPGA core connection once the FPGA reports SUCCESS:
+ * query FPGA caps, create the sysfs QP node, look up the pkey, init the
+ * transaction machine, allocate PD/MR, create the core RC QP, connect it
+ * to the FPGA and -- when a user image is loaded -- reset the sandbox
+ * unit and notify registered clients.  Sets accel_device->state to
+ * SUCCESS or FAILURE on exit.  Called with accel_device->mutex held.
+ */
+static void mlx_accel_device_init(struct mlx_accel_core_device *accel_device)
+{
+	struct mlx_accel_core_conn_init_attr core_conn_attr;
+	struct mlx_accel_client_data *client_context;
+#ifdef DEBUG
+	__be16 *fpga_ip;
+#endif
+	int err = 0;
+
+	/* Device name combines the IB and mlx5 core device names */
+	snprintf(accel_device->name, sizeof(accel_device->name), "%s-%s",
+		 accel_device->ib_dev->name,
+		 accel_device->hw_dev->priv.name);
+
+	err = mlx5_fpga_caps(accel_device->hw_dev, accel_device->fpga_caps);
+	if (err) {
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"Failed to query FPGA capabilities: %d\n", err);
+		goto out;
+	}
+
+	err = mlx_accel_fpga_qp_device_init(accel_device);
+	if (err) {
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"Failed to initialize FPGA QP: %d\n", err);
+		goto out;
+	}
+
+	err = ib_find_pkey(accel_device->ib_dev, accel_device->port,
+			   IB_DEFAULT_PKEY_FULL, &accel_device->pkey_index);
+	if (err) {
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"Failed to query pkey: %d\n", err);
+		goto err_fpga_dev;
+	}
+	pr_debug("pkey %x index is %u\n", IB_DEFAULT_PKEY_FULL,
+		 accel_device->pkey_index);
+
+	err = mlx_accel_trans_device_init(accel_device);
+	if (err) {
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"Failed to initialize transaction machine: %d\n", err);
+		goto err_fpga_dev;
+	}
+
+	accel_device->pd = ib_alloc_pd(accel_device->ib_dev);
+	if (IS_ERR(accel_device->pd)) {
+		err = PTR_ERR(accel_device->pd);
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"Failed to create PD: %d\n", err);
+		goto err_trans;
+	}
+
+	accel_device->mr = ib_get_dma_mr(accel_device->pd,
+					 IB_ACCESS_LOCAL_WRITE |
+					 IB_ACCESS_REMOTE_WRITE);
+	if (IS_ERR(accel_device->mr)) {
+		err = PTR_ERR(accel_device->mr);
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"Failed to create MR: %d\n", err);
+		goto err_pd;
+	}
+
+	/* Core connection: transaction-machine recv callback, TID-sized
+	 * rings in both directions.
+	 */
+	memset(&core_conn_attr, 0, sizeof(core_conn_attr));
+	core_conn_attr.tx_size = MLX_ACCEL_TID_COUNT;
+	core_conn_attr.rx_size = MLX_ACCEL_TID_COUNT;
+	core_conn_attr.recv_cb = mlx_accel_trans_recv;
+	core_conn_attr.cb_arg = accel_device;
+
+	accel_device->core_conn = mlx_accel_core_rdma_conn_create(accel_device,
+							&core_conn_attr, true);
+	if (IS_ERR(accel_device->core_conn)) {
+		err = PTR_ERR(accel_device->core_conn);
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"Failed to create core RC QP: %d\n", err);
+		accel_device->core_conn = NULL;
+		goto err_mr;
+	}
+
+#ifdef DEBUG
+	dev_dbg(&accel_device->hw_dev->pdev->dev,
+		"Local QPN is %u\n", accel_device->core_conn->qp->qp_num);
+	fpga_ip = (__be16 *)&accel_device->core_conn->fpga_qpc.fpga_ip;
+	dev_dbg(&accel_device->hw_dev->pdev->dev,
+		"FPGA device gid is %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+		ntohs(fpga_ip[0]), ntohs(fpga_ip[1]),
+		ntohs(fpga_ip[2]), ntohs(fpga_ip[3]),
+		ntohs(fpga_ip[4]), ntohs(fpga_ip[5]),
+		ntohs(fpga_ip[6]), ntohs(fpga_ip[7]));
+	dev_dbg(&accel_device->hw_dev->pdev->dev,
+		"FPGA QPN is %u\n", accel_device->core_conn->fpga_qpn);
+#endif
+
+#ifdef QP_SIMULATOR
+	dev_ntc(&accel_device->hw_dev->pdev->dev,
+		"**** QP Simulator mode; Waiting for QP setup ****\n");
+#else
+	err = mlx_accel_core_rdma_connect(accel_device->core_conn);
+	if (err) {
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"Failed to connect core RC QP to FPGA QP: %d\n", err);
+		goto err_core_conn;
+	}
+#endif
+
+	/* For a user (sandbox) image: bypass the SBU, reset it, then
+	 * re-enable it before letting client drivers attach.
+	 */
+	if (accel_device->last_oper_image == MLX_ACCEL_IMAGE_USER) {
+		err = mlx5_fpga_ctrl_op(accel_device->hw_dev,
+					MLX5_FPGA_CTRL_OP_SB_BYPASS_ON);
+		if (err) {
+			dev_err(&accel_device->hw_dev->pdev->dev,
+				"Failed to set SBU bypass on: %d\n", err);
+			goto err_core_conn;
+		}
+		err = mlx5_fpga_ctrl_op(accel_device->hw_dev,
+					MLX5_FPGA_CTRL_OP_RESET_SB);
+		if (err) {
+			dev_err(&accel_device->hw_dev->pdev->dev,
+				"Failed to reset SBU: %d\n", err);
+			goto err_core_conn;
+		}
+		err = mlx5_fpga_ctrl_op(accel_device->hw_dev,
+					MLX5_FPGA_CTRL_OP_SB_BYPASS_OFF);
+		if (err) {
+			dev_err(&accel_device->hw_dev->pdev->dev,
+				"Failed to set SBU bypass off: %d\n", err);
+			goto err_core_conn;
+		}
+
+		/* add() failure just skips that client; others proceed */
+		list_for_each_entry(client_context,
+				    &accel_device->client_data_list, list) {
+			if (client_context->client->add(accel_device))
+				continue;
+			client_context->added = true;
+		}
+	}
+
+	goto out;
+
+err_core_conn:
+	mlx_accel_core_rdma_conn_destroy(accel_device->core_conn);
+	accel_device->core_conn = NULL;
+err_mr:
+	ib_dereg_mr(accel_device->mr);
+	accel_device->mr = NULL;
+err_pd:
+	ib_dealloc_pd(accel_device->pd);
+	accel_device->pd = NULL;
+err_trans:
+	mlx_accel_trans_device_deinit(accel_device);
+err_fpga_dev:
+	mlx_accel_fpga_qp_device_deinit(accel_device);
+out:
+	accel_device->state = err ? MLX_ACCEL_FPGA_STATUS_FAILURE :
+				    MLX_ACCEL_FPGA_STATUS_SUCCESS;
+}
+
+/*
+ * Undo mlx_accel_device_init(): detach clients, re-enable SBU bypass,
+ * and destroy the core connection, MR, PD, transaction machine and
+ * FPGA QP sysfs node (in reverse order of creation).
+ * Called with accel_device->mutex held.
+ */
+void mlx_accel_device_teardown(struct mlx_accel_core_device *accel_device)
+{
+	int err = 0;
+	struct mlx_accel_client_data *client_context;
+
+	/* Detach clients first so no client traffic outlives the conn */
+	list_for_each_entry(client_context,
+			    &accel_device->client_data_list, list) {
+		if (!client_context->added)
+			continue;
+		client_context->client->remove(accel_device);
+		client_context->added = false;
+	}
+	WARN_ON(!list_empty(&accel_device->client_connections));
+
+	if (accel_device->state == MLX_ACCEL_FPGA_STATUS_SUCCESS) {
+		err = mlx5_fpga_ctrl_op(accel_device->hw_dev,
+					MLX5_FPGA_CTRL_OP_SB_BYPASS_ON);
+		if (err) {
+			dev_err(&accel_device->hw_dev->pdev->dev,
+				"Failed to re-set SBU bypass on: %d\n", err);
+		}
+	}
+
+	/* core_conn is non-NULL only if init got past conn creation */
+	if (accel_device->core_conn) {
+		mlx_accel_core_rdma_conn_destroy(accel_device->core_conn);
+		accel_device->core_conn = NULL;
+		err = ib_dereg_mr(accel_device->mr);
+		if (err)
+			pr_err("Unexpected error deregistering MR: %d\n", err);
+		accel_device->mr = NULL;
+		ib_dealloc_pd(accel_device->pd);
+		accel_device->pd = NULL;
+		mlx_accel_trans_device_deinit(accel_device);
+		mlx_accel_fpga_qp_device_deinit(accel_device);
+	}
+}
+
+/* Query the FPGA status and act on it: initialize on SUCCESS, otherwise
+ * record the reported state.  Called with accel_device->mutex held.
+ */
+static void mlx_accel_device_check(struct mlx_accel_core_device *accel_device)
+{
+	enum mlx_accel_fpga_status status;
+	int err;
+
+	err = mlx5_fpga_query(accel_device->hw_dev,
+			      &status, &accel_device->last_admin_image,
+			      &accel_device->last_oper_image);
+	if (err) {
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"Failed to query FPGA status: %d\n", err);
+		return;
+	}
+
+	switch (status) {
+	case MLX_ACCEL_FPGA_STATUS_SUCCESS:
+		mlx_accel_device_init(accel_device);
+		break;
+	case MLX_ACCEL_FPGA_STATUS_IN_PROGRESS:
+		accel_device->state = MLX_ACCEL_FPGA_STATUS_IN_PROGRESS;
+		dev_info(&accel_device->hw_dev->pdev->dev,
+			 "FPGA device is not ready yet\n");
+		break;
+	case MLX_ACCEL_FPGA_STATUS_FAILURE:
+		accel_device->state = MLX_ACCEL_FPGA_STATUS_FAILURE;
+		dev_info(&accel_device->hw_dev->pdev->dev,
+			 "FPGA device has failed\n");
+		break;
+	default:
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"FPGA status unknown: %u\n", status);
+		break;
+	}
+}
+
+/* Create a context for every registered client on this device, then
+ * probe the FPGA state under the device mutex.
+ * Caller holds mlx_accel_core_mutex.
+ */
+static void mlx_accel_device_start(struct mlx_accel_core_device *accel_device)
+{
+	struct mlx_accel_core_client *client;
+
+	list_for_each_entry(client, &mlx_accel_core_clients, list)
+		mlx_accel_client_context_create(accel_device, client);
+
+	mutex_lock(&accel_device->mutex);
+	mlx_accel_device_check(accel_device);
+	mutex_unlock(&accel_device->mutex);
+}
+
+/* Reverse of mlx_accel_device_start(): tear the device down under its
+ * mutex, then destroy all client contexts (outside the device mutex,
+ * since destroy callbacks may block).
+ */
+static void mlx_accel_device_stop(struct mlx_accel_core_device *accel_device)
+{
+	struct mlx_accel_client_data *ctx, *tmp;
+
+	mutex_lock(&accel_device->mutex);
+	mlx_accel_device_teardown(accel_device);
+	accel_device->state = MLX_ACCEL_FPGA_STATUS_NONE;
+	mutex_unlock(&accel_device->mutex);
+
+	list_for_each_entry_safe(ctx, tmp, &accel_device->client_data_list,
+				 list)
+		mlx_accel_client_context_destroy(accel_device, ctx);
+}
+
+/* IB client add callback: pair the IB device with (or allocate) its
+ * accel device, and start it once both halves are present.
+ */
+static void mlx_accel_ib_dev_add_one(struct ib_device *dev)
+{
+	struct mlx_accel_core_device *accel_device;
+
+	pr_info("mlx_accel_ib_dev_add_one called for %s\n", dev->name);
+
+	mutex_lock(&mlx_accel_core_mutex);
+
+	accel_device = mlx_find_accel_dev_by_ib_dev_unlocked(dev);
+	if (!accel_device) {
+		accel_device = mlx_accel_device_alloc();
+		if (!accel_device)
+			goto out;
+		accel_device->ib_dev = dev;
+	}
+
+	/* An accel device is ready once it has both IB and HW devices */
+	if (accel_device->ib_dev && accel_device->hw_dev)
+		mlx_accel_device_start(accel_device);
+
+out:
+	mutex_unlock(&mlx_accel_core_mutex);
+}
+
+/* IB client remove callback: stop the accel device if the HW half is
+ * still present, otherwise free it entirely.
+ */
+static void mlx_accel_ib_dev_remove_one(struct ib_device *dev,
+					void *client_data)
+{
+	struct mlx_accel_core_device *accel_device;
+
+	pr_info("mlx_accel_ib_dev_remove_one called for %s\n", dev->name);
+
+	mutex_lock(&mlx_accel_core_mutex);
+
+	accel_device = mlx_find_accel_dev_by_ib_dev_unlocked(dev);
+	if (!accel_device) {
+		pr_err("Not found valid accel device\n");
+	} else if (!accel_device->ib_dev) {
+		pr_warn("Removing IB device that was not added\n");
+	} else if (accel_device->hw_dev) {
+		/* HW half remains: stop traffic but keep the device */
+		mlx_accel_device_stop(accel_device);
+		accel_device->ib_dev = NULL;
+	} else {
+		/* Last half gone: unlink and free */
+		list_del(&accel_device->list);
+		kfree(accel_device);
+	}
+
+	mutex_unlock(&mlx_accel_core_mutex);
+}
+
+/* mlx5 interface add callback: pair the HW device with (or allocate)
+ * its accel device.  Returns the accel device as the interface context
+ * (NULL when the device has no FPGA or allocation fails).
+ */
+static void *mlx_accel_hw_dev_add_one(struct mlx5_core_dev *dev)
+{
+	struct mlx_accel_core_device *accel_device;
+
+	pr_info("mlx_accel_hw_dev_add_one called for %s\n", dev->priv.name);
+
+	if (!MLX5_CAP_GEN(dev, fpga))
+		return NULL;
+
+	mutex_lock(&mlx_accel_core_mutex);
+
+	accel_device = mlx_find_accel_dev_by_hw_dev_unlocked(dev);
+	if (!accel_device) {
+		accel_device = mlx_accel_device_alloc();
+		if (!accel_device)
+			goto out_unlock;
+		accel_device->hw_dev = dev;
+	}
+
+	/* An accel device is ready once it has both IB and HW devices */
+	if (accel_device->hw_dev && accel_device->ib_dev)
+		mlx_accel_device_start(accel_device);
+
+out_unlock:
+	mutex_unlock(&mlx_accel_core_mutex);
+	return accel_device;
+}
+
+/* mlx5 interface remove callback: @context is the accel device returned
+ * by mlx_accel_hw_dev_add_one().  Stop it if the IB half remains,
+ * otherwise free it.
+ */
+static void mlx_accel_hw_dev_remove_one(struct mlx5_core_dev *dev,
+		void *context)
+{
+	struct mlx_accel_core_device *accel_device = context;
+
+	pr_info("mlx_accel_hw_dev_remove_one called for %s\n", dev->priv.name);
+
+	mutex_lock(&mlx_accel_core_mutex);
+
+	if (accel_device->ib_dev) {
+		mlx_accel_device_stop(accel_device);
+		accel_device->hw_dev = NULL;
+	} else {
+		list_del(&accel_device->list);
+		kfree(accel_device);
+	}
+
+	mutex_unlock(&mlx_accel_core_mutex);
+}
+
+static const char *mlx_accel_error_string(u8 syndrome)
+{
+	if (syndrome < ARRAY_SIZE(mlx_accel_fpga_error_string))
+		return mlx_accel_fpga_error_string[syndrome];
+	return "Unknown";
+}
+
+static const char *mlx_accel_qp_error_string(u8 syndrome)
+{
+	if (syndrome < ARRAY_SIZE(mlx_accel_fpga_qp_error_string))
+		return mlx_accel_fpga_qp_error_string[syndrome];
+	return "Unknown";
+}
+
+/* Deferred FPGA event, handed from the atomic event callback to the
+ * core workqueue (allocated in mlx_accel_hw_dev_event_one()).
+ */
+struct my_work {
+	struct work_struct work;
+	struct mlx_accel_core_device *accel_device;
+	u8 syndrome;	/* error syndrome from the FPGA event */
+	u32 fpga_qpn;	/* valid only for QP error events */
+};
+
+/*
+ * Workqueue handler for FPGA error events.  Depending on device state:
+ * log (NONE/FAILURE), re-check after an image change (IN_PROGRESS), or
+ * tear the device down and mark it failed (SUCCESS).
+ * Frees the work item allocated by mlx_accel_hw_dev_event_one().
+ */
+static void mlx_accel_fpga_error(struct work_struct *work)
+{
+	struct my_work *mywork = container_of(work, struct my_work, work);
+	struct mlx_accel_core_device *accel_device = mywork->accel_device;
+	u8 syndrome = mywork->syndrome;
+
+	mutex_lock(&accel_device->mutex);
+	switch (accel_device->state) {
+	case MLX_ACCEL_FPGA_STATUS_NONE:
+	case MLX_ACCEL_FPGA_STATUS_FAILURE:
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"Unexpected FPGA event %u: %s\n",
+			syndrome, mlx_accel_error_string(syndrome));
+		break;
+	case MLX_ACCEL_FPGA_STATUS_IN_PROGRESS:
+		if (syndrome != MLX5_FPGA_ERROR_EVENT_SYNDROME_IMAGE_CHANGED)
+			dev_warn(&accel_device->hw_dev->pdev->dev,
+				 "FPGA Error while loading %u: %s\n",
+				 syndrome, mlx_accel_error_string(syndrome));
+		else
+			mlx_accel_device_check(accel_device);
+		break;
+	case MLX_ACCEL_FPGA_STATUS_SUCCESS:
+		mlx_accel_device_teardown(accel_device);
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"FPGA Error %u: %s\n",
+			syndrome, mlx_accel_error_string(syndrome));
+		accel_device->state = MLX_ACCEL_FPGA_STATUS_FAILURE;
+		break;
+	}
+	mutex_unlock(&accel_device->mutex);
+
+	/* The work item was allocated in the event handler; it was never
+	 * freed, leaking on every FPGA error event -- release it here.
+	 */
+	kfree(mywork);
+}
+
+/*
+ * Workqueue handler for FPGA QP error events: log the syndrome and QP.
+ * Frees the work item allocated by mlx_accel_hw_dev_event_one().
+ */
+static void mlx_accel_fpga_qp_error(struct work_struct *work)
+{
+	struct my_work *mywork = container_of(work, struct my_work, work);
+	struct mlx_accel_core_device *accel_device = mywork->accel_device;
+	u8 syndrome = mywork->syndrome;
+	u32 fpga_qpn = mywork->fpga_qpn;
+
+	dev_warn(&accel_device->ib_dev->dev,
+		 "FPGA Error %u on QP %u: %s\n",
+		 syndrome, fpga_qpn, mlx_accel_qp_error_string(syndrome));
+
+	/* The work item was allocated in the event handler and was never
+	 * freed (leak) -- release it here.
+	 */
+	kfree(mywork);
+}
+
+/*
+ * mlx5 async event callback (atomic context): package FPGA error events
+ * into a work item and defer handling to the core workqueue.  Non-FPGA
+ * events are ignored.  The work item is freed by its handler.
+ */
+static void mlx_accel_hw_dev_event_one(struct mlx5_core_dev *mdev,
+				       void *context, enum mlx5_dev_event event,
+				       unsigned long param)
+{
+	struct my_work *work;
+	struct mlx_accel_core_device *accel_device =
+			(struct mlx_accel_core_device *)context;
+
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+
+	work->accel_device = accel_device;
+
+	switch (event) {
+	case MLX5_DEV_EVENT_FPGA_ERROR:
+		INIT_WORK(&work->work, mlx_accel_fpga_error);
+		work->syndrome = MLX5_GET(fpga_error_event,
+					(void *)param, syndrome);
+		break;
+	case MLX5_DEV_EVENT_FPGA_QP_ERROR:
+		INIT_WORK(&work->work, mlx_accel_fpga_qp_error);
+		work->syndrome = MLX5_GET(fpga_qp_error_event,
+					(void *)param, syndrome);
+		work->fpga_qpn = MLX5_GET(fpga_qp_error_event,
+					(void *)param, fpga_qpn);
+		break;
+
+	default:
+		/* Not an FPGA event: @work was leaked here -- free it */
+		kfree(work);
+		return;
+	}
+	queue_work(mlx_accel_core_workq, &work->work);
+}
+
+/* IB client: notified when mlx5 IB devices appear/disappear */
+static struct ib_client mlx_accel_ib_client = {
+		.name   = "mlx_accel_core",
+		.add    = mlx_accel_ib_dev_add_one,
+		.remove = mlx_accel_ib_dev_remove_one
+};
+
+/* mlx5 core interface: HW device lifecycle plus async (FPGA) events */
+static struct mlx5_interface mlx_accel_hw_intf  = {
+		.add = mlx_accel_hw_dev_add_one,
+		.remove = mlx_accel_hw_dev_remove_one,
+		.event = mlx_accel_hw_dev_event_one
+};
+
+/* Module init: event workqueue, then mlx5 interface, then IB client.
+ * Unwinds in reverse order on failure.
+ */
+static int __init mlx_accel_core_init(void)
+{
+	int rc;
+
+	rc = mlx_accel_core_workq_init();
+	if (rc) {
+		pr_err("mlx_accel_core failed to create event workq\n");
+		return rc;
+	}
+
+	rc = mlx5_register_interface(&mlx_accel_hw_intf);
+	if (rc) {
+		pr_err("mlx5_register_interface failed\n");
+		goto err_workq;
+	}
+
+	rc = ib_register_client(&mlx_accel_ib_client);
+	if (rc) {
+		pr_err("ib_register_client failed\n");
+		goto err_intf;
+	}
+
+	return 0;
+
+err_intf:
+	mlx5_unregister_interface(&mlx_accel_hw_intf);
+err_workq:
+	mlx_accel_core_workq_deinit();
+	return rc;
+}
+
+/* Module exit: unregister in reverse order of initialization */
+static void __exit mlx_accel_core_exit(void)
+{
+	ib_unregister_client(&mlx_accel_ib_client);
+	mlx5_unregister_interface(&mlx_accel_hw_intf);
+	mlx_accel_core_workq_deinit();
+}
+
+module_init(mlx_accel_core_init);
+module_exit(mlx_accel_core_exit);
diff --git a/drivers/net/ethernet/mellanox/accelerator/core/accel_core_rdma.c b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_rdma.c
new file mode 100644
index 0000000..62bf49f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_rdma.c
@@ -0,0 +1,673 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "accel_core.h"
+#include <linux/etherdevice.h>
+#include <linux/mlx5_ib/driver.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
+#include <rdma/ib_mad.h>
+
+static int mlx_accel_core_rdma_close_qp(struct mlx_accel_core_conn *conn);
+static void mlx_accel_core_rdma_destroy_res(struct mlx_accel_core_conn *conn);
+
+/*
+ * Allocate a receive buffer (header + MLX_RECV_SIZE payload in one
+ * allocation), DMA-map it and post it to the connection's receive queue.
+ * Returns 0 on success or a negative errno.
+ */
+static int mlx_accel_core_rdma_post_recv(struct mlx_accel_core_conn *conn)
+{
+	struct ib_sge sge;
+	struct ib_recv_wr wr;
+	struct ib_recv_wr *bad_wr;
+	struct mlx_accel_core_dma_buf *buf;
+	int rc;
+
+	/* Was kmalloc(..., 0): no GFP flags at all.  Use GFP_KERNEL
+	 * explicitly (this path runs in process/workqueue context);
+	 * kzalloc also replaces the separate header memset.
+	 */
+	buf = kzalloc(sizeof(*buf) + MLX_RECV_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	buf->data = ((u8 *)buf + sizeof(*buf));
+	buf->data_size = MLX_RECV_SIZE;
+	buf->dma_dir = DMA_FROM_DEVICE;
+	buf->data_dma_addr = ib_dma_map_single(conn->accel_device->ib_dev,
+					       buf->data, buf->data_size,
+					       DMA_FROM_DEVICE);
+
+	if (ib_dma_mapping_error(conn->accel_device->ib_dev,
+				 buf->data_dma_addr)) {
+		pr_warn("post_recv: DMA mapping error on address %p\n",
+			buf->data);
+		kfree(buf);	/* was leaked on this error path */
+		return -ENOMEM;
+	}
+
+	memset(&sge, 0, sizeof(sge));
+	sge.addr = buf->data_dma_addr;
+	sge.length = buf->data_size;
+	sge.lkey = conn->accel_device->pd->local_dma_lkey;
+
+	/* prepare the receive work request */
+	memset(&wr, 0, sizeof(wr));
+	wr.next		= NULL;
+	wr.sg_list	= &sge;
+	wr.num_sge	= 1;
+	wr.wr_id	= (uint64_t)buf;	/* completion recovers buf */
+
+	atomic_inc(&conn->pending_recvs);
+	rc = ib_post_recv(conn->qp, &wr, &bad_wr);
+	if (rc) {
+		atomic_dec(&conn->pending_recvs);
+		ib_dma_unmap_single(conn->accel_device->ib_dev,
+				    buf->data_dma_addr, buf->data_size,
+				    DMA_FROM_DEVICE);
+		kfree(buf);
+		goto out;
+	}
+	pr_debug("Posted RECV buf %p\n", buf);
+
+out:
+	return rc;
+}
+
+/*
+ * DMA-map @buf (data plus optional "more" fragment) and post it as a
+ * signaled SEND on the connection's QP.  On success ownership of @buf
+ * passes to the completion path; on failure the mappings are undone and
+ * the caller keeps @buf.  Returns 0 or a negative errno.
+ */
+int mlx_accel_core_rdma_post_send(struct mlx_accel_core_conn *conn,
+				  struct mlx_accel_core_dma_buf *buf)
+{
+	struct ib_device *ib_dev = conn->accel_device->ib_dev;
+	struct ib_sge sge[2];
+	struct ib_send_wr wr;
+	struct ib_send_wr *bad_wr;
+	int rc;
+	int sge_count = 1;
+
+	buf->dma_dir = DMA_TO_DEVICE;
+
+	if (buf->more) {
+		buf->more_dma_addr = ib_dma_map_single(ib_dev, buf->more,
+						       buf->more_size,
+						       DMA_TO_DEVICE);
+		if (ib_dma_mapping_error(ib_dev, buf->more_dma_addr)) {
+			pr_warn("sendmsg: DMA mapping error on header address %p\n",
+				buf->more);
+			rc = -ENOMEM;
+			goto out;
+		}
+	}
+	buf->data_dma_addr = ib_dma_map_single(ib_dev, buf->data,
+					       buf->data_size, DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(ib_dev, buf->data_dma_addr)) {
+		pr_warn("sendmsg: DMA mapping error on address %p\n",
+			buf->data);
+		rc = -ENOMEM;
+		goto out_header_dma;
+	}
+
+	memset(&sge, 0, sizeof(sge));
+	sge[0].addr = buf->data_dma_addr;
+	sge[0].length = buf->data_size;
+	sge[0].lkey = conn->accel_device->pd->local_dma_lkey;
+	if (buf->more) {
+		sge[sge_count].addr = buf->more_dma_addr;
+		sge[sge_count].length = buf->more_size;
+		sge[sge_count].lkey = conn->accel_device->pd->local_dma_lkey;
+		sge_count++;
+	}
+
+	/* prepare the send work request (SR) */
+	memset(&wr, 0, sizeof(wr));
+	wr.next		= NULL;
+	wr.sg_list	= sge;
+	wr.num_sge	= sge_count;
+	wr.wr_id	= (uint64_t)buf;	/* completion recovers buf */
+	wr.opcode	= IB_WR_SEND;
+	wr.send_flags	= IB_SEND_SIGNALED;
+
+	atomic_inc(&conn->inflight_sends);
+	pr_debug("Posting SEND buf %p\n", buf);
+#ifdef DEBUG
+	print_hex_dump_bytes("SEND Data ", DUMP_PREFIX_OFFSET,
+			     buf->data, buf->data_size);
+	if (buf->more)
+		print_hex_dump_bytes("SEND More ", DUMP_PREFIX_OFFSET,
+				     buf->more, buf->more_size);
+#endif
+
+	rc = ib_post_send(conn->qp, &wr, &bad_wr);
+	if (rc) {
+		pr_debug("SEND buf %p post failed: %d\n", buf, rc);
+		atomic_dec(&conn->inflight_sends);
+		goto out_dma;
+	}
+	goto out;
+
+out_dma:
+	ib_dma_unmap_single(ib_dev, buf->data_dma_addr, buf->data_size,
+			    DMA_TO_DEVICE);
+out_header_dma:
+	/* Only unmap the header fragment if it was actually mapped;
+	 * previously this unmapped more_dma_addr even when buf->more
+	 * was NULL (reached via the data-mapping error path).
+	 */
+	if (buf->more)
+		ib_dma_unmap_single(ib_dev, buf->more_dma_addr,
+				    buf->more_size, DMA_TO_DEVICE);
+out:
+	return rc;
+}
+
+/*
+ * Drain the connection's pending-message list: repost each queued buffer
+ * until the list empties or a post fails.  The pending_lock is dropped
+ * around the (potentially sleeping/mapping) post and re-taken before
+ * list_del; the loop is entered with the lock held and every exit path
+ * leaves it released.
+ * NOTE(review): the buffer stays on the list while unlocked during the
+ * post -- safe only if a single context drains this list at a time;
+ * confirm against the completion-handler call site.
+ */
+static void mlx_accel_core_rdma_handle_pending(struct mlx_accel_core_conn *conn)
+{
+	struct mlx_accel_core_dma_buf *buf;
+	int rc;
+
+	spin_lock(&conn->pending_lock);
+	while (1) {
+		buf = list_first_entry_or_null(&conn->pending_msgs,
+					       struct mlx_accel_core_dma_buf,
+					       list);
+		spin_unlock(&conn->pending_lock);
+		if (!buf)
+			break;
+
+		rc = mlx_accel_core_rdma_post_send(conn, buf);
+		if (rc)
+			break;
+
+		spin_lock(&conn->pending_lock);
+		list_del(&buf->list);
+	}
+}
+
+/*
+ * Handle one work completion: recognize the teardown sentinel
+ * (MLX_EXIT_WRID), unmap the buffer's DMA mappings, dispatch RECV
+ * payloads to the connection's recv callback, run the buffer's own
+ * completion hook, and free RECV buffers (they were allocated in
+ * mlx_accel_core_rdma_post_recv; SEND buffers belong to the sender).
+ */
+static void mlx_accel_complete(struct mlx_accel_core_conn *conn,
+			       struct ib_wc *wc)
+{
+	struct mlx_accel_core_dma_buf *buf;
+
+	/* Teardown path: count flushed sentinel completions until the
+	 * expected number arrives, then signal exit_completion.
+	 * NOTE(review): the ">= 3" threshold presumably matches the
+	 * number of sentinel WRs posted at close -- confirm against the
+	 * close-QP code.
+	 */
+	if ((wc->status != IB_WC_SUCCESS) &&
+	    (conn->exiting) && (wc->wr_id == MLX_EXIT_WRID)) {
+		pr_debug("QP exiting %u; wr_id is %llx\n",
+			 conn->exiting, wc->wr_id);
+		if (++conn->exiting >= 3)
+			complete(&conn->exit_completion);
+		return;
+	}
+	/* wr_id carries the buffer pointer set at post time */
+	buf = (struct mlx_accel_core_dma_buf *)wc->wr_id;
+	if ((wc->status != IB_WC_SUCCESS) && (wc->status != IB_WC_WR_FLUSH_ERR))
+		pr_warn("QP returned buf %p with vendor error %d status msg: %s\n",
+			buf, wc->vendor_err, ib_wc_status_msg(wc->status));
+	else
+		pr_debug("Completion of buf %p opcode %u status %d: %s\n", buf,
+			 wc->opcode, wc->vendor_err,
+			 ib_wc_status_msg(wc->status));
+
+	ib_dma_unmap_single(conn->accel_device->ib_dev,
+			    buf->data_dma_addr,
+			    buf->data_size,
+			    buf->dma_dir);
+	if (buf->more) {
+		ib_dma_unmap_single(conn->accel_device->ib_dev,
+				    buf->more_dma_addr,
+				    buf->more_size,
+				    buf->dma_dir);
+	}
+	if (wc->status == IB_WC_SUCCESS) {
+		switch (wc->opcode) {
+		case IB_WC_RECV:
+			atomic_dec(&conn->pending_recvs);
+			/* byte_len is the actual received length */
+			buf->data_size = wc->byte_len;
+#ifdef DEBUG
+			print_hex_dump_bytes("RECV Data ",
+					     DUMP_PREFIX_OFFSET, buf->data,
+					     buf->data_size);
+			if (buf->more)
+				print_hex_dump_bytes("RECV More ",
+						     DUMP_PREFIX_OFFSET,
+						     buf->more, buf->more_size);
+#endif
+			conn->recv_cb(conn->cb_arg, buf);
+			pr_debug("Msg with %u bytes received successfully %d buffs are posted\n",
+				 wc->byte_len,
+				 atomic_read(&conn->pending_recvs));
+			break;
+		case IB_WC_SEND:
+			atomic_dec(&conn->inflight_sends);
+			pr_debug("Msg sent successfully; %d send msgs inflight\n",
+				 atomic_read(&conn->inflight_sends));
+			break;
+		default:
+			pr_warn("Unknown wc opcode %d\n", wc->opcode);
+		}
+	}
+
+	if (buf->complete)
+		buf->complete(conn, buf, wc);
+	if (wc->opcode == IB_WC_RECV)
+		kfree(buf);
+}
+
+/*
+ * CQ completion callback (@arg is the connection).  Polls the CQ to
+ * exhaustion; as long as successful completions keep arriving (and the
+ * connection is not tearing down), it refills the receive queue and
+ * retries any pending sends, then re-arms the CQ for the next event.
+ */
+static void mlx_accel_core_rdma_comp_handler(struct ib_cq *cq, void *arg)
+{
+	struct mlx_accel_core_conn *conn = (struct mlx_accel_core_conn *)arg;
+	struct ib_wc wc;
+	int ret;
+	bool continue_polling = true;
+
+	pr_debug("-> Polling completions...\n");
+	while (continue_polling) {
+		continue_polling = false;
+		while (ib_poll_cq(cq, 1, &wc) == 1) {
+			/* Only successful completions trigger another pass */
+			if (wc.status == IB_WC_SUCCESS)
+				continue_polling = true;
+			mlx_accel_complete(conn, &wc);
+		}
+
+		if (continue_polling && !conn->exiting) {
+			/* fill receive queue */
+			while (!mlx_accel_core_rdma_post_recv(conn))
+				;
+
+			mlx_accel_core_rdma_handle_pending(conn);
+		}
+	}
+
+	pr_debug("<- Requesting next completions\n");
+	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+	if (ret)
+		pr_warn("completion_handler: ib_req_notify_cq failed with error=%d\n",
+			ret);
+}
+
+/* Create the connection's CQ (shared by send and receive, sized for
+ * both rings) and its RC QP.  Returns 0 or a negative errno; on QP
+ * creation failure the CQ is destroyed again.
+ */
+static int mlx_accel_core_rdma_create_res(struct mlx_accel_core_conn *conn,
+					  unsigned int tx_size,
+					  unsigned int rx_size)
+{
+	struct ib_cq_init_attr cq_attr = {0};
+	struct ib_qp_init_attr qp_init_attr = {0};
+	int rc;
+
+	cq_attr.cqe = 2 * (tx_size + rx_size);
+	/* TODO: add event cb for cq */
+	conn->cq = ib_create_cq(conn->accel_device->ib_dev,
+			mlx_accel_core_rdma_comp_handler, NULL, conn, &cq_attr);
+	if (IS_ERR(conn->cq)) {
+		pr_warn("Failed to create recv CQ\n");
+		return PTR_ERR(conn->cq);
+	}
+
+	ib_req_notify_cq(conn->cq, IB_CQ_NEXT_COMP);
+
+	/*
+	 * allocate QP
+	 */
+	qp_init_attr.cap.max_send_wr	= tx_size;
+	qp_init_attr.cap.max_recv_wr	= rx_size;
+	qp_init_attr.cap.max_recv_sge	= 1;
+	qp_init_attr.cap.max_send_sge	= 2;
+	qp_init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
+	qp_init_attr.qp_type		= IB_QPT_RC;
+	qp_init_attr.send_cq		= conn->cq;
+	qp_init_attr.recv_cq		= conn->cq;
+
+	conn->qp = ib_create_qp(conn->accel_device->pd, &qp_init_attr);
+	if (IS_ERR(conn->qp)) {
+		rc = PTR_ERR(conn->qp);
+		pr_warn("Failed to create QP\n");
+		ib_destroy_cq(conn->cq);
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Create an RDMA connection object toward the FPGA.
+ *
+ * Allocates the connection, derives a link-local IPv6 GID from the local
+ * MAC, reserves that GID in the IB GID table, creates the host CQ/QP and
+ * the FPGA-side QP context.  Returns the connection or an ERR_PTR().
+ */
+struct mlx_accel_core_conn *
+mlx_accel_core_rdma_conn_create(struct mlx_accel_core_device *accel_device,
+				struct mlx_accel_core_conn_init_attr *
+				conn_init_attr, bool is_shell_conn)
+{
+	int err;
+	struct mlx_accel_core_conn *ret = NULL;
+	struct mlx_accel_core_conn *conn = NULL;
+	union ib_gid *gid = NULL;
+
+	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+	if (!conn) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	if (!conn_init_attr->recv_cb) {
+		ret = ERR_PTR(-EINVAL);
+		goto err;
+	}
+
+	conn->accel_device = accel_device;
+	conn->port_num = accel_device->port;
+
+	atomic_set(&conn->inflight_sends, 0);
+	atomic_set(&conn->pending_recvs, 0);
+
+	INIT_LIST_HEAD(&conn->pending_msgs);
+	spin_lock_init(&conn->pending_lock);
+
+	conn->recv_cb = conn_init_attr->recv_cb;
+	conn->cb_arg = conn_init_attr->cb_arg;
+
+	err = mlx5_query_nic_vport_mac_address(accel_device->hw_dev, 0,
+					       conn->fpga_qpc.remote_mac);
+	if (err) {
+		pr_err("Failed to query local MAC: %d\n", err);
+		/* Must return an ERR_PTR here, not NULL, so that IS_ERR()
+		 * callers see the failure.
+		 */
+		ret = ERR_PTR(err);
+		goto err;
+	}
+
+	/* Build a link-local IPv6 address (modified EUI-64) from the local
+	 * MAC; this becomes the source GID the FPGA uses to reach us.
+	 */
+	conn->fpga_qpc.remote_ip.s6_addr[0] = 0xfe;
+	conn->fpga_qpc.remote_ip.s6_addr[1] = 0x80;
+	conn->fpga_qpc.remote_ip.s6_addr[8] = conn->fpga_qpc.remote_mac[0] ^
+					      0x02;
+	conn->fpga_qpc.remote_ip.s6_addr[9] = conn->fpga_qpc.remote_mac[1];
+	conn->fpga_qpc.remote_ip.s6_addr[10] = conn->fpga_qpc.remote_mac[2];
+	conn->fpga_qpc.remote_ip.s6_addr[11] = 0xff;
+	conn->fpga_qpc.remote_ip.s6_addr[12] = 0xfe;
+	conn->fpga_qpc.remote_ip.s6_addr[13] = conn->fpga_qpc.remote_mac[3];
+	conn->fpga_qpc.remote_ip.s6_addr[14] = conn->fpga_qpc.remote_mac[4];
+	conn->fpga_qpc.remote_ip.s6_addr[15] = conn->fpga_qpc.remote_mac[5];
+
+	pr_debug("Local gid is %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+		 ntohs(((__be16 *)&conn->fpga_qpc.remote_ip)[0]),
+		 ntohs(((__be16 *)&conn->fpga_qpc.remote_ip)[1]),
+		 ntohs(((__be16 *)&conn->fpga_qpc.remote_ip)[2]),
+		 ntohs(((__be16 *)&conn->fpga_qpc.remote_ip)[3]),
+		 ntohs(((__be16 *)&conn->fpga_qpc.remote_ip)[4]),
+		 ntohs(((__be16 *)&conn->fpga_qpc.remote_ip)[5]),
+		 ntohs(((__be16 *)&conn->fpga_qpc.remote_ip)[6]),
+		 ntohs(((__be16 *)&conn->fpga_qpc.remote_ip)[7]));
+
+	gid = (union ib_gid *)&conn->fpga_qpc.remote_ip;
+	err = mlx5_ib_reserved_gid_add(accel_device->ib_dev, accel_device->port,
+				       IB_GID_TYPE_ROCE_UDP_ENCAP,
+				       gid, conn->fpga_qpc.remote_mac,
+#ifdef QP_SIMULATOR
+				       false,
+#else
+				       true,
+#endif
+				       0, &conn->sgid_index);
+	if (err) {
+		pr_warn("Failed to add reserved GID: %d\n", err);
+		ret = ERR_PTR(err);
+		goto err;
+	}
+
+	err = mlx_accel_core_rdma_create_res(conn,
+					     conn_init_attr->tx_size,
+					     conn_init_attr->rx_size);
+	if (err) {
+		ret = ERR_PTR(err);
+		goto err_rsvd_gid;
+	}
+
+	conn->fpga_qpc.state = MLX5_FPGA_QP_STATE_INIT;
+	conn->fpga_qpc.qp_type = is_shell_conn ? MLX5_FPGA_QP_TYPE_SHELL :
+			MLX5_FPGA_QP_TYPE_SANDBOX;
+	conn->fpga_qpc.st = MLX5_FPGA_QP_SERVICE_TYPE_RC;
+	conn->fpga_qpc.ether_type = ETH_P_8021Q;
+	conn->fpga_qpc.pkey = IB_DEFAULT_PKEY_FULL;
+	conn->fpga_qpc.remote_qpn = conn->qp->qp_num;
+	conn->fpga_qpc.rnr_retry = 7;
+	conn->fpga_qpc.retry_count = 7;
+	conn->fpga_qpc.vlan_id = 0;
+	conn->fpga_qpc.next_rcv_psn = 1;
+	conn->fpga_qpc.next_send_psn = 0;
+
+	err = mlx5_fpga_create_qp(accel_device->hw_dev,
+				  &conn->fpga_qpc,
+				  &conn->fpga_qpn);
+	if (err) {
+		pr_err("Failed to create FPGA RC QP: %d\n", err);
+		ret = ERR_PTR(err);
+		goto err_create_res;
+	}
+
+	pr_debug("FPGA QPN is %u\n", conn->fpga_qpn);
+	ret = conn;
+	goto out;
+
+err_create_res:
+	mlx_accel_core_rdma_destroy_res(conn);
+err_rsvd_gid:
+	mlx5_ib_reserved_gid_del(accel_device->ib_dev, accel_device->port,
+				 conn->sgid_index);
+err:
+	kfree(conn);
+out:
+	return ret;
+}
+
+/* Drain and destroy the connection's host QP.
+ *
+ * Moves the QP to ERR so outstanding WRs complete with flush errors, then
+ * posts one sentinel recv and one sentinel send (MLX_EXIT_WRID) and waits
+ * for the completion handler to signal exit_completion before destroying
+ * the QP.  Returns the ib_destroy_qp() result.
+ */
+static int mlx_accel_core_rdma_close_qp(struct mlx_accel_core_conn *conn)
+{
+	struct ib_recv_wr *bad_recv_wr, recv_wr = {0};
+	struct ib_send_wr *bad_send_wr, send_wr = {0};
+	struct ib_qp_attr attr = {0};
+	struct ib_qp_init_attr init_attr = {0};
+	int rc = 0, flags;
+
+	conn->exiting = 1;
+
+	/* A QP already in RESET has nothing pending; skip the drain. */
+	rc = ib_query_qp(conn->qp, &attr, IB_QP_STATE, &init_attr);
+	if (rc || (attr.qp_state == IB_QPS_RESET)) {
+		pr_info("mlx_accel_core_close_qp: no need to modify state for ibdev %s\n",
+				conn->accel_device->ib_dev->name);
+		goto out;
+	}
+
+	pr_debug("mlx_accel_core_close_qp: curr qp state: %d", attr.qp_state);
+	memset(&attr, 0, sizeof(attr));
+	attr.qp_state = IB_QPS_ERR;
+	flags = IB_QP_STATE;
+	rc = ib_modify_qp(conn->qp, &attr, flags);
+	if (rc) {
+		pr_warn("mlx_accel_core_close_qp: ib_modify_qp failed ibdev %s err:%d\n",
+			conn->accel_device->ib_dev->name, rc);
+		goto out;
+	}
+
+	init_completion(&conn->exit_completion);
+	recv_wr.wr_id = MLX_EXIT_WRID;
+	/* Busy-wait on -ENOMEM until a recv queue slot frees up. */
+	while ((rc = ib_post_recv(conn->qp, &recv_wr, &bad_recv_wr)) == -ENOMEM)
+		;
+	if (rc) {
+		pr_warn("mlx_accel_core_close_qp: posting recv failed\n");
+		goto out;
+	}
+	send_wr.wr_id = MLX_EXIT_WRID;
+	while ((rc = ib_post_send(conn->qp, &send_wr, &bad_send_wr)) == -ENOMEM)
+		;
+	if (rc) {
+		pr_warn("mlx_accel_core_close_qp: posting send failed\n");
+		goto out;
+	}
+	/* The comp handler completes this once both sentinels flush back. */
+	wait_for_completion(&conn->exit_completion);
+out:
+	rc = ib_destroy_qp(conn->qp);
+	conn->qp = NULL;
+	return rc;
+}
+
+/* Release the connection's verbs resources: QP first, then its CQ. */
+static void mlx_accel_core_rdma_destroy_res(struct mlx_accel_core_conn *conn)
+{
+	int rc;
+
+	mlx_accel_core_rdma_close_qp(conn);
+	rc = ib_destroy_cq(conn->cq);
+	if (rc)
+		pr_warn("Failed to destroy CQ: %d\n", rc);
+}
+
+/* Full connection teardown, reverse order of conn_create: destroy the
+ * FPGA-side QP, then the host QP/CQ, then release the reserved GID,
+ * then free the connection object.
+ */
+void mlx_accel_core_rdma_conn_destroy(struct mlx_accel_core_conn *conn)
+{
+	mlx5_fpga_destroy_qp(conn->accel_device->hw_dev, conn->fpga_qpn);
+	mlx_accel_core_rdma_destroy_res(conn);
+	mlx5_ib_reserved_gid_del(conn->accel_device->ib_dev, conn->port_num,
+				 conn->sgid_index);
+	kfree(conn);
+}
+
+/* Transition the QP from RESET to INIT on the connection's port/pkey. */
+static inline int mlx_accel_core_rdma_init_qp(struct mlx_accel_core_conn *conn)
+{
+	struct ib_qp_attr qp_attr = {0};
+	int mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_ACCESS_FLAGS |
+		   IB_QP_PORT;
+
+	qp_attr.qp_state = IB_QPS_INIT;
+	qp_attr.qp_access_flags = 0;
+	qp_attr.port_num = conn->port_num;
+	qp_attr.pkey_index = conn->accel_device->pkey_index;
+
+	return ib_modify_qp(conn->qp, &qp_attr, mask);
+}
+
+/* Force the QP back to the RESET state. */
+static inline int mlx_accel_core_rdma_reset_qp(struct mlx_accel_core_conn *conn)
+{
+	struct ib_qp_attr qp_attr = {0};
+
+	qp_attr.qp_state = IB_QPS_RESET;
+	return ib_modify_qp(conn->qp, &qp_attr, IB_QP_STATE);
+}
+
+/* Transition the QP from INIT to RTR, addressing the FPGA QP: the
+ * destination GID is the FPGA's IP and the source GID is the reserved
+ * GID allocated at conn_create time.
+ */
+static inline int mlx_accel_core_rdma_rtr_qp(struct mlx_accel_core_conn *conn)
+{
+	struct ib_qp_attr attr = {0};
+	int rc = 0;
+
+	attr.qp_state = IB_QPS_RTR;
+	attr.path_mtu = IB_MTU_1024;
+	attr.dest_qp_num = conn->fpga_qpn;
+	/* Our receive PSN must match the FPGA's next send PSN. */
+	attr.rq_psn = conn->fpga_qpc.next_send_psn;
+	attr.max_dest_rd_atomic = 0;
+	attr.min_rnr_timer = 0x12;
+	attr.ah_attr.port_num = conn->port_num;
+	attr.ah_attr.sl = conn->accel_device->sl;
+	attr.ah_attr.ah_flags = IB_AH_GRH;
+	memcpy(&attr.ah_attr.grh.dgid, &conn->fpga_qpc.fpga_ip,
+	       sizeof(attr.ah_attr.grh.dgid));
+	attr.ah_attr.grh.sgid_index = conn->sgid_index;
+	pr_debug("Transition to RTR using sGID index %u\n", conn->sgid_index);
+
+	rc = ib_modify_qp(conn->qp, &attr,
+				IB_QP_STATE			|
+				IB_QP_AV			|
+				IB_QP_PATH_MTU			|
+				IB_QP_DEST_QPN			|
+				IB_QP_RQ_PSN			|
+				IB_QP_MAX_DEST_RD_ATOMIC	|
+				IB_QP_MIN_RNR_TIMER);
+
+	return rc;
+}
+
+/* Final transition: RTR -> RTS, enabling the send queue.  The send PSN
+ * must match the FPGA's next receive PSN.
+ */
+static inline int mlx_accel_core_rdma_rts_qp(struct mlx_accel_core_conn *conn)
+{
+	struct ib_qp_attr qp_attr = {0};
+	int mask = IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+		   IB_QP_RNR_RETRY | IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC;
+
+	qp_attr.qp_state	= IB_QPS_RTS;
+	qp_attr.timeout		= 0x12; /* 0x12 = ~1.07 sec */
+	qp_attr.retry_cnt	= 7;
+	qp_attr.rnr_retry	= 7; /* Infinite retry in case of RNR NACK */
+	qp_attr.sq_psn		= conn->fpga_qpc.next_rcv_psn;
+	qp_attr.max_rd_atomic	= 0;
+
+	return ib_modify_qp(conn->qp, &qp_attr, mask);
+}
+
+/* Bring the connection up: activate the FPGA-side QP, then walk the host
+ * QP through RESET -> INIT -> RTR -> RTS, pre-posting the receive queue
+ * before RTR.  On any host-side failure the FPGA QP is reverted to INIT.
+ * Returns 0 on success or a negative errno.
+ */
+int mlx_accel_core_rdma_connect(struct mlx_accel_core_conn *conn)
+{
+	int rc;
+
+	conn->fpga_qpc.state = MLX5_FPGA_QP_STATE_ACTIVE;
+	rc = mlx5_fpga_modify_qp(conn->accel_device->hw_dev,
+				 conn->fpga_qpn,
+				 MLX5_FPGA_QPC_STATE,
+				 &conn->fpga_qpc);
+	if (rc) {
+		pr_warn("Failed to activate FPGA RC QP: %d\n", rc);
+		return rc;
+	}
+
+	rc = mlx_accel_core_rdma_reset_qp(conn);
+	if (rc) {
+		pr_warn("Failed to change QP state to reset\n");
+		goto err_fpga_qp;
+	}
+
+	rc = mlx_accel_core_rdma_init_qp(conn);
+	if (rc) {
+		pr_warn("Failed to modify QP from RESET to INIT\n");
+		goto err_fpga_qp;
+	}
+
+	/* Fill the receive queue before moving to RTR, so no incoming
+	 * message can arrive without a posted buffer.
+	 */
+	while (!mlx_accel_core_rdma_post_recv(conn))
+		;
+
+	rc = mlx_accel_core_rdma_rtr_qp(conn);
+	if (rc) {
+		pr_warn("Failed to change QP state from INIT to RTR\n");
+		goto err_fpga_qp;
+	}
+
+	rc = mlx_accel_core_rdma_rts_qp(conn);
+	if (rc) {
+		pr_warn("Failed to change QP state from RTR to RTS\n");
+		goto err_fpga_qp;
+	}
+	/* Success path returns directly instead of jumping to the error
+	 * label, which made the original control flow confusing.
+	 */
+	return 0;
+
+err_fpga_qp:
+	conn->fpga_qpc.state = MLX5_FPGA_QP_STATE_INIT;
+	if (mlx5_fpga_modify_qp(conn->accel_device->hw_dev,
+				conn->fpga_qpn, MLX5_FPGA_QPC_STATE,
+				&conn->fpga_qpc))
+		pr_warn("Failed to revert FPGA QP to INIT\n");
+	return rc;
+}
diff --git a/drivers/net/ethernet/mellanox/accelerator/core/accel_core_sdk.c b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_sdk.c
new file mode 100644
index 0000000..42972ee
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_sdk.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <rdma/ib_verbs.h>
+#include <linux/mlx5/device.h>
+
+#include "accel_core.h"
+#include "accel_core_xfer.h"
+
+extern struct list_head mlx_accel_core_devices;
+extern struct list_head mlx_accel_core_clients;
+extern struct mutex mlx_accel_core_mutex;
+
+/* Register an accelerator client and attach it to every existing device.
+ *
+ * For each known device a per-client context is created; if the device is
+ * already operational, the client's add() callback runs immediately under
+ * the device mutex.  Global list mutation happens under the core mutex.
+ */
+void mlx_accel_core_client_register(struct mlx_accel_core_client *client)
+{
+	struct mlx_accel_core_device *accel_device;
+	struct mlx_accel_client_data *context;
+
+	pr_info("mlx_accel_core_client_register called for %s\n", client->name);
+
+	mutex_lock(&mlx_accel_core_mutex);
+
+	list_add_tail(&client->list, &mlx_accel_core_clients);
+
+	list_for_each_entry(accel_device, &mlx_accel_core_devices, list) {
+		context = mlx_accel_client_context_create(accel_device, client);
+		if (!context)
+			continue;
+		mutex_lock(&accel_device->mutex);
+		/* Only call add() on fully-initialized devices; remember
+		 * success so unregister knows whether to call remove().
+		 */
+		if (accel_device->state == MLX_ACCEL_FPGA_STATUS_SUCCESS)
+			if (!client->add(accel_device))
+				context->added = true;
+		mutex_unlock(&accel_device->mutex);
+	}
+
+	mutex_unlock(&mlx_accel_core_mutex);
+}
+EXPORT_SYMBOL(mlx_accel_core_client_register);
+
+/* Unregister a client: for every device, find the client's context, call
+ * remove() if add() previously succeeded, destroy the context, and drop
+ * the client from the global list.  Mirrors client_register().
+ */
+void mlx_accel_core_client_unregister(struct mlx_accel_core_client *client)
+{
+	struct mlx_accel_core_device *accel_device;
+	struct mlx_accel_client_data *context, *tmp_context;
+
+	pr_info("mlx_accel_core_client_unregister called for %s\n",
+		client->name);
+
+	mutex_lock(&mlx_accel_core_mutex);
+
+	list_for_each_entry(accel_device, &mlx_accel_core_devices, list) {
+		/* _safe: the matching context is destroyed inside the loop. */
+		list_for_each_entry_safe(context, tmp_context,
+					 &accel_device->client_data_list,
+					 list) {
+			pr_debug("Unregister client %p. context %p device %p client %p\n",
+				 client, context, accel_device,
+				 context->client);
+			if (context->client != client)
+				continue;
+			mutex_lock(&accel_device->mutex);
+			if (context->added)
+				client->remove(accel_device);
+			mutex_unlock(&accel_device->mutex);
+			mlx_accel_client_context_destroy(accel_device, context);
+			break;
+		}
+	}
+
+	list_del(&client->list);
+	mutex_unlock(&mlx_accel_core_mutex);
+}
+
+/* Register netdev accelerator ops with the mlx5e driver on behalf of a
+ * client.  Returns 0 on success or the mlx5e registration error.
+ */
+int
+mlx_accel_core_client_ops_register(struct net_device *netdev,
+				   struct mlx5e_accel_client_ops *client_ops)
+{
+	int ret = 0;
+
+	ret = mlx5e_register_accel_ops(netdev, client_ops);
+	if (ret)
+		/* Fixed: the message previously named mlx_ipsec_add_one(),
+		 * a copy-paste from the ipsec client.
+		 */
+		pr_err("mlx_accel_core_client_ops_register: Got error while registering client_ops %d\n",
+		       ret);
+	return ret;
+}
+EXPORT_SYMBOL(mlx_accel_core_client_ops_register);
+
+/* Unregister previously-registered netdev accelerator ops. */
+void mlx_accel_core_client_ops_unregister(struct net_device *netdev)
+{
+	mlx5e_unregister_accel_ops(netdev);
+}
+
+/* Public SDK entry: create a client (sandbox) connection and track it on
+ * the device's client_connections list.  Returns the connection or an
+ * ERR_PTR().
+ * NOTE(review): the list is mutated without the device mutex here —
+ * confirm the caller serializes connection create/destroy.
+ */
+struct mlx_accel_core_conn *
+mlx_accel_core_conn_create(struct mlx_accel_core_device *accel_device,
+		struct mlx_accel_core_conn_init_attr *attr)
+{
+	struct mlx_accel_core_conn *ret;
+
+	pr_info("mlx_accel_core_conn_create called for %s\n",
+		accel_device->name);
+
+	ret = mlx_accel_core_rdma_conn_create(accel_device, attr, false);
+	if (IS_ERR(ret))
+		return ret;
+
+	list_add_tail(&ret->list, &accel_device->client_connections);
+	return ret;
+}
+
+/* Public SDK entry: untrack and destroy a client connection. */
+void mlx_accel_core_conn_destroy(struct mlx_accel_core_conn *conn)
+{
+	pr_info("mlx_accel_core_conn_destroy called for %s\n",
+			conn->accel_device->name);
+
+	list_del(&conn->list);
+	mlx_accel_core_rdma_conn_destroy(conn);
+}
+
+/* Public SDK entry: bring the connection's QPs to the connected state.
+ * Thin wrapper around the RDMA-layer connect.
+ */
+int mlx_accel_core_connect(struct mlx_accel_core_conn *conn)
+{
+	pr_info("mlx_accel_core_connect called for %s\n",
+		conn->accel_device->name);
+
+	return mlx_accel_core_rdma_connect(conn);
+}
+
+/* Queue a DMA buffer for transmission on the connection.  The buffer's
+ * complete() callback fires when the send finishes.
+ */
+int mlx_accel_core_sendmsg(struct mlx_accel_core_conn *conn,
+			   struct mlx_accel_core_dma_buf *buf)
+{
+	return mlx_accel_core_rdma_post_send(conn, buf);
+}
+
+/* FPGA DDR size in bytes; the capability field is in KB units, hence
+ * the shift by 10.
+ */
+u64 mlx_accel_core_ddr_size_get(struct mlx_accel_core_device *dev)
+{
+	return (u64)MLX5_CAP_FPGA(dev, fpga_ddr_size) << 10;
+}
+
+/* Base address of the FPGA DDR region, from the cached FPGA caps. */
+u64 mlx_accel_core_ddr_base_get(struct mlx_accel_core_device *dev)
+{
+	return MLX5_CAP64_FPGA(dev, fpga_ddr_start_addr);
+}
+
+/* Context for one synchronous memory transaction: the transfer descriptor
+ * plus a completion the caller sleeps on and the final WC status.
+ */
+struct mem_transfer {
+	struct mlx_accel_transaction t;
+	struct completion comp;
+	enum ib_wc_status status;
+};
+
+/* Transaction-complete callback: record the WC status and wake the
+ * caller sleeping in mlx_accel_core_mem_transaction().
+ */
+static void
+mlx_accel_core_mem_complete(const struct mlx_accel_transaction *complete,
+			    enum ib_wc_status status)
+{
+	struct mem_transfer *transfer =
+		container_of(complete, struct mem_transfer, t);
+
+	pr_debug("Memory transaction %p is complete with status %u\n",
+		 complete, status);
+
+	transfer->status = status;
+	complete_all(&transfer->comp);
+}
+
+/* Execute one synchronous read/write transaction over the core QP and
+ * sleep until it completes.  Returns 0 on success, -ENOTCONN if the core
+ * connection is down, -EIO on a bad WC status, or -ERESTARTSYS if
+ * interrupted.
+ * NOTE(review): on interruption the stack-allocated xfer is abandoned
+ * while the transfer may still be in flight (see TODO below) — the
+ * completion callback could then touch freed stack memory.
+ */
+int mlx_accel_core_mem_transaction(struct mlx_accel_core_device *dev,
+				   size_t size, u64 addr, void *buf,
+				   enum mlx_accel_direction direction)
+{
+	int ret;
+	struct mem_transfer xfer;
+
+	if (!dev->core_conn) {
+		ret = -ENOTCONN;
+		goto out;
+	}
+
+	xfer.t.data = buf;
+	xfer.t.size = size;
+	xfer.t.addr = addr;
+	xfer.t.conn = dev->core_conn;
+	xfer.t.direction = direction;
+	xfer.t.complete = mlx_accel_core_mem_complete;
+	init_completion(&xfer.comp);
+	ret = mlx_accel_xfer_exec(&xfer.t);
+	if (ret) {
+		pr_debug("Transaction returned value %d\n", ret);
+		goto out;
+	}
+	ret = wait_for_completion_interruptible(&xfer.comp);
+	if (ret) {
+		pr_debug("Wait completed with value %d\n", ret);
+		/* TODO: Cancel the transfer! */
+		goto out;
+	}
+	/* xfer.status was set by the completion callback before wakeup. */
+	if (xfer.status != 0)
+		ret = -EIO;
+out:
+	return ret;
+}
+
+/* Read FPGA memory over RDMA or I2C.  With ACCESS_TYPE_DONTCARE, RDMA is
+ * used when the core connection is up, I2C otherwise.  Returns the number
+ * of bytes read on success or a negative errno.
+ */
+int mlx_accel_core_mem_read(struct mlx_accel_core_device *dev,
+			    size_t size, u64 addr, void *buf,
+			    enum mlx_accel_access_type access_type)
+{
+	int ret;
+
+	if (access_type == MLX_ACCEL_ACCESS_TYPE_DONTCARE)
+		access_type = dev->core_conn ? MLX_ACCEL_ACCESS_TYPE_RDMA :
+			      MLX_ACCEL_ACCESS_TYPE_I2C;
+
+	/* %zu is the correct specifier for size_t; %lu breaks on 32-bit. */
+	pr_debug("**** Reading %zu bytes at 0x%llx using %s\n", size, addr,
+		 access_type ? "RDMA" : "I2C");
+
+	switch (access_type) {
+	case MLX_ACCEL_ACCESS_TYPE_RDMA:
+		ret = mlx_accel_core_mem_transaction(dev, size, addr, buf,
+						     MLX_ACCEL_READ);
+		if (ret)
+			return ret;
+		break;
+	case MLX_ACCEL_ACCESS_TYPE_I2C:
+		if (!dev->hw_dev)
+			return -ENOTCONN;
+		ret = mlx_accel_read_i2c(dev->hw_dev, size, addr, buf);
+		if (ret)
+			return ret;
+		break;
+	default:
+		pr_warn("Unexpected read access_type %u\n", access_type);
+		return -EACCES;
+	}
+
+	return size;
+}
+
+/* Write FPGA memory over RDMA or I2C.  With ACCESS_TYPE_DONTCARE, RDMA is
+ * used when the core connection is up, I2C otherwise.  Returns the number
+ * of bytes written on success or a negative errno.
+ */
+int mlx_accel_core_mem_write(struct mlx_accel_core_device *dev,
+			     size_t size, u64 addr, void *buf,
+			     enum mlx_accel_access_type access_type)
+{
+	int ret;
+
+	if (access_type == MLX_ACCEL_ACCESS_TYPE_DONTCARE)
+		access_type = dev->core_conn ? MLX_ACCEL_ACCESS_TYPE_RDMA :
+			      MLX_ACCEL_ACCESS_TYPE_I2C;
+
+	/* %zu is the correct specifier for size_t; %lu breaks on 32-bit. */
+	pr_debug("**** Writing %zu bytes at 0x%llx using %s\n", size, addr,
+		 access_type ? "RDMA" : "I2C");
+
+	switch (access_type) {
+	case MLX_ACCEL_ACCESS_TYPE_RDMA:
+		ret = mlx_accel_core_mem_transaction(dev, size, addr, buf,
+						     MLX_ACCEL_WRITE);
+		if (ret)
+			return ret;
+		break;
+	case MLX_ACCEL_ACCESS_TYPE_I2C:
+		if (!dev->hw_dev)
+			return -ENOTCONN;
+		ret = mlx_accel_write_i2c(dev->hw_dev, size, addr, buf);
+		if (ret)
+			return ret;
+		break;
+	default:
+		pr_warn("Unexpected write access_type %u\n", access_type);
+		return -EACCES;
+	}
+
+	return size;
+}
+
+/* Store a client's private per-device pointer.  Warns if the client has
+ * no context on this device.
+ */
+void mlx_accel_core_client_data_set(struct mlx_accel_core_device *accel_device,
+				    struct mlx_accel_core_client *client,
+				    void *data)
+{
+	struct mlx_accel_client_data *ctx;
+
+	list_for_each_entry(ctx, &accel_device->client_data_list, list) {
+		if (ctx->client == client) {
+			ctx->data = data;
+			return;
+		}
+	}
+
+	pr_warn("No client context found for %s/%s\n",
+		accel_device->name, client->name);
+}
+
+/* Fetch a client's private per-device pointer, or NULL (with a warning)
+ * if the client has no context on this device.
+ */
+void *mlx_accel_core_client_data_get(struct mlx_accel_core_device *accel_device,
+				     struct mlx_accel_core_client *client)
+{
+	struct mlx_accel_client_data *ctx;
+
+	list_for_each_entry(ctx, &accel_device->client_data_list, list) {
+		if (ctx->client == client)
+			return ctx->data;
+	}
+
+	pr_warn("No client context found for %s/%s\n",
+		accel_device->name, client->name);
+
+	return NULL;
+}
+
+/* Sysfs anchor for client attributes: the IB device's "ports" kobject. */
+struct kobject *mlx_accel_core_kobj(struct mlx_accel_core_device *device)
+{
+	/* TODO: return port 0 as parent sysfs node, instead of "ports" node? */
+	return device->ib_dev->ports_parent;
+}
+
+/* Read the sandbox unit's extended capabilities into buf.  The address
+ * and length come from the cached FPGA caps; the read fails with -EINVAL
+ * if the caller asks for more than the hardware advertises.  Returns 0
+ * on success or a negative errno.
+ */
+int mlx_accel_get_sbu_caps(struct mlx_accel_core_device *dev, int size,
+			   void *buf)
+{
+	u64 addr = MLX5_CAP64_FPGA(dev, sandbox_extended_caps_addr);
+	int cap_size = MLX5_CAP_FPGA(dev, sandbox_extended_caps_len);
+	int ret;
+
+	pr_debug("Reading %d bytes SBU caps from addr 0x%llx\n", size, addr);
+
+	if (cap_size < size) {
+		pr_err("get_sbu_caps: Requested Cap size 0x%8x is bigger than HW size 0x%8x",
+		       size, cap_size);
+		return -EINVAL;
+	}
+
+	/* mem_read returns bytes read (>0) on success; normalize to 0. */
+	ret = mlx_accel_core_mem_read(dev, size, addr, buf,
+				      MLX_ACCEL_ACCESS_TYPE_DONTCARE);
+	if (ret < 0)
+		dev_err(&dev->hw_dev->pdev->dev, "Failed read of SBU caps: %d\n",
+			ret);
+	else
+		ret = 0;
+	return ret;
+}
+
+/* Reload the FPGA: tear down a running device, then request either a
+ * load of the given image (image <= MAX) or a full reset (otherwise).
+ * Returns -ENODEV/-EBUSY for inapplicable states, or the load/reset
+ * request result.
+ * NOTE(review): state is set to IN_PROGRESS even when the load/reset
+ * request itself failed — confirm that is intended.
+ */
+int mlx_accel_core_device_reload(struct mlx_accel_core_device *accel_device,
+				 enum mlx_accel_fpga_image image)
+{
+	int err;
+
+	mutex_lock(&accel_device->mutex);
+	switch (accel_device->state) {
+	case MLX_ACCEL_FPGA_STATUS_NONE:
+		err = -ENODEV;
+		goto unlock;
+	case MLX_ACCEL_FPGA_STATUS_IN_PROGRESS:
+		err = -EBUSY;
+		goto unlock;
+	case MLX_ACCEL_FPGA_STATUS_SUCCESS:
+		/* Running device: detach clients before reloading. */
+		mlx_accel_device_teardown(accel_device);
+		break;
+	case MLX_ACCEL_FPGA_STATUS_FAILURE:
+		break;
+	}
+	if (image <= MLX_ACCEL_IMAGE_MAX) {
+		err = mlx5_fpga_load(accel_device->hw_dev, image);
+		if (err) {
+			dev_err(&accel_device->hw_dev->pdev->dev,
+				"Failed to request FPGA load: %d\n", err);
+		}
+	} else {
+		err = mlx5_fpga_ctrl_op(accel_device->hw_dev,
+					MLX5_FPGA_CTRL_OP_RESET);
+		if (err) {
+			dev_err(&accel_device->hw_dev->pdev->dev,
+				"Failed to request FPGA reset: %d\n", err);
+		}
+	}
+	accel_device->state = MLX_ACCEL_FPGA_STATUS_IN_PROGRESS;
+unlock:
+	mutex_unlock(&accel_device->mutex);
+	return err;
+}
+
+/* Select which flash image the FPGA boots from.  Allowed in any state
+ * except NONE (no FPGA present).  Returns the image-select result or
+ * -ENODEV.
+ */
+int mlx_accel_core_flash_select(struct mlx_accel_core_device *accel_device,
+				enum mlx_accel_fpga_image image)
+{
+	int err;
+
+	mutex_lock(&accel_device->mutex);
+	switch (accel_device->state) {
+	case MLX_ACCEL_FPGA_STATUS_NONE:
+		err = -ENODEV;
+		goto unlock;
+	case MLX_ACCEL_FPGA_STATUS_IN_PROGRESS:
+	case MLX_ACCEL_FPGA_STATUS_SUCCESS:
+	case MLX_ACCEL_FPGA_STATUS_FAILURE:
+		break;
+	}
+
+	err = mlx5_fpga_image_select(accel_device->hw_dev, image);
+	if (err) {
+		dev_err(&accel_device->hw_dev->pdev->dev,
+			"Failed to select FPGA flash image: %d\n", err);
+	}
+unlock:
+	mutex_unlock(&accel_device->mutex);
+	return err;
+}
diff --git a/drivers/net/ethernet/mellanox/accelerator/core/accel_core_sdk.h b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_sdk.h
new file mode 100644
index 0000000..866c3b2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_sdk.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX_ACCEL_CORE_SDK_H__
+#define __MLX_ACCEL_CORE_SDK_H__
+
+#include <rdma/ib_verbs.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/en_driver.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/dma-direction.h>
+#include <linux/kobject.h>
+#include <linux/mlx5/accel/accel_sdk.h>
+
+#define MLX_CLIENT_NAME_MAX			64
+#define MLX_ACCEL_DEVICE_NAME_MAX	(MLX5_MAX_NAME_LEN + IB_DEVICE_NAME_MAX)
+
+struct mlx_accel_core_conn;
+
+/* represents an accelerated device */
+struct mlx_accel_core_device {
+	struct mlx5_core_dev *hw_dev;	/* underlying mlx5 core device */
+	struct ib_device *ib_dev;	/* associated IB device */
+	char name[MLX_ACCEL_DEVICE_NAME_MAX];
+	unsigned int id;
+	u8 port;			/* port used for FPGA traffic */
+	struct mutex mutex; /* Protects state transitions */
+	enum mlx_accel_fpga_status state;
+	enum mlx_accel_fpga_image last_admin_image;
+	enum mlx_accel_fpga_image last_oper_image;
+	u32			fpga_caps[MLX5_ST_SZ_DW(fpga_cap)];	/* cached FPGA caps */
+
+	struct list_head list;			/* global device list linkage */
+	struct list_head client_connections;	/* sandbox connections */
+	struct list_head client_data_list;	/* per-client contexts */
+	struct mlx_accel_core_conn *core_conn;	/* shell connection, if up */
+
+	/* Transactions state */
+	struct mlx_accel_trans_device_state *trans;
+
+	/* Parameters for QPs */
+	struct ib_pd *pd;
+	struct ib_mr *mr;
+	union ib_gid gid;
+	u16 pkey_index;
+	u8 sl;
+#ifdef QP_SIMULATOR
+	struct kobject sim_kobj;
+#endif
+};
+
+/* Callbacks and identity for an accelerator client (e.g. ipsec, tools). */
+struct mlx_accel_core_client {
+	/* Informs the client that a core device was created.
+	 * The device is not yet operational at this stage
+	 * This callback is optional
+	 */
+	void (*create)(struct mlx_accel_core_device *);
+	/* Informs the client that a core device is ready and operational.
+	 * Any SBU-specific initialization should happen at this stage
+	 * @return 0 on success, nonzero error value otherwise
+	 */
+	int  (*add)(struct mlx_accel_core_device *);
+	/* Informs the client that a core device is not operational anymore.
+	 * SBU-specific cleanup should happen at this stage
+	 * This callback is called once for every successful call to add()
+	 */
+	void (*remove)(struct mlx_accel_core_device *);
+	/* Informs the client that a core device is being destroyed.
+	 * The device is not operational at this stage
+	 */
+	void (*destroy)(struct mlx_accel_core_device *);
+
+	char name[MLX_CLIENT_NAME_MAX];
+
+	struct list_head list;	/* linkage on the global client list */
+};
+
+/* A message buffer submitted for DMA send/recv over a connection. */
+struct mlx_accel_core_dma_buf {
+	struct list_head list;
+	/* Called when the work request completes (may run in IRQ context —
+	 * TODO confirm from the completion handler).
+	 */
+	void (*complete)(struct mlx_accel_core_conn *conn,
+			 struct mlx_accel_core_dma_buf *buf, struct ib_wc *wc);
+	/* Payload */
+	void *data;
+	size_t data_size;
+	/* Optional second payload */
+	void *more;
+	size_t more_size;
+	/* Private members */
+	u64 data_dma_addr;
+	u64 more_dma_addr;
+	enum dma_data_direction dma_dir;
+};
+
+/* Parameters for creating a connection; recv_cb is mandatory. */
+struct mlx_accel_core_conn_init_attr {
+	unsigned int tx_size;	/* send queue depth */
+	unsigned int rx_size;	/* recv queue depth */
+	void (*recv_cb)(void *cb_arg, struct mlx_accel_core_dma_buf *buf);
+	void *cb_arg;
+};
+
+/* One RC connection between the host and the FPGA. */
+struct mlx_accel_core_conn {
+	struct mlx_accel_core_device *accel_device;
+	u8 port_num;
+
+	atomic_t inflight_sends;
+	atomic_t pending_recvs;
+
+	/* [BP]: TODO - Why not use RCU list? */
+	struct list_head pending_msgs;
+	spinlock_t pending_lock;	/* protects pending_msgs */
+
+	void (*recv_cb)(void *cb_arg, struct mlx_accel_core_dma_buf *buf);
+	void *cb_arg;
+
+	/* Shutdown handshake: set exiting, then wait on exit_completion. */
+	struct completion exit_completion;
+	int exiting;
+
+	struct list_head list;	/* linkage on device's connection list */
+
+	/* Parameters for the QP */
+	struct ib_cq *cq;
+	struct ib_qp *qp;
+
+	struct mlx5_fpga_qpc fpga_qpc;	/* FPGA-side QP context */
+	int sgid_index;			/* reserved GID table index */
+	u32 fpga_qpn;			/* FPGA-side QP number */
+};
+
+void mlx_accel_core_client_register(struct mlx_accel_core_client *client);
+void mlx_accel_core_client_unregister(struct mlx_accel_core_client *client);
+int mlx_accel_core_client_ops_register(struct net_device *netdev,
+				       struct mlx5e_accel_client_ops *ops);
+void mlx_accel_core_client_ops_unregister(struct net_device *netdev);
+int mlx_accel_core_device_reload(struct mlx_accel_core_device *accel_device,
+				 enum mlx_accel_fpga_image image);
+int mlx_accel_core_flash_select(struct mlx_accel_core_device *accel_device,
+				enum mlx_accel_fpga_image image);
+
+struct mlx_accel_core_conn *
+mlx_accel_core_conn_create(struct mlx_accel_core_device *accel_device,
+			   struct mlx_accel_core_conn_init_attr *attr);
+void mlx_accel_core_conn_destroy(struct mlx_accel_core_conn *conn);
+
+int mlx_accel_core_connect(struct mlx_accel_core_conn *conn);
+
+int mlx_accel_core_sendmsg(struct mlx_accel_core_conn *conn,
+			   struct mlx_accel_core_dma_buf *buf);
+
+u64 mlx_accel_core_ddr_size_get(struct mlx_accel_core_device *dev);
+u64 mlx_accel_core_ddr_base_get(struct mlx_accel_core_device *dev);
+int mlx_accel_core_mem_read(struct mlx_accel_core_device *dev,
+			    size_t size, u64 addr, void *buf,
+			    enum mlx_accel_access_type access_type);
+int mlx_accel_core_mem_write(struct mlx_accel_core_device *dev,
+			     size_t size, u64 addr, void *buf,
+			     enum mlx_accel_access_type access_type);
+
+void mlx_accel_core_client_data_set(struct mlx_accel_core_device *accel_device,
+				    struct mlx_accel_core_client *client,
+				    void *data);
+void *mlx_accel_core_client_data_get(struct mlx_accel_core_device *accel_device,
+				     struct mlx_accel_core_client *client);
+
+struct kobject *mlx_accel_core_kobj(struct mlx_accel_core_device *accel_device);
+int mlx_accel_get_sbu_caps(struct mlx_accel_core_device *dev, int size,
+			   void *buf);
+#endif /* __MLX_ACCEL_CORE_SDK_H__ */
diff --git a/drivers/net/ethernet/mellanox/accelerator/core/accel_core_trans.c b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_trans.c
new file mode 100644
index 0000000..fc2afa0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_trans.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "accel_core_trans.h"
+
+/* Look up the transaction state for a transaction ID echoed back by the
+ * device.
+ * NOTE(review): tid is u8 and MLX_ACCEL_TID_COUNT is 256, so this range
+ * check can never fire today; it only guards a future larger TID space.
+ */
+static struct mlx_accel_transaction_private *
+mlx_accel_find_tid(struct mlx_accel_core_device *accel_device, u8 tid)
+{
+	if (tid >= MLX_ACCEL_TID_COUNT) {
+		pr_warn("Unexpected transaction ID %u\n", tid);
+		return NULL;
+	}
+	return &accel_device->trans->transactions[tid];
+}
+
+/* Reserve a free transaction ID for a new transaction.
+ * Removes the entry from the free queue and resets its state to
+ * TRANS_STATE_NONE.  Returns NULL if all TIDs are in flight; release
+ * the entry with mlx_accel_free_tid() when done.
+ */
+static struct mlx_accel_transaction_private *
+mlx_accel_alloc_tid(struct mlx_accel_core_device *accel_device)
+{
+	struct mlx_accel_transaction_private *ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&accel_device->trans->lock, flags);
+
+	if (list_empty(&accel_device->trans->free_queue)) {
+		pr_info("No free transaction ID available\n");
+		ret = NULL;
+		goto out;
+	}
+
+	ret = list_first_entry(&accel_device->trans->free_queue,
+			       struct mlx_accel_transaction_private,
+			       list_item);
+	list_del(&ret->list_item);
+
+	ret->state = TRANS_STATE_NONE;
+out:
+	spin_unlock_irqrestore(&accel_device->trans->lock, flags);
+	return ret;
+}
+
+/* Return a transaction ID to the free queue for reuse.
+ * Counterpart of mlx_accel_alloc_tid().
+ */
+static void mlx_accel_free_tid(struct mlx_accel_core_device *accel_device,
+			       struct mlx_accel_transaction_private *trans_priv)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&accel_device->trans->lock, flags);
+	list_add_tail(&trans_priv->list_item,
+		      &accel_device->trans->free_queue);
+	spin_unlock_irqrestore(&accel_device->trans->lock, flags);
+}
+
+/* Finish a transaction: record the final status, return the TID to the
+ * free pool, and invoke the user's completion callback (outside the lock).
+ */
+static void
+mlx_accel_trans_complete(struct mlx_accel_transaction_private *trans_priv,
+			 enum ib_wc_status status)
+{
+	unsigned long flags;
+	struct mlx_accel_trans_device_state *trans;
+	const struct mlx_accel_transaction *user_trans;
+
+	pr_debug("Transaction %u is complete with status %u\n",
+		 trans_priv->tid, status);
+	trans = trans_priv->user_trans->conn->accel_device->trans;
+	spin_lock_irqsave(&trans->lock, flags);
+	trans_priv->state = TRANS_STATE_COMPLETE;
+	trans_priv->status = status;
+	spin_unlock_irqrestore(&trans->lock, flags);
+
+	/* Cache user_trans before freeing the TID: once freed, trans_priv
+	 * may be reallocated and its user_trans overwritten.
+	 */
+	user_trans = trans_priv->user_trans;
+	mlx_accel_free_tid(trans_priv->user_trans->conn->accel_device,
+			   trans_priv);
+
+	if (user_trans->complete)
+		user_trans->complete(user_trans, status);
+}
+
+/* Send-CQ completion for the transaction's request message.
+ * On send failure the whole transaction is completed with the error;
+ * on success the transaction moves to WAIT until the device's response
+ * arrives via mlx_accel_trans_recv().
+ */
+static void mlx_accel_trans_send_complete(struct mlx_accel_core_conn *conn,
+					  struct mlx_accel_core_dma_buf *buf,
+					  struct ib_wc *wc)
+{
+	unsigned long flags;
+	struct mlx_accel_transaction_private *trans_priv;
+
+	pr_debug("mlx_accel_trans_send_complete. Status: %u\n", wc->status);
+	trans_priv = container_of(buf, struct mlx_accel_transaction_private,
+				  buf);
+	if (wc->status != IB_WC_SUCCESS) {
+		mlx_accel_trans_complete(trans_priv, wc->status);
+		return;
+	}
+
+	spin_lock_irqsave(&conn->accel_device->trans->lock, flags);
+	if (trans_priv->state == TRANS_STATE_SEND)
+		trans_priv->state = TRANS_STATE_WAIT;
+	spin_unlock_irqrestore(&conn->accel_device->trans->lock, flags);
+}
+
+/* Validate a transaction's address/size against device access rules:
+ * bounded size, dword-aligned, non-empty, within one page, and cr-space
+ * (below DDR base) accesses limited to a single dword.
+ * Returns 0 if valid, -EINVAL otherwise.
+ * NOTE(review): MLX_ACCEL_TRANSACTION_SEND_ALIGN_BITS (3) is used here
+ * directly as a bitmask (0b11 -> 4-byte alignment), not as a bit count;
+ * confirm the constant's intent matches its name.
+ */
+int mlx_accel_trans_validate(struct mlx_accel_core_device *accel_device,
+			     u64 addr, size_t size)
+{
+	if (size > MLX_ACCEL_TRANSACTION_MAX_SIZE) {
+		pr_info("Cannot access %lu bytes at once. Max is %u\n",
+			size, MLX_ACCEL_TRANSACTION_MAX_SIZE);
+		return -EINVAL;
+	}
+	if (size & MLX_ACCEL_TRANSACTION_SEND_ALIGN_BITS) {
+		pr_info("Cannot access %lu bytes. Must be full dwords\n",
+			size);
+		return -EINVAL;
+	}
+	if (size < 1) {
+		pr_info("Cannot access %lu bytes. Empty transaction not allowed\n",
+			size);
+		return -EINVAL;
+	}
+	if (addr & MLX_ACCEL_TRANSACTION_SEND_ALIGN_BITS) {
+		pr_info("Cannot access %lu bytes at unaligned address %llx\n",
+			size, addr);
+		return -EINVAL;
+	}
+	/* A single transaction must not cross a device page boundary */
+	if ((addr >> MLX_ACCEL_TRANSACTION_SEND_PAGE_BITS) !=
+	    ((addr + size - 1) >> MLX_ACCEL_TRANSACTION_SEND_PAGE_BITS)) {
+		pr_info("Cannot access %lu bytes at address %llx. Crosses page boundary\n",
+			size, addr);
+		return -EINVAL;
+	}
+	/* Addresses below DDR base are cr-space: single-dword access only */
+	if (addr < mlx_accel_core_ddr_base_get(accel_device)) {
+		if (size != sizeof(u32)) {
+			pr_info("Cannot access %lu bytes at cr-space address %llx. Must access a single dword\n",
+				size, addr);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Start a single read/write transaction toward the device over the
+ * shell QP.  On success returns 0 and the transaction's completion
+ * callback fires asynchronously; on error the callback is never called.
+ */
+int mlx_accel_trans_exec(const struct mlx_accel_transaction *transaction)
+{
+	struct mlx_accel_core_conn *conn = transaction->conn;
+	struct mlx_accel_transaction_private *trans_priv;
+	int rc;
+
+	if (!transaction->complete) {
+		pr_warn("Transaction must have a completion callback\n");
+		return -EINVAL;
+	}
+
+	rc = mlx_accel_trans_validate(conn->accel_device,
+				      transaction->addr, transaction->size);
+	if (rc)
+		return rc;
+
+	trans_priv = mlx_accel_alloc_tid(conn->accel_device);
+	if (!trans_priv)
+		return -EBUSY;
+	trans_priv->user_trans = transaction;
+
+	/* Build the shell QP packet header for this transaction */
+	memset(&trans_priv->header, 0, sizeof(trans_priv->header));
+	memset(&trans_priv->buf, 0, sizeof(trans_priv->buf));
+	MLX5_SET(fpga_shell_qp_packet, trans_priv->header, type,
+		 (transaction->direction == MLX_ACCEL_WRITE) ?
+		 MLX5_FPGA_MSG_WRITE : MLX5_FPGA_MSG_READ);
+	MLX5_SET(fpga_shell_qp_packet, trans_priv->header, tid,
+		 trans_priv->tid);
+	MLX5_SET(fpga_shell_qp_packet, trans_priv->header, len,
+		 transaction->size);
+	MLX5_SET(fpga_shell_qp_packet, trans_priv->header, address_h,
+		 transaction->addr >> 32);
+	MLX5_SET(fpga_shell_qp_packet, trans_priv->header, address_l,
+		 transaction->addr & 0xFFFFFFFF);
+
+	trans_priv->buf.data = &trans_priv->header;
+	trans_priv->buf.data_size = sizeof(trans_priv->header);
+	/* For writes the payload follows the header as a second buffer */
+	if (transaction->direction == MLX_ACCEL_WRITE) {
+		trans_priv->buf.more = transaction->data;
+		trans_priv->buf.more_size = transaction->size;
+	}
+
+	trans_priv->buf.complete = mlx_accel_trans_send_complete;
+	trans_priv->state = TRANS_STATE_SEND;
+
+	rc = mlx_accel_core_rdma_post_send(conn, &trans_priv->buf);
+	if (rc)
+		/* Send never started - return the TID for reuse */
+		mlx_accel_free_tid(conn->accel_device, trans_priv);
+	return rc;
+}
+
+/* Rx handler for shell QP messages from the device.
+ * Matches the response to its pending transaction by TID, validates the
+ * response type/payload, copies read data back to the user buffer, and
+ * completes the transaction with the appropriate status.
+ */
+void mlx_accel_trans_recv(void *cb_arg, struct mlx_accel_core_dma_buf *buf)
+{
+	struct mlx_accel_core_device *accel_device = cb_arg;
+	struct mlx_accel_transaction_private *trans_priv;
+	size_t payload_len;
+	enum ib_wc_status status = IB_WC_SUCCESS;
+
+	pr_debug("Rx QP message on %s core conn; %ld bytes\n",
+		 accel_device->name, buf->data_size);
+
+	if (buf->data_size < MLX5_ST_SZ_BYTES(fpga_shell_qp_packet)) {
+		pr_warn("Short message %lu bytes from device\n",
+			buf->data_size);
+		goto out;
+	}
+	payload_len = buf->data_size - MLX5_ST_SZ_BYTES(fpga_shell_qp_packet);
+
+	trans_priv = mlx_accel_find_tid(accel_device,
+					MLX5_GET(fpga_shell_qp_packet,
+						 buf->data, tid));
+	if (!trans_priv)
+		goto out;
+
+	/* Note: header addr and len are always 0 */
+	switch (MLX5_GET(fpga_shell_qp_packet, buf->data, type)) {
+	case MLX5_FPGA_MSG_READ_RESPONSE:
+		if (trans_priv->user_trans->direction != MLX_ACCEL_READ) {
+			pr_warn("Wrong answer type %u to a %u transaction\n",
+				MLX5_GET(fpga_shell_qp_packet, buf->data, type),
+				trans_priv->user_trans->direction);
+			status = IB_WC_BAD_RESP_ERR;
+			goto complete;
+		}
+		if (payload_len != trans_priv->user_trans->size) {
+			pr_warn("Incorrect transaction payload length %lu expected %lu\n",
+				payload_len, trans_priv->user_trans->size);
+			/* Was missing: a truncated/oversized read response
+			 * must not complete with IB_WC_SUCCESS.
+			 */
+			status = IB_WC_BAD_RESP_ERR;
+			goto complete;
+		}
+		memcpy(trans_priv->user_trans->data,
+		       MLX5_ADDR_OF(fpga_shell_qp_packet, buf->data, data),
+		       payload_len);
+		break;
+	case MLX5_FPGA_MSG_WRITE_RESPONSE:
+		if (trans_priv->user_trans->direction != MLX_ACCEL_WRITE) {
+			pr_warn("Wrong answer type %u to a %u transaction\n",
+				MLX5_GET(fpga_shell_qp_packet, buf->data, type),
+				trans_priv->user_trans->direction);
+			status = IB_WC_BAD_RESP_ERR;
+			goto complete;
+		}
+		break;
+	default:
+		pr_warn("Unexpected message type %u len %lu from device\n",
+			MLX5_GET(fpga_shell_qp_packet, buf->data, type),
+			buf->data_size);
+		status = IB_WC_BAD_RESP_ERR;
+		goto complete;
+	}
+
+complete:
+	mlx_accel_trans_complete(trans_priv, status);
+out:
+	return;
+}
+
+/* Allocate per-device transaction state and populate the free-TID queue
+ * with all MLX_ACCEL_TID_COUNT entries.
+ * Returns 0 on success or -ENOMEM.
+ */
+int mlx_accel_trans_device_init(struct mlx_accel_core_device *accel_device)
+{
+	int ret = 0;
+	int tid;
+
+	accel_device->trans = kzalloc(sizeof(*accel_device->trans), GFP_KERNEL);
+	if (!accel_device->trans) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&accel_device->trans->free_queue);
+	for (tid = 0; tid < MLX_ACCEL_TID_COUNT; tid++) {
+		accel_device->trans->transactions[tid].tid = tid;
+		list_add_tail(&accel_device->trans->transactions[tid].list_item,
+			      &accel_device->trans->free_queue);
+	}
+
+	spin_lock_init(&accel_device->trans->lock);
+
+out:
+	return ret;
+}
+
+/* Free the per-device transaction state allocated by
+ * mlx_accel_trans_device_init().  Caller must ensure no transactions
+ * are in flight.
+ */
+void mlx_accel_trans_device_deinit(struct mlx_accel_core_device *accel_device)
+{
+	kfree(accel_device->trans);
+}
+
diff --git a/drivers/net/ethernet/mellanox/accelerator/core/accel_core_trans.h b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_trans.h
new file mode 100644
index 0000000..b37ca39
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_trans.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX_ACCEL_CORE_TRANS_H__
+#define __MLX_ACCEL_CORE_TRANS_H__
+
+#include "accel_core.h"
+
+/* Max payload bytes per single transaction over the shell QP */
+#define MLX_ACCEL_TRANSACTION_MAX_SIZE	      1008
+/* NOTE(review): used as an alignment *mask* in accel_core_trans.c
+ * (size & 3 -> dword alignment), despite the "_BITS" name — confirm.
+ */
+#define MLX_ACCEL_TRANSACTION_SEND_ALIGN_BITS 3
+/* log2 of the device page size a transaction may not cross */
+#define MLX_ACCEL_TRANSACTION_SEND_PAGE_BITS  12
+/* Number of concurrently trackable transactions (TID is a u8) */
+#define MLX_ACCEL_TID_COUNT		      256
+
+/* Direction of a transaction relative to the host */
+enum mlx_accel_direction {
+	MLX_ACCEL_READ,
+	MLX_ACCEL_WRITE,
+};
+
+/* Lifecycle of a transaction: allocated -> sent -> awaiting response ->
+ * completed.
+ */
+enum mlx_accel_transaction_state {
+	TRANS_STATE_NONE,
+	TRANS_STATE_SEND,
+	TRANS_STATE_WAIT,
+	TRANS_STATE_COMPLETE,
+};
+
+/* Internal per-TID bookkeeping for one in-flight transaction */
+struct mlx_accel_transaction_private {
+	const struct mlx_accel_transaction *user_trans;
+	u8 tid;
+	enum mlx_accel_transaction_state state;
+	enum ib_wc_status status;
+	/* Shell QP packet header sent ahead of the payload */
+	u32 header[MLX5_ST_SZ_DW(fpga_shell_qp_packet)];
+	struct mlx_accel_core_dma_buf buf;
+	/* Link in the device's free-TID queue */
+	struct list_head list_item;
+};
+
+/* User-visible description of a single read/write transaction.
+ * 'complete' is invoked asynchronously once the device responds.
+ */
+struct mlx_accel_transaction {
+	struct mlx_accel_core_conn *conn;
+	enum mlx_accel_direction direction;
+	size_t size;
+	u64 addr;
+	u8 *data;
+	void (*complete)(const struct mlx_accel_transaction *complete,
+			 enum ib_wc_status status);
+};
+
+/* Per-device transaction tracking state */
+struct mlx_accel_trans_device_state {
+	spinlock_t lock; /* Protects all members of this struct */
+	struct list_head free_queue;
+	struct mlx_accel_transaction_private transactions[MLX_ACCEL_TID_COUNT];
+};
+
+int mlx_accel_trans_device_init(struct mlx_accel_core_device *accel_device);
+void mlx_accel_trans_device_deinit(struct mlx_accel_core_device *accel_device);
+
+int mlx_accel_trans_exec(const struct mlx_accel_transaction *transaction);
+
+void mlx_accel_trans_recv(void *cb_arg, struct mlx_accel_core_dma_buf *buf);
+
+#endif /* __MLX_ACCEL_CORE_TRANS_H__ */
diff --git a/drivers/net/ethernet/mellanox/accelerator/core/accel_core_xfer.c b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_xfer.c
new file mode 100644
index 0000000..83e3b2f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_xfer.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "accel_core_xfer.h"
+
+/* Tracks a multi-transaction transfer: the user's original request plus
+ * counters for the chunk transactions it was split into.
+ */
+struct xfer_state {
+	const struct mlx_accel_transaction *xfer;
+	/* Total transactions */
+	unsigned int start_count;
+	unsigned int done_count;
+	unsigned int error_count;
+	/* First error status observed, or IB_WC_SUCCESS */
+	enum ib_wc_status status;
+	/* Inflight transactions */
+	unsigned int inflight_count;
+	/* Chunking state */
+	size_t pos;
+	spinlock_t lock; /* Protects all members of this struct */
+};
+
+/* One chunk transaction, linked back to its parent transfer */
+struct xfer_transaction {
+	struct xfer_state *xfer_state;
+	struct mlx_accel_transaction transaction;
+};
+
+static int mlx_accel_xfer_exec_more(struct xfer_state *xfer_state);
+
+/* Finish the whole transfer: free the transfer state and invoke the
+ * user's completion callback.  xfer/status are cached before kfree so
+ * the callback never touches freed memory.
+ */
+void mlx_accel_xfer_complete(struct xfer_state *xfer_state)
+{
+	const struct mlx_accel_transaction *xfer = xfer_state->xfer;
+	enum ib_wc_status status = xfer_state->status;
+
+	kfree(xfer_state);
+	xfer->complete(xfer, status);
+}
+
+/* Completion callback for one chunk transaction of a transfer.
+ * Updates the transfer counters, tries to start further chunks, and
+ * completes the whole transfer once nothing remains in flight.
+ */
+void mlx_accel_xfer_comp_trans(const struct mlx_accel_transaction *complete,
+			       enum ib_wc_status status)
+{
+	struct xfer_transaction *xfer_trans;
+	struct xfer_state *xfer_state;
+	unsigned long flags;
+	bool done;
+
+	xfer_trans = container_of(complete, struct xfer_transaction,
+				  transaction);
+	xfer_state = xfer_trans->xfer_state;
+	kfree(xfer_trans);
+
+	spin_lock_irqsave(&xfer_state->lock, flags);
+
+	if (status != IB_WC_SUCCESS) {
+		xfer_state->error_count++;
+		pr_warn("Transaction failed during transfer. %u started %u inflight %u done %u error\n",
+			xfer_state->start_count, xfer_state->inflight_count,
+			xfer_state->done_count, xfer_state->error_count);
+		/* Keep only the first error status for the final report */
+		if (xfer_state->status == IB_WC_SUCCESS)
+			xfer_state->status = status;
+	} else {
+		xfer_state->done_count++;
+	}
+	xfer_state->inflight_count--;
+
+	mlx_accel_xfer_exec_more(xfer_state);
+
+	/* Was a use-after-free: 'done' must be evaluated *after*
+	 * mlx_accel_xfer_exec_more(), which may have started new chunk
+	 * transactions; completing while chunks are in flight would free
+	 * xfer_state under them.
+	 */
+	done = (xfer_state->inflight_count == 0);
+
+	spin_unlock_irqrestore(&xfer_state->lock, flags);
+
+	if (done)
+		mlx_accel_xfer_complete(xfer_state);
+}
+
+/* Start as many chunk transactions as possible for the transfer.
+ * Chunks are bounded by the transaction max size, the device page the
+ * current position falls in, and (for cr-space) a single dword.
+ * Xfer state spin lock must be locked by the caller.
+ */
+static int mlx_accel_xfer_exec_more(struct xfer_state *xfer_state)
+{
+	u64 pos_addr, ddr_base;
+	u8 *pos_data;
+	size_t left, cur_size, page_size;
+	struct xfer_transaction *xfer_trans;
+	int ret = 0;
+	bool more;
+
+	ddr_base = mlx_accel_core_ddr_base_get(xfer_state->xfer->conn->
+					       accel_device);
+	page_size = (xfer_state->xfer->addr + xfer_state->pos < ddr_base) ?
+		    sizeof(u32) : (1 << MLX_ACCEL_TRANSACTION_SEND_PAGE_BITS);
+
+	do {
+		more = false;
+		if (xfer_state->status != IB_WC_SUCCESS) {
+			ret = -EIO;
+			break;
+		}
+
+		left = xfer_state->xfer->size - xfer_state->pos;
+		if (!left)
+			break;
+
+		/* Was GFP_KERNEL: callers hold xfer_state->lock with IRQs
+		 * disabled, so a sleeping allocation is not allowed here.
+		 */
+		xfer_trans = kzalloc(sizeof(*xfer_trans), GFP_ATOMIC);
+		if (!xfer_trans) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		pos_addr = xfer_state->xfer->addr + xfer_state->pos;
+		pos_data = xfer_state->xfer->data + xfer_state->pos;
+
+		/* Determine largest possible transaction at this point */
+		cur_size = page_size - (pos_addr & (page_size - 1));
+		if (cur_size > MLX_ACCEL_TRANSACTION_MAX_SIZE)
+			cur_size = MLX_ACCEL_TRANSACTION_MAX_SIZE;
+		if (cur_size > left)
+			cur_size = left;
+
+		xfer_trans->xfer_state = xfer_state;
+		xfer_trans->transaction.addr = pos_addr;
+		xfer_trans->transaction.complete = mlx_accel_xfer_comp_trans;
+		xfer_trans->transaction.conn = xfer_state->xfer->conn;
+		xfer_trans->transaction.data = pos_data;
+		xfer_trans->transaction.direction = xfer_state->xfer->direction;
+		xfer_trans->transaction.size = cur_size;
+
+		xfer_state->start_count++;
+		xfer_state->inflight_count++;
+		ret = mlx_accel_trans_exec(&xfer_trans->transaction);
+		if (ret) {
+			xfer_state->start_count--;
+			xfer_state->inflight_count--;
+			/* -EBUSY just means all TIDs are in use; remaining
+			 * chunks will be started from completion callbacks.
+			 */
+			if (ret == -EBUSY)
+				ret = 0;
+
+			if (ret) {
+				pr_warn("Transfer failed to start transaction: %d. %u started %u done %u error\n",
+					ret, xfer_state->start_count,
+					xfer_state->done_count,
+					xfer_state->error_count);
+				xfer_state->status = IB_WC_GENERAL_ERR;
+			}
+			kfree(xfer_trans);
+			break;
+		}
+		xfer_state->pos += cur_size;
+		more = (cur_size != left);
+	} while (more);
+
+	return ret;
+}
+
+/* Execute a transfer of arbitrary (aligned, in-range) size by splitting
+ * it into device transactions.  Completion is reported asynchronously
+ * via xfer->complete, except for validation/allocation failures, which
+ * return a negative errno directly with no callback.
+ */
+int mlx_accel_xfer_exec(const struct mlx_accel_transaction *xfer)
+{
+	int ret = 0;
+	struct xfer_state *xfer_state;
+	u64 base = mlx_accel_core_ddr_base_get(xfer->conn->accel_device);
+	u64 size = mlx_accel_core_ddr_size_get(xfer->conn->accel_device);
+	unsigned long flags;
+	bool done = false;
+
+	if (xfer->addr + xfer->size > base + size) {
+		pr_warn("Transfer ends at %llx outside of DDR range %llx\n",
+			xfer->addr + xfer->size, base + size);
+		return -EINVAL;
+	}
+
+	if (xfer->addr & MLX_ACCEL_TRANSACTION_SEND_ALIGN_BITS) {
+		pr_warn("Transfer address %llx not aligned\n", xfer->addr);
+		return -EINVAL;
+	}
+
+	if (xfer->size & MLX_ACCEL_TRANSACTION_SEND_ALIGN_BITS) {
+		pr_warn("Transfer size %lu not aligned\n", xfer->size);
+		return -EINVAL;
+	}
+
+	if (xfer->size < 1) {
+		pr_warn("Empty transfer size %lu not allowed\n", xfer->size);
+		return -EINVAL;
+	}
+
+	xfer_state = kzalloc(sizeof(*xfer_state), GFP_KERNEL);
+	/* Was missing: an unchecked allocation was dereferenced below */
+	if (!xfer_state)
+		return -ENOMEM;
+	xfer_state->xfer = xfer;
+	xfer_state->status = IB_WC_SUCCESS;
+	spin_lock_init(&xfer_state->lock);
+	spin_lock_irqsave(&xfer_state->lock, flags);
+
+	ret = mlx_accel_xfer_exec_more(xfer_state);
+	/* If nothing was started, no completion will ever fire: finish here */
+	if (ret && (xfer_state->start_count == 0))
+		done = true;
+
+	spin_unlock_irqrestore(&xfer_state->lock, flags);
+
+	if (done)
+		mlx_accel_xfer_complete(xfer_state);
+	return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/accelerator/core/accel_core_xfer.h b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_xfer.h
new file mode 100644
index 0000000..3f73b42
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/core/accel_core_xfer.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX_ACCEL_CORE_XFER_H__
+#define __MLX_ACCEL_CORE_XFER_H__
+
+#include "accel_core.h"
+#include "accel_core_trans.h"
+
+/* Execute a multi-transaction transfer described by xfer; completion is
+ * reported asynchronously via xfer->complete.
+ */
+int mlx_accel_xfer_exec(const struct mlx_accel_transaction *xfer);
+
+#endif /* __MLX_ACCEL_CORE_XFER_H__ */
diff --git a/drivers/net/ethernet/mellanox/accelerator/ipsec/Kconfig b/drivers/net/ethernet/mellanox/accelerator/ipsec/Kconfig
new file mode 100644
index 0000000..c24e6e7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/ipsec/Kconfig
@@ -0,0 +1,11 @@
+#
+# Mellanox ipsec accelerator driver configuration
+#
+
+config MLX_ACCEL_IPSEC
+	tristate "Mellanox Technologies IPSec accelerator driver"
+	depends on MLX_ACCEL_CORE
+	default n
+	---help---
+	  IPsec accelerator driver by Mellanox Technologies.
+
diff --git a/drivers/net/ethernet/mellanox/accelerator/ipsec/Makefile b/drivers/net/ethernet/mellanox/accelerator/ipsec/Makefile
new file mode 100644
index 0000000..10ab23d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/ipsec/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MLX_ACCEL_IPSEC)		+= mlx_ipsec.o
+
+mlx_ipsec-y :=	ipsec_main.o ipsec_sysfs.o ipsec.o ipsec_hw.o ipsec_counters.o
diff --git a/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec.c b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec.c
new file mode 100644
index 0000000..ad52485
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec.c
@@ -0,0 +1,839 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "../core/accel_core_sdk.h"
+#include "ipsec.h"
+#include "ipsec_sysfs.h"
+#include "ipsec_hw.h"
+#include <linux/netdevice.h>
+#include <linux/mlx5/qp.h>
+#include <crypto/aead.h>
+
+static LIST_HEAD(mlx_ipsec_devs);
+static DEFINE_MUTEX(mlx_ipsec_mutex);
+static int mlx_xfrm_add_state(struct xfrm_state *x);
+static void mlx_xfrm_del_state(struct xfrm_state *x);
+static void mlx_xfrm_free_state(struct xfrm_state *x);
+static bool mlx_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+static struct sk_buff *mlx_ipsec_rx_handler(struct sk_buff *skb);
+static struct sk_buff *mlx_ipsec_tx_handler(struct sk_buff *,
+					    struct mlx5e_swp_info *swp_info);
+static u16             mlx_ipsec_mtu_handler(u16 mtu, bool is_sw2hw);
+static netdev_features_t mlx_ipsec_feature_chk(struct sk_buff *skb,
+					       struct net_device *netdev,
+					       netdev_features_t features,
+					       bool *done);
+
+#define MAX_LSO_MSS 2048
+/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
+static __be16 inverse_table[MAX_LSO_MSS];
+
+/* xfrm device offload callbacks registered on the netdev */
+static const struct xfrmdev_ops mlx_xfrmdev_ops = {
+	.xdo_dev_state_add	= mlx_xfrm_add_state,
+	.xdo_dev_state_delete	= mlx_xfrm_del_state,
+	.xdo_dev_state_free	= mlx_xfrm_free_state,
+	.xdo_dev_offload_ok	= mlx_ipsec_offload_ok,
+};
+
+/* Datapath/ethtool hooks registered with the accelerator core */
+static struct mlx5e_accel_client_ops mlx_ipsec_client_ops = {
+	.rx_handler   = mlx_ipsec_rx_handler,
+	.tx_handler   = mlx_ipsec_tx_handler,
+	.feature_chk  = mlx_ipsec_feature_chk,
+	.mtu_handler  = mlx_ipsec_mtu_handler,
+	.get_count    = mlx_ipsec_get_count,
+	.get_strings  = mlx_ipsec_get_strings,
+	.get_stats    = mlx_ipsec_get_stats,
+};
+
+/* must hold mlx_ipsec_mutex to call this function */
+/* Linear search of the global device list for the entry bound to netdev;
+ * returns NULL if the netdev has no IPsec accelerator device.
+ */
+static struct mlx_ipsec_dev *find_mlx_ipsec_dev_by_netdev(
+		struct net_device *netdev)
+{
+	struct mlx_ipsec_dev *dev;
+
+	list_for_each_entry(dev, &mlx_ipsec_devs, accel_dev_list) {
+		if (dev->netdev == netdev)
+			return dev;
+	}
+
+	return NULL;
+}
+
+/* Locked wrapper around find_mlx_ipsec_dev_by_netdev().
+ * NOTE(review): the pointer is returned after dropping mlx_ipsec_mutex,
+ * so nothing here pins the device against concurrent removal — callers
+ * presumably rely on netdev lifetime; verify.
+ */
+struct mlx_ipsec_dev *mlx_ipsec_find_dev_by_netdev(struct net_device *netdev)
+{
+	struct mlx_ipsec_dev *dev;
+
+	mutex_lock(&mlx_ipsec_mutex);
+	dev = find_mlx_ipsec_dev_by_netdev(netdev);
+	mutex_unlock(&mlx_ipsec_mutex);
+	return dev;
+}
+
+/* Set (set=true) or clear (set=false) the IPsec bypass bit in the
+ * device via a read-modify-write of the bypass register.  Failures are
+ * logged and otherwise ignored.
+ */
+static void mlx_ipsec_set_clear_bypass(struct mlx_ipsec_dev *dev, bool set)
+{
+	int res;
+	u32 dw;
+
+	res = mlx_accel_core_mem_read(dev->accel_device, 4,
+				      IPSEC_BYPASS_ADDR, &dw,
+				      MLX_ACCEL_ACCESS_TYPE_DONTCARE);
+	if (res != 4) {
+		/* Was unconditionally "clear" - report the actual operation */
+		pr_warn("IPSec bypass %s failed on read\n",
+			set ? "set" : "clear");
+		return;
+	}
+
+	dw = set ? dw | IPSEC_BYPASS_BIT : dw & ~IPSEC_BYPASS_BIT;
+	res = mlx_accel_core_mem_write(dev->accel_device, 4,
+				       IPSEC_BYPASS_ADDR, &dw,
+				       MLX_ACCEL_ACCESS_TYPE_DONTCARE);
+	if (res != 4) {
+		pr_warn("IPSec bypass %s failed on write\n",
+			set ? "set" : "clear");
+		return;
+	}
+}
+
+/*
+ * returns 0 on success, negative error if failed to send message to FPGA
+ * positive error if FPGA returned a bad response
+ */
+/* xdo_dev_state_add callback: validate that the xfrm state is of the
+ * single supported shape (IPv4 ESP tunnel, AES-GCM-ICV16, seqiv, no
+ * auth/compression/ESN/encap), then program it into the device SADB.
+ */
+static int mlx_xfrm_add_state(struct xfrm_state *x)
+{
+	struct net_device *netdev = x->xso.dev;
+	struct mlx_ipsec_dev *dev;
+	struct mlx_ipsec_sa_entry *sa_entry = NULL;
+	unsigned long flags;
+	int res;
+
+	if (x->props.mode != XFRM_MODE_TUNNEL) {
+		dev_info(&netdev->dev, "Only tunnel xfrm state may be offloaded\n");
+		return -EINVAL;
+	}
+	if (x->props.aalgo != SADB_AALG_NONE) {
+		dev_info(&netdev->dev, "Cannot offload authenticated xfrm states\n");
+		return -EINVAL;
+	}
+	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
+		dev_info(&netdev->dev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
+		return -EINVAL;
+	}
+	if (x->props.calgo != SADB_X_CALG_NONE) {
+		dev_info(&netdev->dev, "Cannot offload compressed xfrm states\n");
+		return -EINVAL;
+	}
+	if (x->props.flags & XFRM_STATE_ESN) {
+		dev_info(&netdev->dev, "Cannot offload ESN xfrm states\n");
+		return -EINVAL;
+	}
+	if (x->props.family != AF_INET) {
+		dev_info(&netdev->dev, "Only IPv4 xfrm state may be offloaded\n");
+		return -EINVAL;
+	}
+	if (x->id.proto != IPPROTO_ESP) {
+		dev_info(&netdev->dev, "Only ESP xfrm state may be offloaded\n");
+		return -EINVAL;
+	}
+	if (x->encap) {
+		dev_info(&netdev->dev, "Encapsulated xfrm state may not be offloaded\n");
+		return -EINVAL;
+	}
+	if (!x->aead) {
+		dev_info(&netdev->dev, "Cannot offload xfrm states without aead\n");
+		return -EINVAL;
+	}
+	if (x->aead->alg_icv_len != 128) {
+		dev_info(&netdev->dev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
+		return -EINVAL;
+	}
+	/* key length includes the 32-bit salt appended to the AES key */
+	if ((x->aead->alg_key_len != 128 + 32) &&
+	    (x->aead->alg_key_len != 256 + 32)) {
+		dev_info(&netdev->dev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+		return -EINVAL;
+	}
+	if (x->tfcpad) {
+		dev_info(&netdev->dev, "Cannot offload xfrm states without tfc padding\n");
+		return -EINVAL;
+	}
+	if (!x->geniv) {
+		dev_info(&netdev->dev, "Cannot offload xfrm states without geniv\n");
+		return -EINVAL;
+	}
+	if (strcmp(x->geniv, "seqiv")) {
+		dev_info(&netdev->dev, "Cannot offload xfrm states with geniv other than seqiv\n");
+		return -EINVAL;
+	}
+
+	dev = mlx_ipsec_find_dev_by_netdev(netdev);
+	if (!dev) {
+		res = -EINVAL;
+		goto out;
+	}
+
+	sa_entry = kzalloc(sizeof(struct mlx_ipsec_sa_entry), GFP_ATOMIC);
+	if (!sa_entry) {
+		res = -ENOMEM;
+		goto out;
+	}
+
+	sa_entry->hw_sa_id = UNASSIGNED_SA_ID;
+	sa_entry->sw_sa_id = atomic_inc_return(&dev->next_sw_sa_id);
+	sa_entry->x = x;
+	sa_entry->dev = dev;
+
+	/* Add the SA to handle processed incoming packets before the add SA
+	 * completion was received
+	 */
+	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+		spin_lock_irqsave(&dev->sw_sa_id2xfrm_state_lock, flags);
+		hash_add_rcu(dev->sw_sa_id2xfrm_state_table, &sa_entry->hlist,
+				sa_entry->sw_sa_id);
+		spin_unlock_irqrestore(&dev->sw_sa_id2xfrm_state_lock, flags);
+	}
+
+	res = mlx_ipsec_hw_sadb_add(sa_entry);
+	if (res)
+		goto err_hash_rcu;
+
+	/* Module refcount pairs with module_put() in mlx_xfrm_free_state() */
+	x->xso.offload_handle = (unsigned long)sa_entry;
+	try_module_get(THIS_MODULE);
+	goto out;
+
+err_hash_rcu:
+	/* Undo the speculative hash insertion done above */
+	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+		spin_lock_irqsave(
+				&dev->sw_sa_id2xfrm_state_lock,
+				flags);
+		hash_del_rcu(&sa_entry->hlist);
+		spin_unlock_irqrestore(
+				&dev->sw_sa_id2xfrm_state_lock,
+				flags);
+		synchronize_rcu();
+	}
+
+	kfree(sa_entry);
+	sa_entry = NULL;
+out:
+	return res;
+}
+
+/* xdo_dev_state_delete callback: unhook an inbound SA from the lookup
+ * table.  Freeing happens later in mlx_xfrm_free_state() after an RCU
+ * grace period.
+ */
+static void mlx_xfrm_del_state(struct xfrm_state *x)
+{
+	struct mlx_ipsec_sa_entry *sa_entry;
+	unsigned long flags;
+
+	if (!x->xso.offload_handle)
+		return;
+
+	sa_entry = (struct mlx_ipsec_sa_entry *)x->xso.offload_handle;
+	WARN_ON(sa_entry->x != x);
+
+	/* Was lockless: hash_del_rcu() must be serialized against
+	 * concurrent hash_add_rcu() in mlx_xfrm_add_state(), which takes
+	 * sw_sa_id2xfrm_state_lock.
+	 */
+	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+		spin_lock_irqsave(&sa_entry->dev->sw_sa_id2xfrm_state_lock,
+				  flags);
+		hash_del_rcu(&sa_entry->hlist);
+		spin_unlock_irqrestore(&sa_entry->dev->sw_sa_id2xfrm_state_lock,
+				       flags);
+	}
+}
+
+/* xdo_dev_state_free callback: remove the SA from the device SADB and
+ * free the host-side entry.  For inbound SAs, wait an RCU grace period
+ * so rx-path readers that found the entry before mlx_xfrm_del_state()
+ * are done with it.
+ */
+static void mlx_xfrm_free_state(struct xfrm_state *x)
+{
+	struct mlx_ipsec_sa_entry *sa_entry;
+
+	if (!x->xso.offload_handle)
+		return;
+
+	sa_entry = (struct mlx_ipsec_sa_entry *)x->xso.offload_handle;
+	WARN_ON(sa_entry->x != x);
+
+	mlx_ipsec_hw_sadb_del(sa_entry);
+
+	if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
+		synchronize_rcu();
+
+	kfree(sa_entry);
+	/* Pairs with try_module_get() in mlx_xfrm_add_state() */
+	module_put(THIS_MODULE);
+}
+
+/* Resolve the xfrm_state for a sw_sa_id reported by the hardware.
+ * RCU read-side lookup in dev->sw_sa_id2xfrm_state_table; returns
+ * NULL if no entry matches.  No reference is taken on the returned
+ * state - the caller must take one (xfrm_state_hold) before use.
+ */
+static struct xfrm_state *mlx_sw_sa_id_to_xfrm_state(struct mlx_ipsec_dev *dev,
+		unsigned int sw_sa_id) {
+	struct mlx_ipsec_sa_entry *sa_entry;
+
+	rcu_read_lock();
+	hash_for_each_possible_rcu(dev->sw_sa_id2xfrm_state_table, sa_entry,
+				hlist, sw_sa_id) {
+		if (sa_entry->sw_sa_id == sw_sa_id) {
+			rcu_read_unlock();
+			return sa_entry->x;
+		}
+	}
+	rcu_read_unlock();
+	pr_warn("mlx_sw_sa_id_to_xfrm_state(): didn't find SA entry for %x\n",
+		sw_sa_id);
+	return NULL;
+}
+
+/* Copy the PET at the start of the payload into *pet and strip it
+ * from the skb.  The two MAC addresses are slid forward over the
+ * PET; the original ethertype is the PET's trailing field and so is
+ * already where the restored Ethernet header needs it.
+ */
+static void remove_pet(struct sk_buff *skb, struct pet *pet)
+{
+	struct ethhdr *old_eth;
+	struct ethhdr *new_eth;
+
+	memcpy(pet, skb->data, sizeof(*pet));
+	old_eth = (struct ethhdr *)(skb->data - sizeof(struct ethhdr));
+	new_eth = (struct ethhdr *)(skb_pull_inline(skb, sizeof(struct pet)) -
+		sizeof(struct ethhdr));
+	skb->mac_header += sizeof(struct pet);
+
+	memmove(new_eth, old_eth, 2 * ETH_ALEN);
+	/* Ethertype is already in its new place */
+}
+
+/* Strip the 4-byte dummy dword the hardware may insert after the IP
+ * header, restoring IPPROTO_ESP and fixing up tot_len and the IPv4
+ * header checksum.  Assumes an IPv4 header with no options follows
+ * the PET (other cases are not offloaded and only warn here).
+ */
+static void remove_dummy_dword(struct sk_buff *skb)
+{
+	struct iphdr *iphdr = (struct iphdr *)skb->data;
+	unsigned char *old;
+	unsigned char *new;
+	unsigned int iphdr_len = iphdr->ihl * 4;
+
+	/* We expect IP header right after the PET
+	 * with no IP options, all other are not offloaded for now
+	 */
+	if (be16_to_cpu(skb->protocol) != ETH_P_IP)
+		pr_warn("expected ETH_P_IP but received %04x\n",
+			be16_to_cpu(skb->protocol));
+	if (iphdr_len > sizeof(struct iphdr))
+		pr_warn("expected ETH_P_IP without IP options\n");
+
+	/* No dummy dword present - nothing to strip */
+	if (iphdr->protocol != IPPROTO_DUMMY_DWORD)
+		return;
+
+	old = skb->data - sizeof(struct ethhdr);
+	new = skb_pull_inline(skb, sizeof(struct dummy_dword)) -
+			      sizeof(struct ethhdr);
+	iphdr->protocol = IPPROTO_ESP; /* dummy next_proto (0xff) -> ESP */
+	iphdr->tot_len = htons(ntohs(iphdr->tot_len) - 4);
+	/* Header delta: tot_len -4 plus proto 0xff->ESP(0x32) = -0xd1.
+	 * NOTE(review): no one's-complement borrow folding here -
+	 * confirm against RFC 1624 incremental-update rules.
+	 */
+	iphdr->check = htons(~(~ntohs(iphdr->check) - 0xd1));
+
+	memmove(new, old, ETH_HLEN + iphdr_len);
+
+	skb->mac_header += sizeof(struct dummy_dword);
+}
+
+/* Prepend a PET between the MAC addresses and the ethertype.  Grows
+ * headroom by sizeof(struct pet), slides the two MAC addresses back,
+ * and sets the outer ethertype to the PET ethertype; the original
+ * ethertype stays put and becomes the PET's trailing field.  Returns
+ * the (zeroed) PET, or ERR_PTR(-ENOMEM) if headroom can't be grown.
+ */
+static struct pet *insert_pet(struct sk_buff *skb)
+{
+	struct ethhdr *eth;
+	struct pet *pet;
+
+	if (skb_cow_head(skb, sizeof(struct pet)))
+		return ERR_PTR(-ENOMEM);
+
+	eth = (struct ethhdr *)skb_push(skb, sizeof(struct pet));
+	skb->mac_header -= sizeof(struct pet);
+	pet = (struct pet *)(eth+1);
+
+	/* Slide dst+src MACs to the new frame start */
+	memmove(skb->data, skb->data + sizeof(struct pet), 2 * ETH_ALEN);
+
+	eth->h_proto = cpu_to_be16(MLX_IPSEC_PET_ETHERTYPE);
+
+	memset(pet->content.raw, 0, sizeof(pet->content.raw));
+
+	return pet;
+}
+
+/* Per-packet offload-ok callback: always accept; the capability
+ * checks were already done when the xfrm state was added.
+ */
+static bool mlx_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+	return true;
+}
+
+/* Translate between software and hardware MTU: the FPGA consumes
+ * sizeof(PET) + sizeof(dummy dword) bytes of each frame, so the HW
+ * MTU exceeds what the stack sees by that amount.
+ */
+static u16 mlx_ipsec_mtu_handler(u16 mtu, bool is_sw2hw)
+{
+	u16 mtu_diff = sizeof(struct pet) + sizeof(struct dummy_dword);
+
+	if (is_sw2hw)
+		return mtu + mtu_diff;
+	else
+		return mtu - mtu_diff;
+}
+
+/* Return 1/MSS in 0.16 fixed point (big endian) for the LSO PET.
+ * NOTE(review): gso_size is not range-checked against MAX_LSO_MSS
+ * here - confirm callers guarantee it, otherwise this indexes past
+ * inverse_table.
+ */
+static __be16 mlx_ipsec_mss_inv(struct sk_buff *skb)
+{
+	return inverse_table[skb_shinfo(skb)->gso_size];
+}
+
+/* Feature-check hook: if the first xfrm state on the skb is
+ * offloaded, set *done so the caller skips the remaining generic
+ * feature checks; the feature mask itself is returned unchanged.
+ */
+static netdev_features_t mlx_ipsec_feature_chk(struct sk_buff *skb,
+					       struct net_device *netdev,
+					       netdev_features_t features,
+					       bool *done)
+{
+	struct xfrm_state *x;
+
+	if (skb->sp && skb->sp->len) {
+		x = skb->sp->xvec[0];
+		if (x && x->xso.offload_handle)
+			*done = true;
+	}
+	return features;
+}
+
+/* Strip the ESP trailer (padding + pad-len/next-proto + ICV) from a
+ * non-GSO packet before it goes to hardware, returning the ESP
+ * next-protocol byte via *proto.  Reads the trailer either from the
+ * last page fragment or from the linear tail, then trims the skb and
+ * fixes up the IPv4 tot_len/checksum.
+ */
+static void remove_trailer(struct sk_buff *skb, struct xfrm_state *x,
+			   u8 *proto)
+{
+	skb_frag_t *frag;
+	u8 *vaddr;
+	u8 *trailer;
+	unsigned char last_frag;
+	struct crypto_aead *aead = x->data;
+	int alen = crypto_aead_authsize(aead);	/* ICV length */
+	int plen;				/* ESP pad length */
+	unsigned int trailer_len = alen;
+	struct iphdr *iphdr = (struct iphdr *)skb_network_header(skb);
+
+	if (skb_is_nonlinear(skb) && skb_shinfo(skb)->nr_frags) {
+		/* Trailer lives in the last page fragment */
+		last_frag = skb_shinfo(skb)->nr_frags - 1;
+		frag = &skb_shinfo(skb)->frags[last_frag];
+
+		skb_frag_ref(skb, last_frag);
+		vaddr = kmap_atomic(skb_frag_page(frag));
+
+		trailer = vaddr + frag->page_offset;
+		plen = trailer[skb_frag_size(frag) - alen - 2];
+		dev_dbg(&skb->dev->dev, "   Last frag page addr %p offset %u size %u\n",
+			vaddr, frag->page_offset, frag->size);
+		print_hex_dump_bytes("Last frag ", DUMP_PREFIX_OFFSET,
+				     trailer, frag->size);
+
+		*proto = trailer[skb_frag_size(frag) - alen - 1];
+
+		kunmap_atomic(vaddr);
+		skb_frag_unref(skb, last_frag);
+
+		dev_dbg(&skb->dev->dev, "   Frag pad len is %u bytes; alen is %u\n",
+			plen, alen);
+	} else {
+		/* Trailer is in the linear area */
+		plen = *(skb_tail_pointer(skb) - alen - 2);
+		*proto = *(skb_tail_pointer(skb) - alen - 1);
+		dev_dbg(&skb->dev->dev, "   Pad len is %u bytes; alen is %u\n",
+			plen, alen);
+	}
+	/* pad bytes + 2-byte (pad len, next proto) trailer + ICV */
+	trailer_len += plen + 2;
+
+	dev_dbg(&skb->dev->dev, "   Removing trailer %u bytes\n", trailer_len);
+	/* NOTE(review): pskb_trim() return value is ignored - it can
+	 * fail on cloned skbs; confirm this path never sees them.
+	 * Checksum fixup shares the RFC 1624 carry caveat noted in
+	 * remove_dummy_dword().
+	 */
+	pskb_trim(skb, skb->len - trailer_len);
+	iphdr->tot_len = htons(ntohs(iphdr->tot_len) - trailer_len);
+	iphdr->check = htons(~(~ntohs(iphdr->check) - trailer_len));
+}
+
+/* TX hook: for packets on an offloaded (IPv4) xfrm state, insert the
+ * PET, fill the software-parser header offsets, write the ESP
+ * sequence number into the IV field, and either mark the PET for LSO
+ * or strip the ESP trailer (hardware regenerates it).  Packets that
+ * are not offloaded pass through untouched.  Returns the skb, or
+ * NULL if it had to be dropped.
+ */
+static struct sk_buff *mlx_ipsec_tx_handler(struct sk_buff *skb,
+					    struct mlx5e_swp_info *swp_info)
+{
+	struct tcphdr *tcph;
+	struct ip_esp_hdr *esph;
+	struct iphdr *iiph;
+	struct xfrm_state *x;
+	struct pet *pet;
+	int iv_offset;
+	__be64 seqno;
+
+	dev_dbg(&skb->dev->dev, ">> tx_handler %u bytes\n", skb->len);
+
+	if (!skb->sp) {
+		dev_dbg(&skb->dev->dev, "   no sp\n");
+		goto out;
+	}
+
+	/* The hardware supports a single xfrm state per packet */
+	if (skb->sp->len != 1) {
+		pr_warn_ratelimited("Cannot offload crypto for a bundle of %u XFRM states\n",
+				    skb->sp->len);
+		goto out;
+	}
+
+	x = skb->sp->xvec[0];
+	if (!x) {
+		pr_warn_ratelimited("Crypto-offload packet has no xfrm_state\n");
+		goto out;
+	}
+
+	if (x->xso.offload_handle &&
+	    skb->protocol == htons(ETH_P_IP)) {
+		iiph = (struct iphdr *)skb_inner_network_header(skb);
+		pet = insert_pet(skb);
+		if (IS_ERR(pet)) {
+			pr_warn("insert_pet failed: %ld\n", PTR_ERR(pet));
+			kfree_skb(skb);
+			skb = NULL;
+			goto out;
+		}
+
+		/* Offsets are in 2-byte words, counting from start of frame */
+		swp_info->outer_l3_ofs = skb_network_offset(skb) / 2;
+		swp_info->inner_l3_ofs = skb_inner_network_offset(skb) / 2;
+		switch (iiph->protocol) {
+		case IPPROTO_UDP:
+			swp_info->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+			/* Fall through */
+		case IPPROTO_TCP:
+			swp_info->inner_l4_ofs =
+				skb_inner_transport_offset(skb) / 2;
+			break;
+		}
+
+		/* Place the SN in the IV field */
+		seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
+			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
+		iv_offset = skb->transport_header + sizeof(struct ip_esp_hdr)
+					- skb_headroom(skb);
+		skb_store_bits(skb, iv_offset, &seqno, 8);
+
+		if (skb_is_gso(skb)) {
+			/* Add LSO PET indication */
+			esph = (struct ip_esp_hdr *)skb_transport_header(skb);
+			tcph = inner_tcp_hdr(skb);
+			dev_dbg(&skb->dev->dev, "   Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
+				skb->len, skb_shinfo(skb)->gso_size,
+				ntohs(tcph->source), ntohs(tcph->dest),
+				ntohl(tcph->seq), ntohl(esph->seq_no));
+			pet->syndrome = PET_SYNDROME_OFFLOAD_WITH_LSO_TCP;
+			pet->content.send.mss_inv = mlx_ipsec_mss_inv(skb);
+			pet->content.send.seq = htons(ntohl(tcph->seq) &
+						0xFFFF);
+			pet->content.send.esp_next_proto = skb->sp->proto;
+		} else {
+			pet->syndrome = PET_SYNDROME_OFFLOAD;
+			/* HW regenerates padding/ICV; drop the SW trailer */
+			remove_trailer(skb, x,
+				       &pet->content.send.esp_next_proto);
+		}
+
+		dev_dbg(&skb->dev->dev, "   TX PKT len %u linear %u bytes + %u bytes in %u frags\n",
+			skb->len, skb_headlen(skb), skb->data_len,
+			skb->data_len ? skb_shinfo(skb)->nr_frags : 0);
+		dev_dbg(&skb->dev->dev, "   TX PET syndrome %u proto %u mss_inv %04x seq %04x\n",
+			pet->syndrome, pet->content.send.esp_next_proto,
+			ntohs(pet->content.send.mss_inv),
+			ntohs(pet->content.send.seq));
+	}
+out:
+	dev_dbg(&skb->dev->dev, "<< tx_handler\n");
+	return skb;
+}
+
+/* RX hook: strip the PET (and dummy dword) that the FPGA prepended,
+ * look up the xfrm_state by the sw_sa_id carried in the PET, and
+ * mark the packet as already decrypted/authenticated for
+ * xfrm_input().  Returns the (modified) skb, or NULL if dropped.
+ */
+static struct sk_buff *mlx_ipsec_rx_handler(struct sk_buff *skb)
+{
+	struct pet pet;
+	struct xfrm_offload_state *xos;
+	struct mlx_ipsec_dev *dev;
+	struct net_device *netdev = skb->dev;
+	struct xfrm_state *xs;
+
+	/* Only frames the FPGA touched carry the PET ethertype */
+	if (skb->protocol != cpu_to_be16(MLX_IPSEC_PET_ETHERTYPE))
+		goto out;
+
+	dev_dbg(&netdev->dev, ">> rx_handler %u bytes\n", skb->len);
+	remove_pet(skb, &pet);
+	dev_dbg(&netdev->dev, "   RX PET: size %lu, etherType %04X, syndrome %02x, sw_sa_id %x\n",
+		sizeof(pet), be16_to_cpu(pet.ethertype), pet.syndrome,
+		be32_to_cpu(pet.content.rcv.sa_id));
+
+	/* Restore the ethertype the PET displaced */
+	skb->protocol = pet.ethertype;
+
+	remove_dummy_dword(skb);
+
+	WARN_ON(skb->sp != NULL);
+	skb->sp = secpath_dup(skb->sp);
+	if (unlikely(!skb->sp)) { /* drop */
+		pr_warn("Failed to allocate secpath - dropping!\n");
+		goto drop;
+	}
+
+	dev = find_mlx_ipsec_dev_by_netdev(netdev);
+	if (unlikely(!dev)) {
+		/* Device may be mid-teardown; don't deref a NULL dev */
+		pr_warn("No ipsec device found for processed packet\n");
+		goto drop;
+	}
+
+	xs = mlx_sw_sa_id_to_xfrm_state(dev,
+			be32_to_cpu(pet.content.rcv.sa_id));
+	if (!xs) {
+		pr_warn("No xfrm_state found for processed packet\n");
+		goto drop;
+	}
+
+	/* xfrm_input expects us to hold the xfrm_state */
+	xfrm_state_hold(xs);
+	skb->sp->xvec[skb->sp->len++] = xs;
+
+	xos = xfrm_offload_input(skb);
+	xos->flags = CRYPTO_DONE;
+	switch (pet.syndrome) {
+	case PET_SYNDROME_DECRYPTED:
+		xos->status = CRYPTO_SUCCESS;
+		break;
+	case PET_SYNDROME_AUTH_FAILED:
+		xos->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
+		break;
+	default:
+		pr_warn("Unknown metadata syndrome %d\n", pet.syndrome);
+		goto drop;
+	}
+	goto out;
+
+drop:
+	kfree_skb(skb);
+	dev_dbg(&netdev->dev, "   rx_handler: dropping packet\n");
+	skb = NULL;
+out:
+	dev_dbg(&netdev->dev, "<< rx_handler\n");
+	return skb;
+}
+
+/* Must hold mlx_ipsec_mutex to call this function.
+ * Assumes that dev->core_ctx is destroyed by the caller.
+ * Unlinks the device and drops the list's kobject reference; the
+ * memory is freed in mlx_ipsec_dev_release() on the last put.
+ */
+static void mlx_ipsec_free(struct mlx_ipsec_dev *dev)
+{
+	list_del(&dev->accel_dev_list);
+	kobject_put(&dev->kobj);
+}
+
+/* kobject release: final teardown once the last reference is gone -
+ * drops the netdev reference taken in mlx_ipsec_add_one() and frees
+ * the device structure.
+ */
+void mlx_ipsec_dev_release(struct kobject *kobj)
+{
+	struct mlx_ipsec_dev *ipsec_dev =
+			container_of(kobj, struct mlx_ipsec_dev, kobj);
+
+	/*
+	 * [BP]: TODO - Test the corner case of removing the last reference
+	 * while receiving packets that should be handled by the rx_handler.
+	 * Do we need some sync here?
+	 */
+
+	dev_put(ipsec_dev->netdev);
+
+	kfree(ipsec_dev);
+}
+
+/* Netdevice notifier: on NETDEV_UNREGISTER, tear down the ipsec
+ * accel device bound to that netdev (unregister client ops and drop
+ * it from the global list).  All other events are ignored.
+ * NOTE(review): this calls find_mlx_ipsec_dev_by_netdev() while
+ * other code uses mlx_ipsec_find_dev_by_netdev() - confirm both
+ * names exist and are intentional.
+ */
+int mlx_ipsec_netdev_event(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+	struct mlx_ipsec_dev *accel_dev = NULL;
+
+	if (!netdev)
+		goto out;
+
+	pr_debug("mlx_ipsec_netdev_event: %lu\n", event);
+
+	/* We are interested only in net devices going down */
+	if (event != NETDEV_UNREGISTER)
+		goto out;
+
+	/* Take down all connections using a netdev that is going down */
+	mutex_lock(&mlx_ipsec_mutex);
+	accel_dev = find_mlx_ipsec_dev_by_netdev(netdev);
+	if (!accel_dev) {
+		pr_debug("mlx_ipsec_netdev_event: Failed to find ipsec device for net device\n");
+		goto unlock;
+	}
+	mlx_accel_core_client_ops_unregister(netdev);
+	mlx_ipsec_free(accel_dev);
+
+unlock:
+	mutex_unlock(&mlx_ipsec_mutex);
+out:
+	return NOTIFY_DONE;
+}
+
+/* Probe callback: bind a new FPGA ipsec device.  Verifies the SBU
+ * product id, reads the ipsec extended capabilities, (optionally)
+ * creates the RDMA command connection, resolves the companion
+ * netdev, registers client ops and sysfs, and advertises ESP offload
+ * features on the netdev.  Returns 0 or a negative errno.
+ */
+int mlx_ipsec_add_one(struct mlx_accel_core_device *accel_device)
+{
+	int ret = 0;
+	int i;
+	struct mlx_ipsec_dev *dev = NULL;
+	struct net_device *netdev = NULL;
+#ifdef MLX_IPSEC_SADB_RDMA
+	struct mlx_accel_core_conn_init_attr init_attr = {0};
+#endif
+
+	pr_debug("mlx_ipsec_add_one called for %s\n", accel_device->name);
+
+	/* Only bind FPGAs flashed with the IPSec sandbox image */
+	if (MLX5_CAP_FPGA(accel_device, sandbox_product_id) !=
+			MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dev = kzalloc(sizeof(struct mlx_ipsec_dev), GFP_KERNEL);
+	if (!dev) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = mlx_accel_get_sbu_caps(accel_device, sizeof(dev->ipsec_caps),
+				     (void *)dev->ipsec_caps);
+	if (ret) {
+		pr_err("Failed to retrieve ipsec extended capabilities\n");
+		goto err_dev;
+	}
+	/* Need to reverse endianness to use MLX5_GET macros */
+	for (i = 0; i < sizeof(dev->ipsec_caps) / 4; i++)
+		dev->ipsec_caps[i] = cpu_to_be32(dev->ipsec_caps[i]);
+
+	init_waitqueue_head(&dev->wq);
+	INIT_LIST_HEAD(&dev->accel_dev_list);
+	INIT_KFIFO(dev->fifo_sa_cmds);
+	hash_init(dev->sw_sa_id2xfrm_state_table);
+	spin_lock_init(&dev->sw_sa_id2xfrm_state_lock);
+	spin_lock_init(&dev->fifo_sa_cmds_lock);
+	atomic_set(&dev->next_sw_sa_id, 0);
+	dev->accel_device = accel_device;
+
+#ifdef MLX_IPSEC_SADB_RDMA
+	/* [BP]: TODO: Move these constants to a header */
+	init_attr.rx_size = 8;
+	init_attr.tx_size = 8;
+	init_attr.recv_cb = mlx_ipsec_hw_qp_recv_cb;
+	init_attr.cb_arg = dev;
+	/* [AY]: TODO: fix port 1 issue */
+	dev->conn = mlx_accel_core_conn_create(accel_device, &init_attr);
+	if (IS_ERR(dev->conn)) {
+		ret = PTR_ERR(dev->conn);
+		pr_err("mlx_ipsec_add_one(): Got error while creating connection %d\n",
+				ret);
+		goto err_dev;
+	}
+	ret = mlx_accel_core_connect(dev->conn);
+	if (ret) {
+		pr_err("Failed to connect IPSec QP: %d\n", ret);
+		goto err_conn;
+	}
+#endif
+	/* get_netdev takes a reference; dropped in dev_release() */
+	netdev = accel_device->ib_dev->get_netdev(accel_device->ib_dev,
+						  accel_device->port);
+	if (!netdev) {
+		pr_err("mlx_ipsec_add_one(): Failed to retrieve net device from ib device\n");
+		ret = -EINVAL;
+		goto err_conn;
+	}
+	dev->netdev = netdev;
+
+	netif_keep_dst(dev->netdev);
+
+	ret = mlx_accel_core_client_ops_register(netdev, &mlx_ipsec_client_ops);
+	if (ret) {
+		pr_err("mlx_ipsec_add_one(): Failed to register client ops %d\n",
+		       ret);
+		goto err_netdev;
+	}
+
+	ret = ipsec_sysfs_init_and_add(&dev->kobj,
+			mlx_accel_core_kobj(dev->accel_device),
+			"%s",
+			"accel_dev");
+	if (ret) {
+		pr_err("mlx_ipsec_add_one(): Got error from kobject_init_and_add %d\n", ret);
+		goto err_ops_register;
+	}
+
+	mutex_lock(&mlx_ipsec_mutex);
+	list_add(&dev->accel_dev_list, &mlx_ipsec_devs);
+	mutex_unlock(&mlx_ipsec_mutex);
+
+	/* Advertise ESP (and optionally GSO-ESP) offload capabilities */
+	dev->netdev->xfrmdev_ops = &mlx_xfrmdev_ops;
+	if (MLX5_GET(ipsec_extended_cap, dev->ipsec_caps, esp)) {
+		dev->netdev->wanted_features |= NETIF_F_HW_ESP |
+						NETIF_F_HW_ESP_TX_CSUM;
+		dev->netdev->hw_features |= NETIF_F_HW_ESP |
+					    NETIF_F_HW_ESP_TX_CSUM;
+		if (MLX5_GET(ipsec_extended_cap, dev->ipsec_caps, lso)) {
+			dev_dbg(&dev->netdev->dev, "ESP GSO capability turned on\n");
+			dev->netdev->wanted_features |= NETIF_F_GSO_ESP;
+			dev->netdev->hw_features |= NETIF_F_GSO_ESP;
+			dev->netdev->hw_enc_features |= NETIF_F_GSO_ESP;
+		}
+	}
+
+	rtnl_lock();
+	netdev_change_features(dev->netdev);
+	rtnl_unlock();
+
+	/* Start intercepting traffic now that everything is wired up */
+	mlx_ipsec_set_clear_bypass(dev, false);
+	dev_info(&dev->netdev->dev, "mlx_ipsec added on device %s\n",
+		 accel_device->name);
+	goto out;
+
+err_ops_register:
+	mlx_accel_core_client_ops_unregister(netdev);
+err_netdev:
+	dev_put(netdev);
+err_conn:
+#ifdef MLX_IPSEC_SADB_RDMA
+	mlx_accel_core_conn_destroy(dev->conn);
+#endif
+err_dev:
+	kfree(dev);
+out:
+	return ret;
+}
+
+/* [BP]: TODO - Remove all SA entries on mlx_xfrm_del_state */
+/* [BP]: TODO - How do we make sure that all packets inflight are dropped? */
+/* Remove callback: tear down the ipsec device bound to accel_device.
+ * Clears the ESP feature flags, destroys the command connection,
+ * re-enables hardware bypass and unlinks the device.  The feature
+ * change is propagated to the stack only after the mutex is dropped
+ * because it needs the rtnl lock.
+ */
+/* [BP]: TODO - Remove all SA entries on mlx_xfrm_del_state */
+/* [BP]: TODO - How do we make sure that all packets inflight are dropped? */
+void mlx_ipsec_remove_one(struct mlx_accel_core_device *accel_device)
+{
+	struct mlx_ipsec_dev *dev;
+	struct net_device *netdev = NULL;
+
+	pr_debug("mlx_ipsec_remove_one called for %s\n", accel_device->name);
+
+	mutex_lock(&mlx_ipsec_mutex);
+
+	list_for_each_entry(dev, &mlx_ipsec_devs, accel_dev_list) {
+		if (dev->accel_device == accel_device) {
+			dev->netdev->wanted_features &= ~(NETIF_F_HW_ESP |
+				NETIF_F_HW_ESP_TX_CSUM | NETIF_F_GSO_ESP);
+			dev->netdev->hw_enc_features &= ~NETIF_F_GSO_ESP;
+			netdev = dev->netdev;
+#ifdef MLX_IPSEC_SADB_RDMA
+			mlx_accel_core_conn_destroy(dev->conn);
+#endif
+			mlx_ipsec_set_clear_bypass(dev, true);
+			mlx_accel_core_client_ops_unregister(netdev);
+			mlx_ipsec_free(dev);
+			break;
+		}
+	}
+	mutex_unlock(&mlx_ipsec_mutex);
+
+	/* Remove NETIF_F_HW_ESP feature.
+	 * We assume that xfrm ops are assigned by xfrm_dev notifier callback
+	 */
+	if (netdev) {
+		rtnl_lock();
+		netdev_change_features(netdev);
+		rtnl_unlock();
+	}
+}
+
+/* Precompute 1/mss in 0.16 fixed point (big endian) for every legal
+ * LSO MSS: floor(2^32 / mss) >> 16.  mss == 1 would overflow the
+ * u16, so it is pinned to 0xFFFF (identical in either byte order).
+ */
+void mlx_ipsec_init_inverse_table(void)
+{
+	u32 mss;
+
+	inverse_table[1] = 0xFFFF;
+	for (mss = 2; mss < MAX_LSO_MSS; mss++)
+		inverse_table[mss] = htons(((1ULL << 32) / mss) >> 16);
+}
diff --git a/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec.h b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec.h
new file mode 100644
index 0000000..e4d0563
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __IPSEC_H__
+#define __IPSEC_H__
+
+#define MLX_IPSEC_SADB_RDMA
+
+#include <linux/types.h>
+#include <linux/kobject.h>
+#include <linux/kfifo.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/hashtable.h>
+#include <linux/mlx5/en_driver.h>
+#include <linux/mlx5/device.h>
+#include <net/sock.h>
+#include <net/inet_common.h>
+#include <net/xfrm.h>
+
+#include "../core/accel_core_sdk.h"
+#include "ipsec_cmds.h"
+
+#define DRIVER_NAME		"mlx_ipsec"
+#define DRIVER_VERSION	"0.1"
+#define DRIVER_RELDATE	"January 2016"
+
+#define MLX_IPSEC_DEVICE_NAME					"mlx_ipsec"
+/* TODO: Consider moving this to include/uapi/linux/if_ether.h */
+#define MLX_IPSEC_PET_ETHERTYPE					(0x8CE4)
+
+#define MLX_IPSEC_SA_HASH_TABLE_BITS			10
+#define MLX_SA_HW2SW_FIFO_SIZE				8
+
+/* Driver-side bookkeeping for one offloaded xfrm_state */
+struct mlx_ipsec_sa_entry {
+	unsigned int sw_sa_id;	/* driver-chosen id; RX hash table key */
+	unsigned int hw_sa_id;	/* unused */
+	struct hlist_node hlist;	/* link in sw_sa_id2xfrm_state_table */
+	struct xfrm_state *x;	/* the stack's state this entry mirrors */
+	enum ipsec_response_syndrome status;	/* last HW command result */
+	struct mlx_ipsec_dev *dev;	/* owning accel device */
+};
+
+/* Per-FPGA ipsec device state; one instance per bound accel device,
+ * linked on the global mlx_ipsec_devs list (protected by
+ * mlx_ipsec_mutex) and refcounted via kobj.
+ */
+struct mlx_ipsec_dev {
+	struct kobject kobj;
+	struct list_head accel_dev_list;
+	struct mlx_accel_core_device *accel_device;
+	struct mlx_accel_core_conn *conn;	/* RDMA SADB command channel */
+	/* [BP]: TODO - move this to mlx_accel_core_ctx */
+	struct net_device *netdev;
+	DECLARE_KFIFO(fifo_sa_cmds, struct mlx_ipsec_sa_entry *,
+			MLX_SA_HW2SW_FIFO_SIZE);
+	DECLARE_HASHTABLE(sw_sa_id2xfrm_state_table,
+			MLX_IPSEC_SA_HASH_TABLE_BITS);
+	spinlock_t fifo_sa_cmds_lock; /* Protects fifo_sa_cmds */
+	spinlock_t sw_sa_id2xfrm_state_lock; /* Protects sw_sa_id2xfrm_state */
+	atomic_t next_sw_sa_id;	/* allocator for sw_sa_id values */
+	wait_queue_head_t wq;	/* waiters for HW command completions */
+	u32 ipsec_caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
+};
+
+void mlx_ipsec_dev_release(struct kobject *kobj);
+
+int mlx_ipsec_netdev_event(struct notifier_block *this,
+		unsigned long event, void *ptr);
+
+int mlx_ipsec_add_one(struct mlx_accel_core_device *accel_device);
+void mlx_ipsec_remove_one(struct mlx_accel_core_device *accel_device);
+
+int mlx_xfrm_offload_input(struct xfrm_state *x, struct sk_buff **skb);
+int mlx_xfrm_offload_output(struct xfrm_state *x, struct sk_buff **skb);
+
+struct mlx_ipsec_dev *mlx_ipsec_find_dev_by_netdev(struct net_device *netdev);
+int mlx_ipsec_get_count(struct net_device *netdev);
+int mlx_ipsec_get_strings(struct net_device *netdev, uint8_t *data);
+int mlx_ipsec_get_stats(struct net_device *netdev, u64 *data);
+
+void mlx_ipsec_init_inverse_table(void);
+
+#endif	/* __IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_cmds.h b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_cmds.h
new file mode 100644
index 0000000..1bc4e54
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_cmds.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef MLX_IPSEC_CMDS_H
+#define MLX_IPSEC_CMDS_H
+
+#define UNASSIGNED_SA_ID ((u32)~0)
+
+/* RX PET syndromes reported by the FPGA for decrypted packets */
+enum rcv_pet_syndrom {
+	/*PET_SYNDROME_DECRYPTED_WITH_DUMMY_IP	= 0x00,*/
+	PET_SYNDROME_DECRYPTED			= 0x11,
+	PET_SYNDROME_AUTH_FAILED		= 0x12,
+};
+
+/* PET payload, FPGA -> host: identifies the SA that matched */
+struct rcv_pet_content {
+	unsigned char   reserved;
+	__be32		sa_id;	/* sw_sa_id assigned by the driver */
+} __packed;
+
+/* TX PET syndromes requested by the host from the FPGA */
+enum send_pet_syndrome {
+	PET_SYNDROME_OFFLOAD = 0x8,
+	PET_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
+	PET_SYNDROME_OFFLOAD_WITH_LSO_IPV4 = 0xA,
+	PET_SYNDROME_OFFLOAD_WITH_LSO_IPV6 = 0xB,
+};
+
+/* PET payload, host -> FPGA */
+struct send_pet_content {
+	__be16 mss_inv;		/* 1/MSS in 16bit fixed point, only for LSO */
+	__be16 seq;		/* LSBs of the first TCP seq, only for LSO */
+	u8     esp_next_proto;  /* Next protocol of ESP */
+} __packed;
+
+/* 8-byte PET inserted on the wire between the MAC addresses and the
+ * ethertype; the trailing ethertype field overlays the frame's
+ * original ethertype so insertion/removal only moves the MACs.
+ */
+struct pet {
+	unsigned char syndrome;
+	union {
+		unsigned char raw[5];
+		/* from FPGA to host, on successful decrypt */
+		struct rcv_pet_content rcv;
+		/* from host to FPGA */
+		struct send_pet_content send;
+	} __packed content;
+	/* packet type ID field	*/
+	__be16 ethertype;
+} __packed;
+
+#define IPPROTO_DUMMY_DWORD 0xff
+
+/* 4-byte filler the FPGA inserts after the IP header on RX;
+ * flagged by IPPROTO_DUMMY_DWORD in iphdr->protocol and stripped
+ * by remove_dummy_dword().
+ */
+struct dummy_dword {
+	unsigned char next_proto;
+	unsigned char len;
+	__be16 reserved;
+} __packed;
+
+/* SADB entry traffic direction */
+enum direction {
+	RX_DIRECTION = 0,
+	TX_DIRECTION = 1
+};
+
+/* Encryption algorithm selector for sadb_entry.enc_auth_mode */
+enum crypto_identifier {
+	IPSEC_OFFLOAD_CRYPTO_NONE			= 0,
+	IPSEC_OFFLOAD_CRYPTO_AES_GCM_128	= 1,
+	IPSEC_OFFLOAD_CRYPTO_AES_GCM_256	= 2,
+};
+
+/* Authentication algorithm selector for sadb_entry.enc_auth_mode */
+enum auth_identifier {
+	IPSEC_OFFLOAD_AUTH_NONE			= 0,
+	IPSEC_OFFLOAD_AUTH_AES_GCM_128	= 1,
+	IPSEC_OFFLOAD_AUTH_AES_GCM_256	= 2,
+};
+
+#define IPSEC_BYPASS_ADDR	0x0
+#define IPSEC_BYPASS_BIT	0x400000
+
+/* Hardware SADB entry layout as consumed by the FPGA.
+ * NOTE(review): field semantics (masks, enable bits - see the
+ * SADB_* flags below) follow the FPGA interface spec; confirm
+ * against the device documentation.
+ */
+struct __attribute__((__packed__)) sadb_entry {
+	u8 key[32];		/* AES-GCM key material */
+	__be32 sip;
+	__be32 sip_mask;
+	__be32 dip;
+	__be32 dip_mask;
+	__be32 spi;
+	__be32 salt;
+	u8 salt_iv[8];
+	__be32 sw_sa_handle;	/* echoed back in RX PETs as sa_id */
+	__be16 sport;
+	__be16 dport;
+	u8 ip_proto;
+	u8 enc_auth_mode;
+	u8 enable;		/* SADB_* flag bits */
+	u8 pad;
+	__be16 tfclen;
+	__be16 pad2;
+};
+
+#define SADB_DIR_SX      BIT(7)
+#define SADB_SA_VALID    BIT(6)
+#define SADB_SPI_EN      BIT(5)
+#define SADB_IP_PROTO_EN BIT(4)
+#define SADB_SPORT_EN    BIT(3)
+#define SADB_DPORT_EN    BIT(2)
+#define SADB_TUNNEL      BIT(1)
+#define SADB_TUNNEL_EN   BIT(0)
+
+/* Status codes returned by the FPGA for SADB commands;
+ * IPSEC_SA_PENDING is a driver-internal "no response yet" marker.
+ */
+enum ipsec_response_syndrome {
+	IPSEC_RESPONSE_SUCCESS = 0,
+	IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
+	IPSEC_RESPONSE_SADB_ISSUE = 2,
+	IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
+	IPSEC_SA_PENDING = 0xff,
+};
+
+#ifdef MLX_IPSEC_SADB_RDMA
+
+/* Command opcodes sent over the RDMA SADB channel */
+enum ipsec_hw_cmd {
+	IPSEC_CMD_ADD_SA = 0,
+	IPSEC_CMD_DEL_SA = 1,
+};
+
+/* Wire format of an add/del SA command */
+struct sa_cmd_v4 {
+	__be32 cmd;	/* enum ipsec_hw_cmd */
+	struct sadb_entry entry;
+};
+
+/* Wire format of the FPGA's reply to an SA command */
+struct ipsec_hw_response {
+	__be32 syndrome;	/* enum ipsec_response_syndrome */
+	__be32 sw_sa_handle;	/* identifies which command completed */
+	u8 rsvd[24];
+};
+
+#else
+
+#define IPSEC_FLUSH_CACHE_ADDR	0x144
+#define IPSEC_FLUSH_CACHE_BIT	0x100
+#define SADB_SLOT_SIZE		0x80
+
+#endif	/*  MLX_IPSEC_SADB_RDMA */
+
+#endif /* MLX_IPSEC_CMDS_H */
diff --git a/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_counters.c b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_counters.c
new file mode 100644
index 0000000..9086aa1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_counters.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include "linux/ethtool.h"
+#include "ipsec.h"
+#include "../core/accel_core_sdk.h"
+
+/* Counter names reported to ethtool; the order must match the HW
+ * counter layout read in mlx_ipsec_get_stats().
+ */
+static const char * const ipsec_stats_desc[] = {
+	"ipsec_dec_in_packets",
+	"ipsec_dec_out_packets",
+	"ipsec_dec_bypass_packets",
+	"ipsec_enc_in_packets",
+	"ipsec_enc_out_packets",
+	"ipsec_enc_bypass_packets",
+	"ipsec_dec_drop_packets",
+	"ipsec_dec_auth_fail_packets",
+	"ipsec_enc_drop_packets",
+	"ipsec_add_sa_success",
+	"ipsec_add_sa_fail",
+	"ipsec_del_sa_success",
+	"ipsec_del_sa_fail",
+	"ipsec_cmd_drop",
+};
+
+/* Number of HW counters to expose: the device-reported count,
+ * clamped to the number of names we actually have strings for.
+ */
+static int mlx_ipsec_counters_count(struct mlx_ipsec_dev *dev)
+{
+	u32 num_ipsec_cnt = MLX5_GET(ipsec_extended_cap, dev->ipsec_caps,
+						number_of_ipsec_counters);
+	if (num_ipsec_cnt > ARRAY_SIZE(ipsec_stats_desc))
+		num_ipsec_cnt = ARRAY_SIZE(ipsec_stats_desc);
+
+	return num_ipsec_cnt;
+}
+
+/* ethtool get_sset_count hook: number of ipsec statistics for this
+ * netdev, or 0 if the netdev has no bound ipsec device.
+ */
+int mlx_ipsec_get_count(struct net_device *netdev)
+{
+	struct mlx_ipsec_dev *dev = mlx_ipsec_find_dev_by_netdev(netdev);
+	u32 num_ipsec_cnt;
+
+	if (!dev) {
+		dev_warn(&netdev->dev, "mlx_ipsec_get_count: no dev\n");
+		return 0;
+	}
+	num_ipsec_cnt = mlx_ipsec_counters_count(dev);
+
+	dev_dbg(&netdev->dev, "get_count: num_ipsec_counters=%d\n",
+		num_ipsec_cnt);
+	return num_ipsec_cnt;
+}
+
+/* ethtool get_strings hook: copy counter names into the caller's
+ * ETH_GSTRING_LEN-sized slots; returns how many were written.
+ * NOTE(review): plain strcpy is safe only while every entry of
+ * ipsec_stats_desc is shorter than ETH_GSTRING_LEN - confirm when
+ * adding names (strlcpy would be more defensive).
+ */
+int mlx_ipsec_get_strings(struct net_device *netdev, uint8_t *data)
+{
+	int i;
+	struct mlx_ipsec_dev *dev = mlx_ipsec_find_dev_by_netdev(netdev);
+	u32 num_ipsec_cnt;
+
+	if (!dev) {
+		dev_warn(&netdev->dev, "mlx_ipsec_get_strings: no dev\n");
+		return 0;
+	}
+	num_ipsec_cnt = mlx_ipsec_counters_count(dev);
+
+	for (i = 0; i < num_ipsec_cnt; i++)
+		strcpy(data + (i * ETH_GSTRING_LEN), ipsec_stats_desc[i]);
+
+	return num_ipsec_cnt;
+}
+
+/* ethtool get_ethtool_stats hook: read the u64 counter block from
+ * FPGA memory at the capability-advertised address into data[].
+ * Returns the number of counters read, or 0 on any failure.
+ */
+int mlx_ipsec_get_stats(struct net_device *netdev, u64 *data)
+{
+	int ret;
+	struct mlx_ipsec_dev *dev = mlx_ipsec_find_dev_by_netdev(netdev);
+	u32 num_ipsec_cnt;
+	u64 addr;
+
+	if (!dev) {
+		dev_warn(&netdev->dev, "mlx_ipsec_get_stats: no dev\n");
+		return 0;
+	}
+	num_ipsec_cnt = mlx_ipsec_counters_count(dev);
+	addr = (u64)MLX5_GET(ipsec_extended_cap, dev->ipsec_caps,
+						ipsec_counters_start_addr);
+
+	ret = mlx_accel_core_mem_read(dev->accel_device,
+				      num_ipsec_cnt * sizeof(u64), addr, data,
+				      MLX_ACCEL_ACCESS_TYPE_DONTCARE);
+	if (ret < 0) {
+		dev_err(&netdev->dev, "Failed to read IPSec counters from HW: %d\n",
+			ret);
+		return 0;
+	}
+	return num_ipsec_cnt;
+}
diff --git a/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_hw.c b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_hw.c
new file mode 100644
index 0000000..a570dbe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_hw.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "ipsec_hw.h"
+#include <linux/inetdevice.h>
+#include <crypto/internal/geniv.h>
+#include <crypto/aead.h>
+
+/* Map the SA's AEAD key length to the HW auth algorithm identifier.
+ * Returns -1 (cast into the enum) for unsupported key sizes; callers
+ * must treat that as "no offload possible".
+ */
+static enum auth_identifier
+mlx_ipsec_get_auth_identifier(struct xfrm_state *x)
+{
+	/* alg_key_len is in bits; the last 4 bytes of the key are salt */
+	unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4;
+
+	switch (key_len) {
+	case 16:
+		return IPSEC_OFFLOAD_AUTH_AES_GCM_128;
+	case 32:
+		return IPSEC_OFFLOAD_AUTH_AES_GCM_256;
+	default:
+		/* key_len is unsigned: print with %u, not %d */
+		pr_warn("Bad key len: %u for alg %s\n", key_len,
+			x->aead->alg_name);
+		return -1;
+	}
+}
+
+/* Map the SA's AEAD key length to the HW crypto algorithm identifier.
+ * Returns -1 (cast into the enum) for unsupported key sizes.
+ */
+static enum crypto_identifier
+mlx_ipsec_get_crypto_identifier(struct xfrm_state *x)
+{
+	/* alg_key_len is in bits; the last 4 bytes of the key are salt */
+	unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4;
+
+	switch (key_len) {
+	case 16:
+		return IPSEC_OFFLOAD_CRYPTO_AES_GCM_128;
+	case 32:
+		return IPSEC_OFFLOAD_CRYPTO_AES_GCM_256;
+	default:
+		/* key_len is unsigned: print with %u, not %d */
+		pr_warn("Bad key len: %u for alg %s\n", key_len,
+			x->aead->alg_name);
+		return -1;
+	}
+}
+
+/* Fill @hw_entry, the HW SADB record for @sa, in the exact layout the
+ * FPGA expects.  When @valid is false the key material is left zeroed
+ * (used when deleting an SA); the match fields are always populated.
+ */
+static void mlx_ipsec_build_hw_entry(struct mlx_ipsec_sa_entry *sa,
+				     struct sadb_entry *hw_entry,
+				     bool valid)
+{
+	unsigned int crypto_data_len;
+	unsigned int key_len;
+	struct crypto_aead *aead;
+	struct aead_geniv_ctx *geniv_ctx;
+	int ivsize;
+
+	/* HW entries are written in 32-bit words; the struct must be
+	 * 4-byte aligned in size.
+	 */
+	BUILD_BUG_ON((sizeof(struct sadb_entry) & 3) != 0);
+
+	memset(hw_entry, 0, sizeof(*hw_entry));
+
+	if (valid) {
+		/* alg_key_len is in bits; the trailing 4 bytes are salt */
+		crypto_data_len = (sa->x->aead->alg_key_len + 7) / 8;
+		key_len = crypto_data_len - 4; /* 4 bytes salt at end */
+		aead = sa->x->data;
+		geniv_ctx = crypto_aead_ctx(aead);
+		ivsize = crypto_aead_ivsize(aead);
+
+		memcpy(&hw_entry->key, sa->x->aead->alg_key, key_len);
+		/* Duplicate 128 bit key twice according to HW layout */
+		if (key_len == 16)
+			memcpy(&hw_entry->key[16], sa->x->aead->alg_key,
+			       key_len);
+		/* NOTE(review): copies ivsize bytes from geniv_ctx->salt —
+		 * confirm the salt buffer is at least ivsize bytes long.
+		 */
+		memcpy(&hw_entry->salt_iv, geniv_ctx->salt, ivsize);
+		hw_entry->salt = *((__be32 *)(sa->x->aead->alg_key + key_len));
+	}
+
+	/* Selector fields: addresses/SPI are kept in network byte order */
+	hw_entry->enable |= SADB_SA_VALID | SADB_SPI_EN;
+	hw_entry->sip = sa->x->props.saddr.a4;
+	hw_entry->sip_mask = inet_make_mask(sa->x->sel.prefixlen_s);
+	hw_entry->dip = sa->x->id.daddr.a4;
+	hw_entry->dip_mask = inet_make_mask(sa->x->sel.prefixlen_d);
+	hw_entry->spi = sa->x->id.spi;
+	hw_entry->sw_sa_handle = htonl(sa->sw_sa_id);
+	hw_entry->sport = htons(sa->x->sel.sport);
+	hw_entry->enable |= sa->x->sel.sport_mask ? SADB_SPORT_EN : 0;
+	hw_entry->dport = htons(sa->x->sel.dport);
+	hw_entry->enable |= sa->x->sel.dport_mask ? SADB_DPORT_EN : 0;
+	hw_entry->ip_proto = sa->x->id.proto;
+	if (hw_entry->ip_proto)
+		hw_entry->enable |= SADB_IP_PROTO_EN;
+	/* Auth id in the high nibble, crypto id in the low nibble */
+	hw_entry->enc_auth_mode = mlx_ipsec_get_auth_identifier(sa->x) << 4;
+	hw_entry->enc_auth_mode |= mlx_ipsec_get_crypto_identifier(sa->x);
+	if (!(sa->x->xso.flags & XFRM_OFFLOAD_INBOUND))
+		hw_entry->enable |= SADB_DIR_SX;
+	if (sa->x->props.mode)
+		hw_entry->enable |= SADB_TUNNEL | SADB_TUNNEL_EN;
+}
+
+#ifndef MLX_IPSEC_SADB_RDMA
+
+/* Compute the DDR address of the SA's slot.  The SADB is a direct-mapped
+ * table indexed by (DIP XOR SPI) truncated to 20 bits, so distinct SAs
+ * can collide on the same slot.
+ */
+static u64 mlx_ipsec_sadb_addr(struct mlx_ipsec_sa_entry *sa)
+{
+	unsigned long sa_index;
+
+	sa_index = (ntohl(sa->x->id.daddr.a4) ^ ntohl(sa->x->id.spi)) & 0xFFFFF;
+	pr_debug("sa DIP %08x SPI %08x -> Index %lu\n",
+		 sa->x->id.daddr.a4, sa->x->id.spi, sa_index);
+	return mlx_accel_core_ddr_base_get(sa->dev->accel_device) +
+	       (sa_index * SADB_SLOT_SIZE);
+}
+
+/* Invalidate the device's SADB cache by read-modify-writing the flush
+ * control word (presumably the HW acts on the bit transition — the XOR
+ * flips it each call).  Failures are logged but not propagated; the
+ * caller has no recovery path.
+ */
+static void mlx_ipsec_flush_cache(struct mlx_ipsec_dev *dev)
+{
+	int res;
+	u32 dw;
+
+	res = mlx_accel_core_mem_read(dev->accel_device, 4,
+				      IPSEC_FLUSH_CACHE_ADDR, &dw,
+				      MLX_ACCEL_ACCESS_TYPE_DONTCARE);
+	if (res != 4) {
+		pr_warn("IPSec cache flush failed on read\n");
+		return;
+	}
+
+	dw ^= IPSEC_FLUSH_CACHE_BIT;
+	res = mlx_accel_core_mem_write(dev->accel_device, 4,
+				       IPSEC_FLUSH_CACHE_ADDR, &dw,
+				       MLX_ACCEL_ACCESS_TYPE_DONTCARE);
+	if (res != 4)
+		pr_warn("IPSec cache flush failed on write\n");
+}
+
+/* Direct-memory SADB mode: write the SA entry into its DDR slot and
+ * flush the device cache.  Returns 0 on success or the (partial) byte
+ * count from the failed write — callers treat non-zero as failure.
+ */
+int mlx_ipsec_hw_sadb_add(struct mlx_ipsec_sa_entry *sa)
+{
+	struct sadb_entry hw_entry;
+	u64 sa_addr = mlx_ipsec_sadb_addr(sa);
+	int res;
+
+	pr_debug("sa Address %llx\n", sa_addr);
+
+	mlx_ipsec_build_hw_entry(sa, &hw_entry, true);
+
+	res = mlx_accel_core_mem_write(sa->dev->accel_device, sizeof(hw_entry),
+				       sa_addr, &hw_entry,
+				       MLX_ACCEL_ACCESS_TYPE_DONTCARE);
+	if (res != sizeof(hw_entry)) {
+		pr_warn("Writing SA to HW memory failed %d\n", res);
+		goto out;
+	}
+	res = 0;
+	mlx_ipsec_flush_cache(sa->dev);
+
+out:
+	return res;
+}
+
+/* Direct-memory SADB mode: zero the SA's DDR slot and flush the device
+ * cache.  A missing device (SA never bound) is silently ignored.
+ */
+void mlx_ipsec_hw_sadb_del(struct mlx_ipsec_sa_entry *sa)
+{
+	struct sadb_entry hw_entry;
+	u64 sa_addr;
+	int res;
+
+	if (!sa->dev)
+		return;
+
+	sa_addr = mlx_ipsec_sadb_addr(sa);
+	pr_debug("del_sa Address %llx\n", sa_addr);
+
+	memset(&hw_entry, 0, sizeof(hw_entry));
+
+	res = mlx_accel_core_mem_write(sa->dev->accel_device,
+				       sizeof(hw_entry), sa_addr, &hw_entry,
+				       MLX_ACCEL_ACCESS_TYPE_DONTCARE);
+	if (res != sizeof(hw_entry))
+		pr_warn("Deleting SA in HW memory failed %d\n", res);
+	mlx_ipsec_flush_cache(sa->dev);
+}
+
+/* Direct-memory SADB mode uses no command channel, so nothing should
+ * ever arrive on the receive path; warn if something does.
+ */
+void mlx_ipsec_hw_qp_recv_cb(void *cb_arg, struct mlx_accel_core_dma_buf *buf)
+{
+	WARN_ON(buf);
+}
+
+#else /* MLX_IPSEC_SADB_RDMA */
+
+/* Send an SA command (@cmd_id) to the FPGA over the command QP and wait
+ * for its response.  The SA is queued on fifo_sa_cmds so the receive
+ * callback can match responses (which arrive in order) to waiters.
+ * Returns 0 on success, a negative errno, or the non-zero HW syndrome.
+ */
+static int mlx_ipsec_hw_cmd(struct mlx_ipsec_sa_entry *sa, u32 cmd_id)
+{
+	struct mlx_accel_core_dma_buf *buf = NULL;
+	struct sa_cmd_v4 *cmd;
+	int res = 0;
+	unsigned long flags;
+
+	/* GFP_ATOMIC: presumably callable from atomic context — confirm */
+	buf = kzalloc(sizeof(*buf) + sizeof(*cmd), GFP_ATOMIC);
+	if (!buf) {
+		res = -ENOMEM;
+		goto out;
+	}
+
+	/* Command payload lives directly after the dma_buf descriptor */
+	buf->data_size = sizeof(*cmd);
+	buf->data = buf + 1;
+	cmd = buf->data;
+	cmd->cmd = htonl(cmd_id);
+
+	mlx_ipsec_build_hw_entry(sa, &cmd->entry, cmd_id == IPSEC_CMD_ADD_SA);
+
+	/* Serialize fifo access */
+	pr_debug("adding to fifo: sa %p ID 0x%08x\n", sa, sa->sw_sa_id);
+	spin_lock_irqsave(&sa->dev->fifo_sa_cmds_lock, flags);
+	res = kfifo_put(&sa->dev->fifo_sa_cmds, sa);
+	spin_unlock_irqrestore(&sa->dev->fifo_sa_cmds_lock, flags);
+
+	if (!res) {
+		dev_warn(&sa->dev->netdev->dev, "IPSec command FIFO is full\n");
+		goto err_buf;
+	}
+
+	sa->status = IPSEC_SA_PENDING;
+	mlx_accel_core_sendmsg(sa->dev->conn, buf);
+	/* After this point buf will be freed in mlx_accel_core */
+
+	/* NOTE(review): if the wait is interrupted by a fatal signal, the
+	 * SA stays queued in fifo_sa_cmds and will be popped for a later
+	 * response — confirm this cannot mismatch commands and responses.
+	 */
+	res = wait_event_killable(sa->dev->wq, sa->status != IPSEC_SA_PENDING);
+	if (res != 0) {
+		pr_warn("Failure waiting for IPSec command response from HW\n");
+		goto out;
+	}
+
+	res = sa->status;
+	if (sa->status != IPSEC_RESPONSE_SUCCESS)
+		pr_warn("IPSec command %u failed with error %08x\n",
+			cmd_id, sa->status);
+	goto out;
+
+err_buf:
+	kfree(buf);
+out:
+	return res;
+}
+
+/* RDMA SADB mode: add the SA via an ADD_SA command to the FPGA. */
+int mlx_ipsec_hw_sadb_add(struct mlx_ipsec_sa_entry *sa)
+{
+	return mlx_ipsec_hw_cmd(sa, IPSEC_CMD_ADD_SA);
+}
+
+/* RDMA SADB mode: delete the SA via a DEL_SA command; any HW failure is
+ * logged inside mlx_ipsec_hw_cmd() and otherwise ignored here.
+ */
+void mlx_ipsec_hw_sadb_del(struct mlx_ipsec_sa_entry *sa)
+{
+	mlx_ipsec_hw_cmd(sa, IPSEC_CMD_DEL_SA);
+}
+
+/* Receive completion on the command QP: pop the oldest pending SA from
+ * the FIFO (responses arrive in command order), record the syndrome in
+ * sa_entry->status, and wake the waiter in mlx_ipsec_hw_cmd().
+ */
+void mlx_ipsec_hw_qp_recv_cb(void *cb_arg, struct mlx_accel_core_dma_buf *buf)
+{
+	struct mlx_ipsec_dev *dev = cb_arg;
+	struct ipsec_hw_response *resp = buf->data;
+	struct mlx_ipsec_sa_entry *sa_entry;
+
+	if (buf->data_size < sizeof(*resp)) {
+		/* sizeof() yields size_t: %zu is correct on 32- and 64-bit */
+		pr_warn("Short receive from FPGA IPSec: %zu < %zu bytes\n",
+			buf->data_size, sizeof(*resp));
+		return;
+	}
+
+	pr_debug("mlx_ipsec recv_cb syndrome %08x sa_id %x\n",
+		 ntohl(resp->syndrome), ntohl(resp->sw_sa_handle));
+
+	/* [BP]: This should never fail - consider reset if it does */
+	if (!kfifo_get(&dev->fifo_sa_cmds, &sa_entry)) {
+		pr_warn("sa_hw2sw_id FIFO empty on recv callback\n");
+		return;
+	}
+	pr_debug("Got from FIFO: sa %p ID 0x%08x\n",
+		 sa_entry, sa_entry->sw_sa_id);
+
+	if (sa_entry->sw_sa_id != ntohl(resp->sw_sa_handle)) {
+		pr_warn("mismatch sw_sa_id. FIFO 0x%08x vs resp 0x%08x\n",
+			sa_entry->sw_sa_id, ntohl(resp->sw_sa_handle));
+	}
+
+	if (ntohl(resp->syndrome) != IPSEC_RESPONSE_SUCCESS) {
+		pr_warn("Error syndrome from FPGA: %u\n",
+			ntohl(resp->syndrome));
+	}
+	sa_entry->status = ntohl(resp->syndrome);
+	wake_up_all(&dev->wq);
+}
+
+#endif /* MLX_IPSEC_SADB_RDMA */
diff --git a/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_hw.h b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_hw.h
new file mode 100644
index 0000000..4d85fb2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_hw.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __IPSEC_HW_H__
+#define __IPSEC_HW_H__
+
+#include "ipsec.h"
+
+int mlx_ipsec_hw_sadb_add(struct mlx_ipsec_sa_entry *sa);
+void mlx_ipsec_hw_sadb_del(struct mlx_ipsec_sa_entry *sa);
+void mlx_ipsec_hw_qp_recv_cb(void *cb_arg, struct mlx_accel_core_dma_buf *buf);
+
+#endif	/* __IPSEC_HW_H__ */
diff --git a/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_main.c b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_main.c
new file mode 100644
index 0000000..4529061
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_main.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+
+#include "ipsec.h"
+#include "ipsec_hw.h"
+
+MODULE_AUTHOR("Boris Pismenny <borisp@mellanox.com>");
+MODULE_AUTHOR("Ilan Tayari <ilant@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox Innova IPsec Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+/* [BP] TODO: add capabilities */
+/* [BP] TODO: add capabilities */
+/* Registration record with the accel core: add/remove are invoked when
+ * an accelerator device appears or disappears.
+ */
+static struct mlx_accel_core_client mlx_ipsec_client = {
+	.name   = "mlx_ipsec",
+	.add    = mlx_ipsec_add_one,
+	.remove = mlx_ipsec_remove_one,
+};
+
+/* Netdev notifier: tracks net_device events for bound devices */
+static struct notifier_block mlx_ipsec_netdev_notifier = {
+	.notifier_call = mlx_ipsec_netdev_event,
+};
+
+/* Module init: build the lookup tables, hook netdev events, then
+ * register with the accelerator core so devices can be probed.
+ */
+static int __init mlx_ipsec_init(void)
+{
+	int err;
+
+	mlx_ipsec_init_inverse_table();
+
+	err = register_netdevice_notifier(&mlx_ipsec_netdev_notifier);
+	if (err) {
+		pr_warn("mlx_ipsec_init error in register_netdevice_notifier %d\n",
+				err);
+		return err;
+	}
+
+	mlx_accel_core_client_register(&mlx_ipsec_client);
+	return 0;
+}
+
+/* Module exit: unregister from the accel core first so no new devices
+ * can be added while the netdev notifier is being removed.
+ */
+static void __exit mlx_ipsec_exit(void)
+{
+	/* [BP]: TODO - delete all SA entries. Verify that no inflight packets
+	 * are going to be offloaded while we are unloading
+	 */
+	mlx_accel_core_client_unregister(&mlx_ipsec_client);
+	unregister_netdevice_notifier(&mlx_ipsec_netdev_notifier);
+}
+
+module_init(mlx_ipsec_init);
+module_exit(mlx_ipsec_exit);
+
+
diff --git a/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_sysfs.c b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_sysfs.c
new file mode 100644
index 0000000..9c21927
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_sysfs.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <rdma/ib_verbs.h>
+
+#include "ipsec_sysfs.h"
+
+/* Per-attribute sysfs wrapper: adapts the generic struct attribute to
+ * typed show/store callbacks taking a struct mlx_ipsec_dev.
+ */
+struct mlx_ipsec_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct mlx_ipsec_dev *dev, char *buf);
+	ssize_t (*store)(struct mlx_ipsec_dev *dev, const char *buf,
+			size_t count);
+};
+
+/* Define a named mlx_ipsec_attribute with the given mode and callbacks */
+#define MLX_IPSEC_ATTR(_name, _mode, _show, _store) \
+	struct mlx_ipsec_attribute mlx_ipsec_attr_##_name = { \
+			.attr = {.name = __stringify(_name), .mode = _mode}, \
+			.show = _show, \
+			.store = _store, \
+	}
+/* Recover the device from the embedded kobject */
+#define to_mlx_ipsec_dev(obj)	\
+		container_of(obj, struct mlx_ipsec_dev, kobj)
+/* Recover the typed attribute from the generic struct attribute */
+#define to_mlx_ipsec_attr(_attr)	\
+		container_of(_attr, struct mlx_ipsec_attribute, attr)
+
+/* sysfs show dispatcher: forward to the attribute's typed show()
+ * callback, or fail with -EIO when the attribute is write-only.
+ */
+static ssize_t mlx_ipsec_attr_show(struct kobject *kobj, struct attribute *attr,
+		char *buf)
+{
+	struct mlx_ipsec_dev *dev = to_mlx_ipsec_dev(kobj);
+	struct mlx_ipsec_attribute *mlx_ipsec_attr = to_mlx_ipsec_attr(attr);
+
+	if (!mlx_ipsec_attr->show)
+		return -EIO;
+
+	return mlx_ipsec_attr->show(dev, buf);
+}
+
+/* sysfs store dispatcher: forward to the attribute's typed store()
+ * callback, or fail with -EIO when the attribute is read-only.
+ */
+static ssize_t mlx_ipsec_attr_store(struct kobject *kobj,
+		struct attribute *attr, const char *buf, size_t count)
+{
+	struct mlx_ipsec_dev *dev = to_mlx_ipsec_dev(kobj);
+	struct mlx_ipsec_attribute *mlx_ipsec_attr = to_mlx_ipsec_attr(attr);
+
+	if (!mlx_ipsec_attr->store)
+		return -EIO;
+
+	return mlx_ipsec_attr->store(dev, buf, count);
+}
+
+/* Show the host-side (source) QP number of the command connection */
+static ssize_t mlx_ipsec_sqpn_read(struct mlx_ipsec_dev *dev, char *buf)
+{
+	return sprintf(buf, "%d\n", dev->conn->qp->qp_num);
+}
+
+/* Show the host-side (source) GID as eight colon-separated 16-bit hex
+ * groups, e.g. "fe80:0000:0000:0000:0000:0000:0000:0001".
+ */
+static ssize_t mlx_ipsec_sgid_read(struct mlx_ipsec_dev *dev, char *buf)
+{
+	__be16 *sgid = (__be16 *)&dev->conn->fpga_qpc.remote_ip;
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		len += sprintf(buf + len, "%04x%c", be16_to_cpu(sgid[i]),
+			       i < 7 ? ':' : '\n');
+
+	return len;
+}
+
+/* Show the FPGA-side (destination) QP number of the command connection */
+static ssize_t mlx_ipsec_dqpn_read(struct mlx_ipsec_dev *dev, char *buf)
+{
+	return sprintf(buf, "%d\n", dev->conn->fpga_qpn);
+}
+
+/* Set the FPGA-side QP number and (re)connect the command QP.  Parse
+ * into a local first so device state is only touched on valid input.
+ */
+static ssize_t mlx_ipsec_dqpn_write(struct mlx_ipsec_dev *dev, const char *buf,
+		size_t count)
+{
+	u32 qpn;
+
+	if (sscanf(buf, "%u\n", &qpn) != 1)
+		return -EINVAL;
+	dev->conn->fpga_qpn = qpn;
+	/* [SR] TODO: We are planning on keeping this interface in
+	 * final version as well? If so, how will we know what DQPN to
+	 * use? I guess we should have "allocate-user-QP-slot" API in
+	 * the core.
+	 */
+	/* NOTE(review): the connect result is ignored — confirm whether a
+	 * failure here should be reported to user space.
+	 */
+	mlx_accel_core_connect(dev->conn);
+	return count;
+}
+
+/* Show the FPGA-side (destination) GID as eight colon-separated 16-bit
+ * hex groups.
+ */
+static ssize_t mlx_ipsec_dgid_read(struct mlx_ipsec_dev *dev, char *buf)
+{
+	__be16 *dgid = (__be16 *)&dev->conn->fpga_qpc.fpga_ip;
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		len += sprintf(buf + len, "%04x%c", be16_to_cpu(dgid[i]),
+			       i < 7 ? ':' : '\n');
+
+	return len;
+}
+
+/* Set the FPGA-side GID from "xxxx:xxxx:...:xxxx" input.  Parse into a
+ * local buffer first so a partial match cannot leave the device GID
+ * half-updated with host-order values.
+ */
+static ssize_t mlx_ipsec_dgid_write(struct mlx_ipsec_dev *dev, const char *buf,
+		size_t count)
+{
+	__be16 *dgid = (__be16 *)&dev->conn->fpga_qpc.fpga_ip;
+	u16 tmp[8];
+	int i;
+
+	if (sscanf(buf, "%04hx:%04hx:%04hx:%04hx:%04hx:%04hx:%04hx:%04hx\n",
+		   &tmp[0], &tmp[1], &tmp[2], &tmp[3],
+		   &tmp[4], &tmp[5], &tmp[6], &tmp[7]) != 8)
+		return -EINVAL;
+
+	for (i = 0; i < 8; i++)
+		dgid[i] = cpu_to_be16(tmp[i]);
+	return count;
+}
+
+/* Attribute instances: sqpn/sgid are read-only; dqpn/dgid are also
+ * writable to configure the FPGA-side endpoint.
+ * NOTE(review): 0666 makes dqpn/dgid world-writable — confirm this is
+ * intended for anything beyond bring-up/debug.
+ */
+static MLX_IPSEC_ATTR(sqpn, 0444, mlx_ipsec_sqpn_read, NULL);
+static MLX_IPSEC_ATTR(sgid, 0444, mlx_ipsec_sgid_read, NULL);
+static MLX_IPSEC_ATTR(dqpn, 0666, mlx_ipsec_dqpn_read, mlx_ipsec_dqpn_write);
+static MLX_IPSEC_ATTR(dgid, 0666, mlx_ipsec_dgid_read, mlx_ipsec_dgid_write);
+
+/* Default attributes created for every IPSec device kobject */
+struct attribute *mlx_ipsec_def_attrs[] = {
+		&mlx_ipsec_attr_sqpn.attr,
+		&mlx_ipsec_attr_sgid.attr,
+		&mlx_ipsec_attr_dqpn.attr,
+		&mlx_ipsec_attr_dgid.attr,
+		NULL,
+};
+
+/* Dispatch table routing generic sysfs show/store to the typed
+ * callbacks above.
+ * NOTE(review): not declared static — confirm it really needs external
+ * linkage, otherwise it pollutes the global namespace.
+ */
+const struct sysfs_ops mlx_ipsec_dev_sysfs_ops = {
+	.show  = mlx_ipsec_attr_show,
+	.store = mlx_ipsec_attr_store,
+};
+
+/* kobject type for IPSec devices: release callback plus default attrs */
+static struct kobj_type mlx_ipsec_dev_type = {
+	.release        = mlx_ipsec_dev_release,
+	.sysfs_ops      = &mlx_ipsec_dev_sysfs_ops,
+	.default_attrs  = mlx_ipsec_def_attrs,
+};
+
+/* Thin wrapper: register @kobj with the IPSec device ktype under
+ * @parent, named by @fmt/@arg.  Returns kobject_init_and_add()'s result.
+ */
+int ipsec_sysfs_init_and_add(struct kobject *kobj,
+			 struct kobject *parent, const char *fmt, char *arg)
+{
+	return kobject_init_and_add(kobj, &mlx_ipsec_dev_type, parent, fmt,
+				    arg);
+}
+
diff --git a/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_sysfs.h b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_sysfs.h
new file mode 100644
index 0000000..0784749
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/ipsec/ipsec_sysfs.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __IPSEC_SYSFS_H__
+#define __IPSEC_SYSFS_H__
+
+#include <linux/sysfs.h>
+
+#include "ipsec.h"
+
+int ipsec_sysfs_init_and_add(struct kobject *kobj,
+			 struct kobject *parent, const char *fmt, char *arg);
+
+#endif	/* __IPSEC_SYSFS_H__ */
diff --git a/drivers/net/ethernet/mellanox/accelerator/tools/Kconfig b/drivers/net/ethernet/mellanox/accelerator/tools/Kconfig
new file mode 100644
index 0000000..d7c06cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/tools/Kconfig
@@ -0,0 +1,11 @@
+#
+# Mellanox accelerator tools driver configuration
+#
+
+config MLX_ACCEL_TOOLS
+	tristate "Mellanox Technologies accelerator tools driver"
+	depends on MLX_ACCEL_CORE
+	default n
+	---help---
+	  Accelerator tools driver by Mellanox Technologies.
+
diff --git a/drivers/net/ethernet/mellanox/accelerator/tools/Makefile b/drivers/net/ethernet/mellanox/accelerator/tools/Makefile
new file mode 100644
index 0000000..e61d286
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/tools/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MLX_ACCEL_TOOLS)		+= mlx_accel_tools.o
+
+mlx_accel_tools-y := tools_main.o tools.o tools_char.o
diff --git a/drivers/net/ethernet/mellanox/accelerator/tools/tools.c b/drivers/net/ethernet/mellanox/accelerator/tools/tools.c
new file mode 100644
index 0000000..bc5f3bd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/tools/tools.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "tools.h"
+#include "tools_char.h"
+
+/* Allocate the per-device tools state and register its char device.
+ * Returns the new state, or NULL on allocation/registration failure.
+ */
+struct mlx_accel_tools_dev *
+mlx_accel_tools_alloc(struct mlx_accel_core_device *device)
+{
+	struct mlx_accel_tools_dev *sb_dev;
+
+	sb_dev = kzalloc(sizeof(*sb_dev), GFP_KERNEL);
+	if (!sb_dev)
+		return NULL;
+
+	sb_dev->accel_device = device;
+	mutex_init(&sb_dev->mutex);
+
+	if (mlx_accel_tools_char_add_one(sb_dev)) {
+		kfree(sb_dev);
+		return NULL;
+	}
+
+	return sb_dev;
+}
+
+/* Tear down the char device and free the per-device tools state.
+ * Counterpart of mlx_accel_tools_alloc().
+ */
+void mlx_accel_tools_free(struct mlx_accel_tools_dev *sb_dev)
+{
+	mlx_accel_tools_char_remove_one(sb_dev);
+	kfree(sb_dev);
+}
+
+/* Serialized write of @count bytes at device @address on behalf of the
+ * char device.  The per-device mutex keeps concurrent user-space
+ * accesses from interleaving.  Returns the mlx_accel_core_mem_write()
+ * result, or -EINTR if the mutex wait was interrupted.
+ */
+int mlx_accel_tools_mem_write(struct mlx_accel_tools_dev *sb_dev,
+			      /*const*/ void *buf,
+			      size_t count, u64 address,
+			      enum mlx_accel_access_type access_type)
+{
+	int ret;
+
+	ret = mutex_lock_interruptible(&sb_dev->mutex);
+	if (ret)
+		return ret;
+
+	ret = mlx_accel_core_mem_write(sb_dev->accel_device, count, address,
+				       buf, access_type);
+	if (ret < 0)
+		/* count is size_t: %zu is correct on 32- and 64-bit */
+		pr_err("mlx_accel_tools_mem_write: Failed to write %zu bytes at address 0x%llx: %d\n",
+		       count, address, ret);
+
+	mutex_unlock(&sb_dev->mutex);
+	return ret;
+}
+
+/* Serialized read of @count bytes at device @address on behalf of the
+ * char device.  The per-device mutex keeps concurrent user-space
+ * accesses from interleaving.  Returns the mlx_accel_core_mem_read()
+ * result, or -EINTR if the mutex wait was interrupted.
+ */
+int mlx_accel_tools_mem_read(struct mlx_accel_tools_dev *sb_dev, void *buf,
+			     size_t count, u64 address,
+			     enum mlx_accel_access_type access_type)
+{
+	int ret;
+
+	ret = mutex_lock_interruptible(&sb_dev->mutex);
+	if (ret)
+		return ret;
+
+	ret = mlx_accel_core_mem_read(sb_dev->accel_device, count, address, buf,
+				      access_type);
+	if (ret < 0)
+		/* count is size_t: %zu is correct on 32- and 64-bit */
+		pr_err("mlx_accel_tools_mem_read: Failed to read %zu bytes at address 0x%llx: %d\n",
+		       count, address, ret);
+
+	mutex_unlock(&sb_dev->mutex);
+	return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/accelerator/tools/tools.h b/drivers/net/ethernet/mellanox/accelerator/tools/tools.h
new file mode 100644
index 0000000..0e3b19a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/tools/tools.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __TOOLS_H__
+#define __TOOLS_H__
+
+#include <linux/types.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include "../core/accel_core_sdk.h"
+
+#define MLX_ACCEL_TOOLS_DRIVER_NAME "mlx_accel_tools"
+
+struct mlx_accel_tools_dev {
+	/* Core device and connection to FPGA */
+	struct mlx_accel_core_device *accel_device;
+
+	/* Driver state per device */
+	struct mutex mutex;
+
+	/* Char device state */
+	struct cdev cdev;
+	dev_t dev;
+	struct device *char_device;
+	atomic_t open_count;
+};
+
+struct mlx_accel_tools_dev *
+mlx_accel_tools_alloc(struct mlx_accel_core_device *device);
+void mlx_accel_tools_free(struct mlx_accel_tools_dev *sb_dev);
+
+int mlx_accel_tools_mem_write(struct mlx_accel_tools_dev *sb_dev,
+			      void *buf, size_t count, u64 address,
+			      enum mlx_accel_access_type access_type);
+int mlx_accel_tools_mem_read(struct mlx_accel_tools_dev *sb_dev, void *buf,
+			     size_t count, u64 address,
+			     enum mlx_accel_access_type access_type);
+
+#endif	/* __TOOLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/accelerator/tools/tools_char.c b/drivers/net/ethernet/mellanox/accelerator/tools/tools_char.c
new file mode 100644
index 0000000..af8bc62
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/tools/tools_char.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "tools_char.h"
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/fs.h>
+#include <linux/mlx5/accel/tools_chardev.h>
+
+#define CHUNK_SIZE (32 * 1024)
+
+static int major_number;
+static struct class *char_class;
+
+struct file_context {
+	struct mlx_accel_tools_dev *sb_dev;
+	enum mlx_accel_access_type access_type;
+};
+
+/* Open handler: allocate per-open context (device back-pointer plus the
+ * currently selected access type) and bump the open counter.
+ */
+static int tools_char_open(struct inode *inodep, struct file *filep)
+{
+	struct mlx_accel_tools_dev *sb_dev =
+			container_of(inodep->i_cdev,
+				     struct mlx_accel_tools_dev,
+				     cdev);
+	struct file_context *context;
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return -ENOMEM;	/* was dereferenced unchecked before */
+	context->sb_dev = sb_dev;
+	context->access_type = MLX_ACCEL_ACCESS_TYPE_DONTCARE;
+	filep->private_data = context;
+	atomic_inc(&sb_dev->open_count);
+	pr_debug("mlx tools %u char device opened %d times\n",
+		 sb_dev->accel_device->id,
+		 atomic_read(&sb_dev->open_count));
+	return 0;
+}
+
+/* Release handler: drop the open count and free the per-open context
+ * allocated in tools_char_open().
+ */
+static int tools_char_release(struct inode *inodep, struct file *filep)
+{
+	struct file_context *context = filep->private_data;
+
+	/* Underflow here would indicate unbalanced open/release calls */
+	WARN_ON(atomic_read(&context->sb_dev->open_count) < 1);
+	atomic_dec(&context->sb_dev->open_count);
+	pr_debug("mlx tools %u char device closed. Still open %d times\n",
+		 context->sb_dev->accel_device->id,
+		 atomic_read(&context->sb_dev->open_count));
+	kfree(context);
+	return 0;
+}
+
+/* Read handler: read up to CHUNK_SIZE bytes from the device at *offset
+ * using the access type selected via IOCTL_ACCESS_TYPE, and copy the
+ * bytes actually read back to userspace.  Returns the number of bytes
+ * read, 0, or a negative errno.
+ */
+static ssize_t tools_char_read(struct file *filep, char __user *buffer,
+			       size_t len, loff_t *offset)
+{
+	int ret = 0;
+	void *kbuf = NULL;
+	struct file_context *context = filep->private_data;
+
+	pr_debug("mlx tools %u char device reading %lu bytes at 0x%llx\n",
+		 context->sb_dev->accel_device->id, len, *offset);
+
+	if (len < 1)
+		return len;
+	if (len > CHUNK_SIZE)
+		len = CHUNK_SIZE;
+
+	kbuf = kmalloc(len, GFP_KERNEL);
+	if (!kbuf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ret = mlx_accel_tools_mem_read(context->sb_dev, kbuf, len, *offset,
+				       context->access_type);
+	if (ret <= 0)
+		goto out;
+	*offset += ret;
+	/* Copy only the bytes actually read.  kbuf is kmalloc'd (not
+	 * zeroed), so copying the full request length on a short read
+	 * would leak uninitialized kernel memory to userspace.
+	 */
+	if (copy_to_user(buffer, kbuf, ret)) {
+		pr_err("Failed to copy data to user buffer\n");
+		ret = -EFAULT;
+		goto out;
+	}
+out:
+	kfree(kbuf);
+	return ret;
+}
+
+/* Write handler: copy up to CHUNK_SIZE bytes from userspace and write
+ * them to the device at *offset using the access type selected via
+ * IOCTL_ACCESS_TYPE.  Returns the number of bytes written, 0, or a
+ * negative errno.
+ */
+static ssize_t tools_char_write(struct file *filep, const char __user *buffer,
+				size_t len, loff_t *offset)
+{
+	int ret = 0;
+	void *kbuf = NULL;
+	struct file_context *context = filep->private_data;
+
+	pr_debug("mlx tools %u char device writing %lu bytes at 0x%llx\n",
+		 context->sb_dev->accel_device->id, len, *offset);
+
+	if (len < 1)
+		return len;
+	/* Bound each transaction; userspace retries for larger writes */
+	if (len > CHUNK_SIZE)
+		len = CHUNK_SIZE;
+
+	kbuf = kmalloc(len, GFP_KERNEL);
+	if (!kbuf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	if (copy_from_user(kbuf, buffer, len)) {
+		pr_err("Failed to copy data from user buffer\n");
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = mlx_accel_tools_mem_write(context->sb_dev, kbuf, len, *offset,
+					context->access_type);
+	if (ret <= 0)
+		goto out;
+	*offset += ret;
+out:
+	kfree(kbuf);
+	return ret;
+}
+
+/* Seek handler: standard fixed-size llseek bounded by the end of the
+ * device DDR region (base + size).
+ */
+static loff_t tools_char_llseek(struct file *filep, loff_t offset, int whence)
+{
+	loff_t new_offset;
+	struct file_context *context = filep->private_data;
+	u64 max = mlx_accel_core_ddr_base_get(context->sb_dev->accel_device) +
+		  mlx_accel_core_ddr_size_get(context->sb_dev->accel_device);
+	new_offset = fixed_size_llseek(filep, offset, whence, max);
+	if (new_offset >= 0)
+		pr_debug("tools char device seeked to 0x%llx\n", new_offset);
+	return new_offset;
+}
+
+/* Ioctl handler: access-type selection, FPGA image load/reset, flash
+ * image selection, and FPGA status query.  Returns 0 or a negative
+ * errno; made static since it is only reachable through tools_fops.
+ */
+static long tools_char_ioctl(struct file *filep, unsigned int cmd,
+			     unsigned long arg)
+{
+	int err = 0;
+	struct file_context *context = filep->private_data;
+	struct mlx_accel_fpga_query query;
+	struct mlx_accel_core_device *dev = context->sb_dev->accel_device;
+
+	if (!dev)
+		return -ENXIO;
+
+	switch (cmd) {
+	case IOCTL_ACCESS_TYPE:
+		if (arg > MLX_ACCEL_ACCESS_TYPE_MAX) {
+			pr_err("unknown access type %lu\n", arg);
+			err = -EINVAL;
+			break;
+		}
+		context->access_type = arg;
+		break;
+	case IOCTL_FPGA_LOAD:
+		if (arg > MLX_ACCEL_IMAGE_MAX) {
+			pr_err("unknown image type %lu\n", arg);
+			err = -EINVAL;
+			break;
+		}
+		err = mlx_accel_core_device_reload(dev, arg);
+		break;
+	case IOCTL_FPGA_RESET:
+		/* Out-of-range image value requests a reset, not a load */
+		err = mlx_accel_core_device_reload(dev,
+						   MLX_ACCEL_IMAGE_MAX + 1);
+		break;
+	case IOCTL_FPGA_IMAGE_SEL:
+		if (arg > MLX_ACCEL_IMAGE_MAX) {
+			pr_err("unknown image type %lu\n", arg);
+			err = -EINVAL;
+			break;
+		}
+		err = mlx_accel_core_flash_select(dev, arg);
+		break;
+	case IOCTL_FPGA_QUERY:
+		query.status = dev->state;
+		query.admin_image = dev->last_admin_image;
+		query.oper_image = dev->last_oper_image;
+		/* Removed dead "if (err) break;" -- err is always 0 here */
+		if (copy_to_user((void __user *)arg, &query, sizeof(query))) {
+			pr_err("Failed to copy data to user buffer\n");
+			err = -EFAULT;
+		}
+		break;
+	default:
+		pr_err("unknown ioctl command 0x%08x\n", cmd);
+		err = -ENOIOCTLCMD;
+	}
+	return err;
+}
+
+/* Char device file operations; one cdev per accelerator device. */
+static const struct file_operations tools_fops = {
+		.owner = THIS_MODULE,
+		.open = tools_char_open,
+		.release = tools_char_release,
+		.read = tools_char_read,
+		.write = tools_char_write,
+		.llseek = tools_char_llseek,
+		.unlocked_ioctl = tools_char_ioctl,
+};
+
+/* Create the per-device char device node: register the cdev under the
+ * shared major number (minor == accelerator device id) and create the
+ * sysfs/udev device.  Returns 0 or a negative errno; on failure all
+ * partially-created state is torn down.
+ */
+int mlx_accel_tools_char_add_one(struct mlx_accel_tools_dev *sb_dev)
+{
+	int ret = 0;
+
+	sb_dev->dev = MKDEV(major_number, sb_dev->accel_device->id);
+
+	atomic_set(&sb_dev->open_count, 0);
+	cdev_init(&sb_dev->cdev, &tools_fops);
+	ret = cdev_add(&sb_dev->cdev, sb_dev->dev, 1);
+	if (ret) {
+		pr_err("Failed to add cdev: %d\n", ret);
+		goto out;
+	}
+
+	sb_dev->char_device = device_create(char_class, NULL, sb_dev->dev, NULL,
+					    "%s%s",
+					    sb_dev->accel_device->name,
+					    MLX_ACCEL_TOOLS_NAME_SUFFIX);
+	if (IS_ERR(sb_dev->char_device)) {
+		ret = PTR_ERR(sb_dev->char_device);
+		sb_dev->char_device = NULL;
+		pr_err("Failed to create a char device: %d\n", ret);
+		goto err_cdev;	/* must undo cdev_add or the cdev leaks */
+	}
+
+	pr_debug("mlx_accel_tools char device %u:%u created\n", major_number,
+		 sb_dev->accel_device->id);
+	return 0;
+
+err_cdev:
+	cdev_del(&sb_dev->cdev);
+out:
+	return ret;
+}
+
+/* Tear down the per-device char device created by
+ * mlx_accel_tools_char_add_one().  Callers must ensure no file handles
+ * remain open; the WARN_ON flags a teardown while still in use.
+ */
+void mlx_accel_tools_char_remove_one(struct mlx_accel_tools_dev *sb_dev)
+{
+	WARN_ON(atomic_read(&sb_dev->open_count) > 0);
+	device_destroy(char_class,
+		       MKDEV(major_number, sb_dev->accel_device->id));
+	cdev_del(&sb_dev->cdev);
+	pr_debug("mlx_accel_tools char device %u:%u destroyed\n", major_number,
+		 sb_dev->accel_device->id);
+}
+
+/* Module-wide char-device setup: allocate a dynamic major number and
+ * create the device class.  Per-device nodes are added later via
+ * mlx_accel_tools_char_add_one().  Returns 0 or a negative errno.
+ */
+int mlx_accel_tools_char_init(void)
+{
+	int ret = 0;
+
+	/* First argument 0 requests a dynamically allocated major number */
+	major_number = register_chrdev(0, MLX_ACCEL_TOOLS_DRIVER_NAME,
+				       &tools_fops);
+	if (major_number < 0) {
+		ret = major_number;
+		pr_err("Failed to register major number for char device: %d\n",
+		       ret);
+		goto out;
+	}
+	pr_debug("tools major number is %d\n", major_number);
+
+	char_class = class_create(THIS_MODULE, MLX_ACCEL_TOOLS_DRIVER_NAME);
+	if (IS_ERR(char_class)) {
+		ret = PTR_ERR(char_class);
+		pr_err("Failed to create char class: %d\n", ret);
+		goto err_chrdev;
+	}
+
+	goto out;
+
+err_chrdev:
+	unregister_chrdev(major_number, MLX_ACCEL_TOOLS_DRIVER_NAME);
+
+out:
+	return ret;
+}
+
+/* Module-wide char-device teardown; reverses mlx_accel_tools_char_init(). */
+void mlx_accel_tools_char_deinit(void)
+{
+	class_destroy(char_class);
+	unregister_chrdev(major_number, MLX_ACCEL_TOOLS_DRIVER_NAME);
+	pr_debug("tools major number freed\n");
+}
diff --git a/drivers/net/ethernet/mellanox/accelerator/tools/tools_char.h b/drivers/net/ethernet/mellanox/accelerator/tools/tools_char.h
new file mode 100644
index 0000000..7aa7164
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/tools/tools_char.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __TOOLS_CHAR_H__
+#define __TOOLS_CHAR_H__
+
+#include "tools.h"
+
+int mlx_accel_tools_char_init(void);
+void mlx_accel_tools_char_deinit(void);
+
+int mlx_accel_tools_char_add_one(struct mlx_accel_tools_dev *sb_dev);
+void mlx_accel_tools_char_remove_one(struct mlx_accel_tools_dev *sb_dev);
+
+#endif	/* __TOOLS_CHAR_H__ */
diff --git a/drivers/net/ethernet/mellanox/accelerator/tools/tools_main.c b/drivers/net/ethernet/mellanox/accelerator/tools/tools_main.c
new file mode 100644
index 0000000..30fa18c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/accelerator/tools/tools_main.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "tools.h"
+#include <linux/module.h>
+#include "tools_char.h"
+
+MODULE_AUTHOR("Ilan Tayari <ilant@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox Innova Tools Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("0.1");
+
+static void mlx_accel_tools_create(struct mlx_accel_core_device *accel_device);
+static int mlx_accel_tools_add(struct mlx_accel_core_device *accel_device);
+static void mlx_accel_tools_remove(struct mlx_accel_core_device *accel_device);
+static void mlx_accel_tools_destroy(struct mlx_accel_core_device *accel_device);
+
+static struct mlx_accel_core_client mlx_accel_tools_client = {
+	.name = MLX_ACCEL_TOOLS_DRIVER_NAME,
+	.create = mlx_accel_tools_create,
+	.add = mlx_accel_tools_add,
+	.remove = mlx_accel_tools_remove,
+	.destroy = mlx_accel_tools_destroy,
+};
+
+/* Core-client "create" callback: allocate the per-device tools state and
+ * attach it to the accelerator device as client data.  Allocation failure
+ * is silently tolerated; the device simply gets no tools instance.
+ */
+static void mlx_accel_tools_create(struct mlx_accel_core_device *accel_device)
+{
+	struct mlx_accel_tools_dev *dev = NULL;
+
+	/* Log message previously named the wrong function */
+	pr_debug("mlx_accel_tools_create called for %s\n", accel_device->name);
+
+	dev = mlx_accel_tools_alloc(accel_device);
+	if (!dev)
+		return;
+
+	mlx_accel_core_client_data_set(accel_device,
+				       &mlx_accel_tools_client, dev);
+}
+
+/* Core-client "add" callback: nothing to do; all setup happens in create. */
+static int mlx_accel_tools_add(struct mlx_accel_core_device *accel_device)
+{
+	return 0;
+}
+
+/* Core-client "remove" callback: nothing to do; teardown is in destroy. */
+static void mlx_accel_tools_remove(struct mlx_accel_core_device *accel_device)
+{
+}
+
+/* Core-client "destroy" callback: free the per-device tools state set up
+ * in mlx_accel_tools_create(), if any.
+ */
+static void mlx_accel_tools_destroy(struct mlx_accel_core_device *accel_device)
+{
+	struct mlx_accel_tools_dev *dev;
+
+	pr_debug("mlx_accel_tools_destroy called for %s\n",
+		 accel_device->name);
+
+	dev = mlx_accel_core_client_data_get(accel_device,
+					     &mlx_accel_tools_client);
+	/* dev may be NULL if the create-time allocation failed */
+	if (dev)
+		mlx_accel_tools_free(dev);
+}
+
+/* Module entry: set up the char-device infrastructure, then register as
+ * an accelerator-core client so per-device instances get created.
+ */
+static int __init mlx_accel_tools_init(void)
+{
+	int ret = mlx_accel_tools_char_init();
+
+	if (ret)
+		return ret;
+	mlx_accel_core_client_register(&mlx_accel_tools_client);
+	return 0;
+}
+
+/* Module exit: unregister the client first (destroys per-device state),
+ * then tear down the char-device infrastructure.
+ */
+static void __exit mlx_accel_tools_exit(void)
+{
+	mlx_accel_core_client_unregister(&mlx_accel_tools_client);
+	mlx_accel_tools_char_deinit();
+}
+
+module_init(mlx_accel_tools_init);
+module_exit(mlx_accel_tools_exit);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 0343725..6c8be2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -2,7 +2,7 @@
 
 mlx5_core-y :=	main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 		health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
-		mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
+		mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fpga.o \
 		fs_counters.o rl.o lag.o dev.o
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 1e639f8..214d7c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -318,6 +318,7 @@
 	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
 	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
+	case MLX5_CMD_OP_FPGA_DESTROY_QP:
 		return MLX5_CMD_STAT_OK;
 
 	case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -424,6 +425,10 @@
 	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
 	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+	case MLX5_CMD_OP_FPGA_CREATE_QP:
+	case MLX5_CMD_OP_FPGA_MODIFY_QP:
+	case MLX5_CMD_OP_FPGA_QUERY_QP:
+	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
 		*status = MLX5_DRIVER_STATUS_ABORTED;
 		*synd = MLX5_DRIVER_SYND;
 		return -EIO;
@@ -580,6 +585,11 @@
 	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
 	MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
+	MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
+	MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
+	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
+	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
+	MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
 	default: return "unknown command opcode";
 	}
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 7dd4763..254f52a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -38,6 +38,7 @@
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/en_driver.h>
 #include <linux/mlx5/qp.h>
 #include <linux/mlx5/cq.h>
 #include <linux/mlx5/port.h>
@@ -649,6 +650,8 @@
 	u16 q_counter;
 	const struct mlx5e_profile *profile;
 	void                      *ppriv;
+
+	struct mlx5e_accel_client_ops *accel_client_ops  __rcu;
 };
 
 void mlx5e_build_ptys2ethtool_map(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 27ff401..6dba388 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -173,7 +173,8 @@
 		       NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
 		       MLX5E_NUM_RQ_STATS(priv) +
 		       MLX5E_NUM_SQ_STATS(priv) +
-		       MLX5E_NUM_PFC_COUNTERS(priv);
+		       MLX5E_NUM_PFC_COUNTERS(priv) +
+		       priv->accel_client_ops->get_count(dev);
 	case ETH_SS_PRIV_FLAGS:
 		return ARRAY_SIZE(mlx5e_priv_flags);
 	/* fallthrough */
@@ -252,6 +253,10 @@
 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
 					sq_stats_desc[j].format,
 					priv->channeltc_to_txq_map[i][tc]);
+
+	/* Accelerator counters */
+	idx += priv->accel_client_ops->get_strings(priv->netdev, data +
+						   idx * ETH_GSTRING_LEN);
 }
 
 static void mlx5e_get_strings(struct net_device *dev,
@@ -350,6 +355,9 @@
 			for (j = 0; j < NUM_SQ_STATS; j++)
 				data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats,
 								   sq_stats_desc, j);
+
+	/* Accelerator counters */
+	idx += priv->accel_client_ops->get_stats(dev, data + idx);
 }
 
 static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8595b50..8b8d521 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -69,6 +69,73 @@
 	struct mlx5e_cq_param      icosq_cq;
 };
 
+/* Default no-op accelerator hook: features pass through unchanged and
+ * *done is left false so the regular feature check still runs.
+ */
+static netdev_features_t mlx5e_accel_feature_chk(struct sk_buff *skb,
+						 struct net_device *netdev,
+						 netdev_features_t features,
+						 bool *done)
+{
+	return features;
+}
+
+/* Default no-op accelerator TX hook: skb passes through; swp untouched. */
+static struct sk_buff *mlx5e_accel_tx_handler(struct sk_buff *skb,
+					      struct mlx5e_swp_info *swp)
+{
+	return skb;
+}
+
+/* Default no-op accelerator RX hook: skb passes through unchanged. */
+static struct sk_buff *mlx5e_accel_rx_handler(struct sk_buff *skb)
+{
+	return skb;
+}
+
+/* Default no-op accelerator MTU hook: no extra headroom adjustment. */
+static u16 mlx5e_accel_mtu_handler(u16 mtu, bool is_sw2hw)
+{
+	return mtu;
+}
+
+/* Default ethtool-stats hook: no accelerator counters. */
+static int mlx5e_accel_get_count(struct net_device *netdev)
+{
+	return 0;
+}
+
+/* Default ethtool-strings hook: writes no strings, consumes no slots. */
+static int mlx5e_accel_get_strings(struct net_device *netdev, uint8_t *data)
+{
+	return 0;
+}
+
+/* Default ethtool-stats hook: writes no values, consumes no slots. */
+static int mlx5e_accel_get_stats(struct net_device *netdev, u64 *data)
+{
+	return 0;
+}
+
+/* No-op ops installed while no accelerator client is registered, so the
+ * data path can always call through priv->accel_client_ops without a
+ * NULL check.
+ */
+static struct mlx5e_accel_client_ops accel_ops_default = {
+	.rx_handler  = mlx5e_accel_rx_handler,
+	.tx_handler  = mlx5e_accel_tx_handler,
+	.feature_chk = mlx5e_accel_feature_chk,
+	.mtu_handler = mlx5e_accel_mtu_handler,
+	.get_count   = mlx5e_accel_get_count,
+	.get_strings = mlx5e_accel_get_strings,
+	.get_stats   = mlx5e_accel_get_stats,
+};
+
+#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
+/* Convert between SW and HW MTU (Ethernet/VLAN/FCS overhead), then let
+ * the registered accelerator client apply any additional adjustment
+ * (e.g. for extra headers it inserts on the wire).
+ */
+static inline u16 mlx5e_mtu(u16 mtu, bool is_sw2hw, struct mlx5e_priv *priv)
+{
+	u16 new_mtu;
+	struct mlx5e_accel_client_ops *accel_client_ops;
+
+	new_mtu = is_sw2hw ? MLX5E_SW2HW_MTU(mtu) : MLX5E_HW2SW_MTU(mtu);
+
+	/* ops pointer is RCU-protected against concurrent (un)register */
+	rcu_read_lock();
+	accel_client_ops = rcu_dereference(priv->accel_client_ops);
+	new_mtu = accel_client_ops->mtu_handler(new_mtu, is_sw2hw);
+	rcu_read_unlock();
+
+	return new_mtu;
+}
+
 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
@@ -295,9 +362,6 @@
 	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }
 
-#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-
 static inline int mlx5e_get_wqe_mtt_sz(void)
 {
 	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
@@ -411,6 +475,7 @@
 	int wq_sz;
 	int err;
 	int i;
+	u16 hw_mtu;
 
 	param->wq.db_numa_node = cpu_to_node(c->cpu);
 
@@ -423,14 +488,6 @@
 
 	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
 
-	rq->wq_type = priv->params.rq_wq_type;
-	rq->pdev    = c->pdev;
-	rq->netdev  = c->netdev;
-	rq->tstamp  = &priv->tstamp;
-	rq->channel = c;
-	rq->ix      = c->ix;
-	rq->priv    = c->priv;
-
 	switch (priv->params.rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
@@ -460,9 +517,11 @@
 		rq->alloc_wqe = mlx5e_alloc_rx_wqe;
 		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
 
+		hw_mtu = mlx5e_mtu(priv->netdev->mtu, true, priv);
+
 		rq->wqe_sz = (priv->params.lro_en) ?
 				priv->params.lro_wqe_sz :
-				MLX5E_SW2HW_MTU(priv->netdev->mtu);
+				hw_mtu;
 		rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
 		byte_count = rq->wqe_sz;
 		byte_count |= MLX5_HW_START_PADDING;
@@ -850,6 +909,8 @@
 	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
 	MLX5_SET(sqc,  sqc, tis_lst_sz,		param->icosq ? 0 : 1);
 	MLX5_SET(sqc,  sqc, flush_in_error_en,	1);
+	if (MLX5_CAP_GEN(mdev, fpga) && !param->icosq)
+		MLX5_SET(sqc, sqc, allow_swp, true);
 
 	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
 	MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
@@ -1838,9 +1899,11 @@
 static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
-	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
+	u16 hw_mtu;
 	int err;
 
+	hw_mtu = mlx5e_mtu(mtu, true, priv);
+
 	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
 	if (err)
 		return err;
@@ -1860,7 +1923,7 @@
 	if (err || !hw_mtu) /* fallback to port oper mtu */
 		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
 
-	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
+	*mtu = mlx5e_mtu(hw_mtu, false, priv);
 }
 
 static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
@@ -2669,7 +2732,7 @@
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
-	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+	max_mtu = mlx5e_mtu(max_mtu, false, priv);
 	min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
 
 	if (new_mtu > max_mtu || new_mtu < min_mtu) {
@@ -2865,13 +2928,20 @@
 					      struct net_device *netdev,
 					      netdev_features_t features)
 {
+	struct mlx5e_accel_client_ops *accel_client_ops;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	bool done = false;
 
 	features = vlan_features_check(skb, features);
 	features = vxlan_features_check(skb, features);
 
+	rcu_read_lock();
+	accel_client_ops = rcu_dereference(priv->accel_client_ops);
+	features = accel_client_ops->feature_chk(skb, netdev, features, &done);
+	rcu_read_unlock();
+
 	/* Validate if the tunneled packet is being offloaded by HW */
-	if (skb->encapsulation &&
+	if (!done && skb->encapsulation &&
 	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
 		return mlx5e_vxlan_features_check(priv, skb, features);
 
@@ -3099,6 +3169,47 @@
 	}
 }
 
+/* Install an accelerator client's ops on this netdev and re-apply the MTU
+ * so the client's mtu_handler takes effect.  Fails with -EACCES if a
+ * non-default ops pointer is already installed.
+ *
+ * NOTE(review): the check-then-assign below is not atomic -- two
+ * concurrent registrations could both pass the default-pointer check.
+ * Presumably registration is serialized by the caller; verify, or guard
+ * with a lock.
+ */
+int
+mlx5e_register_accel_ops(struct net_device *dev,
+			 struct mlx5e_accel_client_ops *ops)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_accel_client_ops *accel_client_ops;
+
+	/* All callbacks are invoked unconditionally on the data path */
+	WARN_ON(!ops->mtu_handler || !ops->tx_handler || !ops->rx_handler);
+	WARN_ON(!ops->get_count || !ops->get_strings || !ops->get_stats);
+	WARN_ON(!ops->feature_chk);
+
+	rcu_read_lock();
+	accel_client_ops = rcu_dereference(priv->accel_client_ops);
+	rcu_read_unlock();
+	if (accel_client_ops != &accel_ops_default) {
+		pr_err("mlx5e_register_accel_ops(): Error registering client_ops over non-default pointer\n");
+		return -EACCES;
+	}
+	rcu_assign_pointer(priv->accel_client_ops, ops);
+
+	/* Re-run MTU setup so the new mtu_handler is applied */
+	rtnl_lock();
+	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
+	rtnl_unlock();
+
+	return 0;
+}
+
+/* Restore the default no-op accelerator ops and re-apply the MTU.  The
+ * synchronize_rcu() guarantees no data-path reader still holds the old
+ * ops pointer before the caller tears its client state down.
+ */
+void mlx5e_unregister_accel_ops(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	rcu_assign_pointer(priv->accel_client_ops, &accel_ops_default);
+	synchronize_rcu();
+
+	rtnl_lock();
+	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
+	rtnl_unlock();
+}
+
 static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
 					struct net_device *netdev,
 					const struct mlx5e_profile *profile,
@@ -3197,6 +3308,8 @@
 	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
 	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
 	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+	rcu_assign_pointer(priv->accel_client_ops, &accel_ops_default);
 }
 
 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
@@ -3755,11 +3868,18 @@
 {
 	const struct mlx5e_profile *profile = priv->profile;
 	struct net_device *netdev = priv->netdev;
+	struct mlx5e_accel_client_ops *accel_client_ops;
 
 	unregister_netdev(netdev);
 	destroy_workqueue(priv->wq);
 	if (profile->cleanup)
 		profile->cleanup(priv);
+
+	rcu_read_lock();
+	accel_client_ops = rcu_dereference(priv->accel_client_ops);
+	rcu_read_unlock();
+	WARN_ON(accel_client_ops != &accel_ops_default);
+
 	free_netdev(netdev);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index dc86779..1a56c23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -650,9 +650,21 @@
 					 u32 cqe_bcnt,
 					 struct sk_buff *skb)
 {
+	struct mlx5e_accel_client_ops *accel_client_ops;
+
 	rq->stats.packets++;
 	rq->stats.bytes += cqe_bcnt;
 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+
+	rcu_read_lock();
+	accel_client_ops = rcu_dereference(rq->priv->accel_client_ops);
+	skb = accel_client_ops->rx_handler(skb);
+	if (!skb) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
 	napi_gro_receive(rq->cq.napi, skb);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index eb0e725..39369fd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -215,7 +215,8 @@
 	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
 }
 
-static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb,
+				 struct mlx5e_swp_info *swp_info)
 {
 	struct mlx5_wq_cyc       *wq   = &sq->wq;
 
@@ -280,6 +281,12 @@
 		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 	}
 
+	eseg->swp_outer_l3_offset = swp_info->outer_l3_ofs;
+	eseg->swp_outer_l4_offset = swp_info->outer_l4_ofs;
+	eseg->swp_inner_l3_offset = swp_info->inner_l3_ofs;
+	eseg->swp_inner_l4_offset = swp_info->inner_l4_ofs;
+	eseg->swp_flags = swp_info->swp_flags;
+
 	wi->num_bytes = num_bytes;
 
 	if (skb_vlan_tag_present(skb)) {
@@ -390,9 +397,23 @@
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
+	struct mlx5e_sq *sq = NULL;
+	struct mlx5e_accel_client_ops *accel_client_ops;
+	struct mlx5e_swp_info swp_info = {0};
 
-	return mlx5e_sq_xmit(sq, skb);
+	rcu_read_lock();
+	accel_client_ops = rcu_dereference(priv->accel_client_ops);
+	skb = accel_client_ops->tx_handler(skb, &swp_info);
+	if (!skb) {
+		rcu_read_unlock();
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	rcu_read_unlock();
+
+	sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
+
+	return mlx5e_sq_xmit(sq, skb, &swp_info);
 }
 
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index aaca090..e96541e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -151,11 +151,26 @@
 		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
 	case MLX5_EVENT_TYPE_PAGE_FAULT:
 		return "MLX5_EVENT_TYPE_PAGE_FAULT";
+	case MLX5_EVENT_TYPE_FPGA_ERROR:
+		return "MLX5_EVENT_TYPE_FPGA_ERROR";
+	case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
+		return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
 	default:
 		return "Unrecognized event";
 	}
 }
 
+static enum mlx5_dev_event event_type_to_dev_event(u8 event_type)
+{
+	switch (event_type) {
+	case MLX5_EVENT_TYPE_FPGA_ERROR:
+		return MLX5_DEV_EVENT_FPGA_ERROR;
+	case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
+		return MLX5_DEV_EVENT_FPGA_QP_ERROR;
+	}
+	return -1;
+}
+
 static enum mlx5_dev_event port_subtype_event(u8 subtype)
 {
 	switch (subtype) {
@@ -285,6 +300,13 @@
 			mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
 			break;
 #endif
+		case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
+		case MLX5_EVENT_TYPE_FPGA_ERROR:
+			if (dev->event)
+				dev->event(dev,
+					   event_type_to_dev_event(eqe->type),
+					   (unsigned long)&eqe->data.raw);
+			break;
 		default:
 			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
 				       eqe->type, eq->eqn);
@@ -469,7 +491,7 @@
 int mlx5_start_eqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
-	u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
+	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
 	int err;
 
 	if (MLX5_CAP_GEN(dev, pg))
@@ -480,6 +502,10 @@
 	    mlx5_core_is_pf(dev))
 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
 
+	if (MLX5_CAP_GEN(dev, fpga))
+		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
+				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
+
 	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
 				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
 				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga.c
new file mode 100644
index 0000000..de83854
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/cmd.h>
+#include <linux/etherdevice.h>
+#include "mlx5_core.h"
+
+int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
+			 u8 *buf, bool write)
+{
+	u32 in[MLX5_ST_SZ_DW(fpga_access_reg) + MLX5_FPGA_ACCESS_REG_SIZE_MAX];
+	u32 out[MLX5_ST_SZ_DW(fpga_access_reg) + MLX5_FPGA_ACCESS_REG_SIZE_MAX];
+	int err, i;
+
+	if (size & 3)
+		return -EINVAL;
+	if (addr & 3)
+		return -EINVAL;
+	if (size > MLX5_FPGA_ACCESS_REG_SIZE_MAX)
+		return -EINVAL;
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(fpga_access_reg, in, size, size);
+	MLX5_SET(fpga_access_reg, in, address_h, addr >> 32);
+	MLX5_SET(fpga_access_reg, in, address_l, addr & 0xFFFFFFFF);
+	if (write) {
+		for (i = 0; i < size; ++i)
+			MLX5_SET(fpga_access_reg, in, data[i], buf[i]);
+	}
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+				   MLX5_REG_FPGA_ACCESS_REG, 0, write);
+	if (err)
+		return err;
+
+	if (!write) {
+		for (i = 0; i < size; i++)
+			buf[i] = MLX5_GET(fpga_access_reg, out, data[i]);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_fpga_access_reg);
+
+static int mlx5_fpga_ctrl_write(struct mlx5_core_dev *dev, u8 op,
+				enum mlx_accel_fpga_image image)
+{
+	u32 in[MLX5_ST_SZ_DW(fpga_ctrl)];
+	u32 out[MLX5_ST_SZ_DW(fpga_ctrl)];
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(fpga_ctrl, in, operation, op);
+	MLX5_SET(fpga_ctrl, in, image_select_admin, image);
+
+	return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+				    MLX5_REG_FPGA_CTRL, 0, true);
+}
+
+int mlx5_fpga_load(struct mlx5_core_dev *dev, enum mlx_accel_fpga_image image)
+{
+	return mlx5_fpga_ctrl_write(dev, MLX5_FPGA_CTRL_OP_LOAD, image);
+}
+EXPORT_SYMBOL_GPL(mlx5_fpga_load);
+
+int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op)
+{
+	if (op > MLX5_FPGA_CTRL_OP_IMAGE_SEL) {
+	pr_warn("Skipping unsupported FPGA_CTRL op %u\n", op);
+		return 0;
+	}
+	return mlx5_fpga_ctrl_write(dev, op, 0);
+}
+EXPORT_SYMBOL_GPL(mlx5_fpga_ctrl_op);
+
+int mlx5_fpga_image_select(struct mlx5_core_dev *dev,
+			   enum mlx_accel_fpga_image image)
+{
+	return mlx5_fpga_ctrl_write(dev, MLX5_FPGA_CTRL_OP_IMAGE_SEL, image);
+}
+EXPORT_SYMBOL_GPL(mlx5_fpga_image_select);
+
+int mlx5_fpga_query(struct mlx5_core_dev *dev,
+		    enum mlx_accel_fpga_status *status,
+		    enum mlx_accel_fpga_image *admin_image,
+		    enum mlx_accel_fpga_image *oper_image)
+{
+	u32 in[MLX5_ST_SZ_DW(fpga_ctrl)];
+	u32 out[MLX5_ST_SZ_DW(fpga_ctrl)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+				   MLX5_REG_FPGA_CTRL, 0, false);
+	if (err)
+		goto out;
+
+	if (status)
+		*status = MLX5_GET(fpga_ctrl, out, status);
+	if (admin_image)
+		*admin_image = MLX5_GET(fpga_ctrl, out, image_select_admin);
+	if (oper_image)
+		*oper_image = MLX5_GET(fpga_ctrl, out, image_select_oper);
+
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_fpga_query);
+
+static void fpga_qpc_to_mailbox(struct mlx5_fpga_qpc *fpga_qpc, u8 *in)
+{
+	u8 *dst;
+
+	MLX5_SET(fpga_qpc, in, state, fpga_qpc->state);
+	MLX5_SET(fpga_qpc, in, qp_type, fpga_qpc->qp_type);
+	MLX5_SET(fpga_qpc, in, st, fpga_qpc->st);
+	MLX5_SET(fpga_qpc, in, traffic_class, fpga_qpc->tclass);
+	MLX5_SET(fpga_qpc, in, ether_type, fpga_qpc->ether_type);
+	MLX5_SET(fpga_qpc, in, prio, fpga_qpc->pcp);
+	MLX5_SET(fpga_qpc, in, dei, fpga_qpc->dei);
+	MLX5_SET(fpga_qpc, in, vid, fpga_qpc->vlan_id);
+	MLX5_SET(fpga_qpc, in, next_rcv_psn, fpga_qpc->next_rcv_psn);
+	MLX5_SET(fpga_qpc, in, next_send_psn, fpga_qpc->next_send_psn);
+	MLX5_SET(fpga_qpc, in, pkey, fpga_qpc->pkey);
+	MLX5_SET(fpga_qpc, in, remote_qpn, fpga_qpc->remote_qpn);
+	MLX5_SET(fpga_qpc, in, rnr_retry, fpga_qpc->rnr_retry);
+	MLX5_SET(fpga_qpc, in, retry_count, fpga_qpc->retry_count);
+
+	dst = MLX5_ADDR_OF(fpga_qpc, in, remote_mac_47_32);
+	ether_addr_copy(dst, fpga_qpc->remote_mac);
+	dst = MLX5_ADDR_OF(fpga_qpc, in, remote_ip);
+	memcpy(dst, &fpga_qpc->remote_ip, sizeof(struct in6_addr));
+	dst = MLX5_ADDR_OF(fpga_qpc, in, fpga_mac_47_32);
+	ether_addr_copy(dst, fpga_qpc->fpga_mac);
+	dst = MLX5_ADDR_OF(fpga_qpc, in, fpga_ip);
+	memcpy(dst, &fpga_qpc->fpga_ip, sizeof(struct in6_addr));
+}
+
+static void fpga_qpc_from_mailbox(struct mlx5_fpga_qpc *fpga_qpc, u8 *out)
+{
+	u8 *src;
+
+	fpga_qpc->state = MLX5_GET(fpga_qpc, out, state);
+	fpga_qpc->qp_type = MLX5_GET(fpga_qpc, out, qp_type);
+	fpga_qpc->st = MLX5_GET(fpga_qpc, out, st);
+	fpga_qpc->tclass = MLX5_GET(fpga_qpc, out, traffic_class);
+	fpga_qpc->ether_type = MLX5_GET(fpga_qpc, out, ether_type);
+	fpga_qpc->pcp = MLX5_GET(fpga_qpc, out, prio);
+	fpga_qpc->dei = MLX5_GET(fpga_qpc, out, dei);
+	fpga_qpc->vlan_id = MLX5_GET(fpga_qpc, out, vid);
+	fpga_qpc->next_rcv_psn = MLX5_GET(fpga_qpc, out, next_rcv_psn);
+	fpga_qpc->next_send_psn = MLX5_GET(fpga_qpc, out, next_send_psn);
+	fpga_qpc->pkey = MLX5_GET(fpga_qpc, out, pkey);
+	fpga_qpc->remote_qpn = MLX5_GET(fpga_qpc, out, remote_qpn);
+	fpga_qpc->rnr_retry = MLX5_GET(fpga_qpc, out, rnr_retry);
+	fpga_qpc->retry_count = MLX5_GET(fpga_qpc, out, retry_count);
+
+	src = MLX5_ADDR_OF(fpga_qpc, out, remote_mac_47_32);
+	ether_addr_copy(fpga_qpc->remote_mac, src);
+	src = MLX5_ADDR_OF(fpga_qpc, out, remote_ip);
+	memcpy(&fpga_qpc->remote_ip, src, sizeof(struct in6_addr));
+	src = MLX5_ADDR_OF(fpga_qpc, out, fpga_mac_47_32);
+	ether_addr_copy(fpga_qpc->fpga_mac, src);
+	src = MLX5_ADDR_OF(fpga_qpc, out, fpga_ip);
+	memcpy(&fpga_qpc->fpga_ip, src, sizeof(struct in6_addr));
+}
+
+int mlx5_fpga_create_qp(struct mlx5_core_dev *dev,
+			struct mlx5_fpga_qpc *fpga_qpc, u32 *fpga_qpn)
+{
+	int ret;
+	u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)];
+	u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)];
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(fpga_create_qp_in, in, opcode, MLX5_CMD_OP_FPGA_CREATE_QP);
+	fpga_qpc_to_mailbox(fpga_qpc,
+			    MLX5_ADDR_OF(fpga_create_qp_in, in, fpga_qpc));
+
+	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (ret)
+		goto out;
+
+	fpga_qpc_from_mailbox(fpga_qpc,
+			      MLX5_ADDR_OF(fpga_create_qp_out, out, fpga_qpc));
+	*fpga_qpn = MLX5_GET(fpga_create_qp_out, out, fpga_qpn);
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mlx5_fpga_create_qp);
+
+int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
+			enum mlx5_fpga_qpc_field_select fields,
+			struct mlx5_fpga_qpc *fpga_qpc)
+{
+	u32 in[MLX5_ST_SZ_DW(fpga_modify_qp_in)];
+	u32 out[MLX5_ST_SZ_DW(fpga_modify_qp_out)];
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(fpga_modify_qp_in, in, opcode, MLX5_CMD_OP_FPGA_MODIFY_QP);
+	MLX5_SET(fpga_modify_qp_in, in, field_select, fields);
+	MLX5_SET(fpga_modify_qp_in, in, fpga_qpn, fpga_qpn);
+	fpga_qpc_to_mailbox(fpga_qpc,
+			    MLX5_ADDR_OF(fpga_modify_qp_in, in, fpga_qpc));
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_fpga_modify_qp);
+
+int mlx5_fpga_query_qp(struct mlx5_core_dev *dev,
+		       u32 fpga_qpn, struct mlx5_fpga_qpc *fpga_qpc)
+{
+	int ret;
+	u32 in[MLX5_ST_SZ_DW(fpga_query_qp_in)];
+	u32 out[MLX5_ST_SZ_DW(fpga_query_qp_out)];
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(fpga_query_qp_in, in, opcode, MLX5_CMD_OP_FPGA_QUERY_QP);
+	MLX5_SET(fpga_query_qp_in, in, fpga_qpn, fpga_qpn);
+
+	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (ret)
+		goto out;
+
+	fpga_qpc_from_mailbox(fpga_qpc,
+			      MLX5_ADDR_OF(fpga_query_qp_out, out, fpga_qpc));
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mlx5_fpga_query_qp);
+
+int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn)
+{
+	u32 in[MLX5_ST_SZ_DW(fpga_destroy_qp_in)];
+	u32 out[MLX5_ST_SZ_DW(fpga_destroy_qp_out)];
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(fpga_destroy_qp_in, in, opcode, MLX5_CMD_OP_FPGA_DESTROY_QP);
+	MLX5_SET(fpga_destroy_qp_in, in, fpga_qpn, fpga_qpn);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_fpga_destroy_qp);
+
+int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
+				bool clear, struct mlx5_fpga_qp_counters *data)
+{
+	int ret;
+	u32 in[MLX5_ST_SZ_DW(fpga_query_qp_counters_in)];
+	u32 out[MLX5_ST_SZ_DW(fpga_query_qp_counters_out)];
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(fpga_query_qp_counters_in, in, opcode,
+		 MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS);
+	MLX5_SET(fpga_query_qp_counters_in, in, clear, clear);
+	MLX5_SET(fpga_query_qp_counters_in, in, fpga_qpn, fpga_qpn);
+
+	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (ret)
+		goto out;
+
+	data->rx_ack_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
+					  rx_ack_packets);
+	data->rx_send_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
+					   rx_send_packets);
+	data->tx_ack_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
+					  tx_ack_packets);
+	data->tx_send_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
+					   tx_send_packets);
+	data->rx_total_drop = MLX5_GET64(fpga_query_qp_counters_out, out,
+					 rx_total_drop);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mlx5_fpga_query_qp_counters);
+
+int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
+{
+	int err;
+	u32 in[MLX5_ST_SZ_DW(fpga_cap)];
+	u32 out[MLX5_ST_SZ_DW(fpga_cap)];
+
+	memset(in, 0, sizeof(in));
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+				   MLX5_REG_FPGA_CAP, 0, false);
+	if (err)
+		return err;
+
+	memcpy(caps, out, sizeof(out));
+#ifdef DEBUG
+	print_hex_dump_bytes("FPGA caps ", DUMP_PREFIX_OFFSET, out,
+			     sizeof(out));
+#endif
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_fpga_caps);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 3d0cfb9..eb6293c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -96,6 +96,7 @@
 u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
 struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
 void mlx5_cq_tasklet_cb(unsigned long data);
+int mlx5_fpga_get_caps(struct mlx5_core_dev *dev);
 
 void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
 void mlx5_lag_remove(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/accel/accel_sdk.h b/include/linux/mlx5/accel/accel_sdk.h
new file mode 100644
index 0000000..01d1a45
--- /dev/null
+++ b/include/linux/mlx5/accel/accel_sdk.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef SDK_H
+#define SDK_H
+
+enum mlx_accel_access_type {
+	MLX_ACCEL_ACCESS_TYPE_I2C = 0x0,
+	MLX_ACCEL_ACCESS_TYPE_RDMA,
+	MLX_ACCEL_ACCESS_TYPE_DONTCARE,
+	MLX_ACCEL_ACCESS_TYPE_MAX = MLX_ACCEL_ACCESS_TYPE_DONTCARE,
+};
+
+enum mlx_accel_fpga_image {
+	MLX_ACCEL_IMAGE_USER = 0x0,
+	MLX_ACCEL_IMAGE_FACTORY,
+	MLX_ACCEL_IMAGE_MAX = MLX_ACCEL_IMAGE_FACTORY,
+};
+
+enum mlx_accel_fpga_status {
+	MLX_ACCEL_FPGA_STATUS_SUCCESS = 0,
+	MLX_ACCEL_FPGA_STATUS_FAILURE = 1,
+	MLX_ACCEL_FPGA_STATUS_IN_PROGRESS = 2,
+	MLX_ACCEL_FPGA_STATUS_NONE = 0xFFFF,
+};
+
+#endif /* SDK_H */
diff --git a/include/linux/mlx5/accel/tools_chardev.h b/include/linux/mlx5/accel/tools_chardev.h
new file mode 100644
index 0000000..20647ec
--- /dev/null
+++ b/include/linux/mlx5/accel/tools_chardev.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef TOOLS_CHARDEV_H
+#define TOOLS_CHARDEV_H
+
+#include <linux/ioctl.h>
+#include "accel_sdk.h"
+
+#define MLX_ACCEL_TOOLS_NAME_SUFFIX "_accel_tools"
+
+struct mlx_accel_fpga_query {
+	enum mlx_accel_fpga_image  admin_image;
+	enum mlx_accel_fpga_image  oper_image;
+	enum mlx_accel_fpga_status status;
+};
+
+/* Set the memory access type */
+#define IOCTL_ACCESS_TYPE    _IOW('m', 0x80, enum mlx_accel_access_type)
+/* Load FPGA image from flash */
+#define IOCTL_FPGA_LOAD      _IOW('m', 0x81, enum mlx_accel_fpga_image)
+/* Reset FPGA hardware logic */
+#define IOCTL_FPGA_RESET      _IO('m', 0x82)
+/* Select image for next reset or power-on */
+#define IOCTL_FPGA_IMAGE_SEL _IOW('m', 0x83, enum mlx_accel_fpga_image)
+/* Query selected and running images */
+#define IOCTL_FPGA_QUERY     _IOR('m', 0x84, struct mlx_accel_fpga_query)
+
+#endif /* TOOLS_CHARDEV_H */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 77c1417..af28438 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -278,6 +278,9 @@
 
 	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
 	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,
+
+	MLX5_EVENT_TYPE_FPGA_ERROR         = 0x20,
+	MLX5_EVENT_TYPE_FPGA_QP_ERROR      = 0x21,
 };
 
 enum {
@@ -846,6 +849,15 @@
 	MLX5_L3_PROT_TYPE_IPV6		= 1,
 };
 
+#define MLX5_CAP_FPGA(adev, cap) \
+	MLX5_GET(fpga_cap, adev->fpga_caps, cap)
+
+#define MLX5_CAP64_FPGA(adev, cap) \
+	MLX5_GET64(fpga_cap, adev->fpga_caps, cap)
+
+#define MLX5_CAP_FPGA_SHELL(adev, cap) \
+	MLX5_GET(fpga_cap, adev->fpga_caps, shell_caps.cap)
+
 enum {
 	MLX5_L4_PROT_TYPE_TCP		= 0,
 	MLX5_L4_PROT_TYPE_UDP		= 1,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 85c4786..0543d26 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -47,6 +47,7 @@
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
 #include <linux/mlx5/srq.h>
+#include <linux/mlx5/accel/accel_sdk.h>
 
 enum {
 	MLX5_BOARD_ID_LEN = 64,
@@ -104,6 +105,9 @@
 enum {
 	MLX5_REG_QETCR		 = 0x4005,
 	MLX5_REG_QTCT		 = 0x400a,
+	MLX5_REG_FPGA_CAP			 = 0x4022,
+	MLX5_REG_FPGA_CTRL			 = 0x4023,
+	MLX5_REG_FPGA_ACCESS_REG	 = 0x4024,
 	MLX5_REG_PCAP		 = 0x5001,
 	MLX5_REG_PMTU		 = 0x5003,
 	MLX5_REG_PTYS		 = 0x5004,
@@ -163,6 +167,8 @@
 	MLX5_DEV_EVENT_PKEY_CHANGE,
 	MLX5_DEV_EVENT_GUID_CHANGE,
 	MLX5_DEV_EVENT_CLIENT_REREG,
+	MLX5_DEV_EVENT_FPGA_ERROR,
+	MLX5_DEV_EVENT_FPGA_QP_ERROR,
 };
 
 enum mlx5_port_status {
@@ -712,6 +718,54 @@
 	bool			grh_required;
 };
 
+enum mlx5_fpga_qp_state {
+	MLX5_FPGA_QP_STATE_INIT = 0,
+	MLX5_FPGA_QP_STATE_ACTIVE = 1,
+	MLX5_FPGA_QP_STATE_ERROR = 2,
+};
+
+enum mlx5_fpga_qp_type {
+	MLX5_FPGA_QP_TYPE_SHELL = 0,
+	MLX5_FPGA_QP_TYPE_SANDBOX = 1,
+};
+
+enum mlx5_fpga_qp_service_type {
+	MLX5_FPGA_QP_SERVICE_TYPE_RC = 0,
+};
+
+enum mlx5_fpga_qpc_field_select {
+	MLX5_FPGA_QPC_STATE = BIT(0),
+};
+
+struct mlx5_fpga_qpc {
+	enum mlx5_fpga_qp_state		state;
+	enum mlx5_fpga_qp_type		qp_type;
+	enum mlx5_fpga_qp_service_type	st;
+	u8				tclass;
+	u16				ether_type;
+	u8				pcp;
+	u8				dei;
+	u16				vlan_id;
+	u32				next_rcv_psn;
+	u32				next_send_psn;
+	u16				pkey;
+	u32				remote_qpn;
+	u8				rnr_retry;
+	u8				retry_count;
+	u8				remote_mac[ETH_ALEN];
+	struct in6_addr			remote_ip;
+	u8				fpga_mac[ETH_ALEN];
+	struct in6_addr			fpga_ip;
+};
+
+struct mlx5_fpga_qp_counters {
+	u64 rx_ack_packets;
+	u64 rx_send_packets;
+	u64 tx_ack_packets;
+	u64 tx_send_packets;
+	u64 rx_total_drop;
+};
+
 static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
 {
 		return buf->direct.buf + offset;
@@ -893,6 +947,28 @@
 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
 bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
 
+int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps);
+int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
+			 u8 *buf, bool write);
+int mlx5_fpga_load(struct mlx5_core_dev *dev, enum mlx_accel_fpga_image image);
+int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op);
+int mlx5_fpga_image_select(struct mlx5_core_dev *dev,
+			   enum mlx_accel_fpga_image image);
+int mlx5_fpga_query(struct mlx5_core_dev *dev,
+		    enum mlx_accel_fpga_status *status,
+		    enum mlx_accel_fpga_image *admin_image,
+		    enum mlx_accel_fpga_image *oper_image);
+int mlx5_fpga_create_qp(struct mlx5_core_dev *dev,
+			struct mlx5_fpga_qpc *fpga_qpc, u32 *fpga_qpn);
+int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
+			enum mlx5_fpga_qpc_field_select fields,
+			struct mlx5_fpga_qpc *fpga_qpc);
+int mlx5_fpga_query_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
+		       struct mlx5_fpga_qpc *fpga_qpc);
+int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
+				bool clear, struct mlx5_fpga_qp_counters *data);
+int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn);
+
 static inline int fw_initializing(struct mlx5_core_dev *dev)
 {
 	return ioread32be(&dev->iseg->initializing) >> 31;
diff --git a/include/linux/mlx5/en_driver.h b/include/linux/mlx5/en_driver.h
new file mode 100644
index 0000000..c666868
--- /dev/null
+++ b/include/linux/mlx5/en_driver.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef MLX5_EN_DRIVER_H
+#define MLX5_EN_DRIVER_H
+
+struct mlx5e_swp_info {
+	u8 outer_l4_ofs;
+	u8 outer_l3_ofs;
+	u8 inner_l4_ofs;
+	u8 inner_l3_ofs;
+	u8 swp_flags;
+};
+
+struct mlx5e_accel_client_ops {
+	struct sk_buff  *(*rx_handler)(struct sk_buff *skb);
+	struct sk_buff  *(*tx_handler)(struct sk_buff *skb,
+				       struct mlx5e_swp_info *swp);
+	netdev_features_t (*feature_chk)(struct sk_buff *skb,
+					 struct net_device *netdev,
+					 netdev_features_t features,
+					 bool *done);
+	u16 (*mtu_handler)(u16 mtu, bool hw_sw);
+	int (*get_count)(struct net_device *netdev);
+	int (*get_strings)(struct net_device *netdev, uint8_t *data);
+	int (*get_stats)(struct net_device *netdev, u64 *data);
+};
+
+int
+mlx5e_register_accel_ops(struct net_device *netdev,
+			struct mlx5e_accel_client_ops *client_ops);
+void mlx5e_unregister_accel_ops(struct net_device *netdev);
+#endif /* MLX5_EN_DRIVER_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6045d4d..cfe81d6 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -56,7 +56,9 @@
 	MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT                      = 0x1b,
 	MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT         = 0x1f,
 	MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION        = 0xa,
-	MLX5_EVENT_TYPE_CODING_PAGE_REQUEST                        = 0xb
+	MLX5_EVENT_TYPE_CODING_PAGE_REQUEST                        = 0xb,
+	MLX5_EVENT_TYPE_CODING_FPGA_ERROR                          = 0x20,
+	MLX5_EVENT_TYPE_CODING_FPGA_QP_ERROR                       = 0x21
 };
 
 enum {
@@ -220,6 +222,11 @@
 	MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c,
 	MLX5_CMD_OP_ALLOC_ENCAP_HEADER            = 0x93d,
 	MLX5_CMD_OP_DEALLOC_ENCAP_HEADER          = 0x93e,
+	MLX5_CMD_OP_FPGA_CREATE_QP                = 0x960,
+	MLX5_CMD_OP_FPGA_MODIFY_QP                = 0x961,
+	MLX5_CMD_OP_FPGA_QUERY_QP                 = 0x962,
+	MLX5_CMD_OP_FPGA_DESTROY_QP               = 0x963,
+	MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS        = 0x964,
 	MLX5_CMD_OP_MAX
 };
 
@@ -818,7 +825,8 @@
 	u8         max_tc[0x4];
 	u8         reserved_at_1d0[0x1];
 	u8         dcbx[0x1];
-	u8         reserved_at_1d2[0x4];
+	u8         reserved_at_1d2[0x3];
+	u8         fpga[0x1];
 	u8         rol_s[0x1];
 	u8         rol_g[0x1];
 	u8         reserved_at_1d8[0x1];
@@ -2311,7 +2319,8 @@
 	u8	   min_wqe_inline_mode[0x3];
 	u8         state[0x4];
 	u8         reg_umr[0x1];
-	u8         reserved_at_d[0x13];
+	u8         allow_swp[0x1];
+	u8         reserved_at_d[0x12];
 
 	u8         reserved_at_20[0x8];
 	u8         user_index[0x18];
@@ -7601,6 +7610,132 @@
 	u8         reserved_at_80a0[0x17fc0];
 };
 
+#define MLX5_FPGA_ACCESS_REG_SIZE_MAX 64
+
+struct mlx5_ifc_fpga_access_reg_bits {
+	u8         reserved_auto1[0x00020];
+
+	u8         reserved_auto2[0x00010];
+	u8         size[0x00010];
+
+	u8         address_h[0x00020];
+
+	u8         address_l[0x00020];
+
+	u8         data[MLX5_FPGA_ACCESS_REG_SIZE_MAX][0x00008];
+};
+
+enum {
+	MLX5_FPGA_CTRL_OP_LOAD = 1,
+	MLX5_FPGA_CTRL_OP_RESET = 2,
+	MLX5_FPGA_CTRL_OP_IMAGE_SEL = 3,
+	MLX5_FPGA_CTRL_OP_SB_BYPASS_ON = 4,
+	MLX5_FPGA_CTRL_OP_SB_BYPASS_OFF = 5,
+	MLX5_FPGA_CTRL_OP_RESET_SB = 6,
+};
+
+struct mlx5_ifc_fpga_ctrl_bits {
+	u8         reserved_auto1[0x00008];
+	u8         operation[0x00008];
+	u8         reserved_auto2[0x00008];
+	u8         status[0x00008];
+
+	u8         reserved_auto3[0x00008];
+	u8         image_select_admin[0x00008];
+	u8         reserved_auto4[0x00008];
+	u8         image_select_oper[0x00008];
+
+	u8         reserved_auto5[0x00040];
+};
+
+enum {
+	MLX5_FPGA_SHELL_CAPS_QP_TYPE_SHELL_QP    = 0x1,
+	MLX5_FPGA_SHELL_CAPS_QP_TYPE_SANDBOX_QP  = 0x2,
+};
+
+struct mlx5_ifc_fpga_shell_caps_bits {
+	u8         max_num_qps[0x10];
+	u8         reserved_0[0x8];
+	u8         total_rcv_credits[0x8];
+
+	u8         reserved_1[0xe];
+	u8         qp_type[0x2];
+	u8         reserved_2[0x5];
+	u8         rae[0x1];
+	u8         rwe[0x1];
+	u8         rre[0x1];
+	u8         reserved_3[0x4];
+	u8         dc[0x1];
+	u8         ud[0x1];
+	u8         uc[0x1];
+	u8         rc[0x1];
+
+	u8         reserved_4[0x1a];
+	u8         log_ddr_size[0x6];
+
+	u8         max_fpga_qp_msg_size[0x20];
+
+	u8         reserved_5[0x180];
+};
+
+enum {
+	MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_UNKNOWN    = 0x0,
+	MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_EXAMPLE    = 0x1,
+	MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC      = 0x2,
+	MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS        = 0x3,
+};
+
+struct mlx5_ifc_fpga_cap_bits {
+	u8         fpga_id[0x8];
+	u8         fpga_device[0x18];
+
+	u8         register_file_ver[0x20];
+
+	u8         fpga_ctrl_modify[0x1];
+	u8         reserved_0[0x5];
+	u8         access_reg_query_mode[0x2];
+	u8         reserved_1[0x6];
+	u8         access_reg_modify_mode[0x2];
+	u8         reserved_2[0x10];
+
+	u8         reserved_3[0x20];
+
+	u8         image_version[0x20];
+
+	u8         image_date[0x20];
+
+	u8         image_time[0x20];
+
+	u8         shell_version[0x20];
+
+	u8         reserved_4[0x80];
+
+	struct mlx5_ifc_fpga_shell_caps_bits shell_caps;
+
+	u8         reserved_5[0x8];
+	u8         ieee_vendor_id[0x18];
+
+	u8         sandbox_product_version[0x10];
+	u8         sandbox_product_id[0x10];
+
+	u8         sandbox_basic_caps[0x20];
+
+	u8         reserved_6[0x10];
+	u8         sandbox_extended_caps_len[0x10];
+
+	u8         sandbox_extended_caps_addr[0x40];
+
+	u8         fpga_ddr_start_addr[0x40];
+
+	u8         fpga_cr_space_start_addr[0x40];
+
+	u8         fpga_ddr_size[0x20];
+
+	u8         fpga_cr_space_size[0x20];
+
+	u8         reserved_7[0x300];
+};
+
 union mlx5_ifc_ports_control_registers_document_bits {
 	struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
 	struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -7645,6 +7780,9 @@
 	struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
 	struct mlx5_ifc_slrg_reg_bits slrg_reg;
 	struct mlx5_ifc_sltp_reg_bits sltp_reg;
+	struct mlx5_ifc_fpga_access_reg_bits fpga_access_reg;
+	struct mlx5_ifc_fpga_ctrl_bits fpga_ctrl;
+	struct mlx5_ifc_fpga_cap_bits fpga_cap;
 	u8         reserved_at_0[0x60e0];
 };
 
@@ -7971,4 +8109,323 @@
 	u8         reserved_at_40[0x40];
 };
 
+struct mlx5_ifc_fpga_qpc_bits { /* FPGA QP context, used by the create/modify/query QP commands below */
+	u8         state[0x00004];
+	u8         reserved_auto1[0x0001b];
+	u8         qp_type[0x00001];
+
+	u8         reserved_auto2[0x00004];
+	u8         st[0x00004]; /* NOTE(review): presumably service type — confirm */
+	u8         reserved_auto3[0x00010];
+	u8         traffic_class[0x00008];
+
+	u8         ether_type[0x00010];
+	u8         prio[0x00003]; /* prio/dei/vid: 3/1/12-bit fields matching the 802.1Q tag layout */
+	u8         dei[0x00001];
+	u8         vid[0x0000c];
+
+	u8         reserved_auto4[0x00020];
+
+	u8         reserved_auto5[0x00008];
+	u8         next_rcv_psn[0x00018]; /* 24-bit receive/send packet sequence numbers */
+
+	u8         reserved_auto6[0x00008];
+	u8         next_send_psn[0x00018];
+
+	u8         reserved_auto7[0x00010];
+	u8         pkey[0x00010];
+
+	u8         reserved_auto8[0x00008];
+	u8         remote_qpn[0x00018];
+
+	u8         reserved_auto9[0x00015];
+	u8         rnr_retry[0x00003]; /* 3-bit RNR retry / retry counters */
+	u8         reserved_auto10[0x00005];
+	u8         retry_count[0x00003];
+
+	u8         reserved_auto11[0x00020];
+
+	u8         reserved_auto12[0x00010];
+	u8         remote_mac_47_32[0x00010]; /* MAC addresses split into high-16 / low-32 bits */
+
+	u8         remote_mac_31_0[0x00020];
+
+	u8         remote_ip[16][0x00008]; /* 16-byte IP — presumably IPv6 or IPv4-mapped; confirm */
+
+	u8         reserved_auto13[0x00040];
+
+	u8         reserved_auto14[0x00010];
+	u8         fpga_mac_47_32[0x00010];
+
+	u8         fpga_mac_31_0[0x00020];
+
+	u8         fpga_ip[16][0x00008];
+};
+
+struct mlx5_ifc_fpga_query_qp_out_bits { /* FPGA_QUERY_QP output: status/syndrome header + current QPC */
+	u8         status[0x00008];
+	u8         reserved_auto1[0x00018];
+
+	u8         syndrome[0x00020];
+
+	u8         reserved_auto2[0x00040];
+
+	struct mlx5_ifc_fpga_qpc_bits fpga_qpc;
+};
+
+struct mlx5_ifc_fpga_query_qp_in_bits { /* FPGA_QUERY_QP input: command header + target QP number */
+	u8         opcode[0x00010];
+	u8         reserved_auto1[0x00010];
+
+	u8         reserved_auto2[0x00010];
+	u8         op_mod[0x00010];
+
+	u8         reserved_auto3[0x00008];
+	u8         fpga_qpn[0x00018]; /* FPGA QP to query */
+
+	u8         reserved_auto4[0x00020];
+};
+
+struct mlx5_ifc_fpga_modify_qp_out_bits { /* FPGA_MODIFY_QP output: status/syndrome only */
+	u8         status[0x00008];
+	u8         reserved_auto1[0x00018];
+
+	u8         syndrome[0x00020];
+
+	u8         reserved_auto2[0x00040];
+};
+
+struct mlx5_ifc_fpga_modify_qp_in_bits { /* FPGA_MODIFY_QP input: selected QPC fields to change */
+	u8         opcode[0x00010];
+	u8         reserved_auto1[0x00010];
+
+	u8         reserved_auto2[0x00010];
+	u8         op_mod[0x00010];
+
+	u8         reserved_auto3[0x00008];
+	u8         fpga_qpn[0x00018];
+
+	u8         field_select[0x00020]; /* presumably a bitmask of fpga_qpc fields to modify — confirm */
+
+	struct mlx5_ifc_fpga_qpc_bits fpga_qpc;
+};
+
+struct mlx5_ifc_fpga_destroy_qp_out_bits { /* FPGA_DESTROY_QP output: status/syndrome only */
+	u8         status[0x00008];
+	u8         reserved_auto1[0x00018];
+
+	u8         syndrome[0x00020];
+
+	u8         reserved_auto2[0x00040];
+};
+
+struct mlx5_ifc_fpga_destroy_qp_in_bits { /* FPGA_DESTROY_QP input: command header + QP number */
+	u8         opcode[0x00010];
+	u8         reserved_auto1[0x00010];
+
+	u8         reserved_auto2[0x00010];
+	u8         op_mod[0x00010];
+
+	u8         reserved_auto3[0x00008];
+	u8         fpga_qpn[0x00018]; /* FPGA QP to destroy */
+
+	u8         reserved_auto4[0x00020];
+};
+
+struct mlx5_ifc_fpga_create_qp_out_bits { /* FPGA_CREATE_QP output: assigned QP number + resulting QPC */
+	u8         status[0x00008];
+	u8         reserved_auto1[0x00018];
+
+	u8         syndrome[0x00020];
+
+	u8         reserved_auto2[0x00008];
+	u8         fpga_qpn[0x00018]; /* QP number allocated for the new FPGA QP */
+
+	u8         reserved_auto3[0x00020];
+
+	struct mlx5_ifc_fpga_qpc_bits fpga_qpc;
+};
+
+struct mlx5_ifc_fpga_create_qp_in_bits { /* FPGA_CREATE_QP input: command header + initial QPC */
+	u8         opcode[0x00010];
+	u8         reserved_auto1[0x00010];
+
+	u8         reserved_auto2[0x00010];
+	u8         op_mod[0x00010];
+
+	u8         reserved_auto3[0x00040];
+
+	struct mlx5_ifc_fpga_qpc_bits fpga_qpc;
+};
+
+struct mlx5_ifc_fpga_query_qp_counters_out_bits { /* FPGA_QUERY_QP_COUNTERS output: 64-bit per-QP counters */
+	u8                              status[0x00008];
+	u8                              reserved_auto1[0x00018];
+
+	u8                              syndrome[0x00020];
+
+	u8                              reserved_auto2[0x00040];
+
+	struct mlx5_ifc_uint64_bits     rx_ack_packets;
+
+	struct mlx5_ifc_uint64_bits     rx_send_packets;
+
+	struct mlx5_ifc_uint64_bits     tx_ack_packets;
+
+	struct mlx5_ifc_uint64_bits     tx_send_packets;
+
+	struct mlx5_ifc_uint64_bits     rx_total_drop;
+
+	u8                              reserved_auto3[0x001c0];
+};
+
+struct mlx5_ifc_fpga_query_qp_counters_in_bits { /* FPGA_QUERY_QP_COUNTERS input */
+	u8      opcode[0x00010];
+	u8      reserved_auto1[0x00010];
+
+	u8      reserved_auto2[0x00010];
+	u8      op_mod[0x00010];
+
+	u8      clear[0x00001]; /* presumably: reset counters after the read — confirm */
+	u8      reserved_auto3[0x00007];
+	u8      fpga_qpn[0x00018];
+
+	u8      reserved_auto4[0x00020];
+};
+
+struct mlx5_ifc_fpga_shell_counters_bits { /* shell-level DDR traffic counters, all 64-bit */
+	u8         reserved_0[0x20];
+
+	u8         clear[0x1]; /* presumably: reset counters after the read — confirm */
+	u8         reserved_1[0x1f];
+
+	u8         reserved_2[0x40];
+
+	u8         ddr_read_requests[0x40];
+
+	u8         ddr_write_requests[0x40];
+
+	u8         ddr_read_bytes[0x40];
+
+	u8         ddr_write_bytes[0x40];
+
+	u8         reserved_3[0x200];
+};
+
+enum { /* 'type' field values of mlx5_ifc_fpga_shell_qp_packet_bits: read/write request+response pairs */
+	MLX5_FPGA_MSG_READ = 0,
+	MLX5_FPGA_MSG_WRITE = 1,
+	MLX5_FPGA_MSG_READ_RESPONSE = 2,
+	MLX5_FPGA_MSG_WRITE_RESPONSE = 3,
+};
+
+struct mlx5_ifc_fpga_shell_qp_packet_bits { /* message header carried over the FPGA shell QP */
+	u8         version[0x4];
+	u8         syndrome[0x4];
+	u8         reserved_0[0x4];
+	u8         type[0x4]; /* one of MLX5_FPGA_MSG_* */
+	u8         reserved_1[0x8];
+	u8         tid[0x8]; /* transaction id — presumably pairs responses with requests; confirm */
+
+	u8         len[0x20];
+
+	u8         address_h[0x20]; /* 64-bit target address, split high/low */
+
+	u8         address_l[0x20];
+
+	u8         data[0][0x8]; /* variable-length payload follows the header */
+};
+
+struct mlx5_ifc_ipsec_extended_cap_bits { /* extended caps for the IPSec sandbox image — presumably read via sandbox_extended_caps_addr; confirm */
+	u8         encapsulation[0x20];
+
+	u8         reserved_0[0x15];
+	u8         ipv4_fragment[0x1]; /* per-feature capability bits */
+	u8         ipv6[0x1];
+	u8         esn[0x1];
+	u8         lso[0x1];
+	u8         transport_and_tunnel_mode[0x1];
+	u8         tunnel_mode[0x1];
+	u8         transport_mode[0x1];
+	u8         ah_esp[0x1];
+	u8         esp[0x1];
+	u8         ah[0x1];
+	u8         ipv4_options[0x1];
+
+	u8         auth_alg[0x20]; /* presumably bitmasks of supported auth/encryption algorithms — confirm */
+
+	u8         enc_alg[0x20];
+
+	u8         sa_cap[0x20];
+
+	u8         reserved_1[0x10];
+	u8         number_of_ipsec_counters[0x10];
+
+	u8         ipsec_counters_start_addr[0x20]; /* where the mlx5_ifc_ipsec_counters layout lives — presumably in FPGA memory; confirm */
+};
+
+struct mlx5_ifc_ipsec_counters_bits { /* IPSec sandbox counters, all 64-bit */
+	u8         dec_in_packets[0x40];
+
+	u8         dec_out_packets[0x40];
+
+	u8         dec_bypass_packets[0x40];
+
+	u8         enc_in_packets[0x40];
+
+	u8         enc_out_packets[0x40];
+
+	u8         enc_bypass_packets[0x40];
+
+	u8         drop_dec_packets[0x40];
+
+	u8         failed_auth_dec_packets[0x40];
+
+	u8         drop_enc_packets[0x40];
+
+	u8         success_add_sa[0x40]; /* SA (security association) add/delete command results */
+
+	u8         fail_add_sa[0x40];
+
+	u8         success_delete_sa[0x40];
+
+	u8         fail_delete_sa[0x40];
+
+	u8         dropped_cmd[0x40];
+};
+
+enum { /* syndrome values reported in mlx5_ifc_fpga_qp_error_event_bits */
+	MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED  = 0x1,
+	MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED            = 0x2,
+};
+
+struct mlx5_ifc_fpga_qp_error_event_bits { /* per-QP error event payload */
+	u8         reserved_0[0x40];
+
+	u8         reserved_1[0x18];
+	u8         syndrome[0x8]; /* one of MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_* */
+
+	u8         reserved_2[0x60];
+
+	u8         reserved_3[0x8];
+	u8         fpga_qpn[0x18]; /* FPGA QP the error applies to */
+};
+
+enum { /* syndrome values reported in mlx5_ifc_fpga_error_event_bits */
+	MLX5_FPGA_ERROR_EVENT_SYNDROME_CORRUPTED_DDR        = 0x1,
+	MLX5_FPGA_ERROR_EVENT_SYNDROME_FLASH_TIMEOUT        = 0x2,
+	MLX5_FPGA_ERROR_EVENT_SYNDROME_INTERNAL_LINK_ERROR  = 0x3,
+	MLX5_FPGA_ERROR_EVENT_SYNDROME_WATCHDOG_HW_FAILURE  = 0x4,
+	MLX5_FPGA_ERROR_EVENT_SYNDROME_I2C_FAILURE          = 0x5,
+	MLX5_FPGA_ERROR_EVENT_SYNDROME_IMAGE_CHANGED        = 0x6,
+};
+
+struct mlx5_ifc_fpga_error_event_bits { /* device-level FPGA error event payload */
+	u8         reserved_0[0x40];
+
+	u8         reserved_1[0x18];
+	u8         syndrome[0x8]; /* one of MLX5_FPGA_ERROR_EVENT_SYNDROME_* */
+
+	u8         reserved_2[0x80];
+};
 #endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 0aacb2a..dc093484 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -223,10 +223,20 @@
 	MLX5_ETH_WQE_L4_CSUM            = 1 << 7,
 };
 
+enum { /* bits for mlx5_wqe_eth_seg.swp_flags (swp — presumably "software parser"; confirm) */
+	MLX5_ETH_WQE_SWP_INNER_L3_IPV6  = 1 << 0,
+	MLX5_ETH_WQE_SWP_INNER_L4_UDP   = 1 << 1,
+	MLX5_ETH_WQE_SWP_OUTER_L3_IPV6  = 1 << 4,
+	MLX5_ETH_WQE_SWP_OUTER_L4_UDP   = 1 << 5,
+};
+
 struct mlx5_wqe_eth_seg {
-	u8              rsvd0[4];
+	u8              swp_outer_l4_offset; /* header offsets for the SW parser; units not shown here — confirm */
+	u8              swp_outer_l3_offset;
+	u8              swp_inner_l4_offset;
+	u8              swp_inner_l3_offset;
 	u8              cs_flags;
-	u8              rsvd1;
+	u8              swp_flags; /* MLX5_ETH_WQE_SWP_* bits */
 	__be16          mss;
 	__be32          rsvd2;
 	__be16          inline_hdr_sz;
diff --git a/include/linux/mlx5_ib/driver.h b/include/linux/mlx5_ib/driver.h
new file mode 100644
index 0000000..6ef6d4f
--- /dev/null
+++ b/include/linux/mlx5_ib/driver.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_IB_DRIVER_H
+#define MLX5_IB_DRIVER_H
+
+#include <rdma/ib_verbs.h>
+
+bool mlx5_ib_is_gid_reserved(struct ib_device *ib_dev, u8 port, int index); /* true if this GID index is reserved */
+int mlx5_ib_reserved_gid_add(struct ib_device *ib_dev, u8 port,
+			     enum ib_gid_type gid_type, union ib_gid *gid,
+			     u8 *mac, bool vlan, u16 vlan_id, int *gid_index); /* picks index via *gid_index; presumably 0 on success — confirm */
+void mlx5_ib_reserved_gid_del(struct ib_device *ib_dev, u8 port, int gid_index); /* releases an index from _add */
+#endif /* MLX5_IB_DRIVER_H */