Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull first round of SCSI updates from James Bottomley:
 "This patch consists of the usual driver updates (megaraid_sas,
  scsi_debug, qla2xxx, qla4xxx, lpfc, bnx2fc, be2iscsi, hpsa, ipr) plus
  an assortment of minor fixes and the first precursors of SCSI-MQ (the
  code path simplifications) and the bug fix for the USB oops on remove
  (which involves an infrastructure change, so is sent via the main tree
  with a delayed backport after a cycle in which it is shown to
  introduce no new bugs)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (196 commits)
  [SCSI] sd: Quiesce mode sense error messages
  [SCSI] add support for per-host cmd pools
  [SCSI] simplify command allocation and freeing a bit
  [SCSI] megaraid: simplify internal command handling
  [SCSI] ses: Use vpd information from scsi_device
  [SCSI] Add EVPD page 0x83 and 0x80 to sysfs
  [SCSI] Return VPD page length in scsi_vpd_inquiry()
  [SCSI] scsi_sysfs: Implement 'is_visible' callback
  [SCSI] hpsa: update driver version to 3.4.4-1
  [SCSI] hpsa: fix bad endif placement in RAID 5 mapper code
  [SCSI] qla2xxx: Fix build errors related to invalid print fields on some architectures.
  [SCSI] bfa: Replace large udelay() with mdelay()
  [SCSI] vmw_pvscsi: Some improvements in pvscsi driver.
  [SCSI] vmw_pvscsi: Add support for I/O requests coalescing.
  [SCSI] vmw_pvscsi: Fix pvscsi_abort() function.
  [SCSI] remove deprecated IRQF_DISABLED from SCSI
  [SCSI] bfa: Updating Maintainers email ids
  [SCSI] ipr: Add new CCIN definition for Grand Canyon support
  [SCSI] ipr: Format HCAM overlay ID 0x21
  [SCSI] ipr: Use pci_enable_msi_range() and pci_enable_msix_range()
  ...
diff --git a/Documentation/ABI/testing/sysfs-class-scsi_host b/Documentation/ABI/testing/sysfs-class-scsi_host
index 29a4f89..0eb255e 100644
--- a/Documentation/ABI/testing/sysfs-class-scsi_host
+++ b/Documentation/ABI/testing/sysfs-class-scsi_host
@@ -11,3 +11,19 @@
 		guaranteed.  The 'isci_id' attribute unambiguously identifies
 		the controller index: '0' for the first controller,
 		'1' for the second.
+
+What:		/sys/class/scsi_host/hostX/acciopath_status
+Date:		November 2013
+Contact:	Stephen M. Cameron <scameron@beardog.cce.hp.com>
+Description:	This file contains the current status of the "SSD Smart Path"
+		feature of HP Smart Array RAID controllers using the hpsa
+		driver.  SSD Smart Path, when enabled, permits the driver to
+		send i/o requests directly to physical devices that are part
+		of a logical drive, bypassing the controller's firmware RAID
+		stack for a performance advantage when possible.  A value of
+		'1' indicates the feature is enabled, and the controller may
+		use the direct i/o path to physical devices.  A value of '0'
+		means the feature is disabled and the controller may not use
+		the direct i/o path to physical devices.  This setting is
+		controller-wide, affecting all configured logical drives on
+		the controller.  This file is readable and writable.
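
The acciopath_status attribute documented above is an ordinary sysfs
file, so it can be toggled with a plain read/write from user space.  A
minimal sketch (illustrative only, not part of this patch; the host0
index is an assumption, substitute the real hostX for your system):

    /* Hypothetical example: enable SSD Smart Path on host0. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *p = "/sys/class/scsi_host/host0/acciopath_status";
            int fd = open(p, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* '1' enables, '0' disables; the setting is controller-wide */
            if (write(fd, "1", 1) != 1)
                    perror("write");
            close(fd);
            return 0;
    }
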
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 6edaa65..91ba58e 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,16 @@
+Release Date    : Mon. Mar 10, 2014 17:00:00 PST 2014 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+			Kashyap Desai
+			Sumit Saxena
+Current Version : 06.803.01.00-rc1
+Old Version     : 06.700.06.00-rc1
+    1. Load correct raid context timeout value for multipathing & clustering.
+    2. Fix megasas_ioc_init_fusion to use local stack variable.
+    3. Return leaked MPT frames to MPT command pool.
+    4. Add Dell PowerEdge VRTX SR-IOV VF device support.
+    5. Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date    : Sat. Aug 31, 2013 17:00:00 PST 2013 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford
diff --git a/MAINTAINERS b/MAINTAINERS
index 7fe5977..c1b982c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1921,8 +1921,8 @@
 F:	include/linux/bcma/
 
 BROCADE BFA FC SCSI DRIVER
-M:	Anil Gurumurthy <agurumur@brocade.com>
-M:	Vijaya Mohan Guvva <vmohan@brocade.com>
+M:	Anil Gurumurthy <anil.gurumurthy@qlogic.com>
+M:	Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/bfa/
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 1e9d6ad..bcd2238 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -584,7 +584,7 @@
 	NCR5380_setup(instance);
 
 	for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
-		if ((mask & possible) && (request_irq(i, &probe_intr, IRQF_DISABLED, "NCR-probe", NULL) == 0))
+		if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
 			trying_irqs |= mask;
 
 	timeout = jiffies + (250 * HZ / 1000);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 9323d05..eaaf870 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 30200
+# define AAC_DRIVER_BUILD 30300
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index dada38a..5c6a870 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -480,7 +480,7 @@
 
 static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
 {
-	u32 var;
+	u32 var = 0;
 
 	if (!(dev->supplement_adapter_info.SupportedOptions2 &
 	  AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
@@ -500,13 +500,14 @@
 		if (bled && (bled != -ETIMEDOUT))
 			return -EINVAL;
 	}
-	if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
+	if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */
 		rx_writel(dev, MUnit.reserved2, 3);
 		msleep(5000); /* Delay 5 seconds */
 		var = 0x00000001;
 	}
-	if (var != 0x00000001)
+	if (bled && (var != 0x00000001))
 		return -EINVAL;
+	ssleep(5);
 	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
 		return -ENODEV;
 	if (startup_timeout < 300)
@@ -646,7 +647,7 @@
 	dev->sync_mode = 0;	/* sync. mode not supported */
 	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+			IRQF_SHARED, "aacraid", dev) < 0) {
 		if (dev->msi)
 			pci_disable_msi(dev->pdev);
 		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 2244f31..e66477c 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -387,8 +387,7 @@
 		goto error_irq;
 	dev->sync_mode = 0;	/* sync. mode not supported */
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-			IRQF_SHARED|IRQF_DISABLED,
-			"aacraid", (void *)dev ) < 0) {
+			IRQF_SHARED, "aacraid", (void *)dev) < 0) {
 		printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
 			name, instance);
 		goto error_iounmap;
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 7e17107..9c65aed 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -647,7 +647,7 @@
 	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
 
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+			IRQF_SHARED, "aacraid", dev) < 0) {
 
 		if (dev->msi)
 			pci_disable_msi(dev->pdev);
@@ -804,7 +804,7 @@
 		goto error_iounmap;
 	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-		IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+		IRQF_SHARED, "aacraid", dev) < 0) {
 		if (dev->msi)
 			pci_disable_msi(dev->pdev);
 		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 3f7b6fe..e86eb6a 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -857,7 +857,7 @@
 	SETPORT(SIMODE0, 0);
 	SETPORT(SIMODE1, 0);
 
-	if( request_irq(shpnt->irq, swintr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) {
+	if (request_irq(shpnt->irq, swintr, IRQF_SHARED, "aha152x", shpnt)) {
 		printk(KERN_ERR "aha152x%d: irq %d busy.\n", shpnt->host_no, shpnt->irq);
 		goto out_host_put;
 	}
@@ -891,7 +891,7 @@
 	SETPORT(SSTAT0, 0x7f);
 	SETPORT(SSTAT1, 0xef);
 
-	if ( request_irq(shpnt->irq, intr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) {
+	if (request_irq(shpnt->irq, intr, IRQF_SHARED, "aha152x", shpnt)) {
 		printk(KERN_ERR "aha152x%d: failed to reassign irq %d.\n", shpnt->host_no, shpnt->irq);
 		goto out_host_put;
 	}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
index 9df9e2c..8373447 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
@@ -209,7 +209,6 @@
 #define AIC_OP_JC16	0x9105
 #define AIC_OP_JNC16	0x9205
 #define AIC_OP_CALL16	0x9305
-#define AIC_OP_CALL16	0x9305
 
 /* Page extension is low three bits of second opcode byte. */
 #define AIC_OP_JMPF	0xA005
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 4f6a30b..652b41b 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2500,16 +2500,15 @@
 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
 {
 	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
-	dma_addr_t dma_coherent_handle;
+
 	/*
 	********************************************************************
 	** here we need to tell iop 331 our freeccb.HighPart
 	** if freeccb.HighPart is not zero
 	********************************************************************
 	*/
-	dma_coherent_handle = acb->dma_coherent_handle;
-	cdb_phyaddr = (uint32_t)(dma_coherent_handle);
-	cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
+	cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle);
+	cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle);
 	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
 	/*
 	***********************************************************************
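
The arcmsr change above also fixes a truncation bug: the old code cast
dma_coherent_handle to the 32-bit cdb_phyaddr first and then shifted
that, so cdb_phyaddr_hi32 was always zero.  lower_32_bits() and
upper_32_bits() take both halves from the full 64-bit value; their
definitions (from include/linux/kernel.h of this era) are:

    /* upper_32_bits() shifts in two 16-bit steps so the expression
     * stays well-defined even when the argument is only 32 bits wide. */
    #define lower_32_bits(n) ((u32)(n))
    #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
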
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 09ba186..059ff47 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2971,7 +2971,7 @@
 	ec->irqaddr	= ashost->fast + INT_REG;
 	ec->irqmask	= 0x0a;
 
-	ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost);
+	ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost);
 	if (ret) {
 		printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n",
 			host->host_no, ashost->scsi.irq, ret);
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index b679778..f8e0609 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -262,7 +262,7 @@
 		goto out_unmap;
 	}
 
-	ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED,
+	ret = request_irq(host->irq, cumanascsi_intr, 0,
 			  "CumanaSCSI-1", host);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 58915f2..abc66f5 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -431,7 +431,7 @@
 		goto out_free;
 
 	ret = request_irq(ec->irq, cumanascsi_2_intr,
-			  IRQF_DISABLED, "cumanascsi2", info);
+			  0, "cumanascsi2", info);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
 		       host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index abc9593..5e1b73e 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -358,7 +358,7 @@
 		goto out_free;
 
 	ret = request_irq(ec->irq, powertecscsi_intr,
-			  IRQF_DISABLED, "powertec", info);
+			  0, "powertec", info);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
 		       host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 2e28f6c..1bfb0bd 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -98,6 +98,14 @@
 	struct be_queue_info cq;
 };
 
+struct beiscsi_mcc_tag_state {
+#define MCC_TAG_STATE_COMPLETED 0x00
+#define MCC_TAG_STATE_RUNNING   0x01
+#define MCC_TAG_STATE_TIMEOUT   0x02
+	uint8_t tag_state;
+	struct be_dma_mem tag_mem_state;
+};
+
 struct be_ctrl_info {
 	u8 __iomem *csr;
 	u8 __iomem *db;		/* Door Bell */
@@ -122,6 +130,8 @@
 	unsigned short mcc_alloc_index;
 	unsigned short mcc_free_index;
 	unsigned int mcc_tag_available;
+
+	struct beiscsi_mcc_tag_state ptag_state[MAX_MCC_CMD + 1];
 };
 
 #include "be_cmds.h"
@@ -129,6 +139,7 @@
 #define PAGE_SHIFT_4K 12
 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
 #define mcc_timeout		120000 /* 12s timeout */
+#define BEISCSI_LOGOUT_SYNC_DELAY	250
 
 /* Returns number of pages spanned by the data starting at the given addr */
 #define PAGES_4K_SPANNED(_address, size)				\
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 3338391..1432ed5 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -138,7 +138,7 @@
  * @phba: Driver private structure
  * @tag: Tag for the MBX Command
  * @wrb: the WRB used for the MBX Command
- * @cmd_hdr: IOCTL Hdr for the MBX Cmd
+ * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
  *
  * Waits for MBX completion with the passed TAG.
  *
@@ -148,21 +148,26 @@
  **/
 int beiscsi_mccq_compl(struct beiscsi_hba *phba,
 		uint32_t tag, struct be_mcc_wrb **wrb,
-		void *cmd_hdr)
+		struct be_dma_mem *mbx_cmd_mem)
 {
 	int rc = 0;
 	uint32_t mcc_tag_response;
 	uint16_t status = 0, addl_status = 0, wrb_num = 0;
 	struct be_mcc_wrb *temp_wrb;
-	struct be_cmd_req_hdr *ioctl_hdr;
-	struct be_cmd_resp_hdr *ioctl_resp_hdr;
+	struct be_cmd_req_hdr *mbx_hdr;
+	struct be_cmd_resp_hdr *mbx_resp_hdr;
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
 	if (beiscsi_error(phba)) {
 		free_mcc_tag(&phba->ctrl, tag);
-		return -EIO;
+		return -EPERM;
 	}
 
+	/* Set MBX Tag state to Active */
+	spin_lock(&phba->ctrl.mbox_lock);
+	phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING;
+	spin_unlock(&phba->ctrl.mbox_lock);
+
 	/* wait for the mccq completion */
 	rc = wait_event_interruptible_timeout(
 				phba->ctrl.mcc_wait[tag],
@@ -171,56 +176,71 @@
 				BEISCSI_HOST_MBX_TIMEOUT));
 
 	if (rc <= 0) {
+		struct be_dma_mem *tag_mem;
+		/* Set MBX Tag state to timeout */
+		spin_lock(&phba->ctrl.mbox_lock);
+		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT;
+		spin_unlock(&phba->ctrl.mbox_lock);
+
+		/* Store resource addr to be freed later */
+		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
+		if (mbx_cmd_mem) {
+			tag_mem->size = mbx_cmd_mem->size;
+			tag_mem->va = mbx_cmd_mem->va;
+			tag_mem->dma = mbx_cmd_mem->dma;
+		} else
+			tag_mem->size = 0;
+
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 			    BEISCSI_LOG_CONFIG,
 			    "BC_%d : MBX Cmd Completion timed out\n");
-		rc = -EBUSY;
-
-		/* decrement the mccq used count */
-		atomic_dec(&phba->ctrl.mcc_obj.q.used);
-
-		goto release_mcc_tag;
-	} else
+		return -EBUSY;
+	} else {
 		rc = 0;
+		/* Set MBX Tag state to completed */
+		spin_lock(&phba->ctrl.mbox_lock);
+		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+		spin_unlock(&phba->ctrl.mbox_lock);
+	}
 
 	mcc_tag_response = phba->ctrl.mcc_numtag[tag];
 	status = (mcc_tag_response & CQE_STATUS_MASK);
 	addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
 			CQE_STATUS_ADDL_SHIFT);
 
-	if (cmd_hdr) {
-		ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr;
+	if (mbx_cmd_mem) {
+		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
 	} else {
 		wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
 			   CQE_STATUS_WRB_SHIFT;
 		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
-		ioctl_hdr = embedded_payload(temp_wrb);
+		mbx_hdr = embedded_payload(temp_wrb);
 
 		if (wrb)
 			*wrb = temp_wrb;
 	}
 
 	if (status || addl_status) {
-		beiscsi_log(phba, KERN_ERR,
+		beiscsi_log(phba, KERN_WARNING,
 			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 			    BEISCSI_LOG_CONFIG,
 			    "BC_%d : MBX Cmd Failed for "
 			    "Subsys : %d Opcode : %d with "
 			    "Status : %d and Extd_Status : %d\n",
-			    ioctl_hdr->subsystem,
-			    ioctl_hdr->opcode,
+			    mbx_hdr->subsystem,
+			    mbx_hdr->opcode,
 			    status, addl_status);
 
 		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
-			ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
+			mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
 			beiscsi_log(phba, KERN_WARNING,
 				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 				    BEISCSI_LOG_CONFIG,
 				    "BC_%d : Insufficent Buffer Error "
 				    "Resp_Len : %d Actual_Resp_Len : %d\n",
-				    ioctl_resp_hdr->response_length,
-				    ioctl_resp_hdr->actual_resp_len);
+				    mbx_resp_hdr->response_length,
+				    mbx_resp_hdr->actual_resp_len);
 
 			rc = -EAGAIN;
 			goto release_mcc_tag;
@@ -319,6 +339,7 @@
 int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
 				    struct be_mcc_compl *compl)
 {
+	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 	u16 compl_status, extd_status;
 	unsigned short tag;
 
@@ -338,7 +359,32 @@
 	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
 	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
 	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
-	wake_up_interruptible(&ctrl->mcc_wait[tag]);
+
+	if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) {
+		wake_up_interruptible(&ctrl->mcc_wait[tag]);
+	} else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) {
+		struct be_dma_mem *tag_mem;
+		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+
+		beiscsi_log(phba, KERN_WARNING,
+			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
+			    BEISCSI_LOG_CONFIG,
+			    "BC_%d : MBX Completion for timeout Command "
+			    "from FW\n");
+		/* Check if memory needs to be freed */
+		if (tag_mem->size)
+			pci_free_consistent(ctrl->pdev, tag_mem->size,
+					    tag_mem->va, tag_mem->dma);
+
+		/* Change tag state */
+		spin_lock(&phba->ctrl.mbox_lock);
+		ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+		spin_unlock(&phba->ctrl.mbox_lock);
+
+		/* Free MCC Tag */
+		free_mcc_tag(ctrl, tag);
+	}
+
 	return 0;
 }
 
@@ -354,8 +400,23 @@
 	return NULL;
 }
 
-static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
+/**
+ * be2iscsi_fail_session(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ *
+ * Depending on the adapter state, the appropriate error flag is passed.
+ **/
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
 {
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	uint32_t iscsi_err_flag;
+
+	if (phba->state & BE_ADAPTER_STATE_SHUTDOWN)
+		iscsi_err_flag = ISCSI_ERR_INVALID_HOST;
+	else
+		iscsi_err_flag = ISCSI_ERR_CONN_FAILED;
+
-	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+	iscsi_session_failure(cls_session->dd_data, iscsi_err_flag);
 }
 
@@ -386,18 +447,6 @@
 	}
 }
 
-static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
-		       u16 num_popped)
-{
-	u32 val = 0;
-	val |= qid & DB_CQ_RING_ID_MASK;
-	if (arm)
-		val |= 1 << DB_CQ_REARM_SHIFT;
-	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
-	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
-}
-
-
 int beiscsi_process_mcc(struct beiscsi_hba *phba)
 {
 	struct be_mcc_compl *compl;
@@ -428,7 +477,7 @@
 	}
 
 	if (num)
-		beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
+		hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);
 
 	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
 	return status;
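
The be_cmds.c changes above add a small per-tag state machine: a waiter
that times out no longer frees the command's DMA buffer itself but
parks it in ptag_state[tag] and marks the tag TIMEOUT, and the
completion handler frees the buffer when the completion eventually
arrives from firmware.  A condensed sketch of that handshake (locking
and the real wait primitive elided; free() stands in for
pci_free_consistent()):

    #include <stdbool.h>
    #include <stdlib.h>

    enum tag_state { TAG_COMPLETED, TAG_RUNNING, TAG_TIMEOUT };

    struct tag_slot {
            enum tag_state state;
            void *dma_buf;          /* parked by a timed-out waiter */
    };

    /* Waiter: 'completed' stands in for the result of
     * wait_event_interruptible_timeout(). */
    static int issue_and_wait(struct tag_slot *s, void *buf, bool completed)
    {
            s->state = TAG_RUNNING;
            if (!completed) {
                    s->state = TAG_TIMEOUT;
                    s->dma_buf = buf;       /* handler frees it later */
                    return -1;              /* caller must NOT free buf */
            }
            s->state = TAG_COMPLETED;
            return 0;
    }

    /* Completion side: a completion arriving for a TAG_TIMEOUT slot
     * frees the parked buffer instead of waking a waiter that is gone. */
    static void completion_isr(struct tag_slot *s)
    {
            if (s->state == TAG_TIMEOUT && s->dma_buf) {
                    free(s->dma_buf);
                    s->dma_buf = NULL;
            }
            s->state = TAG_COMPLETED;
    }
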
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 627ebbe..7cf7f99 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -103,7 +103,7 @@
 
 /********** MCC door bell ************/
 #define DB_MCCQ_OFFSET 0x140
-#define DB_MCCQ_RING_ID_MASK 0x7FF		/* bits 0 - 10 */
+#define DB_MCCQ_RING_ID_MASK 0xFFFF		/* bits 0 - 15 */
 /* Number of entries posted */
 #define DB_MCCQ_NUM_POSTED_SHIFT 16		/* bits 16 - 29 */
 
@@ -709,7 +709,8 @@
 void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
 
 int beiscsi_mccq_compl(struct beiscsi_hba *phba,
-			uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
+			uint32_t tag, struct be_mcc_wrb **wrb,
+			struct be_dma_mem *mbx_cmd_mem);
 /*ISCSI Functuions */
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
 int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
@@ -1017,8 +1018,8 @@
 	int *users_final_status;
 } __packed;
 
-#define DB_DEF_PDU_RING_ID_MASK		0x3FF	/* bits 0 - 9 */
-#define DB_DEF_PDU_CQPROC_MASK		0x3FFF	/* bits 0 - 9 */
+#define DB_DEF_PDU_RING_ID_MASK	0x3FFF	/* bits 0 - 13 */
+#define DB_DEF_PDU_CQPROC_MASK		0x3FFF	/* bits 16 - 29 */
 #define DB_DEF_PDU_REARM_SHIFT		14
 #define DB_DEF_PDU_EVENT_SHIFT		15
 #define DB_DEF_PDU_CQPROC_SHIFT		16
@@ -1317,4 +1318,5 @@
 void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
 			u8 subsystem, u8 opcode, int cmd_len);
 
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session);
 #endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 889066d..a3df433 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -793,7 +793,7 @@
 		ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;
 		break;
 	case BE2ISCSI_LINK_SPEED_100MBPS:
-		ihost->port_speed = BE2ISCSI_LINK_SPEED_100MBPS;
+		ihost->port_speed = ISCSI_PORT_SPEED_100MBPS;
 		break;
 	case BE2ISCSI_LINK_SPEED_1GBPS:
 		ihost->port_speed = ISCSI_PORT_SPEED_1GBPS;
@@ -1153,16 +1153,18 @@
 		return -EAGAIN;
 	}
 
-	ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+	ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 			    "BS_%d : mgmt_open_connection Failed");
 
-		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-			    nonemb_cmd.va, nonemb_cmd.dma);
+		if (ret != -EBUSY)
+			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+					    nonemb_cmd.va, nonemb_cmd.dma);
+
 		beiscsi_free_ep(beiscsi_ep);
-		return -EBUSY;
+		return ret;
 	}
 
 	ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
@@ -1359,6 +1361,7 @@
 	beiscsi_mccq_compl(phba, tag, NULL, NULL);
 	beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
 free_ep:
+	msleep(BEISCSI_LOGOUT_SYNC_DELAY);
 	beiscsi_free_ep(beiscsi_ep);
 	beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
 	iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 5642a9b..9be818f 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -228,24 +228,25 @@
 	struct invalidate_command_table *inv_tbl;
 	struct be_dma_mem nonemb_cmd;
 	unsigned int cid, tag, num_invalidate;
+	int rc;
 
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (!aborted_task || !aborted_task->sc) {
 		/* we raced */
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return SUCCESS;
 	}
 
 	aborted_io_task = aborted_task->dd_data;
 	if (!aborted_io_task->scsi_cmnd) {
 		/* raced or invalid command */
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return SUCCESS;
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	/* Invalidate WRB Posted for this Task */
 	AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
 		      aborted_io_task->pwrb_handle->pwrb,
@@ -285,9 +286,11 @@
 		return FAILED;
 	}
 
-	beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
-	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-			    nonemb_cmd.va, nonemb_cmd.dma);
+	rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+	if (rc != -EBUSY)
+		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+				    nonemb_cmd.va, nonemb_cmd.dma);
+
 	return iscsi_eh_abort(sc);
 }
 
@@ -303,13 +306,14 @@
 	struct invalidate_command_table *inv_tbl;
 	struct be_dma_mem nonemb_cmd;
 	unsigned int cid, tag, i, num_invalidate;
+	int rc;
 
 	/* invalidate iocbs */
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return FAILED;
 	}
 	conn = session->leadconn;
@@ -338,7 +342,7 @@
 		num_invalidate++;
 		inv_tbl++;
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	inv_tbl = phba->inv_tbl;
 
 	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -363,9 +367,10 @@
 		return FAILED;
 	}
 
-	beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
-	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-			    nonemb_cmd.va, nonemb_cmd.dma);
+	rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+	if (rc != -EBUSY)
+		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+				    nonemb_cmd.va, nonemb_cmd.dma);
 	return iscsi_eh_device_reset(sc);
 }
 
@@ -674,8 +679,19 @@
 	}
 
 	pci_set_master(pcidev);
-	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
-		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
+	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
+	if (ret) {
+		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+		if (ret) {
+			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
+			pci_disable_device(pcidev);
+			return ret;
+		} else {
+			ret = pci_set_consistent_dma_mask(pcidev,
+							  DMA_BIT_MASK(32));
+		}
+	} else {
+		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
 		if (ret) {
 			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
 			pci_disable_device(pcidev);
@@ -804,14 +820,23 @@
 			   unsigned char rearm, unsigned char event)
 {
 	u32 val = 0;
-	val |= id & DB_EQ_RING_ID_MASK;
+
 	if (rearm)
 		val |= 1 << DB_EQ_REARM_SHIFT;
 	if (clr_interrupt)
 		val |= 1 << DB_EQ_CLR_SHIFT;
 	if (event)
 		val |= 1 << DB_EQ_EVNT_SHIFT;
+
 	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
+	/* Setting lower order EQ_ID Bits */
+	val |= (id & DB_EQ_RING_ID_LOW_MASK);
+
+	/* Setting Higher order EQ_ID Bits */
+	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
+		  DB_EQ_RING_ID_HIGH_MASK)
+		  << DB_EQ_HIGH_SET_SHIFT);
+
 	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
 }
 
@@ -1093,15 +1118,25 @@
 	return ret;
 }
 
-static void hwi_ring_cq_db(struct beiscsi_hba *phba,
+void hwi_ring_cq_db(struct beiscsi_hba *phba,
 			   unsigned int id, unsigned int num_processed,
 			   unsigned char rearm, unsigned char event)
 {
 	u32 val = 0;
-	val |= id & DB_CQ_RING_ID_MASK;
+
 	if (rearm)
 		val |= 1 << DB_CQ_REARM_SHIFT;
+
 	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
+
+	/* Setting lower order CQ_ID Bits */
+	val |= (id & DB_CQ_RING_ID_LOW_MASK);
+
+	/* Setting Higher order CQ_ID Bits */
+	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
+		  DB_CQ_RING_ID_HIGH_MASK)
+		  << DB_CQ_HIGH_SET_SHIFT);
+
 	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
 }
 
@@ -1150,9 +1185,9 @@
 		return 1;
 	}
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 	return 0;
 }
 
@@ -1342,8 +1377,10 @@
 	resid = csol_cqe->res_cnt;
 
 	if (!task->sc) {
-		if (io_task->scsi_cmnd)
+		if (io_task->scsi_cmnd) {
 			scsi_dma_unmap(io_task->scsi_cmnd);
+			io_task->scsi_cmnd = NULL;
+		}
 
 		return;
 	}
@@ -1380,6 +1417,7 @@
 		conn->rxdata_octets += resid;
 unmap:
 	scsi_dma_unmap(io_task->scsi_cmnd);
+	io_task->scsi_cmnd = NULL;
 	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
 }
 
@@ -1568,7 +1606,7 @@
 	pwrb = pwrb_handle->pwrb;
 	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	switch (type) {
 	case HWH_TYPE_IO:
 	case HWH_TYPE_IO_RD:
@@ -1607,7 +1645,7 @@
 		break;
 	}
 
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 }
 
 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
@@ -4360,12 +4398,16 @@
 		goto boot_freemem;
 	}
 
-	ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+	ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
 			    "BM_%d : beiscsi_get_session_info Failed");
-		goto boot_freemem;
+
+		if (ret != -EBUSY)
+			goto boot_freemem;
+		else
+			return ret;
 	}
 
 	session_resp = nonemb_cmd.va ;
@@ -4625,6 +4667,11 @@
 			spin_unlock(&phba->io_sgl_lock);
 			io_task->psgl_handle = NULL;
 		}
+
+		if (io_task->scsi_cmnd) {
+			scsi_dma_unmap(io_task->scsi_cmnd);
+			io_task->scsi_cmnd = NULL;
+		}
 	} else {
 		if (!beiscsi_conn->login_in_progress)
 			beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
@@ -4646,9 +4693,9 @@
 	 * login/startup related tasks.
 	 */
 	beiscsi_conn->login_in_progress = 0;
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	beiscsi_cleanup_task(task);
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 
 	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
 
@@ -5273,6 +5320,8 @@
 		return;
 	}
 
+	phba->state = BE_ADAPTER_STATE_SHUTDOWN;
+	iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session);
 	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
 	pci_disable_device(pcidev);
 }
@@ -5594,6 +5643,8 @@
 		phba->ctrl.mcc_tag[i] = i + 1;
 		phba->ctrl.mcc_numtag[i + 1] = 0;
 		phba->ctrl.mcc_tag_available++;
+		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
+		       sizeof(struct beiscsi_mcc_tag_state));
 	}
 
 	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 31fa27b..9380b55 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 
 #define DRV_NAME		"be2iscsi"
-#define BUILD_STR		"10.0.659.0"
+#define BUILD_STR		"10.2.125.0"
 #define BE_NAME			"Emulex OneConnect" \
 				"Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC		BE_NAME " " "Driver"
@@ -97,9 +97,14 @@
 
 #define INVALID_SESS_HANDLE	0xFFFFFFFF
 
+/**
+ * Adapter States
+ **/
 #define BE_ADAPTER_LINK_UP	0x001
 #define BE_ADAPTER_LINK_DOWN	0x002
 #define BE_ADAPTER_PCI_ERR	0x004
+#define BE_ADAPTER_STATE_SHUTDOWN	0x008
+
 
 #define BEISCSI_CLEAN_UNLOAD	0x01
 #define BEISCSI_EEH_UNLOAD	0x02
@@ -135,11 +140,15 @@
 #define DB_RXULP0_OFFSET 0xA0
 /********* Event Q door bell *************/
 #define DB_EQ_OFFSET			DB_CQ_OFFSET
-#define DB_EQ_RING_ID_MASK		0x1FF	/* bits 0 - 8 */
+#define DB_EQ_RING_ID_LOW_MASK		0x1FF	/* bits 0 - 8 */
 /* Clear the interrupt for this eq */
 #define DB_EQ_CLR_SHIFT			(9)	/* bit 9 */
 /* Must be 1 */
 #define DB_EQ_EVNT_SHIFT		(10)	/* bit 10 */
+/* Higher Order EQ_ID bit */
+#define DB_EQ_RING_ID_HIGH_MASK	0x1F /* bits 11 - 15 */
+#define DB_EQ_HIGH_SET_SHIFT	11
+#define DB_EQ_HIGH_FEILD_SHIFT	9
 /* Number of event entries processed */
 #define DB_EQ_NUM_POPPED_SHIFT		(16)	/* bits 16 - 28 */
 /* Rearm bit */
@@ -147,7 +156,12 @@
 
 /********* Compl Q door bell *************/
 #define DB_CQ_OFFSET			0x120
-#define DB_CQ_RING_ID_MASK		0x3FF	/* bits 0 - 9 */
+#define DB_CQ_RING_ID_LOW_MASK		0x3FF	/* bits 0 - 9 */
+/* Higher Order CQ_ID bit */
+#define DB_CQ_RING_ID_HIGH_MASK	0x1F /* bits 11 - 15 */
+#define DB_CQ_HIGH_SET_SHIFT	11
+#define DB_CQ_HIGH_FEILD_SHIFT	10
+
 /* Number of event entries processed */
 #define DB_CQ_NUM_POPPED_SHIFT		(16)	/* bits 16 - 28 */
 /* Rearm bit */
@@ -821,6 +835,9 @@
 void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
 				     struct iscsi_task *task);
 
+void hwi_ring_cq_db(struct beiscsi_hba *phba,
+		     unsigned int id, unsigned int num_processed,
+		     unsigned char rearm, unsigned char event);
 static inline bool beiscsi_error(struct beiscsi_hba *phba)
 {
 	return phba->ue_detected || phba->fw_timeout;
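
The new RING_ID_LOW/HIGH masks above exist because newer adapters use
queue ids wider than the original 9/10-bit doorbell field: the low id
bits land in doorbell bits 0-9 (0-8 for EQs) as before, while the next
five id bits are relocated to doorbell bits 11-15.  A worked example of
the CQ encoding from hwi_ring_cq_db() (the id value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    #define DB_CQ_RING_ID_LOW_MASK  0x3FF   /* doorbell bits 0 - 9 */
    #define DB_CQ_RING_ID_HIGH_MASK 0x1F    /* doorbell bits 11 - 15 */
    #define DB_CQ_HIGH_SET_SHIFT    11
    #define DB_CQ_HIGH_FEILD_SHIFT  10      /* [sic] spelling from the patch */

    int main(void)
    {
            uint32_t id = 0x5A7, val = 0;   /* an 11-bit CQ id */

            val |= id & DB_CQ_RING_ID_LOW_MASK;
            val |= ((id >> DB_CQ_HIGH_FEILD_SHIFT) & DB_CQ_RING_ID_HIGH_MASK)
                    << DB_CQ_HIGH_SET_SHIFT;
            /* low ten bits 0x1A7, id bit 10 set -> doorbell value 0x9A7 */
            printf("id=0x%x -> doorbell id bits=0x%x\n", id, val);
            return 0;
    }
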
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index b2fcac7..088bdf7 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -828,22 +828,25 @@
 	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
 
-	rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
-	if (rc) {
-		/* Check if the IOCTL needs to be re-issued */
-		if (rc == -EAGAIN)
-			return rc;
-
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-			    "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
-
-		goto free_cmd;
-	}
+	rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd);
 
 	if (resp_buf)
 		memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
 
+	if (rc) {
+		/* Check if the MBX Cmd needs to be re-issued */
+		if (rc == -EAGAIN)
+			return rc;
+
+		beiscsi_log(phba, KERN_WARNING,
+			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+			    "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
+
+		if (rc != -EBUSY)
+			goto free_cmd;
+		else
+			return rc;
+	}
 free_cmd:
 	pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
 			    nonemb_cmd->va, nonemb_cmd->dma);
@@ -1348,7 +1351,6 @@
 {
 	int rc;
 	unsigned int tag;
-	struct be_mcc_wrb *wrb = NULL;
 
 	tag = be_cmd_set_vlan(phba, vlan_tag);
 	if (!tag) {
@@ -1358,7 +1360,7 @@
 		return -EBUSY;
 	}
 
-	rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+	rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
 	if (rc) {
 		beiscsi_log(phba, KERN_ERR,
 			    (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 65180e1..315d6d6 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3878,7 +3878,7 @@
 		bfa_trc(sfp, sfp->data_valid);
 		if (sfp->data_valid) {
 			u32	size = sizeof(struct sfp_mem_s);
-			u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
+			u8 *des = (u8 *) &(sfp->sfpmem);
 			memcpy(des, sfp->dbuf_kva, size);
 		}
 		/*
@@ -6851,7 +6851,7 @@
 bfa_flash_status_read(void __iomem *pci_bar)
 {
 	union bfa_flash_dev_status_reg_u	dev_status;
-	u32				status;
+	int				status;
 	u32			ret_status;
 	int				i;
 
@@ -6899,7 +6899,7 @@
 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
 			 char *buf)
 {
-	u32 status;
+	int status;
 
 	/*
 	 * len must be mutiple of 4 and not exceeding fifo size
@@ -7006,7 +7006,7 @@
 	while (!bfa_raw_sem_get(bar)) {
 		if (--n <= 0)
 			return BFA_STATUS_BADFLASH;
-		udelay(10000);
+		mdelay(10);
 	}
 	return BFA_STATUS_OK;
 }
@@ -7021,7 +7021,8 @@
 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
 		       u32 len)
 {
-	u32 n, status;
+	u32 n;
+	int status;
 	u32 off, l, s, residue, fifo_sz;
 
 	residue = len;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 157f604..8994fb8 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2304,8 +2304,10 @@
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	if (bfa_fcport_is_dport(&bfad->bfa))
+	if (bfa_fcport_is_dport(&bfad->bfa)) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 		return BFA_STATUS_DPORT_ERR;
+	}
 
 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 9967f9c..f067332 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -73,9 +73,14 @@
 
 		break;
 
-	case BFI_IOIM_STS_ABORTED:
 	case BFI_IOIM_STS_TIMEDOUT:
+		host_status = DID_TIME_OUT;
+		cmnd->result = ScsiResult(host_status, 0);
+		break;
 	case BFI_IOIM_STS_PATHTOV:
+		host_status = DID_TRANSPORT_DISRUPTED;
+		cmnd->result = ScsiResult(host_status, 0);
+		break;
 	default:
 		host_status = DID_ERROR;
 		cmnd->result = ScsiResult(host_status, 0);
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 1ebf3fb..6a97665 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -64,7 +64,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"2.4.1"
+#define BNX2FC_VERSION		"2.4.2"
 
 #define PFX			"bnx2fc: "
 
@@ -367,6 +367,7 @@
 	atomic_t num_active_ios;
 	u32 flush_in_prog;
 	unsigned long timestamp;
+	unsigned long retry_delay_timestamp;
 	struct list_head free_task_list;
 	struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
 	struct list_head active_cmd_queue;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 9b94850..6287f6a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Sep 17, 2013"
+#define DRV_MODULE_RELDATE	"Dec 11, 2013"
 
 
 static char version[] =
@@ -850,6 +850,9 @@
 				__bnx2fc_destroy(interface);
 		}
 		mutex_unlock(&bnx2fc_dev_lock);
+
+		/* Ensure ALL destroy work has been completed before return */
+		flush_workqueue(bnx2fc_wq);
 		return;
 
 	default:
@@ -2389,6 +2392,9 @@
 			__bnx2fc_destroy(interface);
 	mutex_unlock(&bnx2fc_dev_lock);
 
+	/* Ensure ALL destroy work has been completed before return */
+	flush_workqueue(bnx2fc_wq);
+
 	bnx2fc_ulp_stop(hba);
 	/* unregister cnic device */
 	if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index e9279a8..32a5e0a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1871,7 +1871,15 @@
 		rc = SCSI_MLQUEUE_TARGET_BUSY;
 		goto exit_qcmd;
 	}
-
+	if (tgt->retry_delay_timestamp) {
+		if (time_after(jiffies, tgt->retry_delay_timestamp)) {
+			tgt->retry_delay_timestamp = 0;
+		} else {
+			/* If retry_delay timer is active, flow off the ML */
+			rc = SCSI_MLQUEUE_TARGET_BUSY;
+			goto exit_qcmd;
+		}
+	}
 	io_req = bnx2fc_cmd_alloc(tgt);
 	if (!io_req) {
 		rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -1961,6 +1969,15 @@
 				 " fcp_resid = 0x%x\n",
 				io_req->cdb_status, io_req->fcp_resid);
 			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+
+			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
+			    io_req->cdb_status == SAM_STAT_BUSY) {
+				/* Set the jiffies + retry_delay_timer * 100ms
+				   for the rport/tgt */
+				tgt->retry_delay_timestamp = jiffies +
+					fcp_rsp->retry_delay_timer * HZ / 10;
+			}
+
 		}
 		if (io_req->fcp_resid)
 			scsi_set_resid(sc_cmd, io_req->fcp_resid);
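
The retry_delay_timestamp logic above flows off a target that returned
TASK SET FULL or BUSY; per the comment in the hunk, the firmware
supplied retry_delay_timer is in 100 ms increments, so the deadline is
jiffies + timer * HZ / 10.  A small sketch of just that conversion (the
function name is illustrative):

    /* 100 ms units -> jiffies: with HZ = 1000 and timer = 5, the
     * deadline lands 500 jiffies (500 ms) after 'now'. */
    static unsigned long retry_deadline(unsigned long now,
                                        unsigned int timer_100ms,
                                        unsigned int hz)
    {
            return now + (unsigned long)timer_100ms * hz / 10;
    }
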
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index d9bae56..6870cf6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -386,6 +386,7 @@
 	tgt->rq_prod_idx = 0x8000;
 	tgt->rq_cons_idx = 0;
 	atomic_set(&tgt->num_active_ios, 0);
+	tgt->retry_delay_timestamp = 0;
 
 	if (rdata->flags & FC_RP_FLAGS_RETRY &&
 	    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index b87a193..b5ffd28 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1361,7 +1361,7 @@
 	u32 datalen = 0;
 
 	resp_cqe = (struct bnx2i_cmd_response *)cqe;
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
 	if (!task)
@@ -1432,7 +1432,7 @@
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
 			     conn->data, datalen);
 fail:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 	return 0;
 }
 
@@ -1457,7 +1457,7 @@
 	int pad_len;
 
 	login = (struct bnx2i_login_response *) cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
 	if (!task)
@@ -1500,7 +1500,7 @@
 		bnx2i_conn->gen_pdu.resp_buf,
 		bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	return 0;
 }
 
@@ -1525,7 +1525,7 @@
 	int pad_len;
 
 	text = (struct bnx2i_text_response *) cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
 	if (!task)
 		goto done;
@@ -1561,7 +1561,7 @@
 			     bnx2i_conn->gen_pdu.resp_wr_ptr -
 			     bnx2i_conn->gen_pdu.resp_buf);
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	return 0;
 }
 
@@ -1584,7 +1584,7 @@
 	struct iscsi_tm_rsp *resp_hdr;
 
 	tmf_cqe = (struct bnx2i_tmf_response *)cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
 	if (!task)
@@ -1600,7 +1600,7 @@
 
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	return 0;
 }
 
@@ -1623,7 +1623,7 @@
 	struct iscsi_logout_rsp *resp_hdr;
 
 	logout = (struct bnx2i_logout_response *) cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
 	if (!task)
@@ -1647,7 +1647,7 @@
 
 	bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD;
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	return 0;
 }
 
@@ -1668,12 +1668,12 @@
 	struct iscsi_task *task;
 
 	nop_in = (struct bnx2i_nop_in_msg *)cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
 	if (task)
 		__iscsi_put_task(task);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 }
 
 /**
@@ -1712,7 +1712,7 @@
 
 	nop_in = (struct bnx2i_nop_in_msg *)cqe;
 
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
 	memset(hdr, 0, sizeof(struct iscsi_hdr));
 	hdr->opcode = nop_in->op_code;
@@ -1738,7 +1738,7 @@
 	}
 done:
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 
 	return tgt_async_nop;
 }
@@ -1771,7 +1771,7 @@
 		return;
 	}
 
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
 	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
 	resp_hdr->opcode = async_cqe->op_code;
@@ -1790,7 +1790,7 @@
 
 	__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
 			     (struct iscsi_hdr *)resp_hdr, NULL, 0);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 }
 
 
@@ -1817,7 +1817,7 @@
 	} else
 		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
 
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
 	memset(hdr, 0, sizeof(struct iscsi_hdr));
 	hdr->opcode = reject->op_code;
@@ -1828,7 +1828,7 @@
 	hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
 			     reject->data_length);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 }
 
 /**
@@ -1848,13 +1848,13 @@
 	struct iscsi_task *task;
 
 	cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
 	if (!task)
 		printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
 			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	complete(&bnx2i_conn->cmd_cleanup_cmpl);
 }
 
@@ -1921,11 +1921,11 @@
 	int rc = 0;
 	int cpu;
 
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
 				 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
 	if (!task || !task->sc) {
-		spin_unlock(&session->lock);
+		spin_unlock(&session->back_lock);
 		return -EINVAL;
 	}
 	sc = task->sc;
@@ -1935,7 +1935,7 @@
 	else
 		cpu = sc->request->cpu;
 
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 
 	p = &per_cpu(bnx2i_percpu, cpu);
 	spin_lock(&p->p_work_lock);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index c8b0aff..166543f 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1170,10 +1170,10 @@
 	if (task->state == ISCSI_TASK_ABRT_TMF) {
 		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
 
-		spin_unlock_bh(&conn->session->lock);
+		spin_unlock_bh(&conn->session->back_lock);
 		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
 				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
-		spin_lock_bh(&conn->session->lock);
+		spin_lock_bh(&conn->session->back_lock);
 	}
 	bnx2i_iscsi_unmap_sg_list(task->dd_data);
 }
@@ -2060,7 +2060,7 @@
 		goto out;
 
 	if (session) {
-		spin_lock_bh(&session->lock);
+		spin_lock_bh(&session->frwd_lock);
 		if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
 			if (session->state == ISCSI_STATE_LOGGING_OUT) {
 				if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
@@ -2076,7 +2076,7 @@
 		} else
 			close = 1;
 
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 	}
 
 	bnx2i_ep->state = EP_STATE_DISCONN_START;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5a9f842..e8ee5e5 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -175,52 +175,6 @@
 			sizeof(struct fw_ofld_tx_data_wr));
 }
 
-
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
-	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
-	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct cxgbi_device *cdev,
-				struct l2t_entry *l2t)
-{
-	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
-	unsigned int ntuple = 0;
-	u32 viid;
-
-	switch (lldi->filt_mode) {
-
-	/* default filter mode */
-	case HW_TPL_FR_MT_PR_IV_P_FC:
-		if (l2t->vlan == VLAN_NONE)
-			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
-		else {
-			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
-			ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		}
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	case HW_TPL_FR_MT_PR_OV_P_FC: {
-		viid = cxgb4_port_viid(l2t->neigh->dev);
-
-		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
-		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
-		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	}
-	default:
-		break;
-	}
-	return ntuple;
-}
-
 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 				struct l2t_entry *e)
 {
@@ -248,8 +202,6 @@
 		struct cpl_act_open_req *req =
 				(struct cpl_act_open_req *)skb->head;
 
-		req = (struct cpl_act_open_req *)skb->head;
-
 		INIT_TP_WR(req, 0);
 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
 					qid_atid));
@@ -258,7 +210,9 @@
 		req->local_ip = csk->saddr.sin_addr.s_addr;
 		req->peer_ip = csk->daddr.sin_addr.s_addr;
 		req->opt0 = cpu_to_be64(opt0);
-		req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+		req->params = cpu_to_be32(cxgb4_select_ntuple(
+					csk->cdev->ports[csk->port_id],
+					csk->l2t));
 		opt2 |= 1 << 22;
 		req->opt2 = cpu_to_be32(opt2);
 
@@ -271,8 +225,6 @@
 		struct cpl_t5_act_open_req *req =
 				(struct cpl_t5_act_open_req *)skb->head;
 
-		req = (struct cpl_t5_act_open_req *)skb->head;
-
 		INIT_TP_WR(req, 0);
 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
 					qid_atid));
@@ -281,7 +233,10 @@
 		req->local_ip = csk->saddr.sin_addr.s_addr;
 		req->peer_ip = csk->daddr.sin_addr.s_addr;
 		req->opt0 = cpu_to_be64(opt0);
-		req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+		req->params = cpu_to_be64(V_FILTER_TUPLE(
+				cxgb4_select_ntuple(
+					csk->cdev->ports[csk->port_id],
+					csk->l2t)));
 		opt2 |= 1 << 31;
 		req->opt2 = cpu_to_be32(opt2);
 
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index d01f016..eb29fe7 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -277,7 +277,7 @@
 		/* With interrupts enabled, it will sometimes hang when doing heavy
 		 * reads. So better not enable them until I finger it out. */
 		if (instance->irq != SCSI_IRQ_NONE)
-			if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED,
+			if (request_irq(instance->irq, dtc_intr, 0,
 					"dtc", instance)) {
 				printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
 				instance->irq = SCSI_IRQ_NONE;
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 94de889..ebf5736 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1221,7 +1221,7 @@
 
 	/* Board detected, allocate its IRQ */
 	if (request_irq(irq, do_interrupt_handler,
-			IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
+			(subversion == ESA) ? IRQF_SHARED : 0,
 			driver_name, (void *)&sha[j])) {
 		printk("%s: unable to allocate IRQ %u, detaching.\n", name,
 		       irq);
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 1663173..8319d2b 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -687,7 +687,7 @@
 		return 0;
 
 	if (!reg_IRQ[gc->IRQ]) {	/* Interrupt already registered ? */
-		if (!request_irq(gc->IRQ, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", sh)) {
+		if (!request_irq(gc->IRQ, do_eata_pio_int_handler, 0, "EATA-PIO", sh)) {
 			reg_IRQ[gc->IRQ]++;
 			if (!gc->IRQ_TR)
 				reg_IRQL[gc->IRQ] = 1;	/* IRQ is edge triggered */
@@ -921,7 +921,7 @@
 
 	for (i = 0; i < MAXIRQ; i++)
 		if (reg_IRQ[i])
-			request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL);
+			request_irq(i, do_eata_pio_int_handler, 0, "EATA-PIO", NULL);
 
 	HBA_ptr = first_HBA;
 
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index b9750e2..6776931 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -231,7 +231,7 @@
 
 static void esas2r_claim_interrupts(struct esas2r_adapter *a)
 {
-	unsigned long flags = IRQF_DISABLED;
+	unsigned long flags = 0;
 
 	if (a->intr_mode == INTR_MODE_LEGACY)
 		flags |= IRQF_SHARED;
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
index 9bf285d..a82030a 100644
--- a/drivers/scsi/esas2r/esas2r_log.c
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -165,13 +165,9 @@
 
 		/*
 		 * Put a line break at the end of the formatted string so that
-		 * we don't wind up with run-on messages.  only append if there
-		 * is enough space in the buffer.
+		 * we don't wind up with run-on messages.
 		 */
-		if (strlen(event_buffer) < buflen)
-			strcat(buffer, "\n");
-
-		printk(event_buffer);
+		printk("%s\n", event_buffer);
 
 		spin_unlock_irqrestore(&event_buffer_lock, flags);
 	}
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 5cec6c6..7176365 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -461,7 +461,7 @@
 
 		if (instance->irq != SCSI_IRQ_NONE)
 			if (request_irq(instance->irq, generic_NCR5380_intr,
-					IRQF_DISABLED, "NCR5380", instance)) {
+					0, "NCR5380", instance)) {
 				printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
 				instance->irq = SCSI_IRQ_NONE;
 			}
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index ce5ef0190..0f1ae13 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4711,7 +4711,7 @@
 	printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",
 		isa_bios, ha->irq, ha->drq);
 
-	error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha);
+	error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
 	if (error) {
 		printk("GDT-ISA: Unable to allocate IRQ\n");
 		goto out_host_put;
@@ -4843,7 +4843,7 @@
 	printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
 		eisa_slot >> 12, ha->irq);
 
-	error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha);
+	error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
 	if (error) {
 		printk("GDT-EISA: Unable to allocate IRQ\n");
 		goto out_host_put;
@@ -4979,7 +4979,7 @@
 		ha->irq);
 
 	error = request_irq(ha->irq, gdth_interrupt,
-				IRQF_DISABLED|IRQF_SHARED, "gdth", ha);
+				IRQF_SHARED, "gdth", ha);
 	if (error) {
 		printk("GDT-PCI: Unable to allocate IRQ\n");
 		goto out_host_put;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f28ea07..3cbb57a 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -398,7 +398,7 @@
 	shost->ordered_tag = sht->ordered_tag;
 	shost->no_write_same = sht->no_write_same;
 
-	if (shost_eh_deadline == -1)
+	if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
 		shost->eh_deadline = -1;
 	else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
 		shost_printk(KERN_WARNING, shost,
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 868318a..8cf4a0c 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,6 +1,6 @@
 /*
  *    Disk Array driver for HP Smart Array SAS controllers
- *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -47,13 +47,13 @@
 #include <linux/string.h>
 #include <linux/bitmap.h>
 #include <linux/atomic.h>
-#include <linux/kthread.h>
 #include <linux/jiffies.h>
+#include <asm/div64.h>
 #include "hpsa_cmd.h"
 #include "hpsa.h"
 
 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "3.4.0-1"
+#define HPSA_DRIVER_VERSION "3.4.4-1"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 #define HPSA "hpsa"
 
@@ -118,6 +118,11 @@
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
+	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
+	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
+	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
+	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
+	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
@@ -163,6 +168,11 @@
 	{0x21C7103C, "Smart Array", &SA5_access},
 	{0x21C8103C, "Smart Array", &SA5_access},
 	{0x21C9103C, "Smart Array", &SA5_access},
+	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
+	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
+	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
+	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
+	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
@@ -182,8 +192,9 @@
 static struct CommandList *cmd_alloc(struct ctlr_info *h);
 static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
-	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
+	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
 	int cmd_type);
+#define VPD_PAGE (1 << 8)
 
 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 static void hpsa_scan_start(struct Scsi_Host *);
@@ -204,7 +215,7 @@
 	struct CommandList *c);
 /* performant mode helper functions */
 static void calc_bucket_map(int *bucket, int num_buckets,
-	int nsgs, int *bucket_map);
+	int nsgs, int min_blocks, int *bucket_map);
 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
 static inline u32 next_command(struct ctlr_info *h, u8 q);
 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
@@ -216,8 +227,14 @@
 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
 				     int wait_for_ready);
 static inline void finish_cmd(struct CommandList *c);
+static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
 #define BOARD_NOT_READY 0
 #define BOARD_READY 1
+static void hpsa_drain_accel_commands(struct ctlr_info *h);
+static void hpsa_flush_cache(struct ctlr_info *h);
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+	u8 *scsi3addr);
 
 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
 {
@@ -280,6 +297,55 @@
 	return 1;
 }
 
+static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	int status, len;
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+	char tmpbuf[10];
+
+	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+		return -EACCES;
+	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+	strncpy(tmpbuf, buf, len);
+	tmpbuf[len] = '\0';
+	if (sscanf(tmpbuf, "%d", &status) != 1)
+		return -EINVAL;
+	h = shost_to_hba(shost);
+	h->acciopath_status = !!status;
+	dev_warn(&h->pdev->dev,
+		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
+		h->acciopath_status ? "enabled" : "disabled");
+	return count;
+}
+
+static ssize_t host_store_raid_offload_debug(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	int debug_level, len;
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+	char tmpbuf[10];
+
+	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+		return -EACCES;
+	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+	strncpy(tmpbuf, buf, len);
+	tmpbuf[len] = '\0';
+	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
+		return -EINVAL;
+	if (debug_level < 0)
+		debug_level = 0;
+	h = shost_to_hba(shost);
+	h->raid_offload_debug = debug_level;
+	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
+		h->raid_offload_debug);
+	return count;
+}
+
 static ssize_t host_store_rescan(struct device *dev,
 				 struct device_attribute *attr,
 				 const char *buf, size_t count)
@@ -327,6 +393,17 @@
 			"performant" : "simple");
 }
 
+static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+
+	h = shost_to_hba(shost);
+	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
+		(h->acciopath_status == 1) ?  "enabled" : "disabled");
+}
+
 /* List of controllers which cannot be hard reset on kexec with reset_devices */
 static u32 unresettable_controller[] = {
 	0x324a103C, /* Smart Array P712m */
@@ -416,6 +493,13 @@
 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
 	"1(ADM)", "UNKNOWN"
 };
+#define HPSA_RAID_0	0
+#define HPSA_RAID_4	1
+#define HPSA_RAID_1	2	/* also used for RAID 10 */
+#define HPSA_RAID_5	3	/* also used for RAID 50 */
+#define HPSA_RAID_51	4
+#define HPSA_RAID_6	5	/* also used for RAID 60 */
+#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
 
 static ssize_t raid_level_show(struct device *dev,
@@ -504,10 +588,39 @@
 			sn[12], sn[13], sn[14], sn[15]);
 }
 
+static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
+	     struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct scsi_device *sdev;
+	struct hpsa_scsi_dev_t *hdev;
+	unsigned long flags;
+	int offload_enabled;
+
+	sdev = to_scsi_device(dev);
+	h = sdev_to_hba(sdev);
+	spin_lock_irqsave(&h->lock, flags);
+	hdev = sdev->hostdata;
+	if (!hdev) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		return -ENODEV;
+	}
+	offload_enabled = hdev->offload_enabled;
+	spin_unlock_irqrestore(&h->lock, flags);
+	return snprintf(buf, 20, "%d\n", offload_enabled);
+}
+
 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
+			host_show_hp_ssd_smart_path_enabled, NULL);
+static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
+		host_show_hp_ssd_smart_path_status,
+		host_store_hp_ssd_smart_path_status);
+static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
+			host_store_raid_offload_debug);
 static DEVICE_ATTR(firmware_revision, S_IRUGO,
 	host_show_firmware_revision, NULL);
 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
@@ -521,6 +634,7 @@
 	&dev_attr_raid_level,
 	&dev_attr_lunid,
 	&dev_attr_unique_id,
+	&dev_attr_hp_ssd_smart_path_enabled,
 	NULL,
 };
 
@@ -530,6 +644,8 @@
 	&dev_attr_commands_outstanding,
 	&dev_attr_transport_mode,
 	&dev_attr_resettable,
+	&dev_attr_hp_ssd_smart_path_status,
+	&dev_attr_raid_offload_debug,
 	NULL,
 };
 
@@ -570,6 +686,9 @@
 	struct reply_pool *rq = &h->reply_queue[q];
 	unsigned long flags;
 
+	if (h->transMethod & CFGTBL_Trans_io_accel1)
+		return h->access.command_completed(h, q);
+
 	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
 		return h->access.command_completed(h, q);
 
@@ -590,6 +709,32 @@
 	return a;
 }
 
+/*
+ * There are some special bits in the bus address of the
+ * command that we have to set for the controller to know
+ * how to process the command:
+ *
+ * Normal performant mode:
+ * bit 0: 1 means performant mode, 0 means simple mode.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 0)
+ *
+ * ioaccel1 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 110)
+ * (command type is needed because ioaccel1 mode
+ * commands are submitted through the same register as normal
+ * mode commands, so this is how the controller knows whether
+ * the command is normal mode or ioaccel1 mode.)
+ *
+ * ioaccel2 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-4 = block fetch table entry (note extra bit)
+ * bits 4-6 = not needed, because ioaccel2 mode has
+ * a separate special register for submitting commands.
+ */
+
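+/*
+ * Illustrative sketch (not part of this patch): given the layout above,
+ * the mode bits of a normal performant or ioaccel1 tag could be decoded
+ * as below.  The helper is hypothetical and for explanation only.
+ */
+#if 0	/* example only, never compiled */
+static inline void decode_busaddr_bits(u32 busaddr)
+{
+	u32 performant = busaddr & 0x1;		/* bit 0: performant mode */
+	u32 fetch_entry = (busaddr >> 1) & 0x7;	/* bits 1-3: block fetch table */
+	u32 cmd_type = (busaddr >> 4) & 0x7;	/* bits 4-6: 0 normal, 6 (110b) ioaccel1 */
+}
+#endif
+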
 /* set_performant_mode: Modify the tag for cciss performant
  * set bit 0 for pull model, bits 3-1 for block fetch
  * register number
@@ -598,12 +743,47 @@
 {
 	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
 		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-		if (likely(h->msix_vector))
+		if (likely(h->msix_vector > 0))
 			c->Header.ReplyQueue =
 				raw_smp_processor_id() % h->nreply_queues;
 	}
 }
 
+static void set_ioaccel1_performant_mode(struct ctlr_info *h,
+						struct CommandList *c)
+{
+	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+
+	/* Tell the controller to post the reply to the queue for this
+	 * processor.  This seems to give the best I/O throughput.
+	 */
+	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
+	/* Set the bits in the address sent down to include:
+	 *  - performant mode bit (bit 0)
+	 *  - pull count (bits 1-3)
+	 *  - command type (bits 4-6)
+	 */
+	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
+					IOACCEL1_BUSADDR_CMDTYPE;
+}
+
+static void set_ioaccel2_performant_mode(struct ctlr_info *h,
+						struct CommandList *c)
+{
+	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+
+	/* Tell the controller to post the reply to the queue for this
+	 * processor.  This seems to give the best I/O throughput.
+	 */
+	cp->reply_queue = smp_processor_id() % h->nreply_queues;
+	/* Set the bits in the address sent down to include:
+	 *  - performant mode bit not used in ioaccel mode 2
+	 *  - pull count (bits 0-3)
+	 *  - command type isn't needed for ioaccel2
+	 */
+	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
+}
+
 static int is_firmware_flash_cmd(u8 *cdb)
 {
 	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
@@ -638,7 +818,16 @@
 {
 	unsigned long flags;
 
-	set_performant_mode(h, c);
+	switch (c->cmd_type) {
+	case CMD_IOACCEL1:
+		set_ioaccel1_performant_mode(h, c);
+		break;
+	case CMD_IOACCEL2:
+		set_ioaccel2_performant_mode(h, c);
+		break;
+	default:
+		set_performant_mode(h, c);
+	}
 	dial_down_lockup_detection_during_fw_flash(h, c);
 	spin_lock_irqsave(&h->lock, flags);
 	addQ(&h->reqQ, c);
@@ -782,6 +971,14 @@
 
 	/* Raid level changed. */
 	h->dev[entry]->raid_level = new_entry->raid_level;
+
+	/* Raid offload parameters changed. */
+	h->dev[entry]->offload_config = new_entry->offload_config;
+	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
+	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
+	h->dev[entry]->raid_map = new_entry->raid_map;
+
 	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
 		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
 		new_entry->target, new_entry->lun);
@@ -902,6 +1099,10 @@
 	 */
 	if (dev1->raid_level != dev2->raid_level)
 		return 1;
+	if (dev1->offload_config != dev2->offload_config)
+		return 1;
+	if (dev1->offload_enabled != dev2->offload_enabled)
+		return 1;
 	return 0;
 }
 
@@ -932,6 +1133,9 @@
 					return DEVICE_UPDATED;
 				return DEVICE_SAME;
 			} else {
+				/* Keep offline devices offline */
+				if (needle->volume_offline)
+					return DEVICE_NOT_FOUND;
 				return DEVICE_CHANGED;
 			}
 		}
@@ -940,6 +1144,110 @@
 	return DEVICE_NOT_FOUND;
 }
 
+static void hpsa_monitor_offline_device(struct ctlr_info *h,
+					unsigned char scsi3addr[])
+{
+	struct offline_device_entry *device;
+	unsigned long flags;
+
+	/* Check to see if device is already on the list */
+	spin_lock_irqsave(&h->offline_device_lock, flags);
+	list_for_each_entry(device, &h->offline_device_list, offline_list) {
+		if (memcmp(device->scsi3addr, scsi3addr,
+			sizeof(device->scsi3addr)) == 0) {
+			spin_unlock_irqrestore(&h->offline_device_lock, flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&h->offline_device_lock, flags);
+
+	/* Device is not on the list, add it. */
+	device = kmalloc(sizeof(*device), GFP_KERNEL);
+	if (!device) {
+		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+		return;
+	}
+	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
+	spin_lock_irqsave(&h->offline_device_lock, flags);
+	list_add_tail(&device->offline_list, &h->offline_device_list);
+	spin_unlock_irqrestore(&h->offline_device_lock, flags);
+}
+
+/* Print a message explaining various offline volume states */
+static void hpsa_show_volume_status(struct ctlr_info *h,
+	struct hpsa_scsi_dev_t *sd)
+{
+	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+	switch (sd->volume_offline) {
+	case HPSA_LV_OK:
+		break;
+	case HPSA_LV_UNDERGOING_ERASE:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_UNDERGOING_RPI:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_PENDING_RPI:
+		dev_info(&h->pdev->dev,
+				"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
+				h->scsi_host->host_no,
+				sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_ENCRYPTED_NO_KEY:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_UNDERGOING_ENCRYPTION:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_PENDING_ENCRYPTION:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	}
+}
+
 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
 	struct hpsa_scsi_dev_t *sd[], int nsds)
 {
@@ -1004,6 +1312,20 @@
 	for (i = 0; i < nsds; i++) {
 		if (!sd[i]) /* if already added above. */
 			continue;
+
+		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
+		 * as the SCSI mid-layer does not handle such devices well.
+		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
+		 * at 160Hz, and prevents the system from coming up.
+		 */
+		if (sd[i]->volume_offline) {
+			hpsa_show_volume_status(h, sd[i]);
+			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
+				h->scsi_host->host_no,
+				sd[i]->bus, sd[i]->target, sd[i]->lun);
+			continue;
+		}
+
 		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
 					h->ndevices, &entry);
 		if (device_change == DEVICE_NOT_FOUND) {
@@ -1022,6 +1344,17 @@
 	}
 	spin_unlock_irqrestore(&h->devlock, flags);
 
+	/* Monitor devices which are in one of several NOT READY states to be
+	 * brought online later. This must be done without holding h->devlock,
+	 * so don't touch h->dev[]
+	 */
+	for (i = 0; i < nsds; i++) {
+		if (!sd[i]) /* if already added above. */
+			continue;
+		if (sd[i]->volume_offline)
+			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
+	}
+
 	/* Don't notify scsi mid layer of any changes the first time through
 	 * (or if there are no changes) scsi_scan_host will do it later the
 	 * first time through.
@@ -1187,11 +1520,163 @@
 	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
 }
 
+/* Decode the various types of errors on ioaccel2 path.
+ * Return 1 for any error that should generate a RAID path retry.
+ * Return 0 for errors that don't require a RAID path retry.
+ */
+static int handle_ioaccel_mode2_error(struct ctlr_info *h,
+					struct CommandList *c,
+					struct scsi_cmnd *cmd,
+					struct io_accel2_cmd *c2)
+{
+	int data_len;
+	int retry = 0;
+
+	switch (c2->error_data.serv_response) {
+	case IOACCEL2_SERV_RESPONSE_COMPLETE:
+		switch (c2->error_data.status) {
+		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+			break;
+		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
+			dev_warn(&h->pdev->dev,
+				"%s: task complete with check condition.\n",
+				"HP SSD Smart Path");
+			if (c2->error_data.data_present !=
+					IOACCEL2_SENSE_DATA_PRESENT)
+				break;
+			/* copy the sense data */
+			data_len = c2->error_data.sense_data_len;
+			if (data_len > SCSI_SENSE_BUFFERSIZE)
+				data_len = SCSI_SENSE_BUFFERSIZE;
+			if (data_len > sizeof(c2->error_data.sense_data_buff))
+				data_len =
+					sizeof(c2->error_data.sense_data_buff);
+			memcpy(cmd->sense_buffer,
+				c2->error_data.sense_data_buff, data_len);
+			cmd->result |= SAM_STAT_CHECK_CONDITION;
+			retry = 1;
+			break;
+		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
+			dev_warn(&h->pdev->dev,
+				"%s: task complete with BUSY status.\n",
+				"HP SSD Smart Path");
+			retry = 1;
+			break;
+		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
+			dev_warn(&h->pdev->dev,
+				"%s: task complete with reservation conflict.\n",
+				"HP SSD Smart Path");
+			retry = 1;
+			break;
+		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
+			/* Make scsi midlayer do unlimited retries */
+			cmd->result = DID_IMM_RETRY << 16;
+			break;
+		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
+			dev_warn(&h->pdev->dev,
+				"%s: task complete with aborted status.\n",
+				"HP SSD Smart Path");
+			retry = 1;
+			break;
+		default:
+			dev_warn(&h->pdev->dev,
+				"%s: task complete with unrecognized status: 0x%02x\n",
+				"HP SSD Smart Path", c2->error_data.status);
+			retry = 1;
+			break;
+		}
+		break;
+	case IOACCEL2_SERV_RESPONSE_FAILURE:
+		/* don't expect to get here. */
+		dev_warn(&h->pdev->dev,
+			"unexpected delivery or target failure, status = 0x%02x\n",
+			c2->error_data.status);
+		retry = 1;
+		break;
+	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
+		break;
+	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
+		break;
+	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
+		dev_warn(&h->pdev->dev, "task management function rejected.\n");
+		retry = 1;
+		break;
+	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
+		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
+		break;
+	default:
+		dev_warn(&h->pdev->dev,
+			"%s: Unrecognized server response: 0x%02x\n",
+			"HP SSD Smart Path",
+			c2->error_data.serv_response);
+		retry = 1;
+		break;
+	}
+
+	return retry;	/* retry on raid path? */
+}
+
+static void process_ioaccel2_completion(struct ctlr_info *h,
+		struct CommandList *c, struct scsi_cmnd *cmd,
+		struct hpsa_scsi_dev_t *dev)
+{
+	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
+	int raid_retry = 0;
+
+	/* check for good status */
+	if (likely(c2->error_data.serv_response == 0 &&
+			c2->error_data.status == 0)) {
+		cmd_free(h, c);
+		cmd->scsi_done(cmd);
+		return;
+	}
+
+	/* Any RAID offload error results in retry which will use
+	 * the normal I/O path so the controller can handle whatever's
+	 * wrong.
+	 */
+	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
+		c2->error_data.serv_response ==
+			IOACCEL2_SERV_RESPONSE_FAILURE) {
+		if (c2->error_data.status ==
+			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
+			dev_warn(&h->pdev->dev,
+				"%s: Path is unavailable, retrying on standard path.\n",
+				"HP SSD Smart Path");
+		else
+			dev_warn(&h->pdev->dev,
+				"%s: Error 0x%02x, retrying on standard path.\n",
+				"HP SSD Smart Path", c2->error_data.status);
+
+		dev->offload_enabled = 0;
+		h->drv_req_rescan = 1;	/* schedule controller for a rescan */
+		cmd->result = DID_SOFT_ERROR << 16;
+		cmd_free(h, c);
+		cmd->scsi_done(cmd);
+		return;
+	}
+	raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
+	/* If error found, disable Smart Path, schedule a rescan,
+	 * and force a retry on the standard path.
+	 */
+	if (raid_retry) {
+		dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
+			"HP SSD Smart Path");
+		dev->offload_enabled = 0; /* Disable Smart Path */
+		h->drv_req_rescan = 1;	  /* schedule controller rescan */
+		cmd->result = DID_SOFT_ERROR << 16;
+	}
+	cmd_free(h, c);
+	cmd->scsi_done(cmd);
+}
+
 static void complete_scsi_command(struct CommandList *cp)
 {
 	struct scsi_cmnd *cmd;
 	struct ctlr_info *h;
 	struct ErrorInfo *ei;
+	struct hpsa_scsi_dev_t *dev;
 
 	unsigned char sense_key;
 	unsigned char asc;      /* additional sense code */
@@ -1201,13 +1686,19 @@
 	ei = cp->err_info;
 	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
 	h = cp->h;
+	dev = cmd->device->hostdata;
 
 	scsi_dma_unmap(cmd); /* undo the DMA mappings */
-	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
+	if ((cp->cmd_type == CMD_SCSI) &&
+		(cp->Header.SGTotal > h->max_cmd_sg_entries))
 		hpsa_unmap_sg_chain_block(h, cp);
 
 	cmd->result = (DID_OK << 16); 		/* host byte */
 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
+
+	if (cp->cmd_type == CMD_IOACCEL2)
+		return process_ioaccel2_completion(h, cp, cmd, dev);
+
 	cmd->result |= ei->ScsiStatus;
 
 	/* copy the sense data whether we need to or not. */
@@ -1227,6 +1718,32 @@
 		return;
 	}
 
+	/* For I/O accelerator commands, copy over some fields to the normal
+	 * CISS header used below for error handling.
+	 */
+	if (cp->cmd_type == CMD_IOACCEL1) {
+		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
+		cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
+		cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
+		cp->Header.Tag.lower = c->Tag.lower;
+		cp->Header.Tag.upper = c->Tag.upper;
+		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
+		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
+
+		/* Any RAID offload error results in retry which will use
+		 * the normal I/O path so the controller can handle whatever's
+		 * wrong.
+		 */
+		if (is_logical_dev_addr_mode(dev->scsi3addr)) {
+			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
+				dev->offload_enabled = 0;
+			cmd->result = DID_SOFT_ERROR << 16;
+			cmd_free(h, cp);
+			cmd->scsi_done(cmd);
+			return;
+		}
+	}
+
 	/* an error has occurred */
 	switch (ei->CommandStatus) {
 
@@ -1389,6 +1906,14 @@
 		cmd->result = DID_ERROR << 16;
 		dev_warn(&h->pdev->dev, "Command unabortable\n");
 		break;
+	case CMD_IOACCEL_DISABLED:
+		/* This only handles the direct pass-through case since RAID
+		 * offload is handled above.  Just attempt a retry.
+		 */
+		cmd->result = DID_SOFT_ERROR << 16;
+		dev_warn(&h->pdev->dev,
+				"cp %p had HP SSD Smart Path error\n", cp);
+		break;
 	default:
 		cmd->result = DID_ERROR << 16;
 		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
@@ -1438,6 +1963,7 @@
 	cp->SG[0].Addr.upper =
 	  (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
 	cp->SG[0].Len = buflen;
+	cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
 	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
 	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
 	return 0;
@@ -1490,17 +2016,37 @@
 	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
 }
 
-static void hpsa_scsi_interpret_error(struct CommandList *cp)
+static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
+				struct CommandList *c)
 {
-	struct ErrorInfo *ei;
-	struct device *d = &cp->h->pdev->dev;
+	const u8 *cdb = c->Request.CDB;
+	const u8 *lun = c->Header.LUN.LunAddrBytes;
 
-	ei = cp->err_info;
+	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
+	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+		txt, lun[0], lun[1], lun[2], lun[3],
+		lun[4], lun[5], lun[6], lun[7],
+		cdb[0], cdb[1], cdb[2], cdb[3],
+		cdb[4], cdb[5], cdb[6], cdb[7],
+		cdb[8], cdb[9], cdb[10], cdb[11],
+		cdb[12], cdb[13], cdb[14], cdb[15]);
+}
+
+static void hpsa_scsi_interpret_error(struct ctlr_info *h,
+			struct CommandList *cp)
+{
+	const struct ErrorInfo *ei = cp->err_info;
+	struct device *d = &cp->h->pdev->dev;
+	const u8 *sd = ei->SenseInfo;
+
 	switch (ei->CommandStatus) {
 	case CMD_TARGET_STATUS:
-		dev_warn(d, "cmd %p has completed with errors\n", cp);
-		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
-				ei->ScsiStatus);
+		hpsa_print_cmd(h, "SCSI status", cp);
+		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
+			dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
+				sd[2] & 0x0f, sd[12], sd[13]);
+		else
+			dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
 		if (ei->ScsiStatus == 0)
 			dev_warn(d, "SCSI status is abnormally zero.  "
 			"(probably indicates selection timeout "
@@ -1508,54 +2054,51 @@
 			"firmware bug, circa July, 2001.)\n");
 		break;
 	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
-			dev_info(d, "UNDERRUN\n");
 		break;
 	case CMD_DATA_OVERRUN:
-		dev_warn(d, "cp %p has completed with data overrun\n", cp);
+		hpsa_print_cmd(h, "overrun condition", cp);
 		break;
 	case CMD_INVALID: {
 		/* controller unfortunately reports SCSI passthru's
 		 * to non-existent targets as invalid commands.
 		 */
-		dev_warn(d, "cp %p is reported invalid (probably means "
-			"target device no longer present)\n", cp);
-		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
-		print_cmd(cp);  */
+		hpsa_print_cmd(h, "invalid command", cp);
+		dev_warn(d, "probably means device no longer present\n");
 		}
 		break;
 	case CMD_PROTOCOL_ERR:
-		dev_warn(d, "cp %p has protocol error \n", cp);
+		hpsa_print_cmd(h, "protocol error", cp);
 		break;
 	case CMD_HARDWARE_ERR:
-		/* cmd->result = DID_ERROR << 16; */
-		dev_warn(d, "cp %p had hardware error\n", cp);
+		hpsa_print_cmd(h, "hardware error", cp);
 		break;
 	case CMD_CONNECTION_LOST:
-		dev_warn(d, "cp %p had connection lost\n", cp);
+		hpsa_print_cmd(h, "connection lost", cp);
 		break;
 	case CMD_ABORTED:
-		dev_warn(d, "cp %p was aborted\n", cp);
+		hpsa_print_cmd(h, "aborted", cp);
 		break;
 	case CMD_ABORT_FAILED:
-		dev_warn(d, "cp %p reports abort failed\n", cp);
+		hpsa_print_cmd(h, "abort failed", cp);
 		break;
 	case CMD_UNSOLICITED_ABORT:
-		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
+		hpsa_print_cmd(h, "unsolicited abort", cp);
 		break;
 	case CMD_TIMEOUT:
-		dev_warn(d, "cp %p timed out\n", cp);
+		hpsa_print_cmd(h, "timed out", cp);
 		break;
 	case CMD_UNABORTABLE:
-		dev_warn(d, "Command unabortable\n");
+		hpsa_print_cmd(h, "unabortable", cp);
 		break;
 	default:
-		dev_warn(d, "cp %p returned unknown status %x\n", cp,
+		hpsa_print_cmd(h, "unknown status", cp);
+		dev_warn(d, "Unknown command status %x\n",
 				ei->CommandStatus);
 	}
 }
 
 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
-			unsigned char page, unsigned char *buf,
+			u16 page, unsigned char *buf,
 			unsigned char bufsize)
 {
 	int rc = IO_OK;
@@ -1577,7 +2120,7 @@
 	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
 	ei = c->err_info;
 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
-		hpsa_scsi_interpret_error(c);
+		hpsa_scsi_interpret_error(h, c);
 		rc = -1;
 	}
 out:
@@ -1585,7 +2128,39 @@
 	return rc;
 }
 
-static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
+static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
+		unsigned char *scsi3addr, unsigned char page,
+		struct bmic_controller_parameters *buf, size_t bufsize)
+{
+	int rc = IO_OK;
+	struct CommandList *c;
+	struct ErrorInfo *ei;
+
+	c = cmd_special_alloc(h);
+
+	if (c == NULL) {			/* trouble... */
+		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+		return -ENOMEM;
+	}
+
+	if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
+			page, scsi3addr, TYPE_CMD)) {
+		rc = -1;
+		goto out;
+	}
+	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+	ei = c->err_info;
+	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+		hpsa_scsi_interpret_error(h, c);
+		rc = -1;
+	}
+out:
+	cmd_special_free(h, c);
+	return rc;
+}
+
+static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+	u8 reset_type)
 {
 	int rc = IO_OK;
 	struct CommandList *c;
@@ -1599,14 +2174,15 @@
 	}
 
 	/* fill_cmd can't fail here, no data buffer to map. */
-	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
-			NULL, 0, 0, scsi3addr, TYPE_MSG);
+	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+			scsi3addr, TYPE_MSG);
+	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
 	hpsa_scsi_do_simple_cmd_core(h, c);
 	/* no unmap needed here because no data xfer. */
 
 	ei = c->err_info;
 	if (ei->CommandStatus != 0) {
-		hpsa_scsi_interpret_error(c);
+		hpsa_scsi_interpret_error(h, c);
 		rc = -1;
 	}
 	cmd_special_free(h, c);
@@ -1623,7 +2199,7 @@
 	buf = kzalloc(64, GFP_KERNEL);
 	if (!buf)
 		return;
-	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
 	if (rc == 0)
 		*raid_level = buf[8];
 	if (*raid_level > RAID_UNKNOWN)
@@ -1632,6 +2208,204 @@
 	return;
 }
 
+#define HPSA_MAP_DEBUG
+#ifdef HPSA_MAP_DEBUG
+static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
+				struct raid_map_data *map_buff)
+{
+	struct raid_map_disk_data *dd = &map_buff->data[0];
+	int map, row, col;
+	u16 map_cnt, row_cnt, disks_per_row;
+
+	if (rc != 0)
+		return;
+
+	/* Show details only if debugging has been activated. */
+	if (h->raid_offload_debug < 2)
+		return;
+
+	dev_info(&h->pdev->dev, "structure_size = %u\n",
+				le32_to_cpu(map_buff->structure_size));
+	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
+			le32_to_cpu(map_buff->volume_blk_size));
+	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
+			le64_to_cpu(map_buff->volume_blk_cnt));
+	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
+			map_buff->phys_blk_shift);
+	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
+			map_buff->parity_rotation_shift);
+	dev_info(&h->pdev->dev, "strip_size = %u\n",
+			le16_to_cpu(map_buff->strip_size));
+	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
+			le64_to_cpu(map_buff->disk_starting_blk));
+	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
+			le64_to_cpu(map_buff->disk_blk_cnt));
+	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
+			le16_to_cpu(map_buff->data_disks_per_row));
+	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
+			le16_to_cpu(map_buff->metadata_disks_per_row));
+	dev_info(&h->pdev->dev, "row_cnt = %u\n",
+			le16_to_cpu(map_buff->row_cnt));
+	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
+			le16_to_cpu(map_buff->layout_map_count));
+	dev_info(&h->pdev->dev, "flags = %u\n",
+			le16_to_cpu(map_buff->flags));
+	if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
+		dev_info(&h->pdev->dev, "encryption = ON\n");
+	else
+		dev_info(&h->pdev->dev, "encryption = OFF\n");
+	dev_info(&h->pdev->dev, "dekindex = %u\n",
+			le16_to_cpu(map_buff->dekindex));
+
+	map_cnt = le16_to_cpu(map_buff->layout_map_count);
+	for (map = 0; map < map_cnt; map++) {
+		dev_info(&h->pdev->dev, "Map%u:\n", map);
+		row_cnt = le16_to_cpu(map_buff->row_cnt);
+		for (row = 0; row < row_cnt; row++) {
+			dev_info(&h->pdev->dev, "  Row%u:\n", row);
+			disks_per_row =
+				le16_to_cpu(map_buff->data_disks_per_row);
+			for (col = 0; col < disks_per_row; col++, dd++)
+				dev_info(&h->pdev->dev,
+					"    D%02u: h=0x%04x xor=%u,%u\n",
+					col, dd->ioaccel_handle,
+					dd->xor_mult[0], dd->xor_mult[1]);
+			disks_per_row =
+				le16_to_cpu(map_buff->metadata_disks_per_row);
+			for (col = 0; col < disks_per_row; col++, dd++)
+				dev_info(&h->pdev->dev,
+					"    M%02u: h=0x%04x xor=%u,%u\n",
+					col, dd->ioaccel_handle,
+					dd->xor_mult[0], dd->xor_mult[1]);
+		}
+	}
+}
+#else
+static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
+			__attribute__((unused)) int rc,
+			__attribute__((unused)) struct raid_map_data *map_buff)
+{
+}
+#endif
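+
+/*
+ * Illustrative note (not part of this patch): the raid map dump above is
+ * emitted only when the host's raid_offload_debug level, settable via the
+ * raid_offload_debug sysfs attribute added earlier, is 2 or greater.
+ */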
+
+static int hpsa_get_raid_map(struct ctlr_info *h,
+	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
+{
+	int rc = 0;
+	struct CommandList *c;
+	struct ErrorInfo *ei;
+
+	c = cmd_special_alloc(h);
+	if (c == NULL) {
+		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+		return -ENOMEM;
+	}
+	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
+			sizeof(this_device->raid_map), 0,
+			scsi3addr, TYPE_CMD)) {
+		dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
+		cmd_special_free(h, c);
+		return -ENOMEM;
+	}
+	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+	ei = c->err_info;
+	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+		hpsa_scsi_interpret_error(h, c);
+		cmd_special_free(h, c);
+		return -1;
+	}
+	cmd_special_free(h, c);
+
+	/* @todo in the future, dynamically allocate RAID map memory */
+	if (le32_to_cpu(this_device->raid_map.structure_size) >
+				sizeof(this_device->raid_map)) {
+		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
+		rc = -1;
+	}
+	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
+	return rc;
+}
+
+static int hpsa_vpd_page_supported(struct ctlr_info *h,
+	unsigned char scsi3addr[], u8 page)
+{
+	int rc;
+	int i;
+	int pages;
+	unsigned char *buf, bufsize;
+
+	buf = kzalloc(256, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	/* Get the size of the page list first */
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
+				buf, HPSA_VPD_HEADER_SZ);
+	if (rc != 0)
+		goto exit_unsupported;
+	pages = buf[3];
+	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
+		bufsize = pages + HPSA_VPD_HEADER_SZ;
+	else
+		bufsize = 255;
+
+	/* Get the whole VPD page list */
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
+				buf, bufsize);
+	if (rc != 0)
+		goto exit_unsupported;
+
+	pages = buf[3];
+	for (i = 1; i <= pages; i++)
+		if (buf[3 + i] == page)
+			goto exit_supported;
+exit_unsupported:
+	kfree(buf);
+	return 0;
+exit_supported:
+	kfree(buf);
+	return 1;
+}
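+
+/*
+ * Illustrative note: in the "supported VPD pages" response parsed above,
+ * byte 3 carries the page-list length and the supported page codes start
+ * at byte 4, which is why the loop checks buf[3 + i].
+ */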
+
+static void hpsa_get_ioaccel_status(struct ctlr_info *h,
+	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
+{
+	int rc;
+	unsigned char *buf;
+	u8 ioaccel_status;
+
+	this_device->offload_config = 0;
+	this_device->offload_enabled = 0;
+
+	buf = kzalloc(64, GFP_KERNEL);
+	if (!buf)
+		return;
+	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
+		goto out;
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
+	if (rc != 0)
+		goto out;
+
+#define IOACCEL_STATUS_BYTE 4
+#define OFFLOAD_CONFIGURED_BIT 0x01
+#define OFFLOAD_ENABLED_BIT 0x02
+	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
+	this_device->offload_config =
+		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+	if (this_device->offload_config) {
+		this_device->offload_enabled =
+			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+		if (hpsa_get_raid_map(h, scsi3addr, this_device))
+			this_device->offload_enabled = 0;
+	}
+out:
+	kfree(buf);
+	return;
+}
+
 /* Get the device id from inquiry page 0x83 */
 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
 	unsigned char *device_id, int buflen)
@@ -1644,7 +2418,7 @@
 	buf = kzalloc(64, GFP_KERNEL);
 	if (!buf)
 		return -1;
-	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
 	if (rc == 0)
 		memcpy(device_id, &buf[8], buflen);
 	kfree(buf);
@@ -1678,8 +2452,16 @@
 	ei = c->err_info;
 	if (ei->CommandStatus != 0 &&
 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
-		hpsa_scsi_interpret_error(c);
+		hpsa_scsi_interpret_error(h, c);
 		rc = -1;
+	} else {
+		if (buf->extended_response_flag != extended_response) {
+			dev_err(&h->pdev->dev,
+				"report luns requested format %u, got %u\n",
+				extended_response,
+				buf->extended_response_flag);
+			rc = -1;
+		}
 	}
 out:
 	cmd_special_free(h, c);
@@ -1707,6 +2489,117 @@
 	device->lun = lun;
 }
 
+/* Use VPD inquiry to get details of volume status */
+static int hpsa_get_volume_status(struct ctlr_info *h,
+					unsigned char scsi3addr[])
+{
+	int rc;
+	int status;
+	int size;
+	unsigned char *buf;
+
+	buf = kzalloc(64, GFP_KERNEL);
+	if (!buf)
+		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
+
+	/* Does controller have VPD for logical volume status? */
+	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
+		dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
+		goto exit_failed;
+	}
+
+	/* Get the size of the VPD return buffer */
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
+					buf, HPSA_VPD_HEADER_SZ);
+	if (rc != 0) {
+		dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+		goto exit_failed;
+	}
+	size = buf[3];
+
+	/* Now get the whole VPD buffer */
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
+					buf, size + HPSA_VPD_HEADER_SZ);
+	if (rc != 0) {
+		dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+		goto exit_failed;
+	}
+	status = buf[4]; /* status byte */
+
+	kfree(buf);
+	return status;
+exit_failed:
+	kfree(buf);
+	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
+}
+
+/* Determine offline status of a volume.
+ * Return either:
+ *  0 (not offline)
+ *  # (integer code indicating one of several NOT READY states
+ *     describing why a volume is to be kept offline)
+ */
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
+					unsigned char scsi3addr[])
+{
+	struct CommandList *c;
+	unsigned char *sense, sense_key, asc, ascq;
+	int ldstat = 0;
+	u16 cmd_status;
+	u8 scsi_status;
+#define ASC_LUN_NOT_READY 0x04
+#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
+#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
+
+	c = cmd_alloc(h);
+	if (!c)
+		return 0;
+	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
+	hpsa_scsi_do_simple_cmd_core(h, c);
+	sense = c->err_info->SenseInfo;
+	sense_key = sense[2];
+	asc = sense[12];
+	ascq = sense[13];
+	cmd_status = c->err_info->CommandStatus;
+	scsi_status = c->err_info->ScsiStatus;
+	cmd_free(h, c);
+	/* Is the volume 'not ready'? */
+	if (cmd_status != CMD_TARGET_STATUS ||
+		scsi_status != SAM_STAT_CHECK_CONDITION ||
+		sense_key != NOT_READY ||
+		asc != ASC_LUN_NOT_READY)  {
+		return 0;
+	}
+
+	/* Determine the reason for not ready state */
+	ldstat = hpsa_get_volume_status(h, scsi3addr);
+
+	/* Keep volume offline in certain cases: */
+	switch (ldstat) {
+	case HPSA_LV_UNDERGOING_ERASE:
+	case HPSA_LV_UNDERGOING_RPI:
+	case HPSA_LV_PENDING_RPI:
+	case HPSA_LV_ENCRYPTED_NO_KEY:
+	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+	case HPSA_LV_UNDERGOING_ENCRYPTION:
+	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+		return ldstat;
+	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
+		/* If VPD status page isn't available,
+		 * use ASC/ASCQ to determine state
+		 */
+		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
+			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
+			return ldstat;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
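+
+/*
+ * Illustrative note: with fixed-format sense data, byte 2 holds the sense
+ * key (low nibble), byte 12 the ASC and byte 13 the ASCQ, which is why
+ * hpsa_volume_offline() reads sense[2], sense[12] and sense[13] after the
+ * TEST UNIT READY completes.
+ */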
+
 static int hpsa_update_device_info(struct ctlr_info *h,
 	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
 	unsigned char *is_OBDR_device)
@@ -1745,10 +2638,18 @@
 		sizeof(this_device->device_id));
 
 	if (this_device->devtype == TYPE_DISK &&
-		is_logical_dev_addr_mode(scsi3addr))
+		is_logical_dev_addr_mode(scsi3addr)) {
 		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
-	else
+		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
+			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
+		this_device->volume_offline =
+			hpsa_volume_offline(h, scsi3addr);
+	} else {
 		this_device->raid_level = RAID_UNKNOWN;
+		this_device->offload_config = 0;
+		this_device->offload_enabled = 0;
+		this_device->volume_offline = 0;
+	}
 
 	if (is_OBDR_device) {
 		/* See if this is a One-Button-Disaster-Recovery device
@@ -1878,6 +2779,105 @@
 }
 
 /*
+ * Get address of physical disk used for an ioaccel2 mode command:
+ *	1. Extract ioaccel2 handle from the command.
+ *	2. Find a matching ioaccel2 handle from list of physical disks.
+ *	3. Return:
+ *		1, with scsi3addr set to the address of the matching physical disk,
+ *		0 if no matching physical disk was found.
+ */
+static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
+	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
+{
+	struct ReportExtendedLUNdata *physicals = NULL;
+	int responsesize = 24;	/* size of physical extended response */
+	int extended = 2;	/* flag forces reporting 'other dev info'. */
+	int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
+	u32 nphysicals = 0;	/* number of reported physical devs */
+	int found = 0;		/* found match (1) or not (0) */
+	u32 find;		/* handle we need to match */
+	int i;
+	struct scsi_cmnd *scmd;	/* scsi command within request being aborted */
+	struct hpsa_scsi_dev_t *d; /* device of request being aborted */
+	struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
+	u32 it_nexus;		/* 4 byte device handle for the ioaccel2 cmd */
+	u32 scsi_nexus;		/* 4 byte device handle for the ioaccel2 cmd */
+
+	if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
+		return 0; /* no match */
+
+	/* point to the ioaccel2 device handle */
+	c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
+	if (c2a == NULL)
+		return 0; /* no match */
+
+	scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
+	if (scmd == NULL)
+		return 0; /* no match */
+
+	d = scmd->device->hostdata;
+	if (d == NULL)
+		return 0; /* no match */
+
+	it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
+	scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
+	find = c2a->scsi_nexus;
+
+	if (h->raid_offload_debug > 0)
+		dev_info(&h->pdev->dev,
+			"%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+			__func__, scsi_nexus,
+			d->device_id[0], d->device_id[1], d->device_id[2],
+			d->device_id[3], d->device_id[4], d->device_id[5],
+			d->device_id[6], d->device_id[7], d->device_id[8],
+			d->device_id[9], d->device_id[10], d->device_id[11],
+			d->device_id[12], d->device_id[13], d->device_id[14],
+			d->device_id[15]);
+
+	/* Get the list of physical devices */
+	physicals = kzalloc(reportsize, GFP_KERNEL);
+	if (physicals == NULL)
+		return 0; /* no match */
+	if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
+		reportsize, extended)) {
+		dev_err(&h->pdev->dev,
+			"Can't lookup %s device handle: report physical LUNs failed.\n",
+			"HP SSD Smart Path");
+		kfree(physicals);
+		return 0;
+	}
+	nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
+							responsesize;
+
+	/* find ioaccel2 handle in list of physicals: */
+	for (i = 0; i < nphysicals; i++) {
+		/* handle is in bytes 20-23 of each 24-byte lun entry */
+		if (memcmp(&((struct ReportExtendedLUNdata *)
+				physicals)->LUN[i][20], &find, 4) != 0) {
+			continue; /* didn't match */
+		}
+		found = 1;
+		memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
+					physicals)->LUN[i][0], 8);
+		if (h->raid_offload_debug > 0)
+			dev_info(&h->pdev->dev,
+				"%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+				__func__, find,
+				((struct ReportExtendedLUNdata *)
+					physicals)->LUN[i][20],
+				scsi3addr[0], scsi3addr[1], scsi3addr[2],
+				scsi3addr[3], scsi3addr[4], scsi3addr[5],
+				scsi3addr[6], scsi3addr[7]);
+		break; /* found it */
+	}
+
+	kfree(physicals);
+	if (found)
+		return 1;
+	else
+		return 0;
+}
+
+/*
  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
  * logdev.  The number of luns in physdev and logdev are returned in
  * *nphysicals and *nlogicals, respectively.
@@ -1885,14 +2885,26 @@
  */
 static int hpsa_gather_lun_info(struct ctlr_info *h,
 	int reportlunsize,
-	struct ReportLUNdata *physdev, u32 *nphysicals,
+	struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
 	struct ReportLUNdata *logdev, u32 *nlogicals)
 {
-	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
+	int physical_entry_size = 8;
+
+	*physical_mode = 0;
+
+	/* For I/O accelerator mode we need to read physical device handles */
+	if (h->transMethod & CFGTBL_Trans_io_accel1 ||
+		h->transMethod & CFGTBL_Trans_io_accel2) {
+		*physical_mode = HPSA_REPORT_PHYS_EXTENDED;
+		physical_entry_size = 24;
+	}
+	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
+							*physical_mode)) {
 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
 		return -1;
 	}
-	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
+	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
+							physical_entry_size;
 	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
 		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
 			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
@@ -1923,7 +2935,8 @@
 }
 
 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
-	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
+	int nphysicals, int nlogicals,
+	struct ReportExtendedLUNdata *physdev_list,
 	struct ReportLUNdata *logdev_list)
 {
 	/* Helper function, figure out where the LUN ID info is coming from
@@ -1947,6 +2960,24 @@
 	return NULL;
 }
 
+static int hpsa_hba_mode_enabled(struct ctlr_info *h)
+{
+	int rc;
+	struct bmic_controller_parameters *ctlr_params;
+	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
+		GFP_KERNEL);
+
+	if (!ctlr_params)
+		return 0;
+	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
+		sizeof(struct bmic_controller_parameters));
+	if (rc != 0) {
+		kfree(ctlr_params);
+		return 0;
+	}
+	rc = ctlr_params->nvram_flags & (1 << 3) ? 1 : 0;
+	kfree(ctlr_params);
+	return rc;
+}
+
 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 {
 	/* the idea here is we could get notified
@@ -1959,16 +2990,18 @@
 	 * tell which devices we already know about, vs. new
 	 * devices, vs.  disappearing devices.
 	 */
-	struct ReportLUNdata *physdev_list = NULL;
+	struct ReportExtendedLUNdata *physdev_list = NULL;
 	struct ReportLUNdata *logdev_list = NULL;
 	u32 nphysicals = 0;
 	u32 nlogicals = 0;
+	int physical_mode = 0;
 	u32 ndev_allocated = 0;
 	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
 	int ncurrent = 0;
-	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
+	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
 	int i, n_ext_target_devs, ndevs_to_allocate;
 	int raid_ctlr_position;
+	u8 rescan_hba_mode;
 	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
 
 	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -1982,8 +3015,18 @@
 	}
 	memset(lunzerobits, 0, sizeof(lunzerobits));
 
-	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
-			logdev_list, &nlogicals))
+	rescan_hba_mode = hpsa_hba_mode_enabled(h);
+
+	if (!h->hba_mode_enabled && rescan_hba_mode)
+		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
+	else if (h->hba_mode_enabled && !rescan_hba_mode)
+		dev_warn(&h->pdev->dev, "HBA mode disabled\n");
+
+	h->hba_mode_enabled = rescan_hba_mode;
+
+	if (hpsa_gather_lun_info(h, reportlunsize,
+			(struct ReportLUNdata *) physdev_list, &nphysicals,
+			&physical_mode, logdev_list, &nlogicals))
 		goto out;
 
 	/* We might see up to the maximum number of logical and physical disks
@@ -2064,9 +3107,28 @@
 				ncurrent++;
 			break;
 		case TYPE_DISK:
-			if (i < nphysicals)
+			if (h->hba_mode_enabled) {
+				/* never use raid mapper in HBA mode */
+				this_device->offload_enabled = 0;
+				ncurrent++;
 				break;
-			ncurrent++;
+			} else if (h->acciopath_status) {
+				if (i >= nphysicals) {
+					ncurrent++;
+					break;
+				}
+			} else {
+				if (i < nphysicals)
+					break;
+				ncurrent++;
+				break;
+			}
+			if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
+				memcpy(&this_device->ioaccel_handle,
+					&lunaddrbytes[20],
+					sizeof(this_device->ioaccel_handle));
+				ncurrent++;
+			}
 			break;
 		case TYPE_TAPE:
 		case TYPE_MEDIUM_CHANGER:
@@ -2136,7 +3198,7 @@
 		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
 		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
 		curr_sg->Len = len;
-		curr_sg->Ext = 0;  /* we are not chaining */
+		curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
 		curr_sg++;
 	}
 
@@ -2160,6 +3222,726 @@
 	return 0;
 }
 
+#define IO_ACCEL_INELIGIBLE (1)
+static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
+{
+	int is_write = 0;
+	u32 block;
+	u32 block_cnt;
+
+	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
+	/* Rewrite 6- and 12-byte read/write CDBs as 10-byte CDBs where needed */
+	case WRITE_6:
+	case WRITE_12:
+		is_write = 1;
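+		/* fall through */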
+	case READ_6:
+	case READ_12:
+		if (*cdb_len == 6) {
+			block = (((u32) cdb[2]) << 8) | cdb[3];
+			block_cnt = cdb[4];
+		} else {
+			BUG_ON(*cdb_len != 12);
+			block = (((u32) cdb[2]) << 24) |
+				(((u32) cdb[3]) << 16) |
+				(((u32) cdb[4]) << 8) |
+				cdb[5];
+			block_cnt =
+				(((u32) cdb[6]) << 24) |
+				(((u32) cdb[7]) << 16) |
+				(((u32) cdb[8]) << 8) |
+				cdb[9];
+		}
+		if (block_cnt > 0xffff)
+			return IO_ACCEL_INELIGIBLE;
+
+		cdb[0] = is_write ? WRITE_10 : READ_10;
+		cdb[1] = 0;
+		cdb[2] = (u8) (block >> 24);
+		cdb[3] = (u8) (block >> 16);
+		cdb[4] = (u8) (block >> 8);
+		cdb[5] = (u8) (block);
+		cdb[6] = 0;
+		cdb[7] = (u8) (block_cnt >> 8);
+		cdb[8] = (u8) (block_cnt);
+		cdb[9] = 0;
+		*cdb_len = 10;
+		break;
+	}
+	return 0;
+}
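+
+/*
+ * Worked example (illustrative): a READ(6) CDB of 08 00 12 34 08 00
+ * (LBA 0x1234, 8 blocks) is rewritten by fixup_ioaccel_cdb() into the
+ * READ(10) CDB 28 00 00 00 12 34 00 00 08 00, with *cdb_len set to 10.
+ */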
+
+static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
+	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+	u8 *scsi3addr)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+	unsigned int len;
+	unsigned int total_len = 0;
+	struct scatterlist *sg;
+	u64 addr64;
+	int use_sg, i;
+	struct SGDescriptor *curr_sg;
+	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
+
+	/* TODO: implement chaining support */
+	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+		return IO_ACCEL_INELIGIBLE;
+
+	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
+
+	if (fixup_ioaccel_cdb(cdb, &cdb_len))
+		return IO_ACCEL_INELIGIBLE;
+
+	c->cmd_type = CMD_IOACCEL1;
+
+	/* Adjust the DMA address to point to the accelerated command buffer */
+	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
+				(c->cmdindex * sizeof(*cp));
+	BUG_ON(c->busaddr & 0x0000007F);
+
+	use_sg = scsi_dma_map(cmd);
+	if (use_sg < 0)
+		return use_sg;
+
+	if (use_sg) {
+		curr_sg = cp->SG;
+		scsi_for_each_sg(cmd, sg, use_sg, i) {
+			addr64 = (u64) sg_dma_address(sg);
+			len  = sg_dma_len(sg);
+			total_len += len;
+			curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
+			curr_sg->Addr.upper =
+				(u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
+			curr_sg->Len = len;
+
+			if (i == (scsi_sg_count(cmd) - 1))
+				curr_sg->Ext = HPSA_SG_LAST;
+			else
+				curr_sg->Ext = 0;  /* we are not chaining */
+			curr_sg++;
+		}
+
+		switch (cmd->sc_data_direction) {
+		case DMA_TO_DEVICE:
+			control |= IOACCEL1_CONTROL_DATA_OUT;
+			break;
+		case DMA_FROM_DEVICE:
+			control |= IOACCEL1_CONTROL_DATA_IN;
+			break;
+		case DMA_NONE:
+			control |= IOACCEL1_CONTROL_NODATAXFER;
+			break;
+		default:
+			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+			cmd->sc_data_direction);
+			BUG();
+			break;
+		}
+	} else {
+		control |= IOACCEL1_CONTROL_NODATAXFER;
+	}
+
+	c->Header.SGList = use_sg;
+	/* Fill out the command structure to submit */
+	cp->dev_handle = ioaccel_handle & 0xFFFF;
+	cp->transfer_len = total_len;
+	cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
+			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
+	cp->control = control;
+	memcpy(cp->CDB, cdb, cdb_len);
+	memcpy(cp->CISS_LUN, scsi3addr, 8);
+	/* Tag was already set at init time. */
+	enqueue_cmd_and_start_io(h, c);
+	return 0;
+}
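+
+/*
+ * Illustrative note: the BUG_ON(c->busaddr & 0x0000007F) check above
+ * relies on each accelerated command buffer being 128-byte aligned, so
+ * the low seven address bits stay free for the tag/mode bits described
+ * earlier.
+ */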
+
+/*
+ * Queue a command directly to a device behind the controller using the
+ * I/O accelerator path.
+ */
+static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+
+	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
+		cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
+}
+
+/*
+ * Set encryption parameters for the ioaccel2 request
+ */
+static void set_encrypt_ioaccel2(struct ctlr_info *h,
+	struct CommandList *c, struct io_accel2_cmd *cp)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+	struct raid_map_data *map = &dev->raid_map;
+	u64 first_block;
+
+	BUG_ON(!(dev->offload_config && dev->offload_enabled));
+
+	/* Are we doing encryption on this device */
+	if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
+		return;
+	/* Set the data encryption key index. */
+	cp->dekindex = map->dekindex;
+
+	/* Set the encryption enable flag, encoded into direction field. */
+	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
+
+	/* Set encryption tweak values based on logical block address
+	 * If block size is 512, tweak value is LBA.
+	 * For other block sizes, tweak is (LBA * block size)/ 512)
+	 */
+	switch (cmd->cmnd[0]) {
+	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
+	case WRITE_6:
+	case READ_6:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[2]) << 8) |
+					cmd->cmnd[3];
+			cp->tweak_upper = 0;
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 8) |
+					cmd->cmnd[3];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	case WRITE_10:
+	case READ_10:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[2]) << 24) |
+				(((u32) cmd->cmnd[3]) << 16) |
+				(((u32) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			cp->tweak_upper = 0;
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 24) |
+				(((u64) cmd->cmnd[3]) << 16) |
+				(((u64) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
+	case WRITE_12:
+	case READ_12:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[2]) << 24) |
+				(((u32) cmd->cmnd[3]) << 16) |
+				(((u32) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			cp->tweak_upper = 0;
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 24) |
+				(((u64) cmd->cmnd[3]) << 16) |
+				(((u64) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	case WRITE_16:
+	case READ_16:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[6]) << 24) |
+				(((u32) cmd->cmnd[7]) << 16) |
+				(((u32) cmd->cmnd[8]) << 8) |
+					cmd->cmnd[9];
+			cp->tweak_upper =
+				(((u32) cmd->cmnd[2]) << 24) |
+				(((u32) cmd->cmnd[3]) << 16) |
+				(((u32) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 56) |
+				(((u64) cmd->cmnd[3]) << 48) |
+				(((u64) cmd->cmnd[4]) << 40) |
+				(((u64) cmd->cmnd[5]) << 32) |
+				(((u64) cmd->cmnd[6]) << 24) |
+				(((u64) cmd->cmnd[7]) << 16) |
+				(((u64) cmd->cmnd[8]) << 8) |
+					cmd->cmnd[9];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	default:
+		dev_err(&h->pdev->dev,
+			"ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
+			__func__);
+		BUG();
+		break;
+	}
+}
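+
+/*
+ * Worked example (illustrative): for a READ(10) starting at LBA 0x100 on
+ * a volume with 4096-byte blocks, the tweak is (0x100 * 4096) / 512 =
+ * 0x800, so tweak_lower = 0x800 and tweak_upper = 0.
+ */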
+
+static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
+	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+	u8 *scsi3addr)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+	struct ioaccel2_sg_element *curr_sg;
+	int use_sg, i;
+	struct scatterlist *sg;
+	u64 addr64;
+	u32 len;
+	u32 total_len = 0;
+
+	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+		return IO_ACCEL_INELIGIBLE;
+
+	if (fixup_ioaccel_cdb(cdb, &cdb_len))
+		return IO_ACCEL_INELIGIBLE;
+	c->cmd_type = CMD_IOACCEL2;
+	/* Adjust the DMA address to point to the accelerated command buffer */
+	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
+				(c->cmdindex * sizeof(*cp));
+	BUG_ON(c->busaddr & 0x0000007F);
+
+	memset(cp, 0, sizeof(*cp));
+	cp->IU_type = IOACCEL2_IU_TYPE;
+
+	use_sg = scsi_dma_map(cmd);
+	if (use_sg < 0)
+		return use_sg;
+
+	if (use_sg) {
+		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
+		curr_sg = cp->sg;
+		scsi_for_each_sg(cmd, sg, use_sg, i) {
+			addr64 = (u64) sg_dma_address(sg);
+			len  = sg_dma_len(sg);
+			total_len += len;
+			curr_sg->address = cpu_to_le64(addr64);
+			curr_sg->length = cpu_to_le32(len);
+			curr_sg->reserved[0] = 0;
+			curr_sg->reserved[1] = 0;
+			curr_sg->reserved[2] = 0;
+			curr_sg->chain_indicator = 0;
+			curr_sg++;
+		}
+
+		switch (cmd->sc_data_direction) {
+		case DMA_TO_DEVICE:
+			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+			cp->direction |= IOACCEL2_DIR_DATA_OUT;
+			break;
+		case DMA_FROM_DEVICE:
+			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+			cp->direction |= IOACCEL2_DIR_DATA_IN;
+			break;
+		case DMA_NONE:
+			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+			cp->direction |= IOACCEL2_DIR_NO_DATA;
+			break;
+		default:
+			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+				cmd->sc_data_direction);
+			BUG();
+			break;
+		}
+	} else {
+		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+		cp->direction |= IOACCEL2_DIR_NO_DATA;
+	}
+
+	/* Set encryption parameters, if necessary */
+	set_encrypt_ioaccel2(h, c, cp);
+
+	cp->scsi_nexus = ioaccel_handle;
+	cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
+				DIRECT_LOOKUP_BIT;
+	memcpy(cp->cdb, cdb, sizeof(cp->cdb));
+
+	/* record the number of sg elements filled in above */
+	cp->sg_count = (u8) use_sg;
+
+	cp->data_len = cpu_to_le32(total_len);
+	cp->err_ptr = cpu_to_le64(c->busaddr +
+			offsetof(struct io_accel2_cmd, error_data));
+	cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
+
+	enqueue_cmd_and_start_io(h, c);
+	return 0;
+}
+
+/*
+ * Queue a command to the correct I/O accelerator path.
+ */
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+	u8 *scsi3addr)
+{
+	if (h->transMethod & CFGTBL_Trans_io_accel1)
+		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
+						cdb, cdb_len, scsi3addr);
+	else
+		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
+						cdb, cdb_len, scsi3addr);
+}
+
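+/*
+ * Worked example for raid_map_helper() below (annotation; geometry is
+ * made up): with data_disks_per_row = 3 and layout_map_count = 3
+ * (three mirror groups), map_index = 4 lies in group 4 / 3 = 1.  With
+ * offload_to_mirror = 2 the loop advances one row (index 4 -> 7,
+ * group 1 -> 2) and stops; with offload_to_mirror = 0 the early return
+ * reduces the index modulo data_disks_per_row into the first group.
+ */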
+static void raid_map_helper(struct raid_map_data *map,
+		int offload_to_mirror, u32 *map_index, u32 *current_group)
+{
+	if (offload_to_mirror == 0)  {
+		/* use physical disk in the first mirrored group. */
+		*map_index %= map->data_disks_per_row;
+		return;
+	}
+	do {
+		/* determine mirror group that *map_index indicates */
+		*current_group = *map_index / map->data_disks_per_row;
+		if (offload_to_mirror == *current_group)
+			continue;
+		if (*current_group < (map->layout_map_count - 1)) {
+			/* select map index from next group */
+			*map_index += map->data_disks_per_row;
+			(*current_group)++;
+		} else {
+			/* select map index from first group */
+			*map_index %= map->data_disks_per_row;
+			*current_group = 0;
+		}
+	} while (offload_to_mirror != *current_group);
+}
+
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+	struct raid_map_data *map = &dev->raid_map;
+	struct raid_map_disk_data *dd = &map->data[0];
+	int is_write = 0;
+	u32 map_index;
+	u64 first_block, last_block;
+	u32 block_cnt;
+	u32 blocks_per_row;
+	u64 first_row, last_row;
+	u32 first_row_offset, last_row_offset;
+	u32 first_column, last_column;
+	u64 r0_first_row, r0_last_row;
+	u32 r5or6_blocks_per_row;
+	u64 r5or6_first_row, r5or6_last_row;
+	u32 r5or6_first_row_offset, r5or6_last_row_offset;
+	u32 r5or6_first_column, r5or6_last_column;
+	u32 total_disks_per_row;
+	u32 stripesize;
+	u32 first_group, last_group, current_group;
+	u32 map_row;
+	u32 disk_handle;
+	u64 disk_block;
+	u32 disk_block_cnt;
+	u8 cdb[16];
+	u8 cdb_len;
+#if BITS_PER_LONG == 32
+	u64 tmpdiv;
+#endif
+	int offload_to_mirror;
+
+	BUG_ON(!(dev->offload_config && dev->offload_enabled));
+
+	/* check for valid opcode, get LBA and block count */
+	switch (cmd->cmnd[0]) {
+	case WRITE_6:
+		is_write = 1;
+	case READ_6:
+		first_block =
+			(((u64) cmd->cmnd[2]) << 8) |
+			cmd->cmnd[3];
+		block_cnt = cmd->cmnd[4];
+		/* In 6-byte CDBs, a transfer length of 0 means 256 blocks */
+		if (block_cnt == 0)
+			block_cnt = 256;
+		break;
+	case WRITE_10:
+		is_write = 1;
+	case READ_10:
+		first_block =
+			(((u64) cmd->cmnd[2]) << 24) |
+			(((u64) cmd->cmnd[3]) << 16) |
+			(((u64) cmd->cmnd[4]) << 8) |
+			cmd->cmnd[5];
+		block_cnt =
+			(((u32) cmd->cmnd[7]) << 8) |
+			cmd->cmnd[8];
+		break;
+	case WRITE_12:
+		is_write = 1;
+	case READ_12:
+		first_block =
+			(((u64) cmd->cmnd[2]) << 24) |
+			(((u64) cmd->cmnd[3]) << 16) |
+			(((u64) cmd->cmnd[4]) << 8) |
+			cmd->cmnd[5];
+		block_cnt =
+			(((u32) cmd->cmnd[6]) << 24) |
+			(((u32) cmd->cmnd[7]) << 16) |
+			(((u32) cmd->cmnd[8]) << 8) |
+			cmd->cmnd[9];
+		break;
+	case WRITE_16:
+		is_write = 1;
+	case READ_16:
+		first_block =
+			(((u64) cmd->cmnd[2]) << 56) |
+			(((u64) cmd->cmnd[3]) << 48) |
+			(((u64) cmd->cmnd[4]) << 40) |
+			(((u64) cmd->cmnd[5]) << 32) |
+			(((u64) cmd->cmnd[6]) << 24) |
+			(((u64) cmd->cmnd[7]) << 16) |
+			(((u64) cmd->cmnd[8]) << 8) |
+			cmd->cmnd[9];
+		block_cnt =
+			(((u32) cmd->cmnd[10]) << 24) |
+			(((u32) cmd->cmnd[11]) << 16) |
+			(((u32) cmd->cmnd[12]) << 8) |
+			cmd->cmnd[13];
+		break;
+	default:
+		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
+	}
+	BUG_ON(block_cnt == 0);
+	last_block = first_block + block_cnt - 1;
+
+	/* check for write to non-RAID-0 */
+	if (is_write && dev->raid_level != 0)
+		return IO_ACCEL_INELIGIBLE;
+
+	/* check for invalid block or wraparound */
+	if (last_block >= map->volume_blk_cnt || last_block < first_block)
+		return IO_ACCEL_INELIGIBLE;
+
+	/* calculate stripe information for the request */
+	blocks_per_row = map->data_disks_per_row * map->strip_size;
+#if BITS_PER_LONG == 32
+	tmpdiv = first_block;
+	(void) do_div(tmpdiv, blocks_per_row);
+	first_row = tmpdiv;
+	tmpdiv = last_block;
+	(void) do_div(tmpdiv, blocks_per_row);
+	last_row = tmpdiv;
+	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
+	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+	tmpdiv = first_row_offset;
+	(void) do_div(tmpdiv,  map->strip_size);
+	first_column = tmpdiv;
+	tmpdiv = last_row_offset;
+	(void) do_div(tmpdiv, map->strip_size);
+	last_column = tmpdiv;
+#else
+	first_row = first_block / blocks_per_row;
+	last_row = last_block / blocks_per_row;
+	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
+	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+	first_column = first_row_offset / map->strip_size;
+	last_column = last_row_offset / map->strip_size;
+#endif
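+
+	/*
+	 * Worked example (annotation; geometry is made up): with
+	 * data_disks_per_row = 3 and strip_size = 128, blocks_per_row is
+	 * 384.  first_block = 500 yields first_row = 1, first_row_offset
+	 * = 116 and first_column = 0; the request stays eligible below
+	 * only if last_block lands in the same row and column.
+	 */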
+
+	/* if this isn't a single row/column then give to the controller */
+	if ((first_row != last_row) || (first_column != last_column))
+		return IO_ACCEL_INELIGIBLE;
+
+	/* proceeding with driver mapping */
+	total_disks_per_row = map->data_disks_per_row +
+				map->metadata_disks_per_row;
+	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+				map->row_cnt;
+	map_index = (map_row * total_disks_per_row) + first_column;
+
+	switch (dev->raid_level) {
+	case HPSA_RAID_0:
+		break; /* nothing special to do */
+	case HPSA_RAID_1:
+		/* Handles load balance across RAID 1 members.
+		 * (2-drive R1 and R10 with even # of drives.)
+		 * Appropriate for SSDs, not optimal for HDDs
+		 */
+		BUG_ON(map->layout_map_count != 2);
+		if (dev->offload_to_mirror)
+			map_index += map->data_disks_per_row;
+		dev->offload_to_mirror = !dev->offload_to_mirror;
+		break;
+	case HPSA_RAID_ADM:
+		/* Handles N-way mirrors (R1-ADM)
+		 * and R10 with # of drives divisible by 3.
+		 */
+		BUG_ON(map->layout_map_count != 3);
+
+		offload_to_mirror = dev->offload_to_mirror;
+		raid_map_helper(map, offload_to_mirror,
+				&map_index, &current_group);
+		/* set mirror group to use next time */
+		offload_to_mirror =
+			(offload_to_mirror >= map->layout_map_count - 1)
+			? 0 : offload_to_mirror + 1;
+		/* FIXME: remove after debug/dev */
+		BUG_ON(offload_to_mirror >= map->layout_map_count);
+		dev_warn(&h->pdev->dev,
+			"DEBUG: Using physical disk map index %d from mirror group %d\n",
+			map_index, offload_to_mirror);
+		dev->offload_to_mirror = offload_to_mirror;
+		/* Avoid direct use of dev->offload_to_mirror within this
+		 * function since multiple threads might simultaneously
+		 * increment it beyond the range of map->layout_map_count - 1.
+		 */
+		break;
+	case HPSA_RAID_5:
+	case HPSA_RAID_6:
+		if (map->layout_map_count <= 1)
+			break;
+
+		/* Verify first and last block are in same RAID group */
+		r5or6_blocks_per_row =
+			map->strip_size * map->data_disks_per_row;
+		BUG_ON(r5or6_blocks_per_row == 0);
+		stripesize = r5or6_blocks_per_row * map->layout_map_count;
+#if BITS_PER_LONG == 32
+		tmpdiv = first_block;
+		first_group = do_div(tmpdiv, stripesize);
+		tmpdiv = first_group;
+		(void) do_div(tmpdiv, r5or6_blocks_per_row);
+		first_group = tmpdiv;
+		tmpdiv = last_block;
+		last_group = do_div(tmpdiv, stripesize);
+		tmpdiv = last_group;
+		(void) do_div(tmpdiv, r5or6_blocks_per_row);
+		last_group = tmpdiv;
+#else
+		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
+		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+#endif
+		if (first_group != last_group)
+			return IO_ACCEL_INELIGIBLE;
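+
+		/*
+		 * Example (annotation, continuing the made-up geometry
+		 * above): with layout_map_count = 2, stripesize is
+		 * 384 * 2 = 768, so first_block = 500 sits in group
+		 * (500 % 768) / 384 = 1; a request whose last block falls
+		 * in a different group is rejected here.
+		 */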
+
+		/* Verify request is in a single row of RAID 5/6 */
+#if BITS_PER_LONG == 32
+		tmpdiv = first_block;
+		(void) do_div(tmpdiv, stripesize);
+		first_row = r5or6_first_row = r0_first_row = tmpdiv;
+		tmpdiv = last_block;
+		(void) do_div(tmpdiv, stripesize);
+		r5or6_last_row = r0_last_row = tmpdiv;
+#else
+		first_row = r5or6_first_row = r0_first_row =
+						first_block / stripesize;
+		r5or6_last_row = r0_last_row = last_block / stripesize;
+#endif
+		if (r5or6_first_row != r5or6_last_row)
+			return IO_ACCEL_INELIGIBLE;
+
+		/* Verify request is in a single column */
+#if BITS_PER_LONG == 32
+		tmpdiv = first_block;
+		first_row_offset = do_div(tmpdiv, stripesize);
+		tmpdiv = first_row_offset;
+		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
+		r5or6_first_row_offset = first_row_offset;
+		tmpdiv = last_block;
+		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
+		tmpdiv = r5or6_last_row_offset;
+		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
+		tmpdiv = r5or6_first_row_offset;
+		(void) do_div(tmpdiv, map->strip_size);
+		first_column = r5or6_first_column = tmpdiv;
+		tmpdiv = r5or6_last_row_offset;
+		(void) do_div(tmpdiv, map->strip_size);
+		r5or6_last_column = tmpdiv;
+#else
+		first_row_offset = r5or6_first_row_offset =
+			(u32)((first_block % stripesize) %
+						r5or6_blocks_per_row);
+
+		r5or6_last_row_offset =
+			(u32)((last_block % stripesize) %
+						r5or6_blocks_per_row);
+
+		first_column = r5or6_first_column =
+			r5or6_first_row_offset / map->strip_size;
+		r5or6_last_column =
+			r5or6_last_row_offset / map->strip_size;
+#endif
+		if (r5or6_first_column != r5or6_last_column)
+			return IO_ACCEL_INELIGIBLE;
+
+		/* Request is eligible */
+		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+			map->row_cnt;
+
+		map_index = (first_group *
+			(map->row_cnt * total_disks_per_row)) +
+			(map_row * total_disks_per_row) + first_column;
+		break;
+	default:
+		return IO_ACCEL_INELIGIBLE;
+	}
+
+	disk_handle = dd[map_index].ioaccel_handle;
+	disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
+			(first_row_offset - (first_column * map->strip_size));
+	disk_block_cnt = block_cnt;
+
+	/* handle differing logical/physical block sizes */
+	if (map->phys_blk_shift) {
+		disk_block <<= map->phys_blk_shift;
+		disk_block_cnt <<= map->phys_blk_shift;
+	}
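+	/*
+	 * Example (annotation): a volume exposing 4K logical blocks on
+	 * 512-byte physical disks would have phys_blk_shift = 3, so 16
+	 * logical blocks at disk_block 100 become 128 physical blocks
+	 * at disk_block 800.
+	 */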
+	BUG_ON(disk_block_cnt > 0xffff);
+
+	/* build the new CDB for the physical disk I/O */
+	if (disk_block > 0xffffffff) {
+		cdb[0] = is_write ? WRITE_16 : READ_16;
+		cdb[1] = 0;
+		cdb[2] = (u8) (disk_block >> 56);
+		cdb[3] = (u8) (disk_block >> 48);
+		cdb[4] = (u8) (disk_block >> 40);
+		cdb[5] = (u8) (disk_block >> 32);
+		cdb[6] = (u8) (disk_block >> 24);
+		cdb[7] = (u8) (disk_block >> 16);
+		cdb[8] = (u8) (disk_block >> 8);
+		cdb[9] = (u8) (disk_block);
+		cdb[10] = (u8) (disk_block_cnt >> 24);
+		cdb[11] = (u8) (disk_block_cnt >> 16);
+		cdb[12] = (u8) (disk_block_cnt >> 8);
+		cdb[13] = (u8) (disk_block_cnt);
+		cdb[14] = 0;
+		cdb[15] = 0;
+		cdb_len = 16;
+	} else {
+		cdb[0] = is_write ? WRITE_10 : READ_10;
+		cdb[1] = 0;
+		cdb[2] = (u8) (disk_block >> 24);
+		cdb[3] = (u8) (disk_block >> 16);
+		cdb[4] = (u8) (disk_block >> 8);
+		cdb[5] = (u8) (disk_block);
+		cdb[6] = 0;
+		cdb[7] = (u8) (disk_block_cnt >> 8);
+		cdb[8] = (u8) (disk_block_cnt);
+		cdb[9] = 0;
+		cdb_len = 10;
+	}
+	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
+						dev->scsi3addr);
+}
 
 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	void (*done)(struct scsi_cmnd *))
@@ -2169,6 +3951,7 @@
 	unsigned char scsi3addr[8];
 	struct CommandList *c;
 	unsigned long flags;
+	int rc = 0;
 
 	/* Get the ptr to our adapter structure out of cmd->host. */
 	h = sdev_to_hba(cmd->device);
@@ -2203,6 +3986,32 @@
 
 	c->cmd_type = CMD_SCSI;
 	c->scsi_cmd = cmd;
+
+	/* Call alternate submit routine for I/O accelerated commands.
+	 * Retries always go down the normal I/O path.
+	 */
+	if (likely(cmd->retries == 0 &&
+		cmd->request->cmd_type == REQ_TYPE_FS &&
+		h->acciopath_status)) {
+		if (dev->offload_enabled) {
+			rc = hpsa_scsi_ioaccel_raid_map(h, c);
+			if (rc == 0)
+				return 0; /* Sent on ioaccel path */
+			if (rc < 0) {   /* scsi_dma_map failed. */
+				cmd_free(h, c);
+				return SCSI_MLQUEUE_HOST_BUSY;
+			}
+		} else if (dev->ioaccel_handle) {
+			rc = hpsa_scsi_ioaccel_direct_map(h, c);
+			if (rc == 0)
+				return 0; /* Sent on direct map path */
+			if (rc < 0) {   /* scsi_dma_map failed. */
+				cmd_free(h, c);
+				return SCSI_MLQUEUE_HOST_BUSY;
+			}
+		}
+	}
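+	/*
+	 * Note: a positive return (IO_ACCEL_INELIGIBLE) deliberately
+	 * falls through to the standard RAID-stack submission below;
+	 * only rc == 0 (sent) and rc < 0 (DMA mapping failure) return
+	 * early here.
+	 */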
+
 	c->Header.ReplyQueue = 0;  /* unused in simple mode */
 	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
 	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
@@ -2262,11 +4071,38 @@
 
 static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
 
+static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
+{
+	unsigned long flags;
+
+	/*
+	 * Don't let rescans be initiated on a controller known
+	 * to be locked up.  If the controller locks up *during*
+	 * a rescan, that thread is probably hosed, but at least
+	 * we can prevent new rescan threads from piling up on a
+	 * locked up controller.
+	 */
+	spin_lock_irqsave(&h->lock, flags);
+	if (unlikely(h->lockup_detected)) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		spin_lock_irqsave(&h->scan_lock, flags);
+		h->scan_finished = 1;
+		wake_up_all(&h->scan_wait_queue);
+		spin_unlock_irqrestore(&h->scan_lock, flags);
+		return 1;
+	}
+	spin_unlock_irqrestore(&h->lock, flags);
+	return 0;
+}
+
 static void hpsa_scan_start(struct Scsi_Host *sh)
 {
 	struct ctlr_info *h = shost_to_hba(sh);
 	unsigned long flags;
 
+	if (do_not_scan_if_controller_locked_up(h))
+		return;
+
 	/* wait until any scan already in progress is finished. */
 	while (1) {
 		spin_lock_irqsave(&h->scan_lock, flags);
@@ -2283,6 +4119,9 @@
 	h->scan_finished = 0; /* mark scan as in progress */
 	spin_unlock_irqrestore(&h->scan_lock, flags);
 
+	if (do_not_scan_if_controller_locked_up(h))
+		return;
+
 	hpsa_update_scsi_devices(h, h->scsi_host->host_no);
 
 	spin_lock_irqsave(&h->scan_lock, flags);
@@ -2346,7 +4185,10 @@
 	sh->max_lun = HPSA_MAX_LUN;
 	sh->max_id = HPSA_MAX_LUN;
 	sh->can_queue = h->nr_cmds;
-	sh->cmd_per_lun = h->nr_cmds;
+	if (h->hba_mode_enabled)
+		sh->cmd_per_lun = 7;
+	else
+		sh->cmd_per_lun = h->nr_cmds;
 	sh->sg_tablesize = h->maxsgentries;
 	h->scsi_host = sh;
 	sh->hostdata[0] = (unsigned long) h;
@@ -2372,7 +4214,7 @@
 static int wait_for_device_to_become_ready(struct ctlr_info *h,
 	unsigned char lunaddr[])
 {
-	int rc = 0;
+	int rc;
 	int count = 0;
 	int waittime = 1; /* seconds */
 	struct CommandList *c;
@@ -2392,6 +4234,7 @@
 		 */
 		msleep(1000 * waittime);
 		count++;
+		rc = 0; /* assume the device is ready */
 
 		/* Increase wait time with each try, up to a point. */
 		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
@@ -2448,7 +4291,7 @@
 	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
 		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
 	/* send a reset to the SCSI LUN which the command was sent to */
-	rc = hpsa_send_reset(h, dev->scsi3addr);
+	rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
 	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
 		return SUCCESS;
 
@@ -2471,12 +4314,36 @@
 	tag[7] = original_tag[4];
 }
 
+static void hpsa_get_tag(struct ctlr_info *h,
+	struct CommandList *c, u32 *taglower, u32 *tagupper)
+{
+	if (c->cmd_type == CMD_IOACCEL1) {
+		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
+			&h->ioaccel_cmd_pool[c->cmdindex];
+		*tagupper = cm1->Tag.upper;
+		*taglower = cm1->Tag.lower;
+		return;
+	}
+	if (c->cmd_type == CMD_IOACCEL2) {
+		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
+			&h->ioaccel2_cmd_pool[c->cmdindex];
+		/* upper tag not used in ioaccel2 mode */
+		memset(tagupper, 0, sizeof(*tagupper));
+		*taglower = cm2->Tag;
+		return;
+	}
+	*tagupper = c->Header.Tag.upper;
+	*taglower = c->Header.Tag.lower;
+}
+
 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
 	struct CommandList *abort, int swizzle)
 {
 	int rc = IO_OK;
 	struct CommandList *c;
 	struct ErrorInfo *ei;
+	u32 tagupper, taglower;
 
 	c = cmd_special_alloc(h);
 	if (c == NULL) {	/* trouble... */
@@ -2490,8 +4357,9 @@
 	if (swizzle)
 		swizzle_abort_tag(&c->Request.CDB[4]);
 	hpsa_scsi_do_simple_cmd_core(h, c);
+	hpsa_get_tag(h, abort, &taglower, &tagupper);
 	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
-		__func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
+		__func__, tagupper, taglower);
 	/* no unmap needed here because no data xfer. */
 
 	ei = c->err_info;
@@ -2503,15 +4371,14 @@
 		break;
 	default:
 		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
-			__func__, abort->Header.Tag.upper,
-			abort->Header.Tag.lower);
-		hpsa_scsi_interpret_error(c);
+			__func__, tagupper, taglower);
+		hpsa_scsi_interpret_error(h, c);
 		rc = -1;
 		break;
 	}
 	cmd_special_free(h, c);
-	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
-		abort->Header.Tag.upper, abort->Header.Tag.lower);
+	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
+		__func__, tagupper, taglower);
 	return rc;
 }
 
@@ -2565,6 +4432,83 @@
 	return NULL;
 }
 
+/* ioaccel2 path firmware cannot handle abort task requests.
+ * Change abort requests to physical target reset, and send to the
+ * address of the physical disk used for the ioaccel 2 command.
+ * Return 0 on success (IO_OK)
+ *	 -1 on failure
+ */
+static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
+	unsigned char *scsi3addr, struct CommandList *abort)
+{
+	int rc = IO_OK;
+	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
+	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
+	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
+	unsigned char *psa = &phys_scsi3addr[0];
+
+	/* Get a pointer to the hpsa logical device. */
+	scmd = (struct scsi_cmnd *) abort->scsi_cmd;
+	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
+	if (dev == NULL) {
+		dev_warn(&h->pdev->dev,
+			"Cannot abort: no device pointer for command.\n");
+		return -1; /* not abortable */
+	}
+
+	if (h->raid_offload_debug > 0)
+		dev_info(&h->pdev->dev,
+			"Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
+			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
+			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
+
+	if (!dev->offload_enabled) {
+		dev_warn(&h->pdev->dev,
+			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
+		return -1; /* not abortable */
+	}
+
+	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
+	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
+		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
+		return -1; /* not abortable */
+	}
+
+	/* send the reset */
+	if (h->raid_offload_debug > 0)
+		dev_info(&h->pdev->dev,
+			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			psa[0], psa[1], psa[2], psa[3],
+			psa[4], psa[5], psa[6], psa[7]);
+	rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
+	if (rc != 0) {
+		dev_warn(&h->pdev->dev,
+			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			psa[0], psa[1], psa[2], psa[3],
+			psa[4], psa[5], psa[6], psa[7]);
+		return rc; /* failed to reset */
+	}
+
+	/* wait for device to recover */
+	if (wait_for_device_to_become_ready(h, psa) != 0) {
+		dev_warn(&h->pdev->dev,
+			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			psa[0], psa[1], psa[2], psa[3],
+			psa[4], psa[5], psa[6], psa[7]);
+		return -1;  /* failed to recover */
+	}
+
+	/* device recovered */
+	dev_info(&h->pdev->dev,
+		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+		psa[0], psa[1], psa[2], psa[3],
+		psa[4], psa[5], psa[6], psa[7]);
+
+	return rc; /* success */
+}
+
 /* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
  * tell which kind we're dealing with, so we send the abort both ways.  There
  * shouldn't be any collisions between swizzled and unswizzled tags due to the
@@ -2578,6 +4522,14 @@
 	struct CommandList *c;
 	int rc = 0, rc2 = 0;
 
+	/* I/O accelerator mode 2 commands should be aborted via the
+	 * accelerated path, since the RAID path is unaware of these
+	 * commands, but the underlying firmware can't handle an abort
+	 * TMF.  Change the abort to a physical device reset.
+	 */
+	if (abort->cmd_type == CMD_IOACCEL2)
+		return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
+
 	/* we do not expect to find the swizzled tag in our queue, but
 	 * check anyway just to be sure the assumptions which make this
 	 * the case haven't become wrong.
@@ -2616,6 +4568,7 @@
 	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
 	char msg[256];		/* For debug messaging. */
 	int ml = 0;
+	u32 tagupper, taglower;
 
 	/* Find the controller of the command to be aborted */
 	h = sdev_to_hba(sc->device);
@@ -2648,9 +4601,8 @@
 				msg);
 		return FAILED;
 	}
-
-	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
-		abort->Header.Tag.upper, abort->Header.Tag.lower);
+	hpsa_get_tag(h, abort, &taglower, &tagupper);
+	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
 	as  = (struct scsi_cmnd *) abort->scsi_cmd;
 	if (as != NULL)
 		ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
@@ -2776,6 +4728,7 @@
 		return NULL;
 	memset(c, 0, sizeof(*c));
 
+	c->cmd_type = CMD_SCSI;
 	c->cmdindex = -1;
 
 	c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
@@ -3038,7 +4991,7 @@
 		c->SG[0].Addr.lower = temp64.val32.lower;
 		c->SG[0].Addr.upper = temp64.val32.upper;
 		c->SG[0].Len = iocommand.buf_size;
-		c->SG[0].Ext = 0; /* we are not chaining*/
+		c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
 	}
 	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
 	if (iocommand.buf_size > 0)
@@ -3168,8 +5121,7 @@
 			c->SG[i].Addr.lower = temp64.val32.lower;
 			c->SG[i].Addr.upper = temp64.val32.upper;
 			c->SG[i].Len = buff_size[i];
-			/* we are not chaining */
-			c->SG[i].Ext = 0;
+			c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
 		}
 	}
 	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
@@ -3304,7 +5256,7 @@
 }
 
 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
-	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
+	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
 	int cmd_type)
 {
 	int pci_dir = XFER_NONE;
@@ -3327,9 +5279,9 @@
 		switch (cmd) {
 		case HPSA_INQUIRY:
 			/* are we trying to read a vital product page */
-			if (page_code != 0) {
+			if (page_code & VPD_PAGE) {
 				c->Request.CDB[1] = 0x01;
-				c->Request.CDB[2] = page_code;
+				c->Request.CDB[2] = (page_code & 0xff);
 			}
 			c->Request.CDBLen = 6;
 			c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -3369,6 +5321,28 @@
 			c->Request.Type.Direction = XFER_NONE;
 			c->Request.Timeout = 0;
 			break;
+		case HPSA_GET_RAID_MAP:
+			c->Request.CDBLen = 12;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = HPSA_CISS_READ;
+			c->Request.CDB[1] = cmd;
+			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+			c->Request.CDB[7] = (size >> 16) & 0xFF;
+			c->Request.CDB[8] = (size >> 8) & 0xFF;
+			c->Request.CDB[9] = size & 0xFF;
+			break;
+		case BMIC_SENSE_CONTROLLER_PARAMETERS:
+			c->Request.CDBLen = 10;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = BMIC_READ;
+			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
+			c->Request.CDB[7] = (size >> 16) & 0xFF;
+			c->Request.CDB[8] = (size >> 8) & 0xFF;
+			break;
 		default:
 			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
 			BUG();
@@ -3562,7 +5536,8 @@
 	spin_unlock_irqrestore(&h->lock, flags);
 
 	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
-	if (likely(c->cmd_type == CMD_SCSI))
+	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
+			|| c->cmd_type == CMD_IOACCEL2))
 		complete_scsi_command(c);
 	else if (c->cmd_type == CMD_IOCTL_PEND)
 		complete(c->waiting);
@@ -4169,21 +6144,24 @@
 		goto default_int_mode;
 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
 		dev_info(&h->pdev->dev, "MSIX\n");
+		h->msix_vector = MAX_REPLY_QUEUES;
 		err = pci_enable_msix(h->pdev, hpsa_msix_entries,
-						MAX_REPLY_QUEUES);
-		if (!err) {
-			for (i = 0; i < MAX_REPLY_QUEUES; i++)
-				h->intr[i] = hpsa_msix_entries[i].vector;
-			h->msix_vector = 1;
-			return;
-		}
+				      h->msix_vector);
 		if (err > 0) {
 			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
 			       "available\n", err);
-			goto default_int_mode;
+			h->msix_vector = err;
+			err = pci_enable_msix(h->pdev, hpsa_msix_entries,
+					      h->msix_vector);
+		}
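+		/*
+		 * Example (annotation): if MAX_REPLY_QUEUES vectors were
+		 * requested but only 4 exist, the first call returns 4;
+		 * the retry then asks for exactly 4 and the success path
+		 * below is taken.
+		 */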
+		if (!err) {
+			for (i = 0; i < h->msix_vector; i++)
+				h->intr[i] = hpsa_msix_entries[i].vector;
+			return;
 		} else {
 			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
 			       err);
+			h->msix_vector = 0;
 			goto default_int_mode;
 		}
 	}
@@ -4336,6 +6314,7 @@
 	hpsa_get_max_perf_mode_cmds(h);
 	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
 	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
+	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
 	/*
 	 * Limit in-command s/g elements to 32 to save dma'able memory.
 	 * However, the spec says if 0, use 31
@@ -4352,6 +6331,10 @@
 
 	/* Find out what task management functions are supported and cache */
 	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
+	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
+		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
+	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
+		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
 }
 
 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
@@ -4390,6 +6373,23 @@
 	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
 }
 
+static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
+{
+	int i;
+	u32 doorbell_value;
+	unsigned long flags;
+	/* wait until the clear_event_notify bit 6 is cleared by controller. */
+	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+		spin_lock_irqsave(&h->lock, flags);
+		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+		spin_unlock_irqrestore(&h->lock, flags);
+		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
+			break;
+		/* delay and try again */
+		msleep(20);
+	}
+}
+
 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
 {
 	int i;
@@ -4420,18 +6420,20 @@
 		return -ENOTSUPP;
 
 	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+
 	/* Update the field, and then ring the doorbell */
 	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 	hpsa_wait_for_mode_change_ack(h);
 	print_cfg_table(&h->pdev->dev, h->cfgtable);
-	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
-		dev_warn(&h->pdev->dev,
-			"unable to get board into simple mode\n");
-		return -ENODEV;
-	}
+	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
+		goto error;
 	h->transMethod = CFGTBL_Trans_Simple;
 	return 0;
+error:
+	dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
+	return -ENODEV;
 }
 
 static int hpsa_pci_init(struct ctlr_info *h)
@@ -4577,11 +6579,19 @@
 		pci_free_consistent(h->pdev,
 			    h->nr_cmds * sizeof(struct CommandList),
 			    h->cmd_pool, h->cmd_pool_dhandle);
+	if (h->ioaccel2_cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
 	if (h->errinfo_pool)
 		pci_free_consistent(h->pdev,
 			    h->nr_cmds * sizeof(struct ErrorInfo),
 			    h->errinfo_pool,
 			    h->errinfo_pool_dhandle);
+	if (h->ioaccel_cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(struct io_accel1_cmd),
+			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
 }
 
 static int hpsa_request_irq(struct ctlr_info *h,
@@ -4597,15 +6607,15 @@
 	for (i = 0; i < MAX_REPLY_QUEUES; i++)
 		h->q[i] = (u8) i;
 
-	if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
+	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
 		/* If performant mode and MSI-X, use multiple reply queues */
-		for (i = 0; i < MAX_REPLY_QUEUES; i++)
+		for (i = 0; i < h->msix_vector; i++)
 			rc = request_irq(h->intr[i], msixhandler,
 					0, h->devname,
 					&h->q[i]);
 	} else {
 		/* Use single reply pool */
-		if (h->msix_vector || h->msi_vector) {
+		if (h->msix_vector > 0 || h->msi_vector) {
 			rc = request_irq(h->intr[h->intr_mode],
 				msixhandler, 0, h->devname,
 				&h->q[h->intr_mode]);
@@ -4658,7 +6668,7 @@
 		return;
 	}
 
-	for (i = 0; i < MAX_REPLY_QUEUES; i++)
+	for (i = 0; i < h->msix_vector; i++)
 		free_irq(h->intr[i], &h->q[i]);
 }
 
@@ -4681,6 +6691,7 @@
 	hpsa_free_irqs_and_disable_msix(h);
 	hpsa_free_sg_chain_blocks(h);
 	hpsa_free_cmd_pool(h);
+	kfree(h->ioaccel1_blockFetchTable);
 	kfree(h->blockFetchTable);
 	pci_free_consistent(h->pdev, h->reply_pool_size,
 		h->reply_pool, h->reply_pool_dhandle);
@@ -4760,6 +6771,92 @@
 	h->last_heartbeat_timestamp = now;
 }
 
+static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+{
+	int i;
+	char *event_type;
+
+	/* Clear the driver-requested rescan flag */
+	h->drv_req_rescan = 0;
+
+	/* Ask the controller to clear the events we're handling. */
+	if ((h->transMethod & (CFGTBL_Trans_io_accel1
+			| CFGTBL_Trans_io_accel2)) &&
+		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
+		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
+
+		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
+			event_type = "state change";
+		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
+			event_type = "configuration change";
+		/* Stop sending new RAID offload reqs via the IO accelerator */
+		scsi_block_requests(h->scsi_host);
+		for (i = 0; i < h->ndevices; i++)
+			h->dev[i]->offload_enabled = 0;
+		hpsa_drain_accel_commands(h);
+		/* Log which events are being acknowledged */
+		dev_warn(&h->pdev->dev,
+			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
+			h->events, event_type);
+		writel(h->events, &(h->cfgtable->clear_event_notify));
+		/* Set the "clear event notify field update" bit 6 */
+		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
+		/* Wait until ctlr clears 'clear event notify field', bit 6 */
+		hpsa_wait_for_clear_event_notify_ack(h);
+		scsi_unblock_requests(h->scsi_host);
+	} else {
+		/* Acknowledge controller notification events. */
+		writel(h->events, &(h->cfgtable->clear_event_notify));
+		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
+		hpsa_wait_for_clear_event_notify_ack(h);
+#if 0
+		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+		hpsa_wait_for_mode_change_ack(h);
+#endif
+	}
+}
+
+/* Check a register on the controller to see if there are configuration
+ * changes (added/changed/removed logical drives, etc.) which mean that
+ * we should rescan the controller for devices.
+ * Also check flag for driver-initiated rescan.
+ */
+static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
+{
+	if (h->drv_req_rescan)
+		return 1;
+
+	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
+		return 0;
+
+	h->events = readl(&(h->cfgtable->event_notify));
+	return h->events & RESCAN_REQUIRED_EVENT_BITS;
+}
+
+/*
+ * Check if any of the offline devices have become ready
+ */
+static int hpsa_offline_devices_ready(struct ctlr_info *h)
+{
+	unsigned long flags;
+	struct offline_device_entry *d;
+	struct list_head *this, *tmp;
+
+	spin_lock_irqsave(&h->offline_device_lock, flags);
+	list_for_each_safe(this, tmp, &h->offline_device_list) {
+		d = list_entry(this, struct offline_device_entry,
+				offline_list);
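+		/*
+		 * Drop the lock across hpsa_volume_offline(), which
+		 * presumably issues commands to the controller and may
+		 * sleep; it is reacquired before the next list step.
+		 */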
+		spin_unlock_irqrestore(&h->offline_device_lock, flags);
+		if (!hpsa_volume_offline(h, d->scsi3addr))
+			return 1;
+		spin_lock_irqsave(&h->offline_device_lock, flags);
+	}
+	spin_unlock_irqrestore(&h->offline_device_lock, flags);
+	return 0;
+}
+
 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
 {
 	unsigned long flags;
@@ -4768,6 +6865,15 @@
 	detect_controller_lockup(h);
 	if (h->lockup_detected)
 		return;
+
+	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
+		scsi_host_get(h->scsi_host);
+		h->drv_req_rescan = 0;
+		hpsa_ack_ctlr_events(h);
+		hpsa_scan_start(h->scsi_host);
+		scsi_host_put(h->scsi_host);
+	}
+
 	spin_lock_irqsave(&h->lock, flags);
 	if (h->remove_in_progress) {
 		spin_unlock_irqrestore(&h->lock, flags);
@@ -4807,7 +6913,7 @@
 	 * the low-order bits of the address are used by the hardware and
 	 * by the driver.  See comments in hpsa.h for more info.
 	 */
-#define COMMANDLIST_ALIGNMENT 32
+#define COMMANDLIST_ALIGNMENT 128
 	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
 	h = kzalloc(sizeof(*h), GFP_KERNEL);
 	if (!h)
@@ -4817,7 +6923,9 @@
 	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
 	INIT_LIST_HEAD(&h->cmpQ);
 	INIT_LIST_HEAD(&h->reqQ);
+	INIT_LIST_HEAD(&h->offline_device_list);
 	spin_lock_init(&h->lock);
+	spin_lock_init(&h->offline_device_lock);
 	spin_lock_init(&h->scan_lock);
 	spin_lock_init(&h->passthru_count_lock);
 	rc = hpsa_pci_init(h);
@@ -4859,6 +6967,7 @@
 
 	pci_set_drvdata(pdev, h);
 	h->ndevices = 0;
+	h->hba_mode_enabled = 0;
 	h->scsi_host = NULL;
 	spin_lock_init(&h->devlock);
 	hpsa_put_ctlr_into_performant_mode(h);
@@ -4918,6 +7027,11 @@
 		goto reinit_after_soft_reset;
 	}
 
+	/* Enable accelerated I/O path at driver layer */
+	h->acciopath_status = 1;
+
+	h->drv_req_rescan = 0;
+
 	/* Turn the interrupts on so we can service requests */
 	h->access.set_intr_mask(h, HPSA_INTR_ON);
 
@@ -5034,6 +7148,8 @@
 		h->reply_pool, h->reply_pool_dhandle);
 	kfree(h->cmd_pool_bits);
 	kfree(h->blockFetchTable);
+	kfree(h->ioaccel1_blockFetchTable);
+	kfree(h->ioaccel2_blockFetchTable);
 	kfree(h->hba_inquiry_data);
 	pci_disable_device(pdev);
 	pci_release_regions(pdev);
@@ -5074,20 +7190,17 @@
  * bits of the command address.
  */
 static void  calc_bucket_map(int bucket[], int num_buckets,
-	int nsgs, int *bucket_map)
+	int nsgs, int min_blocks, int *bucket_map)
 {
 	int i, j, b, size;
 
-	/* even a command with 0 SGs requires 4 blocks */
-#define MINIMUM_TRANSFER_BLOCKS 4
-#define NUM_BUCKETS 8
 	/* Note, bucket_map must have nsgs+1 entries. */
 	for (i = 0; i <= nsgs; i++) {
 		/* Compute size of a command with i SG entries */
-		size = i + MINIMUM_TRANSFER_BLOCKS;
+		size = i + min_blocks;
 		b = num_buckets; /* Assume the biggest bucket */
 		/* Find the bucket that is just big enough */
-		for (j = 0; j < 8; j++) {
+		for (j = 0; j < num_buckets; j++) {
 			if (bucket[j] >= size) {
 				b = j;
 				break;
@@ -5098,10 +7211,16 @@
 	}
 }
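+/*
+ * Worked example (annotation): with the performant-mode table
+ * bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4} and
+ * min_blocks = 4, a command with 3 SG entries needs size 3 + 4 = 7
+ * blocks; the smallest bucket holding 7 is bft[2] = 8, so
+ * bucket_map[3] = 2.
+ */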
 
-static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
+static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 {
 	int i;
 	unsigned long register_value;
+	unsigned long transMethod = CFGTBL_Trans_Performant |
+			(trans_support & CFGTBL_Trans_use_short_tags) |
+				CFGTBL_Trans_enable_directed_msix |
+			(trans_support & (CFGTBL_Trans_io_accel1 |
+				CFGTBL_Trans_io_accel2));
+	struct access_method access = SA5_performant_access;
 
 	/* This is a bit complicated.  There are 8 registers on
 	 * the controller which we write to to tell it 8 different
@@ -5121,6 +7240,16 @@
 	 * sizes for small commands, and fewer sizes for larger commands.
 	 */
 	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
+#define MIN_IOACCEL2_BFT_ENTRY 5
+#define HPSA_IOACCEL2_HEADER_SZ 4
+	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
+			13, 14, 15, 16, 17, 18, 19,
+			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
+	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
+	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
+	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
+				 16 * MIN_IOACCEL2_BFT_ENTRY);
+	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
 	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
 	/*  5 = 1 s/g entry or 4k
 	 *  6 = 2 s/g entry or 8k
@@ -5133,7 +7262,7 @@
 
 	bft[7] = SG_ENTRIES_IN_CMD + 4;
 	calc_bucket_map(bft, ARRAY_SIZE(bft),
-				SG_ENTRIES_IN_CMD, h->blockFetchTable);
+				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
 	for (i = 0; i < 8; i++)
 		writel(bft[i], &h->transtable->BlockFetch[i]);
 
@@ -5150,9 +7279,22 @@
 			&h->transtable->RepQAddr[i].lower);
 	}
 
-	writel(CFGTBL_Trans_Performant | use_short_tags |
-		CFGTBL_Trans_enable_directed_msix,
-		&(h->cfgtable->HostWrite.TransportRequest));
+	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
+	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
+	/* Enable outbound interrupt coalescing in accelerator mode */
+	if (trans_support & CFGTBL_Trans_io_accel1) {
+		access = SA5_ioaccel_mode1_access;
+		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+	} else if (trans_support & CFGTBL_Trans_io_accel2) {
+		access = SA5_ioaccel_mode2_access;
+		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+	}
 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 	hpsa_wait_for_mode_change_ack(h);
 	register_value = readl(&(h->cfgtable->TransportActive));
@@ -5162,23 +7304,186 @@
 		return;
 	}
 	/* Change the access methods to the performant access methods */
-	h->access = SA5_performant_access;
-	h->transMethod = CFGTBL_Trans_Performant;
+	h->access = access;
+	h->transMethod = transMethod;
+
+	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
+		(trans_support & CFGTBL_Trans_io_accel2)))
+		return;
+
+	if (trans_support & CFGTBL_Trans_io_accel1) {
+		/* Set up I/O accelerator mode */
+		for (i = 0; i < h->nreply_queues; i++) {
+			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
+			h->reply_queue[i].current_entry =
+				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
+		}
+		bft[7] = h->ioaccel_maxsg + 8;
+		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
+				h->ioaccel1_blockFetchTable);
+
+		/* initialize all reply queue entries to unused */
+		memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
+				h->reply_pool_size);
+
+		/* set all the constant fields in the accelerator command
+		 * frames once at init time to save CPU cycles later.
+		 */
+		for (i = 0; i < h->nr_cmds; i++) {
+			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
+
+			cp->function = IOACCEL1_FUNCTION_SCSIIO;
+			cp->err_info = (u32) (h->errinfo_pool_dhandle +
+					(i * sizeof(struct ErrorInfo)));
+			cp->err_info_len = sizeof(struct ErrorInfo);
+			cp->sgl_offset = IOACCEL1_SGLOFFSET;
+			cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
+			cp->timeout_sec = 0;
+			cp->ReplyQueue = 0;
+			cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
+						DIRECT_LOOKUP_BIT;
+			cp->Tag.upper = 0;
+			cp->host_addr.lower =
+				(u32) (h->ioaccel_cmd_pool_dhandle +
+					(i * sizeof(struct io_accel1_cmd)));
+			cp->host_addr.upper = 0;
+		}
+	} else if (trans_support & CFGTBL_Trans_io_accel2) {
+		u64 cfg_offset, cfg_base_addr_index;
+		u32 bft2_offset, cfg_base_addr;
+		int rc;
+
+		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+			&cfg_base_addr_index, &cfg_offset);
+		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
+		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
+		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
+				4, h->ioaccel2_blockFetchTable);
+		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
+		BUILD_BUG_ON(offsetof(struct CfgTable,
+				io_accel_request_size_offset) != 0xb8);
+		h->ioaccel2_bft2_regs =
+			remap_pci_mem(pci_resource_start(h->pdev,
+					cfg_base_addr_index) +
+					cfg_offset + bft2_offset,
+					ARRAY_SIZE(bft2) *
+					sizeof(*h->ioaccel2_bft2_regs));
+		for (i = 0; i < ARRAY_SIZE(bft2); i++)
+			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
+	}
+	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+	hpsa_wait_for_mode_change_ack(h);
+}
+
+static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
+{
+	h->ioaccel_maxsg =
+		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
+	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
+		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
+
+	/* Command structures must be aligned on a 128-byte boundary
+	 * because the 7 lower bits of the address are used by the
+	 * hardware.
+	 */
+#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
+	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
+			IOACCEL1_COMMANDLIST_ALIGNMENT);
+	h->ioaccel_cmd_pool =
+		pci_alloc_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+			&(h->ioaccel_cmd_pool_dhandle));
+
+	h->ioaccel1_blockFetchTable =
+		kmalloc(((h->ioaccel_maxsg + 1) *
+				sizeof(u32)), GFP_KERNEL);
+
+	if ((h->ioaccel_cmd_pool == NULL) ||
+		(h->ioaccel1_blockFetchTable == NULL))
+		goto clean_up;
+
+	memset(h->ioaccel_cmd_pool, 0,
+		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
+	return 0;
+
+clean_up:
+	if (h->ioaccel_cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
+	kfree(h->ioaccel1_blockFetchTable);
+	return 1;
+}
+
+static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
+{
+	/* Allocate ioaccel2 mode command blocks and block fetch table */
+
+	h->ioaccel_maxsg =
+		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
+	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
+		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
+
+#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
+	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
+			IOACCEL2_COMMANDLIST_ALIGNMENT);
+	h->ioaccel2_cmd_pool =
+		pci_alloc_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+			&(h->ioaccel2_cmd_pool_dhandle));
+
+	h->ioaccel2_blockFetchTable =
+		kmalloc(((h->ioaccel_maxsg + 1) *
+				sizeof(u32)), GFP_KERNEL);
+
+	if ((h->ioaccel2_cmd_pool == NULL) ||
+		(h->ioaccel2_blockFetchTable == NULL))
+		goto clean_up;
+
+	memset(h->ioaccel2_cmd_pool, 0,
+		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
+	return 0;
+
+clean_up:
+	if (h->ioaccel2_cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
+	kfree(h->ioaccel2_blockFetchTable);
+	return 1;
 }
 
 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 {
 	u32 trans_support;
+	unsigned long transMethod = CFGTBL_Trans_Performant |
+					CFGTBL_Trans_use_short_tags;
 	int i;
 
 	if (hpsa_simple_mode)
 		return;
 
+	/* Read transport support bits before testing for accelerator
+	 * modes; the checks below must not use trans_support
+	 * uninitialized.
+	 */
+	trans_support = readl(&(h->cfgtable->TransportSupport));
+
+	/* Check for I/O accelerator mode support */
+	if (trans_support & CFGTBL_Trans_io_accel1) {
+		transMethod |= CFGTBL_Trans_io_accel1 |
+				CFGTBL_Trans_enable_directed_msix;
+		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
+			goto clean_up;
+	} else if (trans_support & CFGTBL_Trans_io_accel2) {
+		transMethod |= CFGTBL_Trans_io_accel2 |
+				CFGTBL_Trans_enable_directed_msix;
+		if (ioaccel2_alloc_cmds_and_bft(h))
+			goto clean_up;
+	}
+
 	trans_support = readl(&(h->cfgtable->TransportSupport));
 	if (!(trans_support & PERFORMANT_MODE))
 		return;
 
-	h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
+	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
 	hpsa_get_max_perf_mode_cmds(h);
 	/* Performant mode ring buffer and supporting data structures */
 	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
@@ -5200,9 +7505,7 @@
 		|| (h->blockFetchTable == NULL))
 		goto clean_up;
 
-	hpsa_enter_performant_mode(h,
-		trans_support & CFGTBL_Trans_use_short_tags);
-
+	hpsa_enter_performant_mode(h, trans_support);
 	return;
 
 clean_up:
@@ -5212,6 +7515,31 @@
 	kfree(h->blockFetchTable);
 }
 
+static int is_accelerated_cmd(struct CommandList *c)
+{
+	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
+}
+
+static void hpsa_drain_accel_commands(struct ctlr_info *h)
+{
+	struct CommandList *c = NULL;
+	unsigned long flags;
+	int accel_cmds_out;
+
+	do { /* wait for all outstanding commands to drain out */
+		accel_cmds_out = 0;
+		spin_lock_irqsave(&h->lock, flags);
+		list_for_each_entry(c, &h->cmpQ, list)
+			accel_cmds_out += is_accelerated_cmd(c);
+		list_for_each_entry(c, &h->reqQ, list)
+			accel_cmds_out += is_accelerated_cmd(c);
+		spin_unlock_irqrestore(&h->lock, flags);
+		if (accel_cmds_out <= 0)
+			break;
+		msleep(100);
+	} while (1);
+}
+
 /*
  *  This is it.  Register the PCI driver information for the cards we control;
  *  the OS will call our registered routines when it finds one of our cards.
@@ -5226,5 +7554,83 @@
 	pci_unregister_driver(&hpsa_pci_driver);
 }
 
+static void __attribute__((unused)) verify_offsets(void)
+{
+#define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
+
+	VERIFY_OFFSET(structure_size, 0);
+	VERIFY_OFFSET(volume_blk_size, 4);
+	VERIFY_OFFSET(volume_blk_cnt, 8);
+	VERIFY_OFFSET(phys_blk_shift, 16);
+	VERIFY_OFFSET(parity_rotation_shift, 17);
+	VERIFY_OFFSET(strip_size, 18);
+	VERIFY_OFFSET(disk_starting_blk, 20);
+	VERIFY_OFFSET(disk_blk_cnt, 28);
+	VERIFY_OFFSET(data_disks_per_row, 36);
+	VERIFY_OFFSET(metadata_disks_per_row, 38);
+	VERIFY_OFFSET(row_cnt, 40);
+	VERIFY_OFFSET(layout_map_count, 42);
+	VERIFY_OFFSET(flags, 44);
+	VERIFY_OFFSET(dekindex, 46);
+	/* VERIFY_OFFSET(reserved, 48) */
+	VERIFY_OFFSET(data, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
+
+	VERIFY_OFFSET(IU_type, 0);
+	VERIFY_OFFSET(direction, 1);
+	VERIFY_OFFSET(reply_queue, 2);
+	/* VERIFY_OFFSET(reserved1, 3);  */
+	VERIFY_OFFSET(scsi_nexus, 4);
+	VERIFY_OFFSET(Tag, 8);
+	VERIFY_OFFSET(cdb, 16);
+	VERIFY_OFFSET(cciss_lun, 32);
+	VERIFY_OFFSET(data_len, 40);
+	VERIFY_OFFSET(cmd_priority_task_attr, 44);
+	VERIFY_OFFSET(sg_count, 45);
+	/* VERIFY_OFFSET(reserved3 */
+	VERIFY_OFFSET(err_ptr, 48);
+	VERIFY_OFFSET(err_len, 56);
+	/* VERIFY_OFFSET(reserved4  */
+	VERIFY_OFFSET(sg, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
+
+	VERIFY_OFFSET(dev_handle, 0x00);
+	VERIFY_OFFSET(reserved1, 0x02);
+	VERIFY_OFFSET(function, 0x03);
+	VERIFY_OFFSET(reserved2, 0x04);
+	VERIFY_OFFSET(err_info, 0x0C);
+	VERIFY_OFFSET(reserved3, 0x10);
+	VERIFY_OFFSET(err_info_len, 0x12);
+	VERIFY_OFFSET(reserved4, 0x13);
+	VERIFY_OFFSET(sgl_offset, 0x14);
+	VERIFY_OFFSET(reserved5, 0x15);
+	VERIFY_OFFSET(transfer_len, 0x1C);
+	VERIFY_OFFSET(reserved6, 0x20);
+	VERIFY_OFFSET(io_flags, 0x24);
+	VERIFY_OFFSET(reserved7, 0x26);
+	VERIFY_OFFSET(LUN, 0x34);
+	VERIFY_OFFSET(control, 0x3C);
+	VERIFY_OFFSET(CDB, 0x40);
+	VERIFY_OFFSET(reserved8, 0x50);
+	VERIFY_OFFSET(host_context_flags, 0x60);
+	VERIFY_OFFSET(timeout_sec, 0x62);
+	VERIFY_OFFSET(ReplyQueue, 0x64);
+	VERIFY_OFFSET(reserved9, 0x65);
+	VERIFY_OFFSET(Tag, 0x68);
+	VERIFY_OFFSET(host_addr, 0x70);
+	VERIFY_OFFSET(CISS_LUN, 0x78);
+	VERIFY_OFFSET(SG, 0x78 + 8);
+#undef VERIFY_OFFSET
+}
+
 module_init(hpsa_init);
 module_exit(hpsa_cleanup);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 01c3283..44235a2 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,6 +1,6 @@
 /*
  *    Disk Array driver for HP Smart Array SAS controllers
- *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -46,6 +46,15 @@
 	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
 	unsigned char model[16];        /* bytes 16-31 of inquiry data */
 	unsigned char raid_level;	/* from inquiry page 0xC1 */
+	unsigned char volume_offline;	/* discovered via TUR or VPD */
+	u32 ioaccel_handle;
+	int offload_config;		/* I/O accel RAID offload configured */
+	int offload_enabled;		/* I/O accel RAID offload enabled */
+	int offload_to_mirror;		/* Send next I/O accelerator RAID
+					 * offload request to mirror drive
+					 */
+	struct raid_map_data raid_map;	/* I/O accelerator RAID map */
+
 };
 
 struct reply_pool {
@@ -55,6 +64,46 @@
 	u32 current_entry;
 };
 
+#pragma pack(1)
+struct bmic_controller_parameters {
+	u8   led_flags;
+	u8   enable_command_list_verification;
+	u8   backed_out_write_drives;
+	u16  stripes_for_parity;
+	u8   parity_distribution_mode_flags;
+	u16  max_driver_requests;
+	u16  elevator_trend_count;
+	u8   disable_elevator;
+	u8   force_scan_complete;
+	u8   scsi_transfer_mode;
+	u8   force_narrow;
+	u8   rebuild_priority;
+	u8   expand_priority;
+	u8   host_sdb_asic_fix;
+	u8   pdpi_burst_from_host_disabled;
+	char software_name[64];
+	char hardware_name[32];
+	u8   bridge_revision;
+	u8   snapshot_priority;
+	u32  os_specific;
+	u8   post_prompt_timeout;
+	u8   automatic_drive_slamming;
+	u8   reserved1;
+	u8   nvram_flags;
+	u8   cache_nvram_flags;
+	u8   drive_config_flags;
+	u16  reserved2;
+	u8   temp_warning_level;
+	u8   temp_shutdown_level;
+	u8   temp_condition_reset;
+	u8   max_coalesce_commands;
+	u32  max_coalesce_delay;
+	u8   orca_password[4];
+	u8   access_id[16];
+	u8   reserved[356];
+};
+#pragma pack()
+
 struct ctlr_info {
 	int	ctlr;
 	char	devname[8];
@@ -80,6 +129,7 @@
 	unsigned int msi_vector;
 	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
 	struct access_method access;
+	char hba_mode_enabled;
 
 	/* queue and queue Info */
 	struct list_head reqQ;
@@ -95,6 +145,10 @@
 	/* pointers to command and error info pool */
 	struct CommandList 	*cmd_pool;
 	dma_addr_t		cmd_pool_dhandle;
+	struct io_accel1_cmd	*ioaccel_cmd_pool;
+	dma_addr_t		ioaccel_cmd_pool_dhandle;
+	struct io_accel2_cmd	*ioaccel2_cmd_pool;
+	dma_addr_t		ioaccel2_cmd_pool_dhandle;
 	struct ErrorInfo 	*errinfo_pool;
 	dma_addr_t		errinfo_pool_dhandle;
 	unsigned long  		*cmd_pool_bits;
@@ -128,7 +182,14 @@
 	u8 nreply_queues;
 	dma_addr_t reply_pool_dhandle;
 	u32 *blockFetchTable;
+	u32 *ioaccel1_blockFetchTable;
+	u32 *ioaccel2_blockFetchTable;
+	u32 *ioaccel2_bft2_regs;
 	unsigned char *hba_inquiry_data;
+	u32 driver_support;
+	u32 fw_support;
+	int ioaccel_support;
+	int ioaccel_maxsg;
 	u64 last_intr_timestamp;
 	u32 last_heartbeat;
 	u64 last_heartbeat_timestamp;
@@ -161,7 +222,35 @@
 #define HPSATMF_LOG_QRY_TASK    (1 << 23)
 #define HPSATMF_LOG_QRY_TSET    (1 << 24)
 #define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
+	u32 events;
+#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
+#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
+#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
+#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
+#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
+#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
+#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)
+
+#define RESCAN_REQUIRED_EVENT_BITS \
+		(CTLR_STATE_CHANGE_EVENT | \
+		CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
+		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
+		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
+		CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
+		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
+		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
+	spinlock_t offline_device_lock;
+	struct list_head offline_device_list;
+	int	acciopath_status;
+	int	drv_req_rescan;	/* flag for driver to request rescan event */
+	int	raid_offload_debug;
 };
+
+struct offline_device_entry {
+	unsigned char scsi3addr[8];
+	struct list_head offline_list;
+};
+
 #define HPSA_ABORT_MSG 0
 #define HPSA_DEVICE_RESET_MSG 1
 #define HPSA_RESET_TYPE_CONTROLLER 0x00
@@ -242,6 +331,14 @@
 
 #define HPSA_INTR_ON 	1
 #define HPSA_INTR_OFF	0
+
+/*
+ * Inbound Post Queue offsets for IO Accelerator Mode 2
+ */
+#define IOACCEL2_INBOUND_POSTQ_32	0x48
+#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
+#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4
+
 /*
 	Send the command to the hardware
 */
@@ -254,6 +351,18 @@
 	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
 }
 
+static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
+		c->Header.Tag.lower);
+	if (c->cmd_type == CMD_IOACCEL2)
+		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
+	else
+		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+}
+
 /*
  *  This card is the opposite of the other cards.
  *   0 turns interrupts on...
@@ -387,6 +496,50 @@
 	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
 }
 
+#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT    0x100
+
+static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
+{
+	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
+
+	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
+		true : false;
+}
+
+#define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
+#define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
+#define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
+#define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL
+
+static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
+{
+	u64 register_value;
+	struct reply_pool *rq = &h->reply_queue[q];
+	unsigned long flags;
+
+	BUG_ON(q >= h->nreply_queues);
+
+	register_value = rq->head[rq->current_entry];
+	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
+		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
+		if (++rq->current_entry == rq->size)
+			rq->current_entry = 0;
+		/*
+		 * @todo
+		 *
+		 * Don't really need to write the new index after each command,
+		 * but with current driver design this is easiest.
+		 */
+		wmb();
+		writel((q << 24) | rq->current_entry, h->vaddr +
+				IOACCEL_MODE1_CONSUMER_INDEX);
+		spin_lock_irqsave(&h->lock, flags);
+		h->commands_outstanding--;
+		spin_unlock_irqrestore(&h->lock, flags);
+	}
+	return (unsigned long) register_value;
+}
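+
+/*
+ * Annotation: the consumer-index write above encodes the reply queue
+ * number in the top byte, e.g. queue 2 with current_entry 5 writes
+ * (2 << 24) | 5 = 0x02000005 to IOACCEL_MODE1_CONSUMER_INDEX.
+ */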
+
 static struct access_method SA5_access = {
 	SA5_submit_command,
 	SA5_intr_mask,
@@ -395,6 +548,22 @@
 	SA5_completed,
 };
 
+static struct access_method SA5_ioaccel_mode1_access = {
+	SA5_submit_command,
+	SA5_performant_intr_mask,
+	SA5_fifo_full,
+	SA5_ioaccel_mode1_intr_pending,
+	SA5_ioaccel_mode1_completed,
+};
+
+static struct access_method SA5_ioaccel_mode2_access = {
+	SA5_submit_command_ioaccel2,
+	SA5_performant_intr_mask,
+	SA5_fifo_full,
+	SA5_performant_intr_pending,
+	SA5_performant_completed,
+};
+
 static struct access_method SA5_performant_access = {
 	SA5_submit_command,
 	SA5_performant_intr_mask,
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index bfc8c4e..b5cc705 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,6 +1,6 @@
 /*
  *    Disk Array driver for HP Smart Array SAS controllers
- *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -25,6 +25,7 @@
 #define SENSEINFOBYTES          32 /* may vary between hbas */
 #define SG_ENTRIES_IN_CMD	32 /* Max SG entries excluding chain blocks */
 #define HPSA_SG_CHAIN		0x80000000
+#define HPSA_SG_LAST		0x40000000
 #define MAXREPLYQS              256
 
 /* Command Status value */
@@ -41,6 +42,8 @@
 #define CMD_UNSOLICITED_ABORT   0x000A
 #define CMD_TIMEOUT             0x000B
 #define CMD_UNABORTABLE		0x000C
+#define CMD_IOACCEL_DISABLED	0x000E
 
 /* Unit Attentions ASC's as defined for the MSA2012sa */
 #define POWER_OR_RESET			0x29
@@ -79,8 +82,9 @@
 #define ATTR_ACA                0x07
 
 /* cdb type */
-#define TYPE_CMD				0x00
-#define TYPE_MSG				0x01
+#define TYPE_CMD		0x00
+#define TYPE_MSG		0x01
+#define TYPE_IOACCEL2_CMD	0x81 /* 0x81 is not used by hardware */
 
 /* Message Types  */
 #define HPSA_TASK_MANAGEMENT    0x00
@@ -125,9 +129,12 @@
 #define CFGTBL_AccCmds          0x00000001l
 #define DOORBELL_CTLR_RESET	0x00000004l
 #define DOORBELL_CTLR_RESET2	0x00000020l
+#define DOORBELL_CLEAR_EVENTS	0x00000040l
 
 #define CFGTBL_Trans_Simple     0x00000002l
 #define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_io_accel1	0x00000080l
+#define CFGTBL_Trans_io_accel2	0x00000100l
 #define CFGTBL_Trans_use_short_tags 0x20000000l
 #define CFGTBL_Trans_enable_directed_msix (1 << 30)
 
@@ -135,6 +142,28 @@
 #define CFGTBL_BusType_Ultra3   0x00000002l
 #define CFGTBL_BusType_Fibre1G  0x00000100l
 #define CFGTBL_BusType_Fibre2G  0x00000200l
+
+/* VPD Inquiry types */
+#define HPSA_VPD_SUPPORTED_PAGES        0x00
+#define HPSA_VPD_LV_DEVICE_GEOMETRY     0xC1
+#define HPSA_VPD_LV_IOACCEL_STATUS      0xC2
+#define HPSA_VPD_LV_STATUS		0xC3
+#define HPSA_VPD_HEADER_SZ              4
+
+/* Logical volume states */
+#define HPSA_VPD_LV_STATUS_UNSUPPORTED			-1
+#define HPSA_LV_OK                                      0x0
+#define HPSA_LV_UNDERGOING_ERASE			0x0F
+#define HPSA_LV_UNDERGOING_RPI				0x12
+#define HPSA_LV_PENDING_RPI				0x13
+#define HPSA_LV_ENCRYPTED_NO_KEY			0x14
+#define HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER	0x15
+#define HPSA_LV_UNDERGOING_ENCRYPTION			0x16
+#define HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING		0x17
+#define HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER	0x18
+#define HPSA_LV_PENDING_ENCRYPTION			0x19
+#define HPSA_LV_PENDING_ENCRYPTION_REKEYING		0x1A
+
 struct vals32 {
 	u32   lower;
 	u32   upper;
@@ -162,9 +191,50 @@
 
 #define HPSA_REPORT_LOG 0xc2    /* Report Logical LUNs */
 #define HPSA_REPORT_PHYS 0xc3   /* Report Physical LUNs */
+#define HPSA_REPORT_PHYS_EXTENDED 0x02
+#define HPSA_CISS_READ	0xc0	/* CISS Read */
+#define HPSA_GET_RAID_MAP 0xc8	/* CISS Get RAID Layout Map */
+
+#define RAID_MAP_MAX_ENTRIES   256
+
+struct raid_map_disk_data {
+	u32   ioaccel_handle;         /**< Handle to access this disk via the
+					*  I/O accelerator */
+	u8    xor_mult[2];            /**< XOR multipliers for this position,
+					*  valid for data disks only */
+	u8    reserved[2];
+};
+
+struct raid_map_data {
+	u32   structure_size;		/* Size of entire structure in bytes */
+	u32   volume_blk_size;		/* bytes / block in the volume */
+	u64   volume_blk_cnt;		/* logical blocks on the volume */
+	u8    phys_blk_shift;		/* Shift factor to convert between
+					 * units of logical blocks and physical
+					 * disk blocks */
+	u8    parity_rotation_shift;	/* Shift factor to convert between units
+					 * of logical stripes and physical
+					 * stripes */
+	u16   strip_size;		/* blocks used on each disk / stripe */
+	u64   disk_starting_blk;	/* First disk block used in volume */
+	u64   disk_blk_cnt;		/* disk blocks used by volume / disk */
+	u16   data_disks_per_row;	/* data disk entries / row in the map */
+	u16   metadata_disks_per_row;	/* mirror/parity disk entries / row
+					 * in the map */
+	u16   row_cnt;			/* rows in each layout map */
+	u16   layout_map_count;		/* layout maps (1 map per mirror/parity
+					 * group) */
+	u16   flags;			/* Bit 0 set if encryption enabled */
+#define RAID_MAP_FLAG_ENCRYPT_ON  0x01
+	u16   dekindex;			/* Data encryption key index. */
+	u8    reserved[16];
+	struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
+};
+
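struct raid_map_data is what makes the firmware bypass possible: given a logical block number, its fields suffice to compute which member disk services the I/O and at which disk block. A deliberately simplified RAID 0 version of that computation, purely as a sketch (the real driver also handles parity rotation via parity_rotation_shift, mirror groups via layout_map_count, and 32-bit-safe division):

	/* Hypothetical RAID 0 mapping from raid_map_data (sketch only). */
	static u32 sketch_map_lba(const struct raid_map_data *map, u64 lba,
				  u64 *disk_blk)
	{
		u64 row_blks = (u64)map->strip_size * map->data_disks_per_row;
		u64 row = lba / row_blks;		/* full stripe rows */
		u64 in_row = lba % row_blks;		/* offset inside row */
		u32 disk = in_row / map->strip_size;	/* member disk index */

		*disk_blk = map->disk_starting_blk + row * map->strip_size +
			    in_row % map->strip_size;
		return map->data[disk].ioaccel_handle;
	}
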
 struct ReportLUNdata {
 	u8 LUNListLength[4];
-	u32 reserved;
+	u8 extended_response_flag;
+	u8 reserved[3];
 	u8 LUN[HPSA_MAX_LUN][8];
 };
 
@@ -187,6 +257,7 @@
 #define BMIC_CACHE_FLUSH 0xc2
 #define HPSA_CACHE_FLUSH 0x01	/* C2 was already being used by HPSA */
 #define BMIC_FLASH_FIRMWARE 0xF7
+#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
 
 /* Command List Structure */
 union SCSI3Addr {
@@ -283,6 +354,8 @@
 /* Command types */
 #define CMD_IOCTL_PEND  0x01
 #define CMD_SCSI	0x03
+#define CMD_IOACCEL1	0x04
+#define CMD_IOACCEL2	0x05
 
 #define DIRECT_LOOKUP_SHIFT 5
 #define DIRECT_LOOKUP_BIT 0x10
@@ -314,7 +387,6 @@
 	int			   cmd_type;
 	long			   cmdindex;
 	struct list_head list;
-	struct request *rq;
 	struct completion *waiting;
 	void   *scsi_cmd;
 
@@ -327,16 +399,183 @@
  */
 #define IS_32_BIT ((8 - sizeof(long))/4)
 #define IS_64_BIT (!IS_32_BIT)
-#define PAD_32 (4)
-#define PAD_64 (4)
+#define PAD_32 (40)
+#define PAD_64 (12)
 #define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
 	u8 pad[COMMANDLIST_PAD];
 };
 
+/* Max S/G elements in I/O accelerator command */
+#define IOACCEL1_MAXSGENTRIES           24
+#define IOACCEL2_MAXSGENTRIES		28
+
+/*
+ * Structure for I/O accelerator (mode 1) commands.
+ * Note that this structure must be 128-byte aligned in size.
+ */
+struct io_accel1_cmd {
+	u16 dev_handle;			/* 0x00 - 0x01 */
+	u8  reserved1;			/* 0x02 */
+	u8  function;			/* 0x03 */
+	u8  reserved2[8];		/* 0x04 - 0x0B */
+	u32 err_info;			/* 0x0C - 0x0F */
+	u8  reserved3[2];		/* 0x10 - 0x11 */
+	u8  err_info_len;		/* 0x12 */
+	u8  reserved4;			/* 0x13 */
+	u8  sgl_offset;			/* 0x14 */
+	u8  reserved5[7];		/* 0x15 - 0x1B */
+	u32 transfer_len;		/* 0x1C - 0x1F */
+	u8  reserved6[4];		/* 0x20 - 0x23 */
+	u16 io_flags;			/* 0x24 - 0x25 */
+	u8  reserved7[14];		/* 0x26 - 0x33 */
+	u8  LUN[8];			/* 0x34 - 0x3B */
+	u32 control;			/* 0x3C - 0x3F */
+	u8  CDB[16];			/* 0x40 - 0x4F */
+	u8  reserved8[16];		/* 0x50 - 0x5F */
+	u16 host_context_flags;		/* 0x60 - 0x61 */
+	u16 timeout_sec;		/* 0x62 - 0x63 */
+	u8  ReplyQueue;			/* 0x64 */
+	u8  reserved9[3];		/* 0x65 - 0x67 */
+	struct vals32 Tag;		/* 0x68 - 0x6F */
+	struct vals32 host_addr;	/* 0x70 - 0x77 */
+	u8  CISS_LUN[8];		/* 0x78 - 0x7F */
+	struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
+#define IOACCEL1_PAD_64 0
+#define IOACCEL1_PAD_32 0
+#define IOACCEL1_PAD (IS_32_BIT * IOACCEL1_PAD_32 + \
+			IS_64_BIT * IOACCEL1_PAD_64)
+	u8 pad[IOACCEL1_PAD];
+};
+
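The 128-byte size requirement in the comment above is the sort of layout invariant that can be pinned down at compile time; a hedged sketch (whether hpsa itself adds such a check, and where, is not shown here):

	/* In any function scope, e.g. module init (illustrative): */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % 128 != 0);
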
+#define IOACCEL1_FUNCTION_SCSIIO        0x00
+#define IOACCEL1_SGLOFFSET              32
+
+#define IOACCEL1_IOFLAGS_IO_REQ         0x4000
+#define IOACCEL1_IOFLAGS_CDBLEN_MASK    0x001F
+#define IOACCEL1_IOFLAGS_CDBLEN_MAX     16
+
+#define IOACCEL1_CONTROL_NODATAXFER     0x00000000
+#define IOACCEL1_CONTROL_DATA_OUT       0x01000000
+#define IOACCEL1_CONTROL_DATA_IN        0x02000000
+#define IOACCEL1_CONTROL_TASKPRIO_MASK  0x00007800
+#define IOACCEL1_CONTROL_TASKPRIO_SHIFT 11
+#define IOACCEL1_CONTROL_SIMPLEQUEUE    0x00000000
+#define IOACCEL1_CONTROL_HEADOFQUEUE    0x00000100
+#define IOACCEL1_CONTROL_ORDEREDQUEUE   0x00000200
+#define IOACCEL1_CONTROL_ACA            0x00000400
+
+#define IOACCEL1_HCFLAGS_CISS_FORMAT    0x0013
+
+#define IOACCEL1_BUSADDR_CMDTYPE        0x00000060
+
+struct ioaccel2_sg_element {
+	u64 address;
+	u32 length;
+	u8 reserved[3];
+	u8 chain_indicator;
+#define IOACCEL2_CHAIN 0x80
+};
+
+/*
+ * SCSI Response Format structure for IO Accelerator Mode 2
+ */
+struct io_accel2_scsi_response {
+	u8 IU_type;
+#define IOACCEL2_IU_TYPE_SRF			0x60
+	u8 reserved1[3];
+	u8 req_id[4];		/* request identifier */
+	u8 reserved2[4];
+	u8 serv_response;		/* service response */
+#define IOACCEL2_SERV_RESPONSE_COMPLETE		0x000
+#define IOACCEL2_SERV_RESPONSE_FAILURE		0x001
+#define IOACCEL2_SERV_RESPONSE_TMF_COMPLETE	0x002
+#define IOACCEL2_SERV_RESPONSE_TMF_SUCCESS	0x003
+#define IOACCEL2_SERV_RESPONSE_TMF_REJECTED	0x004
+#define IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN	0x005
+	u8 status;			/* status */
+#define IOACCEL2_STATUS_SR_TASK_COMP_GOOD	0x00
+#define IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND	0x02
+#define IOACCEL2_STATUS_SR_TASK_COMP_BUSY	0x08
+#define IOACCEL2_STATUS_SR_TASK_COMP_RES_CON	0x18
+#define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL	0x28
+#define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED	0x40
+#define IOACCEL2_STATUS_SR_IOACCEL_DISABLED	0x0E
+	u8 data_present;		/* low 2 bits */
+#define IOACCEL2_NO_DATAPRESENT		0x000
+#define IOACCEL2_RESPONSE_DATAPRESENT	0x001
+#define IOACCEL2_SENSE_DATA_PRESENT	0x002
+#define IOACCEL2_RESERVED		0x003
+	u8 sense_data_len;		/* sense/response data length */
+	u8 resid_cnt[4];		/* residual count */
+	u8 sense_data_buff[32];		/* sense/response data buffer */
+};
+
+#define IOACCEL2_64_PAD 76
+#define IOACCEL2_32_PAD 76
+#define IOACCEL2_PAD (IS_32_BIT * IOACCEL2_32_PAD + \
+			IS_64_BIT * IOACCEL2_64_PAD)
+/*
+ * Structure for I/O accelerator (mode 2 or m2) commands.
+ * Note that this structure must be 128-byte aligned in size.
+ */
+struct io_accel2_cmd {
+	u8  IU_type;			/* IU Type */
+	u8  direction;			/* direction, memtype, and encryption */
+#define IOACCEL2_DIRECTION_MASK		0x03 /* bits 0,1: direction  */
+#define IOACCEL2_DIRECTION_MEMTYPE_MASK	0x04 /* bit 2: memtype source/dest */
+					     /*     0b=PCIe, 1b=DDR */
+#define IOACCEL2_DIRECTION_ENCRYPT_MASK	0x08 /* bit 3: encryption flag */
+					     /*     0=off, 1=on */
+	u8  reply_queue;		/* Reply Queue ID */
+	u8  reserved1;			/* Reserved */
+	u32 scsi_nexus;			/* Device Handle */
+	u32 Tag;			/* cciss tag, lower 4 bytes only */
+	u32 tweak_lower;		/* Encryption tweak, lower 4 bytes */
+	u8  cdb[16];			/* SCSI Command Descriptor Block */
+	u8  cciss_lun[8];		/* 8 byte SCSI address */
+	u32 data_len;			/* Total bytes to transfer */
+	u8  cmd_priority_task_attr;	/* priority and task attrs */
+#define IOACCEL2_PRIORITY_MASK 0x78
+#define IOACCEL2_ATTR_MASK 0x07
+	u8  sg_count;			/* Number of sg elements */
+	u16 dekindex;			/* Data encryption key index */
+	u64 err_ptr;			/* Error Pointer */
+	u32 err_len;			/* Error Length*/
+	u32 tweak_upper;		/* Encryption tweak, upper 4 bytes */
+	struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
+	struct io_accel2_scsi_response error_data;
+	u8 pad[IOACCEL2_PAD];
+};
+
+/*
+ * defines for Mode 2 command struct
+ * FIXME: this can't be all I need -- mfm
+ */
+#define IOACCEL2_IU_TYPE	0x40
+#define IOACCEL2_IU_TMF_TYPE	0x41
+#define IOACCEL2_DIR_NO_DATA	0x00
+#define IOACCEL2_DIR_DATA_IN	0x01
+#define IOACCEL2_DIR_DATA_OUT	0x02
+/*
+ * SCSI Task Management Request format for Accelerator Mode 2
+ */
+struct hpsa_tmf_struct {
+	u8 iu_type;		/* Information Unit Type */
+	u8 reply_queue;		/* Reply Queue ID */
+	u8 tmf;			/* Task Management Function */
+	u8 reserved1;		/* byte 3 Reserved */
+	u32 it_nexus;		/* SCSI I-T Nexus */
+	u8 lun_id[8];		/* LUN ID for TMF request */
+	struct vals32 Tag;	/* cciss tag associated w/ request */
+	struct vals32 abort_tag;/* cciss tag of SCSI cmd or task to abort */
+	u64 error_ptr;		/* Error Pointer */
+	u32 error_len;		/* Error Length */
+};
+
 /* Configuration Table Structure */
 struct HostWrite {
 	u32 TransportRequest;
-	u32 Reserved;
+	u32 command_pool_addr_hi;
 	u32 CoalIntDelay;
 	u32 CoalIntCount;
 };
@@ -344,6 +583,9 @@
 #define SIMPLE_MODE     0x02
 #define PERFORMANT_MODE 0x04
 #define MEMQ_MODE       0x08
+#define IOACCEL_MODE_1  0x80
+
+#define DRIVER_SUPPORT_UA_ENABLE        0x00000001
 
 struct CfgTable {
 	u8            Signature[4];
@@ -373,8 +615,18 @@
 	u32		misc_fw_support; /* offset 0x78 */
 #define			MISC_FW_DOORBELL_RESET (0x02)
 #define			MISC_FW_DOORBELL_RESET2 (0x010)
+#define			MISC_FW_RAID_OFFLOAD_BASIC (0x020)
+#define			MISC_FW_EVENT_NOTIFY (0x080)
 	u8		driver_version[32];
-
+	u32             max_cached_write_size;
+	u8              driver_scratchpad[16];
+	u32             max_error_info_length;
+	u32		io_accel_max_embedded_sg_count;
+	u32		io_accel_request_size_offset;
+	u32		event_notify;
+#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
+#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
+	u32		clear_event_notify;
 };
 
 #define NUM_BLOCKFETCH_ENTRIES 8
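The new event_notify/clear_event_notify fields plus DOORBELL_CLEAR_EVENTS imply a read-acknowledge handshake: sample the event word, react, then write the same bits back and ring the doorbell. A sketch of that flow under those assumptions (SA5_DOORBELL is the controller doorbell offset from hpsa.h; the surrounding logic is guessed):

	u32 events = readl(&h->cfgtable->event_notify);

	if (events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) {
		/* rescan or requeue as appropriate ... */
		writel(events, &h->cfgtable->clear_event_notify);
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
	}
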
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index bf9eca8..56f8a86 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -589,7 +589,7 @@
 	}
 
 	err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
-			  IRQF_DISABLED, "ibmvstgt", target);
+			  0, "ibmvstgt", target);
 	if (err)
 		goto req_irq_failed;
 
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index bf02821..b1c4d83 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -2015,7 +2015,7 @@
 		write1_io(0, IO_FIFO_READ);	/* start fifo out in read mode */
 		write1_io(0, IO_INTR_MASK);	/* allow all ints */
 		x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
-		if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) {
+		if (request_irq(x, in2000_intr, 0, "in2000", instance)) {
 			printk("in2000_detect: Unable to allocate IRQ.\n");
 			detect_count--;
 			continue;
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 280d5af..e5dae7b 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2931,7 +2931,7 @@
 	shost->base = host->addr;
 	shost->sg_tablesize = TOTAL_SG_ENTRY;
 
-	error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost);
+	error = request_irq(pdev->irq, i91u_intr, IRQF_SHARED, "i91u", shost);
 	if (error < 0) {
 		printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
 		goto out_free_scbs;
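The IRQF_DISABLED removals in this and the neighbouring drivers are behavior-neutral: since the genirq rework (around 2.6.35) all interrupt handlers already run with local interrupts disabled, so the flag had long been a documented no-op scheduled for removal. The change is literally:

	err = request_irq(irq, handler, IRQF_DISABLED | IRQF_SHARED, name, dev);
	/* becomes, with identical runtime behavior: */
	err = request_irq(irq, handler, IRQF_SHARED, name, dev);
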
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3f5b56a..2f8dd8e 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1143,6 +1143,7 @@
 	res->add_to_ml = 0;
 	res->del_from_ml = 0;
 	res->resetting_device = 0;
+	res->reset_occurred = 0;
 	res->sdev = NULL;
 	res->sata_port = NULL;
 
@@ -2367,6 +2368,42 @@
 }
 
 /**
+ * ipr_log_sis64_device_error - Log a sis64 device error.
+ * @ioa_cfg:	ioa config struct
+ * @hostrcb:	hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
+					 struct ipr_hostrcb *hostrcb)
+{
+	struct ipr_hostrcb_type_21_error *error;
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+	error = &hostrcb->hcam.u.error64.u.type_21_error;
+
+	ipr_err("-----Failing Device Information-----\n");
+	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
+		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
+		be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
+	ipr_err("Device Resource Path: %s\n",
+		__ipr_format_res_path(error->res_path,
+				      buffer, sizeof(buffer)));
+	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
+	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
+	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
+	ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
+	ipr_err("SCSI Sense Data:\n");
+	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
+	ipr_err("SCSI Command Descriptor Block:\n");
+	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
+
+	ipr_err("Additional IOA Data:\n");
+	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
+}
+
+/**
  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
  * @ioasc:	IOASC
  *
@@ -2467,6 +2504,9 @@
 	case IPR_HOST_RCB_OVERLAY_ID_20:
 		ipr_log_fabric_error(ioa_cfg, hostrcb);
 		break;
+	case IPR_HOST_RCB_OVERLAY_ID_21:
+		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
+		break;
 	case IPR_HOST_RCB_OVERLAY_ID_23:
 		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
 		break;
@@ -5015,6 +5055,7 @@
 	} else
 		rc = ipr_device_reset(ioa_cfg, res);
 	res->resetting_device = 0;
+	res->reset_occurred = 1;
 
 	LEAVE;
 	return rc ? FAILED : SUCCESS;
@@ -6183,8 +6224,10 @@
 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
 
 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
-		if (ipr_is_gscsi(res))
+		if (ipr_is_gscsi(res) && res->reset_occurred) {
+			res->reset_occurred = 0;
 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
+		}
 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
 		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
 	}
@@ -8641,6 +8684,25 @@
 }
 
 /**
+ * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
+ * @pdev:	PCI device struct
+ *
+ * Description: This routine is called to tell us that the MMIO
+ * access to the IOA has been restored.
+ */
+static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
+{
+	unsigned long flags = 0;
+	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+	if (!ioa_cfg->probe_done)
+		pci_save_state(pdev);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
  * @pdev:	PCI device struct
  *
@@ -8654,7 +8716,8 @@
 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
+	if (ioa_cfg->probe_done)
+		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 }
 
@@ -8672,11 +8735,14 @@
 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-	if (ioa_cfg->needs_warm_reset)
-		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
-	else
-		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
-					IPR_SHUTDOWN_NONE);
+	if (ioa_cfg->probe_done) {
+		if (ioa_cfg->needs_warm_reset)
+			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+		else
+			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
+						IPR_SHUTDOWN_NONE);
+	} else
+		wake_up_all(&ioa_cfg->eeh_wait_q);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 	return PCI_ERS_RESULT_RECOVERED;
 }
@@ -8695,17 +8761,20 @@
 	int i;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
-		ioa_cfg->sdt_state = ABORT_DUMP;
-	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
-	ioa_cfg->in_ioa_bringdown = 1;
-	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
-		spin_lock(&ioa_cfg->hrrq[i]._lock);
-		ioa_cfg->hrrq[i].allow_cmds = 0;
-		spin_unlock(&ioa_cfg->hrrq[i]._lock);
-	}
-	wmb();
-	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+	if (ioa_cfg->probe_done) {
+		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
+			ioa_cfg->sdt_state = ABORT_DUMP;
+		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
+		ioa_cfg->in_ioa_bringdown = 1;
+		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+			spin_lock(&ioa_cfg->hrrq[i]._lock);
+			ioa_cfg->hrrq[i].allow_cmds = 0;
+			spin_unlock(&ioa_cfg->hrrq[i]._lock);
+		}
+		wmb();
+		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+	} else
+		wake_up_all(&ioa_cfg->eeh_wait_q);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 }
 
@@ -8725,7 +8794,7 @@
 	switch (state) {
 	case pci_channel_io_frozen:
 		ipr_pci_frozen(pdev);
-		return PCI_ERS_RESULT_NEED_RESET;
+		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_perm_failure:
 		ipr_pci_perm_failure(pdev);
 		return PCI_ERS_RESULT_DISCONNECT;
@@ -8755,6 +8824,7 @@
 	ENTER;
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
+	ioa_cfg->probe_done = 1;
 	if (ioa_cfg->needs_hard_reset) {
 		ioa_cfg->needs_hard_reset = 0;
 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
@@ -9030,16 +9100,6 @@
 	if (!ioa_cfg->vpd_cbs)
 		goto out_free_res_entries;
 
-	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
-		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
-		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
-		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
-		if (i == 0)
-			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
-		else
-			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
-	}
-
 	if (ipr_alloc_cmd_blks(ioa_cfg))
 		goto out_free_vpd_cbs;
 
@@ -9140,61 +9200,18 @@
 }
 
 /**
- * ipr_init_ioa_cfg - Initialize IOA config struct
+ * ipr_init_regs - Initialize IOA registers
  * @ioa_cfg:	ioa config struct
- * @host:		scsi host struct
- * @pdev:		PCI dev struct
  *
  * Return value:
- * 	none
+ *	none
  **/
-static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
-			     struct Scsi_Host *host, struct pci_dev *pdev)
+static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
 {
 	const struct ipr_interrupt_offsets *p;
 	struct ipr_interrupts *t;
 	void __iomem *base;
 
-	ioa_cfg->host = host;
-	ioa_cfg->pdev = pdev;
-	ioa_cfg->log_level = ipr_log_level;
-	ioa_cfg->doorbell = IPR_DOORBELL;
-	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
-	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
-	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
-	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
-	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
-	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
-
-	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
-	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
-	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
-	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
-	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
-	init_waitqueue_head(&ioa_cfg->reset_wait_q);
-	init_waitqueue_head(&ioa_cfg->msi_wait_q);
-	ioa_cfg->sdt_state = INACTIVE;
-
-	ipr_initialize_bus_attr(ioa_cfg);
-	ioa_cfg->max_devs_supported = ipr_max_devs;
-
-	if (ioa_cfg->sis64) {
-		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
-		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
-		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
-			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
-	} else {
-		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
-		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
-		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
-			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
-	}
-	host->max_channel = IPR_MAX_BUS_TO_SCAN;
-	host->unique_id = host->host_no;
-	host->max_cmd_len = IPR_MAX_CDB_LEN;
-	host->can_queue = ioa_cfg->max_cmds;
-	pci_set_drvdata(pdev, ioa_cfg);
-
 	p = &ioa_cfg->chip_cfg->regs;
 	t = &ioa_cfg->regs;
 	base = ioa_cfg->hdw_dma_regs;
@@ -9225,6 +9242,79 @@
 }
 
 /**
+ * ipr_init_ioa_cfg - Initialize IOA config struct
+ * @ioa_cfg:	ioa config struct
+ * @host:		scsi host struct
+ * @pdev:		PCI dev struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
+			     struct Scsi_Host *host, struct pci_dev *pdev)
+{
+	int i;
+
+	ioa_cfg->host = host;
+	ioa_cfg->pdev = pdev;
+	ioa_cfg->log_level = ipr_log_level;
+	ioa_cfg->doorbell = IPR_DOORBELL;
+	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
+	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
+	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
+	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
+	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
+	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
+
+	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
+	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
+	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
+	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
+	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
+	init_waitqueue_head(&ioa_cfg->reset_wait_q);
+	init_waitqueue_head(&ioa_cfg->msi_wait_q);
+	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
+	ioa_cfg->sdt_state = INACTIVE;
+
+	ipr_initialize_bus_attr(ioa_cfg);
+	ioa_cfg->max_devs_supported = ipr_max_devs;
+
+	if (ioa_cfg->sis64) {
+		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
+		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
+		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
+			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
+		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
+					   + ((sizeof(struct ipr_config_table_entry64)
+					       * ioa_cfg->max_devs_supported)));
+	} else {
+		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
+		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
+		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
+			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
+		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
+					   + ((sizeof(struct ipr_config_table_entry)
+					       * ioa_cfg->max_devs_supported)));
+	}
+
+	host->max_channel = IPR_MAX_BUS_TO_SCAN;
+	host->unique_id = host->host_no;
+	host->max_cmd_len = IPR_MAX_CDB_LEN;
+	host->can_queue = ioa_cfg->max_cmds;
+	pci_set_drvdata(pdev, ioa_cfg);
+
+	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
+		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
+		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
+		if (i == 0)
+			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
+		else
+			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
+	}
+}
+
+/**
  * ipr_get_chip_info - Find adapter chip information
  * @dev_id:		PCI device id struct
  *
@@ -9243,54 +9333,63 @@
 	return NULL;
 }
 
+/**
+ * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
+ *						during probe time
+ * @ioa_cfg:	ioa config struct
+ *
+ * Return value:
+ * 	None
+ **/
+static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
+{
+	struct pci_dev *pdev = ioa_cfg->pdev;
+
+	if (pci_channel_offline(pdev)) {
+		wait_event_timeout(ioa_cfg->eeh_wait_q,
+				   !pci_channel_offline(pdev),
+				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
+		pci_restore_state(pdev);
+	}
+}
+
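ipr_wait_for_pci_err_recovery() above leans on wait_event_timeout(): it sleeps until the condition becomes true or the jiffies timeout expires, returning 0 on timeout and the remaining time otherwise. Note the function proceeds either way, so a stuck channel costs at most IPR_PCI_ERROR_RECOVERY_TIMEOUT (120s) per wait. Return-value convention, sketched (the warning is an assumed reaction, not ipr's):

	long left = wait_event_timeout(ioa_cfg->eeh_wait_q,
				       !pci_channel_offline(pdev),
				       IPR_PCI_ERROR_RECOVERY_TIMEOUT);
	if (!left)	/* timed out; channel still offline */
		dev_warn(&pdev->dev, "EEH recovery did not complete\n");
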
 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
 {
 	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
-	int i, err, vectors;
+	int i, vectors;
 
 	for (i = 0; i < ARRAY_SIZE(entries); ++i)
 		entries[i].entry = i;
 
-	vectors = ipr_number_of_msix;
-
-	while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
-			vectors = err;
-
-	if (err < 0) {
-		pci_disable_msix(ioa_cfg->pdev);
-		return err;
+	vectors = pci_enable_msix_range(ioa_cfg->pdev,
+					entries, 1, ipr_number_of_msix);
+	if (vectors < 0) {
+		ipr_wait_for_pci_err_recovery(ioa_cfg);
+		return vectors;
 	}
 
-	if (!err) {
-		for (i = 0; i < vectors; i++)
-			ioa_cfg->vectors_info[i].vec = entries[i].vector;
-		ioa_cfg->nvectors = vectors;
-	}
+	for (i = 0; i < vectors; i++)
+		ioa_cfg->vectors_info[i].vec = entries[i].vector;
+	ioa_cfg->nvectors = vectors;
 
-	return err;
+	return 0;
 }
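This conversion replaces the old negotiate-by-retry loop (call pci_enable_msix(), receive a smaller feasible count as a positive return, try again) with pci_enable_msix_range(), which performs the negotiation internally: it returns the number of vectors actually allocated, somewhere in [minvec, maxvec], or a negative errno if even minvec cannot be satisfied. The usage pattern:

	int n = pci_enable_msix_range(pdev, entries, 1, want);

	if (n < 0)
		return n;	/* not even one vector was available */
	/* entries[0..n-1].vector now hold valid IRQ numbers */

The pci_enable_msi_range() change below follows the same convention, minus the entries array.
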
 
 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
 {
-	int i, err, vectors;
+	int i, vectors;
 
-	vectors = ipr_number_of_msix;
-
-	while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
-			vectors = err;
-
-	if (err < 0) {
-		pci_disable_msi(ioa_cfg->pdev);
-		return err;
+	vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
+	if (vectors < 0) {
+		ipr_wait_for_pci_err_recovery(ioa_cfg);
+		return vectors;
 	}
 
-	if (!err) {
-		for (i = 0; i < vectors; i++)
-			ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
-		ioa_cfg->nvectors = vectors;
-	}
+	for (i = 0; i < vectors; i++)
+		ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+	ioa_cfg->nvectors = vectors;
 
-	return err;
+	return 0;
 }
 
 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
@@ -9355,7 +9454,7 @@
  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
  * @pdev:		PCI device struct
  *
- * Description: The return value from pci_enable_msi() can not always be
+ * Description: The return value from pci_enable_msi_range() cannot always be
  * trusted.  This routine sets up and initiates a test interrupt to determine
  * if the interrupt is received via the ipr_test_intr() service routine.
  * If the test fails, the driver will fall back to LSI.
@@ -9434,19 +9533,13 @@
 
 	ENTER;
 
-	if ((rc = pci_enable_device(pdev))) {
-		dev_err(&pdev->dev, "Cannot enable adapter\n");
-		goto out;
-	}
-
 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
-
 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
 
 	if (!host) {
 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
 		rc = -ENOMEM;
-		goto out_disable;
+		goto out;
 	}
 
 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
@@ -9476,6 +9569,8 @@
 
 	ioa_cfg->revid = pdev->revision;
 
+	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
+
 	ipr_regs_pci = pci_resource_start(pdev, 0);
 
 	rc = pci_request_regions(pdev, IPR_NAME);
@@ -9485,22 +9580,35 @@
 		goto out_scsi_host_put;
 	}
 
+	rc = pci_enable_device(pdev);
+
+	if (rc || pci_channel_offline(pdev)) {
+		if (pci_channel_offline(pdev)) {
+			ipr_wait_for_pci_err_recovery(ioa_cfg);
+			rc = pci_enable_device(pdev);
+		}
+
+		if (rc) {
+			dev_err(&pdev->dev, "Cannot enable adapter\n");
+			ipr_wait_for_pci_err_recovery(ioa_cfg);
+			goto out_release_regions;
+		}
+	}
+
 	ipr_regs = pci_ioremap_bar(pdev, 0);
 
 	if (!ipr_regs) {
 		dev_err(&pdev->dev,
 			"Couldn't map memory range of registers\n");
 		rc = -ENOMEM;
-		goto out_release_regions;
+		goto out_disable;
 	}
 
 	ioa_cfg->hdw_dma_regs = ipr_regs;
 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
 
-	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
-
-	pci_set_master(pdev);
+	ipr_init_regs(ioa_cfg);
 
 	if (ioa_cfg->sis64) {
 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -9508,7 +9616,6 @@
 			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
 			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		}
-
 	} else
 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 
@@ -9522,10 +9629,15 @@
 
 	if (rc != PCIBIOS_SUCCESSFUL) {
 		dev_err(&pdev->dev, "Write of cache line size failed\n");
+		ipr_wait_for_pci_err_recovery(ioa_cfg);
 		rc = -EIO;
 		goto cleanup_nomem;
 	}
 
+	/* Issue MMIO read to ensure card is not in EEH */
+	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
+	ipr_wait_for_pci_err_recovery(ioa_cfg);
+
 	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
 		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
 			IPR_MAX_MSIX_VECTORS);
@@ -9544,10 +9656,22 @@
 		dev_info(&pdev->dev, "Cannot enable MSI.\n");
 	}
 
+	pci_set_master(pdev);
+
+	if (pci_channel_offline(pdev)) {
+		ipr_wait_for_pci_err_recovery(ioa_cfg);
+		pci_set_master(pdev);
+		if (pci_channel_offline(pdev)) {
+			rc = -EIO;
+			goto out_msi_disable;
+		}
+	}
+
 	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
 	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
 		rc = ipr_test_msi(ioa_cfg, pdev);
 		if (rc == -EOPNOTSUPP) {
+			ipr_wait_for_pci_err_recovery(ioa_cfg);
 			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
 				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
 				pci_disable_msi(pdev);
@@ -9577,30 +9701,12 @@
 				(unsigned int)num_online_cpus(),
 				(unsigned int)IPR_MAX_HRRQ_NUM);
 
-	/* Save away PCI config space for use following IOA reset */
-	rc = pci_save_state(pdev);
-
-	if (rc != PCIBIOS_SUCCESSFUL) {
-		dev_err(&pdev->dev, "Failed to save PCI config space\n");
-		rc = -EIO;
-		goto out_msi_disable;
-	}
-
 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
 		goto out_msi_disable;
 
 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
 		goto out_msi_disable;
 
-	if (ioa_cfg->sis64)
-		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
-				+ ((sizeof(struct ipr_config_table_entry64)
-				* ioa_cfg->max_devs_supported)));
-	else
-		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
-				+ ((sizeof(struct ipr_config_table_entry)
-				* ioa_cfg->max_devs_supported)));
-
 	rc = ipr_alloc_mem(ioa_cfg);
 	if (rc < 0) {
 		dev_err(&pdev->dev,
@@ -9608,6 +9714,15 @@
 		goto out_msi_disable;
 	}
 
+	/* Save away PCI config space for use following IOA reset */
+	rc = pci_save_state(pdev);
+
+	if (rc != PCIBIOS_SUCCESSFUL) {
+		dev_err(&pdev->dev, "Failed to save PCI config space\n");
+		rc = -EIO;
+		goto cleanup_nolog;
+	}
+
 	/*
 	 * If HRRQ updated interrupt is not masked, or reset alert is set,
 	 * the card is in an unknown state and needs a hard reset
@@ -9664,18 +9779,19 @@
 cleanup_nolog:
 	ipr_free_mem(ioa_cfg);
 out_msi_disable:
+	ipr_wait_for_pci_err_recovery(ioa_cfg);
 	if (ioa_cfg->intr_flag == IPR_USE_MSI)
 		pci_disable_msi(pdev);
 	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
 		pci_disable_msix(pdev);
 cleanup_nomem:
 	iounmap(ipr_regs);
+out_disable:
+	pci_disable_device(pdev);
 out_release_regions:
 	pci_release_regions(pdev);
 out_scsi_host_put:
 	scsi_host_put(host);
-out_disable:
-	pci_disable_device(pdev);
 	goto out;
 }
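The relocated error labels keep the probe unwind in exact reverse order of acquisition, which moving pci_enable_device() after pci_request_regions() would otherwise have broken. The canonical ladder, with hypothetical resources:

	if (get_a())
		goto out;
	if (get_b())
		goto err_a;
	if (get_c())
		goto err_b;
	return 0;
err_b:
	put_b();
err_a:
	put_a();
out:
	return rc;
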
 
@@ -9994,6 +10110,8 @@
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
@@ -10005,12 +10123,19 @@
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
 
 static const struct pci_error_handlers ipr_err_handler = {
 	.error_detected = ipr_pci_error_detected,
+	.mmio_enabled = ipr_pci_mmio_enabled,
 	.slot_reset = ipr_pci_slot_reset,
 };
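With .mmio_enabled wired up and ipr_pci_error_detected() now returning PCI_ERS_RESULT_CAN_RECOVER for a frozen channel, the EEH/AER core walks the standard recovery sequence (per Documentation/PCI/pci-error-recovery.txt):

	/*
	 * error_detected()  -> PCI_ERS_RESULT_CAN_RECOVER
	 * mmio_enabled()    -> PCI_ERS_RESULT_NEED_RESET
	 *                      (ipr saves config space here when the error
	 *                       raced with probe, i.e. !probe_done)
	 * slot_reset()      -> PCI_ERS_RESULT_RECOVERED
	 */
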
 
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 9ce38a2..31ed126 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -101,12 +101,16 @@
 #define IPR_SUBS_DEV_ID_57D7    0x03FF
 #define IPR_SUBS_DEV_ID_57D8    0x03FE
 #define IPR_SUBS_DEV_ID_57D9    0x046D
+#define IPR_SUBS_DEV_ID_57DA    0x04CA
 #define IPR_SUBS_DEV_ID_57EB    0x0474
 #define IPR_SUBS_DEV_ID_57EC    0x0475
 #define IPR_SUBS_DEV_ID_57ED    0x0499
 #define IPR_SUBS_DEV_ID_57EE    0x049A
 #define IPR_SUBS_DEV_ID_57EF    0x049B
 #define IPR_SUBS_DEV_ID_57F0    0x049C
+#define IPR_SUBS_DEV_ID_2CCA	0x04C7
+#define IPR_SUBS_DEV_ID_2CD2	0x04C8
+#define IPR_SUBS_DEV_ID_2CCD	0x04C9
 #define IPR_NAME				"ipr"
 
 /*
@@ -230,6 +234,7 @@
 #define IPR_WAIT_FOR_RESET_TIMEOUT		(2 * HZ)
 #define IPR_CHECK_FOR_RESET_TIMEOUT		(HZ / 10)
 #define IPR_WAIT_FOR_BIST_TIMEOUT		(2 * HZ)
+#define IPR_PCI_ERROR_RECOVERY_TIMEOUT	(120 * HZ)
 #define IPR_PCI_RESET_TIMEOUT			(HZ / 2)
 #define IPR_SIS32_DUMP_TIMEOUT			(15 * HZ)
 #define IPR_SIS64_DUMP_TIMEOUT			(40 * HZ)
@@ -897,6 +902,18 @@
 	__be32 ioa_data[236];
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hostrcb_type_21_error {
+	__be32 wwn[4];
+	u8 res_path[8];
+	u8 primary_problem_desc[32];
+	u8 second_problem_desc[32];
+	__be32 sense_data[8];
+	__be32 cdb[4];
+	__be32 residual_trans_length;
+	__be32 length_of_error;
+	__be32 ioa_data[236];
+}__attribute__((packed, aligned (4)));
+
 struct ipr_hostrcb_type_02_error {
 	struct ipr_vpd ioa_vpd;
 	struct ipr_vpd cfc_vpd;
@@ -1126,6 +1143,7 @@
 		struct ipr_hostrcb_type_ff_error type_ff_error;
 		struct ipr_hostrcb_type_12_error type_12_error;
 		struct ipr_hostrcb_type_17_error type_17_error;
+		struct ipr_hostrcb_type_21_error type_21_error;
 		struct ipr_hostrcb_type_23_error type_23_error;
 		struct ipr_hostrcb_type_24_error type_24_error;
 		struct ipr_hostrcb_type_30_error type_30_error;
@@ -1169,6 +1187,7 @@
 #define IPR_HOST_RCB_OVERLAY_ID_16				0x16
 #define IPR_HOST_RCB_OVERLAY_ID_17				0x17
 #define IPR_HOST_RCB_OVERLAY_ID_20				0x20
+#define IPR_HOST_RCB_OVERLAY_ID_21				0x21
 #define IPR_HOST_RCB_OVERLAY_ID_23				0x23
 #define IPR_HOST_RCB_OVERLAY_ID_24				0x24
 #define IPR_HOST_RCB_OVERLAY_ID_26				0x26
@@ -1252,6 +1271,7 @@
 	u8 add_to_ml:1;
 	u8 del_from_ml:1;
 	u8 resetting_device:1;
+	u8 reset_occurred:1;
 
 	u32 bus;		/* AKA channel */
 	u32 target;		/* AKA id */
@@ -1441,6 +1461,7 @@
 	u8 dump_timeout:1;
 	u8 cfg_locked:1;
 	u8 clear_isr:1;
+	u8 probe_done:1;
 
 	u8 revid;
 
@@ -1519,6 +1540,7 @@
 
 	wait_queue_head_t reset_wait_q;
 	wait_queue_head_t msi_wait_q;
+	wait_queue_head_t eeh_wait_q;
 
 	struct ipr_dump *dump;
 	enum ipr_sdt_state sdt_state;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index d25d0d8..695b34e 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -66,7 +66,7 @@
 #include "probe_roms.h"
 
 #define MAJ 1
-#define MIN 1
+#define MIN 2
 #define BUILD 0
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 	__stringify(BUILD)
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 99d2930..56e3809 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2723,13 +2723,9 @@
 	memcpy(resp->ending_fis, fis, sizeof(*fis));
 	ts->buf_valid_size = sizeof(*resp);
 
-	/* If the device fault bit is set in the status register, then
-	 * set the sense data and return.
-	 */
-	if (fis->status & ATA_DF)
+	/* If an error is flagged let libata decode the fis */
+	if (ac_err_mask(fis->status))
 		ts->stat = SAS_PROTO_RESPONSE;
-	else if (fis->status & ATA_ERR)
-		ts->stat = SAM_STAT_CHECK_CONDITION;
 	else
 		ts->stat = SAM_STAT_GOOD;
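The isci change above folds the two separate status tests (ATA_DF, then ATA_ERR) into a single ac_err_mask() call, which additionally catches a stuck BSY/DRQ handshake. For reference, ac_err_mask() in include/linux/libata.h of this vintage reads roughly:

	static inline unsigned int ac_err_mask(u8 status)
	{
		if (status & (ATA_BUSY | ATA_DRQ))
			return AC_ERR_HSM;	/* handshake state machine */
		if (status & (ATA_ERR | ATA_DF))
			return AC_ERR_DEV;	/* device-reported error */
		return 0;
	}
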
 
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 14c1c8f..680bf6f 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -490,5 +490,6 @@
 		iscsi_boot_remove_kobj(boot_kobj);
 
 	kset_unregister(boot_kset->kset);
+	kfree(boot_kset);
 }
 EXPORT_SYMBOL_GPL(iscsi_boot_destroy_kset);
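The added kfree() plugs a leak: kset_unregister() releases the embedded struct kset through its kobject, but struct iscsi_boot_kset is a separately allocated wrapper with no reference counting of its own. The allocation/teardown pairing, simplified from the create path (shape assumed):

	boot_kset = kzalloc(sizeof(*boot_kset), GFP_KERNEL);	/* wrapper */
	boot_kset->kset = kset_create_and_add(set_name, NULL, firmware_kobj);
	/* ... */
	kset_unregister(boot_kset->kset);	/* kset frees itself */
	kfree(boot_kset);			/* wrapper is ours to free */
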
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index add6d15..bfb6d07 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -593,9 +593,9 @@
 	iscsi_sw_tcp_conn_restore_callbacks(conn);
 	sock_put(sock->sk);
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	tcp_sw_conn->sock = NULL;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	sockfd_put(sock);
 }
 
@@ -663,10 +663,10 @@
 	if (err)
 		goto free_socket;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	/* bind iSCSI connection and socket */
 	tcp_sw_conn->sock = sock;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	/* setup Socket parameters */
 	sk = sock->sk;
@@ -726,14 +726,14 @@
 	switch(param) {
 	case ISCSI_PARAM_CONN_PORT:
 	case ISCSI_PARAM_CONN_ADDRESS:
-		spin_lock_bh(&conn->session->lock);
+		spin_lock_bh(&conn->session->frwd_lock);
 		if (!tcp_sw_conn || !tcp_sw_conn->sock) {
-			spin_unlock_bh(&conn->session->lock);
+			spin_unlock_bh(&conn->session->frwd_lock);
 			return -ENOTCONN;
 		}
 		rc = kernel_getpeername(tcp_sw_conn->sock,
 					(struct sockaddr *)&addr, &len);
-		spin_unlock_bh(&conn->session->lock);
+		spin_unlock_bh(&conn->session->frwd_lock);
 		if (rc)
 			return rc;
 
@@ -759,23 +759,26 @@
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_IPADDRESS:
-		spin_lock_bh(&session->lock);
+		if (!session)
+			return -ENOTCONN;
+
+		spin_lock_bh(&session->frwd_lock);
 		conn = session->leadconn;
 		if (!conn) {
-			spin_unlock_bh(&session->lock);
+			spin_unlock_bh(&session->frwd_lock);
 			return -ENOTCONN;
 		}
 		tcp_conn = conn->dd_data;
 
 		tcp_sw_conn = tcp_conn->dd_data;
 		if (!tcp_sw_conn->sock) {
-			spin_unlock_bh(&session->lock);
+			spin_unlock_bh(&session->frwd_lock);
 			return -ENOTCONN;
 		}
 
 		rc = kernel_getsockname(tcp_sw_conn->sock,
 					(struct sockaddr *)&addr, &len);
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		if (rc)
 			return rc;
 
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 4046241..5b8605c 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -110,16 +110,8 @@
 		session->exp_cmdsn = exp_cmdsn;
 
 	if (max_cmdsn != session->max_cmdsn &&
-	    !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
+	    !iscsi_sna_lt(max_cmdsn, session->max_cmdsn))
 		session->max_cmdsn = max_cmdsn;
-		/*
-		 * if the window closed with IO queued, then kick the
-		 * xmit thread
-		 */
-		if (!list_empty(&session->leadconn->cmdqueue) ||
-		    !list_empty(&session->leadconn->mgmtqueue))
-			iscsi_conn_queue_work(session->leadconn);
-	}
 }
 
 void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
@@ -481,7 +473,7 @@
  * iscsi_free_task - free a task
  * @task: iscsi cmd task
  *
- * Must be called with session lock.
+ * Must be called with session back_lock.
  * This function returns the scsi command to scsi-ml or cleans
  * up mgmt tasks then returns the task to the pool.
  */
@@ -535,9 +527,10 @@
 {
 	struct iscsi_session *session = task->conn->session;
 
-	spin_lock_bh(&session->lock);
+	/* regular RX path uses back_lock */
+	spin_lock_bh(&session->back_lock);
 	__iscsi_put_task(task);
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 }
 EXPORT_SYMBOL_GPL(iscsi_put_task);
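The wholesale session->lock renames that follow implement a two-lock split: frwd_lock protects the forward (submission) side -- queuecommand, TMF issue, the xmit worker -- while back_lock protects the back (completion) side, chiefly task refcounting and PDU completion in RX context. Completions thus stop contending with submission; the price is a drop-and-retake dance wherever the RX path must itself send a PDU (visible in the nopout hunks below). The division of labour, condensed:

	/* forward path: owns queueing and connection state */
	spin_lock_bh(&session->frwd_lock);
	/* iscsi_queuecommand(), TMFs, iscsi_data_xmit(), EH ... */
	spin_unlock_bh(&session->frwd_lock);

	/* back path: owns task lifetime */
	spin_lock_bh(&session->back_lock);
	__iscsi_put_task(task);		/* completion-side refcounting */
	spin_unlock_bh(&session->back_lock);
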
 
@@ -546,7 +539,7 @@
  * @task: iscsi cmd task
  * @state: state to complete task with
  *
- * Must be called with session lock.
+ * Must be called with session back_lock.
  */
 static void iscsi_complete_task(struct iscsi_task *task, int state)
 {
@@ -585,7 +578,7 @@
  * This is used when drivers do not need or cannot perform
  * lower level pdu processing.
  *
- * Called with session lock
+ * Called with session back_lock
  */
 void iscsi_complete_scsi_task(struct iscsi_task *task,
 			      uint32_t exp_cmdsn, uint32_t max_cmdsn)
@@ -602,7 +595,7 @@
 
 
 /*
- * session lock must be held and if not called for a task that is
+ * session back_lock must be held and if not called for a task that is
  * still pending or from the xmit thread, then the xmit thread must
  * be suspended.
  */
@@ -642,7 +635,10 @@
 		scsi_in(sc)->resid = scsi_in(sc)->length;
 	}
 
+	/* regular RX path uses back_lock */
+	spin_lock_bh(&conn->session->back_lock);
 	iscsi_complete_task(task, state);
+	spin_unlock_bh(&conn->session->back_lock);
 }
 
 static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -780,7 +776,10 @@
 	return task;
 
 free_task:
+	/* regular RX path uses back_lock */
+	spin_lock_bh(&session->back_lock);
 	__iscsi_put_task(task);
+	spin_unlock_bh(&session->back_lock);
 	return NULL;
 }
 
@@ -791,10 +790,10 @@
 	struct iscsi_session *session = conn->session;
 	int err = 0;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
 		err = -EPERM;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	return err;
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
@@ -1013,13 +1012,13 @@
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "pdu (op 0x%x itt 0x%x) rejected "
 				  "due to DataDigest error.\n",
-				  rejected_pdu.itt, opcode);
+				  opcode, rejected_pdu.itt);
 		break;
 	case ISCSI_REASON_IMM_CMD_REJECT:
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "pdu (op 0x%x itt 0x%x) rejected. Too many "
 				  "immediate commands.\n",
-				  rejected_pdu.itt, opcode);
+				  opcode, rejected_pdu.itt);
 		/*
 		 * We only send one TMF at a time so if the target could not
 		 * handle it, then it should get fixed (RFC mandates that
@@ -1031,14 +1030,19 @@
 		if (opcode != ISCSI_OP_NOOP_OUT)
 			return 0;
 
-		 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG))
+		 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
 			/*
 			 * nop-out in response to target's nop-out rejected.
 			 * Just resend.
 			 */
+			/* In RX path we are under back lock */
+			spin_unlock(&conn->session->back_lock);
+			spin_lock(&conn->session->frwd_lock);
 			iscsi_send_nopout(conn,
 					  (struct iscsi_nopin*)&rejected_pdu);
-		else {
+			spin_unlock(&conn->session->frwd_lock);
+			spin_lock(&conn->session->back_lock);
+		} else {
 			struct iscsi_task *task;
 			/*
 			 * Our nop as ping got dropped. We know the target
@@ -1059,8 +1063,8 @@
 	default:
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "pdu (op 0x%x itt 0x%x) rejected. Reason "
-				  "code 0x%x\n", rejected_pdu.itt,
-				  rejected_pdu.opcode, reject->reason);
+				  "code 0x%x\n", rejected_pdu.opcode,
+				  rejected_pdu.itt, reject->reason);
 		break;
 	}
 	return rc;
@@ -1074,7 +1078,7 @@
  * This should be used for mgmt tasks like login and nops, or if
  * the LDD's itt space does not include the session age.
  *
- * The session lock must be held.
+ * The session back_lock must be held.
  */
 struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
 {
@@ -1103,7 +1107,7 @@
  * @datalen: len of data buffer
  *
  * Completes pdu processing by freeing any resources allocated at
- * queuecommand or send generic. session lock must be held and verify
+ * queuecommand or send generic. session back_lock must be held and verify
  * itt must have been called.
  */
 int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -1140,7 +1144,12 @@
 			if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
 				break;
 
+			/* In RX path we are under back lock */
+			spin_unlock(&session->back_lock);
+			spin_lock(&session->frwd_lock);
 			iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
+			spin_unlock(&session->frwd_lock);
+			spin_lock(&session->back_lock);
 			break;
 		case ISCSI_OP_REJECT:
 			rc = iscsi_handle_reject(conn, hdr, data, datalen);
@@ -1247,9 +1256,9 @@
 {
 	int rc;
 
-	spin_lock(&conn->session->lock);
+	spin_lock(&conn->session->back_lock);
 	rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
-	spin_unlock(&conn->session->lock);
+	spin_unlock(&conn->session->back_lock);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
@@ -1293,7 +1302,7 @@
  *
  * This should be used for cmd tasks.
  *
- * The session lock must be held.
+ * The session back_lock must be held.
  */
 struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
 {
@@ -1323,15 +1332,15 @@
 	struct iscsi_conn *conn;
 	struct device *dev;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	conn = session->leadconn;
 	if (session->state == ISCSI_STATE_TERMINATE || !conn) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return;
 	}
 
 	dev = get_device(&conn->cls_conn->dev);
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	if (!dev)
 	        return;
 	/*
@@ -1351,15 +1360,15 @@
 {
 	struct iscsi_session *session = conn->session;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (session->state == ISCSI_STATE_FAILED) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return;
 	}
 
 	if (conn->stop_stage == 0)
 		session->state = ISCSI_STATE_FAILED;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
@@ -1393,15 +1402,18 @@
 		return -ENODATA;
 
 	__iscsi_get_task(task);
-	spin_unlock_bh(&conn->session->lock);
+	spin_unlock_bh(&conn->session->frwd_lock);
 	rc = conn->session->tt->xmit_task(task);
-	spin_lock_bh(&conn->session->lock);
+	spin_lock_bh(&conn->session->frwd_lock);
 	if (!rc) {
 		/* done with this task */
 		task->last_xfer = jiffies;
 		conn->task = NULL;
 	}
+	/* regular RX path uses back_lock */
+	spin_lock_bh(&conn->session->back_lock);
 	__iscsi_put_task(task);
+	spin_unlock_bh(&conn->session->back_lock);
 	return rc;
 }
 
@@ -1410,7 +1422,7 @@
  * @task: task to requeue
  *
  * LLDs that need to run a task from the session workqueue should call
- * this. The session lock must be held. This should only be called
+ * this. The session frwd_lock must be held. This should only be called
  * by software drivers.
  */
 void iscsi_requeue_task(struct iscsi_task *task)
@@ -1441,10 +1453,10 @@
 	struct iscsi_task *task;
 	int rc = 0;
 
-	spin_lock_bh(&conn->session->lock);
+	spin_lock_bh(&conn->session->frwd_lock);
 	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
 		ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
-		spin_unlock_bh(&conn->session->lock);
+		spin_unlock_bh(&conn->session->frwd_lock);
 		return -ENODATA;
 	}
 
@@ -1465,7 +1477,10 @@
 					 struct iscsi_task, running);
 		list_del_init(&conn->task->running);
 		if (iscsi_prep_mgmt_task(conn, conn->task)) {
+			/* regular RX path uses back_lock */
+			spin_lock_bh(&conn->session->back_lock);
 			__iscsi_put_task(conn->task);
+			spin_unlock_bh(&conn->session->back_lock);
 			conn->task = NULL;
 			continue;
 		}
@@ -1527,11 +1542,11 @@
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
-	spin_unlock_bh(&conn->session->lock);
+	spin_unlock_bh(&conn->session->frwd_lock);
 	return -ENODATA;
 
 done:
-	spin_unlock_bh(&conn->session->lock);
+	spin_unlock_bh(&conn->session->frwd_lock);
 	return rc;
 }
 
@@ -1600,7 +1615,7 @@
 
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 
 	reason = iscsi_session_chkready(cls_session);
 	if (reason) {
@@ -1686,13 +1701,13 @@
 	}
 
 	session->queued_cmdsn++;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	return 0;
 
 prepd_reject:
 	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
 reject:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
 			  sc->cmnd[0], reason);
 	return SCSI_MLQUEUE_TARGET_BUSY;
@@ -1700,7 +1715,7 @@
 prepd_fault:
 	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
 fault:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
 			  sc->cmnd[0], reason);
 	if (!scsi_bidi_cmnd(sc))
@@ -1748,14 +1763,14 @@
 	struct iscsi_conn *conn = (struct iscsi_conn *)data;
 	struct iscsi_session *session = conn->session;
 
-	spin_lock(&session->lock);
+	spin_lock(&session->frwd_lock);
 	if (conn->tmf_state == TMF_QUEUED) {
 		conn->tmf_state = TMF_TIMEDOUT;
 		ISCSI_DBG_EH(session, "tmf timedout\n");
 		/* unblock eh_abort() */
 		wake_up(&conn->ehwait);
 	}
-	spin_unlock(&session->lock);
+	spin_unlock(&session->frwd_lock);
 }
 
 static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
@@ -1768,10 +1783,10 @@
 	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
 				      NULL, 0);
 	if (!task) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-		spin_lock_bh(&session->lock);
+		spin_lock_bh(&session->frwd_lock);
 		return -EPERM;
 	}
 	conn->tmfcmd_pdus_cnt++;
@@ -1781,7 +1796,7 @@
 	add_timer(&conn->tmf_timer);
 	ISCSI_DBG_EH(session, "tmf set timeout\n");
 
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	mutex_unlock(&session->eh_mutex);
 
 	/*
@@ -1800,7 +1815,7 @@
 	del_timer_sync(&conn->tmf_timer);
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	/* if the session drops it will clean up the task */
 	if (age != session->age ||
 	    session->state != ISCSI_STATE_LOGGED_IN)
@@ -1837,7 +1852,7 @@
  * iscsi_suspend_queue - suspend iscsi_queuecommand
  * @conn: iscsi conn to stop queueing IO on
  *
- * This grabs the session lock to make sure no one is in
+ * This grabs the session frwd_lock to make sure no one is in
  * xmit_task/queuecommand, and then sets suspend to prevent
  * new commands from being queued. This only needs to be called
  * by offload drivers that need to sync a path like ep disconnect
@@ -1846,9 +1861,9 @@
  */
 void iscsi_suspend_queue(struct iscsi_conn *conn)
 {
-	spin_lock_bh(&conn->session->lock);
+	spin_lock_bh(&conn->session->frwd_lock);
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-	spin_unlock_bh(&conn->session->lock);
+	spin_unlock_bh(&conn->session->frwd_lock);
 }
 EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
 
@@ -1907,7 +1922,7 @@
 
 	ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
 
-	spin_lock(&session->lock);
+	spin_lock(&session->frwd_lock);
 	task = (struct iscsi_task *)sc->SCp.ptr;
 	if (!task) {
 		/*
@@ -2021,7 +2036,7 @@
 done:
 	if (task)
 		task->last_timeout = jiffies;
-	spin_unlock(&session->lock);
+	spin_unlock(&session->frwd_lock);
 	ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
 		     "timer reset" : "nh");
 	return rc;
@@ -2033,7 +2048,7 @@
 	struct iscsi_session *session = conn->session;
 	unsigned long recv_timeout, next_timeout = 0, last_recv;
 
-	spin_lock(&session->lock);
+	spin_lock(&session->frwd_lock);
 	if (session->state != ISCSI_STATE_LOGGED_IN)
 		goto done;
 
@@ -2050,7 +2065,7 @@
 				  "last ping %lu, now %lu\n",
 				  conn->ping_timeout, conn->recv_timeout,
 				  last_recv, conn->last_ping, jiffies);
-		spin_unlock(&session->lock);
+		spin_unlock(&session->frwd_lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 		return;
 	}
@@ -2066,7 +2081,7 @@
 	ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
 	mod_timer(&conn->transport_timer, next_timeout);
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->frwd_lock);
 }
 
 static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
@@ -2096,7 +2111,7 @@
 	ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	/*
 	 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
 	 * got the command.
@@ -2104,7 +2119,7 @@
 	if (!sc->SCp.ptr) {
 		ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
 				      "it completed.\n");
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		mutex_unlock(&session->eh_mutex);
 		return SUCCESS;
 	}
@@ -2115,7 +2130,7 @@
 	 */
 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
 	    sc->SCp.phase != session->age) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		mutex_unlock(&session->eh_mutex);
 		ISCSI_DBG_EH(session, "failing abort due to dropped "
 				  "session.\n");
@@ -2156,7 +2171,7 @@
 
 	switch (conn->tmf_state) {
 	case TMF_SUCCESS:
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		/*
 		 * stop tx side in case the target had sent an abort rsp but
 		 * the initiator was still writing out data.
@@ -2167,15 +2182,15 @@
 		 * good and have never sent us a successful tmf response
 		 * then sent more data for the cmd.
 		 */
-		spin_lock_bh(&session->lock);
+		spin_lock_bh(&session->frwd_lock);
 		fail_scsi_task(task, DID_ABORT);
 		conn->tmf_state = TMF_INITIAL;
 		memset(hdr, 0, sizeof(*hdr));
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		iscsi_start_tx(conn);
 		goto success_unlocked;
 	case TMF_TIMEDOUT:
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 		goto failed_unlocked;
 	case TMF_NOT_FOUND:
@@ -2194,7 +2209,7 @@
 	}
 
 success:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 success_unlocked:
 	ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
 		     sc, task->itt);
@@ -2202,7 +2217,7 @@
 	return SUCCESS;
 
 failed:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 failed_unlocked:
 	ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
 		     task ? task->itt : 0);
@@ -2235,7 +2250,7 @@
 	ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	/*
 	 * Just check if we are not logged in. We cannot check for
 	 * the phase because the reset could come from an ioctl.
@@ -2262,7 +2277,7 @@
 	case TMF_SUCCESS:
 		break;
 	case TMF_TIMEDOUT:
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 		goto done;
 	default:
@@ -2271,21 +2286,21 @@
 	}
 
 	rc = SUCCESS;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	iscsi_suspend_tx(conn);
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	memset(hdr, 0, sizeof(*hdr));
 	fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
 	conn->tmf_state = TMF_INITIAL;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	iscsi_start_tx(conn);
 	goto done;
 
 unlock:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 done:
 	ISCSI_DBG_EH(session, "dev reset result = %s\n",
 		     rc == SUCCESS ? "SUCCESS" : "FAILED");
@@ -2298,13 +2313,13 @@
 {
 	struct iscsi_session *session = cls_session->dd_data;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (session->state != ISCSI_STATE_LOGGED_IN) {
 		session->state = ISCSI_STATE_RECOVERY_FAILED;
 		if (session->leadconn)
 			wake_up(&session->leadconn->ehwait);
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 }
 EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
 
@@ -2326,19 +2341,19 @@
 	conn = session->leadconn;
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (session->state == ISCSI_STATE_TERMINATE) {
 failed:
 		ISCSI_DBG_EH(session,
 			     "failing session reset: Could not log back into "
 			     "%s, %s [age %d]\n", session->targetname,
 			     conn->persistent_address, session->age);
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		mutex_unlock(&session->eh_mutex);
 		return FAILED;
 	}
 
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	mutex_unlock(&session->eh_mutex);
 	/*
 	 * we drop the lock here but the leadconn cannot be destroyed while
@@ -2355,14 +2370,14 @@
 		flush_signals(current);
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (session->state == ISCSI_STATE_LOGGED_IN) {
 		ISCSI_DBG_EH(session,
 			     "session reset succeeded for %s,%s\n",
 			     session->targetname, conn->persistent_address);
 	} else
 		goto failed;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	mutex_unlock(&session->eh_mutex);
 	return SUCCESS;
 }
@@ -2398,7 +2413,7 @@
 		     session->targetname);
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	/*
 	 * Just check if we are not logged in. We cannot check for
 	 * the phase because the reset could come from an ioctl.
@@ -2425,7 +2440,7 @@
 	case TMF_SUCCESS:
 		break;
 	case TMF_TIMEDOUT:
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 		goto done;
 	default:
@@ -2434,21 +2449,21 @@
 	}
 
 	rc = SUCCESS;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	iscsi_suspend_tx(conn);
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	memset(hdr, 0, sizeof(*hdr));
 	fail_scsi_tasks(conn, -1, DID_ERROR);
 	conn->tmf_state = TMF_INITIAL;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	iscsi_start_tx(conn);
 	goto done;
 
 unlock:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 done:
 	ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
 		     rc == SUCCESS ? "SUCCESS" : "FAILED");
@@ -2746,8 +2761,10 @@
 	session->max_r2t = 1;
 	session->tt = iscsit;
 	session->dd_data = cls_session->dd_data + sizeof(*session);
+
 	mutex_init(&session->eh_mutex);
-	spin_lock_init(&session->lock);
+	spin_lock_init(&session->frwd_lock);
+	spin_lock_init(&session->back_lock);
 
 	/* initialize SCSI PDU commands pool */
 	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
@@ -2861,14 +2878,14 @@
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
 	/* allocate login_task used for the login/text sequences */
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (!kfifo_out(&session->cmdpool.queue,
                          (void*)&conn->login_task,
 			 sizeof(void*))) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		goto login_task_alloc_fail;
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	data = (char *) __get_free_pages(GFP_KERNEL,
 					 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
@@ -2905,7 +2922,7 @@
 
 	del_timer_sync(&conn->transport_timer);
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
 	if (session->leadconn == conn) {
 		/*
@@ -2914,7 +2931,7 @@
 		session->state = ISCSI_STATE_TERMINATE;
 		wake_up(&conn->ehwait);
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	/*
 	 * Block until all in-progress commands for this connection
@@ -2941,16 +2958,19 @@
 	/* flush queued up work because we free the connection below */
 	iscsi_suspend_tx(conn);
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	free_pages((unsigned long) conn->data,
 		   get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
 	kfree(conn->persistent_address);
 	kfree(conn->local_ipaddr);
+	/* regular RX path uses back_lock */
+	spin_lock_bh(&session->back_lock);
 	kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
 		    sizeof(void*));
+	spin_unlock_bh(&session->back_lock);
 	if (session->leadconn == conn)
 		session->leadconn = NULL;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	iscsi_destroy_conn(cls_conn);
 }
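
The conversion above is the heart of this series: the single session->lock
is split into a frwd_lock, taken on the submission/error-handling (TX) side,
and a back_lock, taken on the completion (RX) side, so the two directions
stop serializing against each other.  A minimal user-space sketch of that
split follows; every name in it is invented for illustration and none of it
is libiscsi code.

#include <pthread.h>
#include <stdio.h>

struct session_model {
	pthread_mutex_t frwd_lock;	/* submission / TX-side state */
	pthread_mutex_t back_lock;	/* completion / RX-side state */
	int queued;			/* protected by frwd_lock */
	int completed;			/* protected by back_lock */
};

static struct session_model sess = {
	.frwd_lock = PTHREAD_MUTEX_INITIALIZER,
	.back_lock = PTHREAD_MUTEX_INITIALIZER,
};

/* TX path: takes only frwd_lock, so RX completions are not blocked. */
static void submit_task(void)
{
	pthread_mutex_lock(&sess.frwd_lock);
	sess.queued++;
	pthread_mutex_unlock(&sess.frwd_lock);
}

/* RX path: takes only back_lock, so new submissions are not blocked. */
static void complete_task(void)
{
	pthread_mutex_lock(&sess.back_lock);
	sess.completed++;
	pthread_mutex_unlock(&sess.back_lock);
}

int main(void)
{
	submit_task();
	complete_task();
	printf("queued=%d completed=%d\n", sess.queued, sess.completed);
	return 0;
}
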
@@ -2987,7 +3007,7 @@
 		conn->ping_timeout = 5;
 	}
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	conn->c_stage = ISCSI_CONN_STARTED;
 	session->state = ISCSI_STATE_LOGGED_IN;
 	session->queued_cmdsn = session->cmdsn;
@@ -3016,7 +3036,7 @@
 	default:
 		break;
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	iscsi_unblock_session(session->cls_session);
 	wake_up(&conn->ehwait);
@@ -3055,9 +3075,9 @@
 	int old_stop_stage;
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (conn->stop_stage == STOP_CONN_TERM) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		mutex_unlock(&session->eh_mutex);
 		return;
 	}
@@ -3074,14 +3094,14 @@
 
 	old_stop_stage = conn->stop_stage;
 	conn->stop_stage = flag;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	del_timer_sync(&conn->transport_timer);
 	iscsi_suspend_tx(conn);
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	conn->c_stage = ISCSI_CONN_STOPPED;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	/*
 	 * for connection level recovery we should not calculate
@@ -3102,11 +3122,11 @@
 	/*
 	 * flush queues.
 	 */
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
 	fail_mgmt_tasks(session, conn);
 	memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	mutex_unlock(&session->eh_mutex);
 }
 
@@ -3133,10 +3153,10 @@
 	struct iscsi_session *session = cls_session->dd_data;
 	struct iscsi_conn *conn = cls_conn->dd_data;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (is_leading)
 		session->leadconn = conn;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	/*
 	 * Unblock xmitworker(), Login Phase will pass through.
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 1d58d53..60cb6dc 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -446,7 +446,7 @@
  * iscsi_tcp_cleanup_task - free tcp_task resources
  * @task: iscsi task
  *
- * must be called with session lock
+ * must be called with session back_lock
  */
 void iscsi_tcp_cleanup_task(struct iscsi_task *task)
 {
@@ -457,6 +457,7 @@
 	if (!task->sc)
 		return;
 
+	spin_lock_bh(&tcp_task->queue2pool);
 	/* flush task's r2t queues */
 	while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
 		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
@@ -470,6 +471,7 @@
 			    sizeof(void*));
 		tcp_task->r2t = NULL;
 	}
+	spin_unlock_bh(&tcp_task->queue2pool);
 }
 EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
 
@@ -529,6 +531,8 @@
 	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
 	struct iscsi_r2t_info *r2t;
 	int r2tsn = be32_to_cpu(rhdr->r2tsn);
+	u32 data_length;
+	u32 data_offset;
 	int rc;
 
 	if (tcp_conn->in.datalen) {
@@ -554,39 +558,40 @@
 		return 0;
 	}
 
-	rc = kfifo_out(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
+	data_length = be32_to_cpu(rhdr->data_length);
+	if (data_length == 0) {
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "invalid R2T with zero data len\n");
+		return ISCSI_ERR_DATALEN;
+	}
+
+	if (data_length > session->max_burst)
+		ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "
+			      "burst %u. Attempting to execute request.\n",
+			      data_length, session->max_burst);
+
+	data_offset = be32_to_cpu(rhdr->data_offset);
+	if (data_offset + data_length > scsi_out(task->sc)->length) {
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "invalid R2T with data len %u at offset %u "
+				  "and total length %d\n", data_length,
+				  data_offset, scsi_out(task->sc)->length);
+		return ISCSI_ERR_DATALEN;
+	}
+
+	spin_lock(&tcp_task->pool2queue);
+	rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *));
 	if (!rc) {
 		iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
 				  "Target has sent more R2Ts than it "
 				  "negotiated for or driver has leaked.\n");
+		spin_unlock(&tcp_task->pool2queue);
 		return ISCSI_ERR_PROTO;
 	}
 
 	r2t->exp_statsn = rhdr->statsn;
-	r2t->data_length = be32_to_cpu(rhdr->data_length);
-	if (r2t->data_length == 0) {
-		iscsi_conn_printk(KERN_ERR, conn,
-				  "invalid R2T with zero data len\n");
-		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
-			    sizeof(void*));
-		return ISCSI_ERR_DATALEN;
-	}
-
-	if (r2t->data_length > session->max_burst)
-		ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "
-			      "burst %u. Attempting to execute request.\n",
-			      r2t->data_length, session->max_burst);
-
-	r2t->data_offset = be32_to_cpu(rhdr->data_offset);
-	if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
-		iscsi_conn_printk(KERN_ERR, conn,
-				  "invalid R2T with data len %u at offset %u "
-				  "and total length %d\n", r2t->data_length,
-				  r2t->data_offset, scsi_out(task->sc)->length);
-		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
-			    sizeof(void*));
-		return ISCSI_ERR_DATALEN;
-	}
+	r2t->data_length = data_length;
+	r2t->data_offset = data_offset;
 
 	r2t->ttt = rhdr->ttt; /* no flip */
 	r2t->datasn = 0;
@@ -595,6 +600,7 @@
 	tcp_task->exp_datasn = r2tsn + 1;
 	kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
 	conn->r2t_pdus_cnt++;
+	spin_unlock(&tcp_task->pool2queue);
 
 	iscsi_requeue_task(task);
 	return 0;
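
Note how the hunk above reorders the R2T handling: the zero-length,
max-burst and offset checks now run before a descriptor is taken from the
pool, so the error paths no longer have to push the descriptor back.  A
stand-alone sketch of that validate-before-allocate shape, with all names
invented:

#include <stdio.h>

struct r2t_desc { unsigned int len, off; };

static struct r2t_desc pool[4];
static int pool_top = 4;

static struct r2t_desc *pool_get(void)
{
	return pool_top ? &pool[--pool_top] : NULL;
}

static int handle_r2t(unsigned int data_length, unsigned int data_offset,
		      unsigned int total_length)
{
	struct r2t_desc *r2t;

	/* Validate first: nothing has been allocated yet. */
	if (data_length == 0)
		return -1;
	if (data_offset + data_length > total_length)
		return -1;

	/* Only now consume a descriptor from the pool. */
	r2t = pool_get();
	if (!r2t)
		return -1;	/* more R2Ts than negotiated */
	r2t->len = data_length;
	r2t->off = data_offset;
	printf("queued R2T len=%u off=%u\n", r2t->len, r2t->off);
	return 0;
}

int main(void)
{
	handle_r2t(4096, 0, 8192);	/* accepted */
	handle_r2t(0, 0, 8192);		/* rejected before any allocation */
	return 0;
}
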
@@ -667,14 +673,14 @@
 
 	switch(opcode) {
 	case ISCSI_OP_SCSI_DATA_IN:
-		spin_lock(&conn->session->lock);
+		spin_lock(&conn->session->back_lock);
 		task = iscsi_itt_to_ctask(conn, hdr->itt);
 		if (!task)
 			rc = ISCSI_ERR_BAD_ITT;
 		else
 			rc = iscsi_tcp_data_in(conn, task);
 		if (rc) {
-			spin_unlock(&conn->session->lock);
+			spin_unlock(&conn->session->back_lock);
 			break;
 		}
 
@@ -707,11 +713,11 @@
 						   tcp_conn->in.datalen,
 						   iscsi_tcp_process_data_in,
 						   rx_hash);
-			spin_unlock(&conn->session->lock);
+			spin_unlock(&conn->session->back_lock);
 			return rc;
 		}
 		rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
-		spin_unlock(&conn->session->lock);
+		spin_unlock(&conn->session->back_lock);
 		break;
 	case ISCSI_OP_SCSI_CMD_RSP:
 		if (tcp_conn->in.datalen) {
@@ -721,18 +727,20 @@
 		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
 		break;
 	case ISCSI_OP_R2T:
-		spin_lock(&conn->session->lock);
+		spin_lock(&conn->session->back_lock);
 		task = iscsi_itt_to_ctask(conn, hdr->itt);
+		spin_unlock(&conn->session->back_lock);
 		if (!task)
 			rc = ISCSI_ERR_BAD_ITT;
 		else if (ahslen)
 			rc = ISCSI_ERR_AHSLEN;
 		else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
 			task->last_xfer = jiffies;
+			spin_lock(&conn->session->frwd_lock);
 			rc = iscsi_tcp_r2t_rsp(conn, task);
+			spin_unlock(&conn->session->frwd_lock);
 		} else
 			rc = ISCSI_ERR_PROTO;
-		spin_unlock(&conn->session->lock);
 		break;
 	case ISCSI_OP_LOGIN_RSP:
 	case ISCSI_OP_TEXT_RSP:
@@ -980,14 +988,13 @@
 
 static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
 {
-	struct iscsi_session *session = task->conn->session;
 	struct iscsi_tcp_task *tcp_task = task->dd_data;
 	struct iscsi_r2t_info *r2t = NULL;
 
 	if (iscsi_task_has_unsol_data(task))
 		r2t = &task->unsol_r2t;
 	else {
-		spin_lock_bh(&session->lock);
+		spin_lock_bh(&tcp_task->queue2pool);
 		if (tcp_task->r2t) {
 			r2t = tcp_task->r2t;
 			/* Continue with this R2T? */
@@ -1009,7 +1016,7 @@
 			else
 				r2t = tcp_task->r2t;
 		}
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&tcp_task->queue2pool);
 	}
 
 	return r2t;
@@ -1139,6 +1146,8 @@
 			iscsi_pool_free(&tcp_task->r2tpool);
 			goto r2t_alloc_fail;
 		}
+		spin_lock_init(&tcp_task->pool2queue);
+		spin_lock_init(&tcp_task->queue2pool);
 	}
 
 	return 0;
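
The two new per-task locks divide the R2T traffic by direction: pool2queue
is taken when the RX path moves a descriptor from the free pool onto the
pending queue, queue2pool when cleanup or the TX side returns descriptors
to the pool.  The kernel's kfifo is safe for one concurrent producer and
one consumer, which is why one lock per direction suffices.  A rough
single-threaded model follows (invented names, plain counters standing in
for the SPSC-safe kfifos; not driver code):

#include <pthread.h>
#include <stdio.h>

#define R2T_POOL_SIZE 4

struct r2t_task_model {
	pthread_mutex_t pool2queue;	/* serializes pool -> queue movers */
	pthread_mutex_t queue2pool;	/* serializes queue -> pool movers */
	int pool_count;			/* free descriptors */
	int queue_count;		/* pending R2Ts */
};

static struct r2t_task_model task = {
	.pool2queue = PTHREAD_MUTEX_INITIALIZER,
	.queue2pool = PTHREAD_MUTEX_INITIALIZER,
	.pool_count = R2T_POOL_SIZE,
};

/* RX direction: take a free descriptor and queue it for transmission. */
static int r2t_receive(void)
{
	int ok = 0;

	pthread_mutex_lock(&task.pool2queue);
	if (task.pool_count > 0) {
		task.pool_count--;
		task.queue_count++;
		ok = 1;
	}
	pthread_mutex_unlock(&task.pool2queue);
	return ok;		/* 0 means more R2Ts than negotiated */
}

/* Cleanup direction: flush every pending R2T back into the pool. */
static void r2t_cleanup(void)
{
	pthread_mutex_lock(&task.queue2pool);
	task.pool_count += task.queue_count;
	task.queue_count = 0;
	pthread_mutex_unlock(&task.queue2pool);
}

int main(void)
{
	if (!r2t_receive())
		fprintf(stderr, "R2T pool exhausted\n");
	r2t_cleanup();
	printf("pool=%d queue=%d\n", task.pool_count, task.queue_count);
	return 0;
}
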
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index da3aee1..25d0f127 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -862,7 +862,7 @@
 
 enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
 {
-	scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd);
+	scmd_dbg(cmd, "command %p timed out\n", cmd);
 
 	return BLK_EH_NOT_HANDLED;
 }
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 4e1b75c..94a3caf 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -73,8 +73,6 @@
  */
 /* 1 Second */
 #define QUEUE_RAMP_DOWN_INTERVAL	(msecs_to_jiffies(1000 * 1))
-/* 5 minutes */
-#define QUEUE_RAMP_UP_INTERVAL		(msecs_to_jiffies(1000 * 300))
 
 /* Number of exchanges reserved for discovery to complete */
 #define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -722,6 +720,20 @@
 	uint32_t cfg_hba_queue_depth;
 	uint32_t cfg_enable_hba_reset;
 	uint32_t cfg_enable_hba_heartbeat;
+	uint32_t cfg_fof;
+	uint32_t cfg_EnableXLane;
+	uint8_t cfg_oas_tgt_wwpn[8];
+	uint8_t cfg_oas_vpt_wwpn[8];
+	uint32_t cfg_oas_lun_state;
+#define OAS_LUN_ENABLE	1
+#define OAS_LUN_DISABLE	0
+	uint32_t cfg_oas_lun_status;
+#define OAS_LUN_STATUS_EXISTS	0x01
+	uint32_t cfg_oas_flags;
+#define OAS_FIND_ANY_VPORT	0x01
+#define OAS_FIND_ANY_TARGET	0x02
+#define OAS_LUN_VALID	0x04
+	uint32_t cfg_XLanePriority;
 	uint32_t cfg_enable_bg;
 	uint32_t cfg_hostmem_hgp;
 	uint32_t cfg_log_verbose;
@@ -730,6 +742,7 @@
 	uint32_t cfg_request_firmware_upgrade;
 	uint32_t cfg_iocb_cnt;
 	uint32_t cfg_suppress_link_up;
+	uint32_t cfg_rrq_xri_bitmap_sz;
 #define LPFC_INITIALIZE_LINK              0	/* do normal init_link mbox */
 #define LPFC_DELAY_INIT_LINK              1	/* layered driver hold off */
 #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2	/* wait, manual intervention */
@@ -835,6 +848,7 @@
 	mempool_t *mbox_mem_pool;
 	mempool_t *nlp_mem_pool;
 	mempool_t *rrq_pool;
+	mempool_t *active_rrq_pool;
 
 	struct fc_host_statistics link_stats;
 	enum intr_type_t intr_type;
@@ -869,7 +883,6 @@
 	atomic_t num_cmd_success;
 	unsigned long last_rsrc_error_time;
 	unsigned long last_ramp_down_time;
-	unsigned long last_ramp_up_time;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	struct dentry *hba_debugfs_root;
 	atomic_t debugfs_vport_count;
@@ -971,6 +984,9 @@
 	atomic_t sdev_cnt;
 	uint8_t fips_spec_rev;
 	uint8_t fips_level;
+	spinlock_t devicelock;	/* lock for luns list */
+	mempool_t *device_data_mem_pool;
+	struct list_head luns;
 };
 
 static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 00656fc..8d5b6ce 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -529,6 +529,27 @@
 }
 
 /**
+ * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
+ *			    (OAS) is supported.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted OAS support state.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			phba->sli4_hba.pc_sli4_params.oas_supported);
+}
+
+/**
  * lpfc_link_state_store - Transition the link_state on an HBA port
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
@@ -2041,9 +2062,53 @@
 static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
 		   lpfc_sriov_hw_max_virtfn_show, NULL);
 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
+static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
+		   NULL);
 
 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
+#define WWN_SZ 8
+/**
+ * lpfc_wwn_set - Convert string to the 8 byte WWN value.
+ * @buf: WWN string.
+ * @cnt: Length of string.
+ * @wwn: Array to receive converted wwn value.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain a valid wwn
+ * 0 success
+ **/
+static ssize_t
+lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
+{
+	unsigned int i, j;
 
+	/* Count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
+	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+		return -EINVAL;
+
+	memset(wwn, 0, WWN_SZ);
+
+	/* Validate and store the new name */
+	for (i = 0, j = 0; i < 16; i++) {
+		if ((*buf >= 'a') && (*buf <= 'f'))
+			j = ((j << 4) | ((*buf++ - 'a') + 10));
+		else if ((*buf >= 'A') && (*buf <= 'F'))
+			j = ((j << 4) | ((*buf++ - 'A') + 10));
+		else if ((*buf >= '0') && (*buf <= '9'))
+			j = ((j << 4) | (*buf++ - '0'));
+		else
+			return -EINVAL;
+		if (i % 2) {
+			wwn[i/2] = j & 0xff;
+			j = 0;
+		}
+	}
+	return 0;
+}
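
The new helper accepts exactly 16 hex digits, optionally prefixed with "x"
or "0x" and optionally terminated by a newline.  Below is a stand-alone
re-implementation of those parsing rules for experimenting outside the
kernel; wwn_parse() is a hypothetical name and only mirrors the behavior
visible in the patch.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define WWN_SZ 8

static int wwn_parse(const char *buf, size_t cnt, unsigned char wwn[WWN_SZ])
{
	unsigned int i, j = 0;

	if (cnt && buf[cnt - 1] == '\n')	/* allow a trailing LF */
		cnt--;

	if (cnt < 16 || cnt > 18 ||
	    (cnt == 17 && *buf++ != 'x') ||
	    (cnt == 18 && (*buf++ != '0' || *buf++ != 'x')))
		return -1;

	memset(wwn, 0, WWN_SZ);
	for (i = 0; i < 16; i++, buf++) {
		if (!isxdigit((unsigned char)*buf))
			return -1;
		j = (j << 4) | (unsigned int)(isdigit((unsigned char)*buf) ?
			*buf - '0' : tolower((unsigned char)*buf) - 'a' + 10);
		if (i % 2) {		/* two nibbles complete one byte */
			wwn[i / 2] = j & 0xff;
			j = 0;
		}
	}
	return 0;
}

int main(void)
{
	unsigned char wwn[WWN_SZ];

	if (wwn_parse("0x10000000c9abcdef\n", 19, wwn) == 0) {
		for (int i = 0; i < WWN_SZ; i++)
			printf("%02x", wwn[i]);
		putchar('\n');
	}
	return 0;
}
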
 /**
  * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
  * @dev: class device that is converted into a Scsi_host.
@@ -2132,9 +2197,9 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 	struct completion online_compl;
-	int stat1=0, stat2=0;
-	unsigned int i, j, cnt=count;
-	u8 wwpn[8];
+	int stat1 = 0, stat2 = 0;
+	unsigned int cnt = count;
+	u8 wwpn[WWN_SZ];
 	int rc;
 
 	if (!phba->cfg_enable_hba_reset)
@@ -2149,29 +2214,19 @@
 	if (buf[cnt-1] == '\n')
 		cnt--;
 
-	if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
-	    ((cnt == 17) && (*buf++ != 'x')) ||
-	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+	if (!phba->soft_wwn_enable)
 		return -EINVAL;
 
+	/* lock setting wwpn, wwnn down */
 	phba->soft_wwn_enable = 0;
 
-	memset(wwpn, 0, sizeof(wwpn));
-
-	/* Validate and store the new name */
-	for (i=0, j=0; i < 16; i++) {
-		int value;
-
-		value = hex_to_bin(*buf++);
-		if (value >= 0)
-			j = (j << 4) | value;
-		else
-			return -EINVAL;
-		if (i % 2) {
-			wwpn[i/2] = j & 0xff;
-			j = 0;
-		}
+	rc = lpfc_wwn_set(buf, cnt, wwpn);
+	if (rc) {
+		/* not able to set wwpn, unlock it */
+		phba->soft_wwn_enable = 1;
+		return rc;
 	}
+
 	phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
 	fc_host_port_name(shost) = phba->cfg_soft_wwpn;
 	if (phba->cfg_soft_wwnn)
@@ -2198,7 +2253,7 @@
 				"reinit adapter - %d\n", stat2);
 	return (stat1 || stat2) ? -EIO : count;
 }
-static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
 		   lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
 
 /**
@@ -2235,39 +2290,25 @@
 {
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-	unsigned int i, j, cnt=count;
-	u8 wwnn[8];
+	unsigned int cnt = count;
+	u8 wwnn[WWN_SZ];
+	int rc;
 
 	/* count may include a LF at end of string */
 	if (buf[cnt-1] == '\n')
 		cnt--;
 
-	if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
-	    ((cnt == 17) && (*buf++ != 'x')) ||
-	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+	if (!phba->soft_wwn_enable)
 		return -EINVAL;
 
-	/*
-	 * Allow wwnn to be set many times, as long as the enable is set.
-	 * However, once the wwpn is set, everything locks.
-	 */
-
-	memset(wwnn, 0, sizeof(wwnn));
-
-	/* Validate and store the new name */
-	for (i=0, j=0; i < 16; i++) {
-		int value;
-
-		value = hex_to_bin(*buf++);
-		if (value >= 0)
-			j = (j << 4) | value;
-		else
-			return -EINVAL;
-		if (i % 2) {
-			wwnn[i/2] = j & 0xff;
-			j = 0;
-		}
+	rc = lpfc_wwn_set(buf, cnt, wwnn);
+	if (rc) {
+		/* Allow wwnn to be set many times, as long as the enable
+		 * is set. However, once the wwpn is set, everything locks.
+		 */
+		return rc;
 	}
+
 	phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
 
 	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
@@ -2276,9 +2317,438 @@
 
 	return count;
 }
-static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
 		   lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
 
+/**
+ * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
+ *		      Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			wwn_to_u64(phba->cfg_oas_tgt_wwpn));
+}
+
+/**
+ * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
+ *		      Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or the wwpn contains an invalid byte
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	unsigned int cnt = count;
+	uint8_t wwpn[WWN_SZ];
+	int rc;
+
+	if (!phba->cfg_EnableXLane)
+		return -EPERM;
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	rc = lpfc_wwn_set(buf, cnt, wwpn);
+	if (rc)
+		return rc;
+
+	memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	if (wwn_to_u64(wwpn) == 0)
+		phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
+	else
+		phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
+	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+	return count;
+}
+static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
+		   lpfc_oas_tgt_show, lpfc_oas_tgt_store);
+
+/**
+ * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
+ *		      for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			wwn_to_u64(phba->cfg_oas_vpt_wwpn));
+}
+
+/**
+ * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
+ *		      for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or the wwpn contains an invalid byte
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	unsigned int cnt = count;
+	uint8_t wwpn[WWN_SZ];
+	int rc;
+
+	if (!phba->cfg_EnableXLane)
+		return -EPERM;
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	rc = lpfc_wwn_set(buf, cnt, wwpn);
+	if (rc)
+		return rc;
+
+	memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	if (wwn_to_u64(wwpn) == 0)
+		phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
+	else
+		phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
+	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+	return count;
+}
+static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
+		   lpfc_oas_vpt_show, lpfc_oas_vpt_store);
+
+/**
+ * lpfc_oas_lun_state_show - Return the state (enabled or disabled) that will
+ *			    be applied to luns for Optimized Access Storage
+ *			    (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
+}
+
+/**
+ * lpfc_oas_lun_state_store - Store the state (enabled or disabled) that will
+ *			    be applied to luns for Optimized Access Storage
+ *			    (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL if the value written is not 0 or 1
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	int val = 0;
+
+	if (!phba->cfg_EnableXLane)
+		return -EPERM;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	if ((val != 0) && (val != 1))
+		return -EINVAL;
+
+	phba->cfg_oas_lun_state = val;
+
+	return strlen(buf);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
+		   lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
+
+/**
+ * lpfc_oas_lun_status_show - Return the status of the Optimized Access
+ *                          Storage (OAS) lun returned by the
+ *                          lpfc_oas_lun_show function.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
+		return -EFAULT;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
+		   lpfc_oas_lun_status_show, NULL);
+
+
+/**
+ * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
+ *			   (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: wwpn of the vport whose lun is being set.
+ * @tgt_wwpn: wwpn of the fcp target whose lun is being set.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the lun.
+ *
+ * Returns:
+ * SUCCESS: 0
+ * -EPERM OAS is not enabled or not supported by this port.
+ * -ENOMEM failed to enable the lun for OAS operations.
+ *
+ */
+static ssize_t
+lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+		       uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state)
+{
+
+	int rc = 0;
+
+	if (!phba->cfg_EnableXLane)
+		return -EPERM;
+
+	if (oas_state) {
+		if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+					 (struct lpfc_name *)tgt_wwpn, lun))
+			rc = -ENOMEM;
+	} else {
+		lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+				     (struct lpfc_name *)tgt_wwpn, lun);
+	}
+	return rc;
+
+}
+
+/**
+ * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
+ *			  Access Storage (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: wwpn of the vport associated with the returned lun
+ * @tgt_wwpn: wwpn of the target associated with the returned lun
+ * @lun_status: status of the returned lun
+ *
+ * Returns the first or next lun enabled for OAS operations for the vport/target
+ * specified.  If a lun is found, its vport wwpn, target wwpn and status are
+ * returned.  If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
+ *
+ * Return:
+ * lun that is OAS enabled for the vport/target
+ * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
+ */
+static uint64_t
+lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+		      uint8_t tgt_wwpn[], uint32_t *lun_status)
+{
+	uint64_t found_lun;
+
+	if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
+		return NOT_OAS_ENABLED_LUN;
+	if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
+				   phba->sli4_hba.oas_next_vpt_wwpn,
+				   (struct lpfc_name *)
+				   phba->sli4_hba.oas_next_tgt_wwpn,
+				   &phba->sli4_hba.oas_next_lun,
+				   (struct lpfc_name *)vpt_wwpn,
+				   (struct lpfc_name *)tgt_wwpn,
+				   &found_lun, lun_status))
+		return found_lun;
+	else
+		return NOT_OAS_ENABLED_LUN;
+}
+
+/**
+ * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: vport wwpn by reference.
+ * @tgt_wwpn: target wwpn by reference.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the oas_lun.
+ *
+ * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
+ * a lun for OAS operations.
+ *
+ * Return:
+ * SUCCESS: 0
+ * -ENOMEM: failed to enable a lun for OAS operations
+ * -EPERM: OAS is not enabled
+ */
+static ssize_t
+lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+			  uint8_t tgt_wwpn[], uint64_t lun,
+			  uint32_t oas_state)
+{
+
+	int rc;
+
+	rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
+					oas_state);
+	return rc;
+}
+
+/**
+ * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * This routine returns a lun enabled for OAS each time the function
+ * is called.
+ *
+ * Returns:
+ * SUCCESS: size of formatted string.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ **/
+static ssize_t
+lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	uint64_t oas_lun;
+	int len = 0;
+
+	if (!phba->cfg_EnableXLane)
+		return -EPERM;
+
+	if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+		if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
+			return -EFAULT;
+
+	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+		if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
+			return -EFAULT;
+
+	oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
+					phba->cfg_oas_tgt_wwpn,
+					&phba->cfg_oas_lun_status);
+	if (oas_lun != NOT_OAS_ENABLED_LUN)
+		phba->cfg_oas_flags |= OAS_LUN_VALID;
+
+	len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
+
+	return len;
+}
+
+/**
+ * lpfc_oas_lun_store - Sets the OAS state for lun
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: size of the data buffer.
+ *
+ * This function sets the OAS state for the lun.  Before this function is called,
+ * the vport wwpn, target wwpn, and oas state need to be set.
+ *
+ * Returns:
+ * SUCCESS: size of formatted string.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ **/
+static ssize_t
+lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	uint64_t scsi_lun;
+	ssize_t rc;
+
+	if (!phba->cfg_EnableXLane)
+		return -EPERM;
+
+	if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+		return -EFAULT;
+
+	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+		return -EFAULT;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+
+	if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
+		return -EINVAL;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"3372 Try to set vport 0x%llx target 0x%llx lun:%lld "
+			"with oas set to %d\n",
+			wwn_to_u64(phba->cfg_oas_vpt_wwpn),
+			wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
+			phba->cfg_oas_lun_state);
+
+	rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
+					   phba->cfg_oas_tgt_wwpn, scsi_lun,
+					   phba->cfg_oas_lun_state);
+
+	if (rc)
+		return rc;
+
+	return count;
+}
+static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
+		   lpfc_oas_lun_show, lpfc_oas_lun_store);
 
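Taken together, the five xlane attributes form a small workflow: select a
vport and target wwpn, choose the desired state, then write a lun to
lpfc_xlane_lun to apply it (or read it back to walk the enabled luns).  A
hedged user-space example follows; "host0" and the WWPN values are made up,
only the attribute names come from the patch above.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	const char *host = "/sys/class/scsi_host/host0";	/* hypothetical */
	char path[256];

	/* 1. pick the vport and target whose luns are of interest */
	snprintf(path, sizeof(path), "%s/lpfc_xlane_vpt", host);
	sysfs_write(path, "0x10000000c9000001\n");
	snprintf(path, sizeof(path), "%s/lpfc_xlane_tgt", host);
	sysfs_write(path, "0x10000000c9000002\n");

	/* 2. request the enabled state ... */
	snprintf(path, sizeof(path), "%s/lpfc_xlane_lun_state", host);
	sysfs_write(path, "1\n");

	/* 3. ... and apply it to a specific lun */
	snprintf(path, sizeof(path), "%s/lpfc_xlane_lun", host);
	return sysfs_write(path, "0x0\n") ? 1 : 0;
}
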
 static int lpfc_poll = 0;
 module_param(lpfc_poll, int, S_IRUGO);
@@ -3818,7 +4288,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_vector_map_info *cpup;
-	int  idx, len = 0;
+	int  len = 0;
 
 	if ((phba->sli_rev != LPFC_SLI_REV4) ||
 	    (phba->intr_type != MSIX))
@@ -3846,23 +4316,39 @@
 		break;
 	}
 
-	cpup = phba->sli4_hba.cpu_map;
-	for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+	while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+		cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
+
+		/* each entry plus the "more..." marker must fit in the margin */
 		if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
 			len += snprintf(buf + len, PAGE_SIZE-len,
 					"CPU %02d io_chan %02d "
 					"physid %d coreid %d\n",
-					idx, cpup->channel_id, cpup->phys_id,
+					phba->sli4_hba.curr_disp_cpu,
+					cpup->channel_id, cpup->phys_id,
 					cpup->core_id);
 		else
 			len += snprintf(buf + len, PAGE_SIZE-len,
 					"CPU %02d io_chan %02d "
 					"physid %d coreid %d IRQ %d\n",
-					idx, cpup->channel_id, cpup->phys_id,
+					phba->sli4_hba.curr_disp_cpu,
+					cpup->channel_id, cpup->phys_id,
 					cpup->core_id, cpup->irq);
 
-		cpup++;
+		phba->sli4_hba.curr_disp_cpu++;
+
+		/* display max number of CPUs keeping some margin */
+		if (phba->sli4_hba.curr_disp_cpu <
+				phba->sli4_hba.num_present_cpu &&
+				(len >= (PAGE_SIZE - 64))) {
+			len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+			break;
+		}
 	}
+
+	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+		phba->sli4_hba.curr_disp_cpu = 0;
+
 	return len;
 }
 
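
The rewritten show routine above works around the one-page limit on sysfs
reads: a persistent cursor (curr_disp_cpu) remembers where output stopped,
a 64-byte margin leaves room for the "more...\n" marker, and the cursor
wraps to zero once everything has been shown.  A generic stand-alone sketch
of that resume-cursor pattern, assuming a 4 KiB page and invented record
contents:

#include <stdio.h>

#define PAGE_SZ 4096
#define MARGIN	64		/* room for the "more..." marker */
#define NUM_RECS 600

static int curr_disp;		/* persists between show() calls */

static int show(char *buf)
{
	int len = 0;

	while (curr_disp < NUM_RECS) {
		len += snprintf(buf + len, PAGE_SZ - len,
				"CPU %03d io_chan %02d\n",
				curr_disp, curr_disp % 4);
		curr_disp++;
		/* truncate early so the marker always fits */
		if (curr_disp < NUM_RECS && len >= PAGE_SZ - MARGIN) {
			len += snprintf(buf + len, PAGE_SZ - len, "more...\n");
			break;
		}
	}
	if (curr_disp == NUM_RECS)
		curr_disp = 0;	/* wrap: the next read starts over */
	return len;
}

int main(void)
{
	char buf[PAGE_SZ];
	int reads = 0;

	do {
		show(buf);
		reads++;
	} while (curr_disp != 0);
	printf("full listing took %d read(s)\n", reads);
	return 0;
}
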
@@ -4157,6 +4643,21 @@
 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
 
 /*
+# lpfc_EnableXLane: Enable Express Lane Feature
+#      0x0   Express Lane Feature disabled
+#      0x1   Express Lane Feature enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
+
+/*
+# lpfc_XLanePriority:  Define CS_CTL priority for Express Lane Feature
+#       0x0 - 0x7f  = CS_CTL field in FC header (high 7 bits)
+# Value range is [0x0,0x7f]. Default value is 0
+*/
+LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
+
+/*
 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
 #       0  = BlockGuard disabled (default)
 #       1  = BlockGuard enabled
@@ -4317,6 +4818,13 @@
 	&dev_attr_lpfc_soft_wwn_enable,
 	&dev_attr_lpfc_enable_hba_reset,
 	&dev_attr_lpfc_enable_hba_heartbeat,
+	&dev_attr_lpfc_EnableXLane,
+	&dev_attr_lpfc_XLanePriority,
+	&dev_attr_lpfc_xlane_lun,
+	&dev_attr_lpfc_xlane_tgt,
+	&dev_attr_lpfc_xlane_vpt,
+	&dev_attr_lpfc_xlane_lun_state,
+	&dev_attr_lpfc_xlane_lun_status,
 	&dev_attr_lpfc_sg_seg_cnt,
 	&dev_attr_lpfc_max_scsicmpl_time,
 	&dev_attr_lpfc_stat_data_ctrl,
@@ -4335,6 +4843,7 @@
 	&dev_attr_lpfc_dss,
 	&dev_attr_lpfc_sriov_hw_max_virtfn,
 	&dev_attr_protocol,
+	&dev_attr_lpfc_xlane_supported,
 	NULL,
 };
 
@@ -5296,11 +5805,20 @@
 	lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
+	lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		phba->cfg_EnableXLane = 0;
+	lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
+	memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
+	memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
+	phba->cfg_oas_lun_state = 0;
+	phba->cfg_oas_lun_status = 0;
+	phba->cfg_oas_flags = 0;
 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		phba->cfg_poll = 0;
 	else
-	phba->cfg_poll = lpfc_poll;
+		phba->cfg_poll = lpfc_poll;
 	phba->cfg_soft_wwnn = 0L;
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 82134d20..ca2f4ea 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -4153,6 +4153,7 @@
 		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
 			switch (opcode) {
 			case FCOE_OPCODE_READ_FCF:
+			case FCOE_OPCODE_GET_DPORT_RESULTS:
 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 						"2957 Handled SLI_CONFIG "
 						"subsys_fcoe, opcode:x%x\n",
@@ -4161,6 +4162,8 @@
 							nemb_mse, dmabuf);
 				break;
 			case FCOE_OPCODE_ADD_FCF:
+			case FCOE_OPCODE_SET_DPORT_MODE:
+			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 						"2958 Handled SLI_CONFIG "
 						"subsys_fcoe, opcode:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 67f7d0a..a94d4c9 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -231,6 +231,8 @@
 #define SLI_CONFIG_SUBSYS_FCOE		0x0C
 #define FCOE_OPCODE_READ_FCF		0x08
 #define FCOE_OPCODE_ADD_FCF		0x09
+#define FCOE_OPCODE_SET_DPORT_MODE	0x27
+#define FCOE_OPCODE_GET_DPORT_RESULTS	0x28
 };
 
 struct lpfc_sli_config_emb1_subsys {
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index cda076a..adda0bf 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -187,6 +187,11 @@
 void lpfc_offline(struct lpfc_hba *);
 void lpfc_reset_hba(struct lpfc_hba *);
 
+int lpfc_fof_queue_create(struct lpfc_hba *);
+int lpfc_fof_queue_setup(struct lpfc_hba *);
+int lpfc_fof_queue_destroy(struct lpfc_hba *);
+irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
+
 int lpfc_sli_setup(struct lpfc_hba *);
 int lpfc_sli_queue_setup(struct lpfc_hba *);
 
@@ -242,6 +247,7 @@
 void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
 
 int lpfc_mem_alloc(struct lpfc_hba *, int align);
+int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
 void lpfc_mem_free(struct lpfc_hba *);
 void lpfc_mem_free_all(struct lpfc_hba *);
 void lpfc_stop_vport_timers(struct lpfc_vport *);
@@ -399,7 +405,6 @@
 void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
 void lpfc_rampdown_queue_depth(struct lpfc_hba *);
 void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
-void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
 void lpfc_scsi_dev_block(struct lpfc_hba *);
 
 void
@@ -471,3 +476,20 @@
 uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
 int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
 void lpfc_sli4_offline_eratt(struct lpfc_hba *);
+
+struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
+						struct lpfc_name *,
+						struct lpfc_name *,
+						uint64_t, bool);
+void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
+struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
+					struct list_head *list,
+					struct lpfc_name *,
+					struct lpfc_name *, uint64_t);
+bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+			 struct lpfc_name *, uint64_t);
+bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+			  struct lpfc_name *, uint64_t);
+bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+			    struct lpfc_name *, uint64_t *, struct lpfc_name *,
+			    struct lpfc_name *, uint64_t *, uint32_t *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index b800cc9..828c08e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2280,6 +2280,104 @@
 		}
 	}
 
+	if (phba->cfg_fof) {
+		/* FOF EQ */
+		qp = phba->sli4_hba.fof_eq;
+		if (!qp)
+			goto out;
+
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\nFOF EQ info: "
+			"EQ-STAT[max:x%x noE:x%x "
+			"bs:x%x proc:x%llx]\n",
+			qp->q_cnt_1, qp->q_cnt_2,
+			qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"EQID[%02d], "
+			"QE-CNT[%04d], QE-SIZE[%04d], "
+			"HOST-IDX[%04d], PORT-IDX[%04d]",
+			qp->queue_id,
+			qp->entry_count,
+			qp->entry_size,
+			qp->host_index,
+			qp->hba_index);
+
+		/* Reset max counter */
+		qp->EQ_max_eqe = 0;
+
+		len +=  snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+		if (len >= max_cnt)
+			goto too_big;
+	}
+
+	if (phba->cfg_EnableXLane) {
+
+		/* OAS CQ */
+		qp = phba->sli4_hba.oas_cq;
+		if (qp) {
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\tOAS CQ info: ");
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"AssocEQID[%02d]: "
+				"CQ STAT[max:x%x relw:x%x "
+				"xabt:x%x wq:x%llx]\n",
+				qp->assoc_qid,
+				qp->q_cnt_1, qp->q_cnt_2,
+				qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\tCQID[%02d], "
+				"QE-CNT[%04d], QE-SIZE[%04d], "
+				"HOST-IDX[%04d], PORT-IDX[%04d]",
+				qp->queue_id, qp->entry_count,
+				qp->entry_size, qp->host_index,
+				qp->hba_index);
+
+			/* Reset max counter */
+			qp->CQ_max_cqe = 0;
+
+			len +=  snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+			if (len >= max_cnt)
+				goto too_big;
+		}
+
+		/* OAS WQ */
+		qp = phba->sli4_hba.oas_wq;
+		if (qp) {
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\t\tOAS WQ info: ");
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"AssocCQID[%02d]: "
+				"WQ-STAT[oflow:x%x posted:x%llx]\n",
+				qp->assoc_qid,
+				qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\t\tWQID[%02d], "
+				"QE-CNT[%04d], QE-SIZE[%04d], "
+				"HOST-IDX[%04d], PORT-IDX[%04d]",
+				qp->queue_id,
+				qp->entry_count,
+				qp->entry_size,
+				qp->host_index,
+				qp->hba_index);
+
+			len +=  snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+			if (len >= max_cnt)
+				goto too_big;
+		}
+	}
+out:
 	spin_unlock_irq(&phba->hbalock);
 	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 
@@ -3927,6 +4025,7 @@
 	struct lpfc_hba   *phba = vport->phba;
 	char name[64];
 	uint32_t num, i;
+	bool pport_setup = false;
 
 	if (!lpfc_debugfs_enable)
 		return;
@@ -3947,6 +4046,7 @@
 	/* Setup funcX directory for specific HBA PCI function */
 	snprintf(name, sizeof(name), "fn%d", phba->brd_no);
 	if (!phba->hba_debugfs_root) {
+		pport_setup = true;
 		phba->hba_debugfs_root =
 			debugfs_create_dir(name, lpfc_debugfs_root);
 		if (!phba->hba_debugfs_root) {
@@ -4239,6 +4339,14 @@
 	}
 
 	/*
+	 * The following section is for additional directories/files for the
+	 * physical port.
+	 */
+
+	if (!pport_setup)
+		goto debug_failed;
+
+	/*
 	 * iDiag debugfs root entry points for SLI4 device only
 	 */
 	if (phba->sli_rev < LPFC_SLI_REV4)
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index e409ba5..1a6fe52 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -116,7 +116,7 @@
 	atomic_t cmd_pending;
 	uint32_t cmd_qdepth;
 	unsigned long last_change_time;
-	struct lpfc_node_rrqs active_rrqs;
+	unsigned long *active_rrqs_xri_bitmap;
 	struct lpfc_scsicmd_bkt *lat_data;	/* Latency data */
 };
 struct lpfc_node_rrq {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 110445f..624fe0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1516,7 +1516,7 @@
 	uint32_t rc, keepDID = 0;
 	int  put_node;
 	int  put_rport;
-	struct lpfc_node_rrqs rrq;
+	unsigned long *active_rrqs_xri_bitmap = NULL;
 
 	/* Fabric nodes can have the same WWPN so we don't bother searching
 	 * by WWPN.  Just return the ndlp that was given to us.
@@ -1534,7 +1534,13 @@
 
 	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
 		return ndlp;
-	memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
+						       GFP_KERNEL);
+		if (active_rrqs_xri_bitmap)
+			memset(active_rrqs_xri_bitmap, 0,
+			       phba->cfg_rrq_xri_bitmap_sz);
+	}
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 		 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
@@ -1543,41 +1549,58 @@
 	if (!new_ndlp) {
 		rc = memcmp(&ndlp->nlp_portname, name,
 			    sizeof(struct lpfc_name));
-		if (!rc)
+		if (!rc) {
+			if (active_rrqs_xri_bitmap)
+				mempool_free(active_rrqs_xri_bitmap,
+					     phba->active_rrq_pool);
 			return ndlp;
+		}
 		new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
-		if (!new_ndlp)
+		if (!new_ndlp) {
+			if (active_rrqs_xri_bitmap)
+				mempool_free(active_rrqs_xri_bitmap,
+					     phba->active_rrq_pool);
 			return ndlp;
+		}
 		lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
 	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
 		rc = memcmp(&ndlp->nlp_portname, name,
 			    sizeof(struct lpfc_name));
-		if (!rc)
+		if (!rc) {
+			if (active_rrqs_xri_bitmap)
+				mempool_free(active_rrqs_xri_bitmap,
+					     phba->active_rrq_pool);
 			return ndlp;
+		}
 		new_ndlp = lpfc_enable_node(vport, new_ndlp,
 						NLP_STE_UNUSED_NODE);
-		if (!new_ndlp)
+		if (!new_ndlp) {
+			if (active_rrqs_xri_bitmap)
+				mempool_free(active_rrqs_xri_bitmap,
+					     phba->active_rrq_pool);
 			return ndlp;
+		}
 		keepDID = new_ndlp->nlp_DID;
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			memcpy(&rrq.xri_bitmap,
-				&new_ndlp->active_rrqs.xri_bitmap,
-				sizeof(new_ndlp->active_rrqs.xri_bitmap));
+		if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
+			memcpy(active_rrqs_xri_bitmap,
+			       new_ndlp->active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
 	} else {
 		keepDID = new_ndlp->nlp_DID;
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			memcpy(&rrq.xri_bitmap,
-				&new_ndlp->active_rrqs.xri_bitmap,
-				sizeof(new_ndlp->active_rrqs.xri_bitmap));
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    active_rrqs_xri_bitmap)
+			memcpy(active_rrqs_xri_bitmap,
+			       new_ndlp->active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
 	}
 
 	lpfc_unreg_rpi(vport, new_ndlp);
 	new_ndlp->nlp_DID = ndlp->nlp_DID;
 	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
 	if (phba->sli_rev == LPFC_SLI_REV4)
-		memcpy(new_ndlp->active_rrqs.xri_bitmap,
-			&ndlp->active_rrqs.xri_bitmap,
-			sizeof(ndlp->active_rrqs.xri_bitmap));
+		memcpy(new_ndlp->active_rrqs_xri_bitmap,
+		       ndlp->active_rrqs_xri_bitmap,
+		       phba->cfg_rrq_xri_bitmap_sz);
 
 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
 		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -1619,10 +1642,11 @@
 
 		/* Two ndlps cannot have the same did on the nodelist */
 		ndlp->nlp_DID = keepDID;
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			memcpy(&ndlp->active_rrqs.xri_bitmap,
-				&rrq.xri_bitmap,
-				sizeof(ndlp->active_rrqs.xri_bitmap));
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    active_rrqs_xri_bitmap)
+			memcpy(ndlp->active_rrqs_xri_bitmap,
+			       active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
 		lpfc_drop_node(vport, ndlp);
 	}
 	else {
@@ -1634,10 +1658,11 @@
 
 		/* Two ndlps cannot have the same did */
 		ndlp->nlp_DID = keepDID;
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			memcpy(&ndlp->active_rrqs.xri_bitmap,
-				&rrq.xri_bitmap,
-				sizeof(ndlp->active_rrqs.xri_bitmap));
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    active_rrqs_xri_bitmap)
+			memcpy(ndlp->active_rrqs_xri_bitmap,
+			       active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
 
 		/* Since we are swapping the ndlp passed in with the new one
 		 * and the did has already been swapped, copy over state.
@@ -1668,6 +1693,10 @@
 				put_device(&rport->dev);
 		}
 	}
+	if (phba->sli_rev == LPFC_SLI_REV4 &&
+	    active_rrqs_xri_bitmap)
+		mempool_free(active_rrqs_xri_bitmap,
+			     phba->active_rrq_pool);
 	return new_ndlp;
 }
 
@@ -2772,6 +2801,7 @@
 	/* This will cause the callback-function lpfc_cmpl_els_cmd to
 	 * trigger the release of node.
 	 */
+
 	lpfc_nlp_put(ndlp);
 	return 0;
 }
@@ -6193,11 +6223,11 @@
 
 	spin_lock_irqsave(&vport->work_port_lock, iflag);
 	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
-	if (!tmo_posted)
+	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
 		vport->work_port_events |= WORKER_ELS_TMO;
 	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
 
-	if (!tmo_posted)
+	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
 		lpfc_worker_wake_up(phba);
 	return;
 }
@@ -6223,19 +6253,26 @@
 	uint32_t els_command = 0;
 	uint32_t timeout;
 	uint32_t remote_ID = 0xffffffff;
-	LIST_HEAD(txcmplq_completions);
 	LIST_HEAD(abort_list);
 
 
 	timeout = (uint32_t)(phba->fc_ratov << 1);
 
 	pring = &phba->sli.ring[LPFC_ELS_RING];
-
+	if ((phba->pport->load_flag & FC_UNLOADING))
+		return;
 	spin_lock_irq(&phba->hbalock);
-	list_splice_init(&pring->txcmplq, &txcmplq_completions);
-	spin_unlock_irq(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
 
-	list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
+	if ((phba->pport->load_flag & FC_UNLOADING)) {
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			spin_unlock(&pring->ring_lock);
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
 		cmd = &piocb->iocb;
 
 		if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
@@ -6274,11 +6311,12 @@
 		}
 		list_add_tail(&piocb->dlist, &abort_list);
 	}
-	spin_lock_irq(&phba->hbalock);
-	list_splice(&txcmplq_completions, &pring->txcmplq);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
 	spin_unlock_irq(&phba->hbalock);
 
 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+		cmd = &piocb->iocb;
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 			 "0127 ELS timeout Data: x%x x%x x%x "
 			 "x%x\n", els_command,
@@ -6290,8 +6328,9 @@
 	}
 
 	if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
-		mod_timer(&vport->els_tmofunc,
-			  jiffies + msecs_to_jiffies(1000 * timeout));
+		if (!(phba->pport->load_flag & FC_UNLOADING))
+			mod_timer(&vport->els_tmofunc,
+				  jiffies + msecs_to_jiffies(1000 * timeout));
 }
 
 /**
@@ -6317,15 +6356,50 @@
 void
 lpfc_els_flush_cmd(struct lpfc_vport *vport)
 {
-	LIST_HEAD(completions);
+	LIST_HEAD(abort_list);
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	struct lpfc_iocbq *tmp_iocb, *piocb;
 	IOCB_t *cmd = NULL;
 
 	lpfc_fabric_abort_vport(vport);
+	/*
+	 * For SLI3, only the hbalock is required.  But SLI4 needs to coordinate
+	 * with the ring insert operation.  Because lpfc_sli_issue_abort_iotag
+	 * ultimately grabs the ring_lock, the driver must splice the list into
+	 * a working list and release the locks before calling the abort.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+
+	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+			continue;
+
+		if (piocb->vport != vport)
+			continue;
+		list_add_tail(&piocb->dlist, &abort_list);
+	}
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
+	spin_unlock_irq(&phba->hbalock);
+	/* Abort each iocb on the aborted list and remove the dlist links. */
+	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+		spin_lock_irq(&phba->hbalock);
+		list_del_init(&piocb->dlist);
+		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+		spin_unlock_irq(&phba->hbalock);
+	}
+	if (!list_empty(&abort_list))
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "3387 abort list for txq not empty\n");
+	INIT_LIST_HEAD(&abort_list);
 
 	spin_lock_irq(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
 		cmd = &piocb->iocb;
 
@@ -6343,24 +6417,16 @@
 		if (piocb->vport != vport)
 			continue;
 
-		list_move_tail(&piocb->list, &completions);
+		list_del_init(&piocb->list);
+		list_add_tail(&piocb->list, &abort_list);
 	}
-
-	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
-		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
-			continue;
-		}
-
-		if (piocb->vport != vport)
-			continue;
-
-		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
-	}
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Cancel all the IOCBs from the abort list */
-	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
-			      IOERR_SLI_ABORTED);
+	lpfc_sli_cancel_iocbs(phba, &abort_list,
+			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 
 	return;
 }
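
The comment block in the hunk above describes a classic lock-ordering
technique: entries are spliced onto a private list while the locks are
held, and the per-entry abort, which re-takes the inner lock itself, runs
only after both locks are dropped.  A user-space model of that two-phase
flush, with every name invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int id;
	struct entry *next;
};

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *pending;

/* The per-entry abort re-takes ring_lock, so it must run unlocked. */
static void abort_entry(struct entry *e)
{
	pthread_mutex_lock(&ring_lock);
	printf("aborting entry %d\n", e->id);
	pthread_mutex_unlock(&ring_lock);
}

static void flush_all(void)
{
	struct entry *work, *e;

	/* Phase 1: splice the shared list out under both locks. */
	pthread_mutex_lock(&outer_lock);
	pthread_mutex_lock(&ring_lock);
	work = pending;
	pending = NULL;
	pthread_mutex_unlock(&ring_lock);
	pthread_mutex_unlock(&outer_lock);

	/* Phase 2: abort each entry with the locks dropped. */
	while ((e = work) != NULL) {
		work = e->next;
		abort_entry(e);
		free(e);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct entry *e = malloc(sizeof(*e));

		if (!e)
			return 1;
		e->id = i;
		e->next = pending;
		pending = e;
	}
	flush_all();
	return 0;
}
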
@@ -6385,35 +6451,9 @@
 void
 lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)
 {
-	LIST_HEAD(completions);
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-	struct lpfc_iocbq *tmp_iocb, *piocb;
-	IOCB_t *cmd = NULL;
-
-	lpfc_fabric_abort_hba(phba);
-	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
-		cmd = &piocb->iocb;
-		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
-			continue;
-		/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
-		if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
-		    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
-		    cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
-		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
-			continue;
-		list_move_tail(&piocb->list, &completions);
-	}
-	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
-		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
-			continue;
-		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
-	}
-	spin_unlock_irq(&phba->hbalock);
-
-	/* Cancel all the IOCBs from the completions list */
-	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
-			      IOERR_SLI_ABORTED);
+	struct lpfc_vport *vport;
+	list_for_each_entry(vport, &phba->port_list, listentry)
+		lpfc_els_flush_cmd(vport);
 
 	return;
 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 883ea2d..59b51c5 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -674,8 +674,6 @@
 				lpfc_fdmi_timeout_handler(vport);
 			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
 				lpfc_ramp_down_queue_handler(phba);
-			if (work_port_events & WORKER_RAMP_UP_QUEUE)
-				lpfc_ramp_up_queue_handler(phba);
 			if (work_port_events & WORKER_DELAYED_DISC_TMO)
 				lpfc_delayed_disc_timeout_handler(vport);
 		}
@@ -2545,8 +2543,11 @@
 	if (!new_fcf_record) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
 				"2766 Mailbox command READ_FCF_RECORD "
-				"failed to retrieve a FCF record.\n");
-		goto error_out;
+				"failed to retrieve a FCF record. "
+				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
+				phba->fcf.fcf_flag);
+		lpfc_unregister_fcf_rescan(phba);
+		goto out;
 	}
 
 	/* Get the needed parameters from FCF record */
@@ -3973,7 +3974,10 @@
 		vport->fc_map_cnt += count;
 		break;
 	case NLP_STE_NPR_NODE:
-		vport->fc_npr_cnt += count;
+		if (vport->fc_npr_cnt == 0 && count == -1)
+			vport->fc_npr_cnt = 0;
+		else
+			vport->fc_npr_cnt += count;
 		break;
 	}
 	spin_unlock_irq(shost->host_lock);
@@ -4180,6 +4184,7 @@
 	struct lpfc_hba *phba = vport->phba;
 	uint32_t did;
 	unsigned long flags;
+	unsigned long *active_rrqs_xri_bitmap = NULL;
 
 	if (!ndlp)
 		return NULL;
@@ -4208,12 +4213,17 @@
 
 	/* Keep the original DID */
 	did = ndlp->nlp_DID;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
 
 	/* re-initialize ndlp except of ndlp linked list pointer */
 	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
 		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
 	lpfc_initialize_node(vport, ndlp, did);
 
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
+
 	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
 	if (vport->phba->sli_rev == LPFC_SLI_REV4)
 		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
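
The node re-initialization in the hunk above relies on a common kernel idiom:
memset() the structure while preserving the members that must survive, here
the leading list_head linkage and, on SLI4, the separately allocated
active_rrqs_xri_bitmap pointer, which is saved before the wipe and restored
afterwards.  A minimal sketch of the pattern, using hypothetical struct and
field names:

	struct node {
		struct list_head list;	/* must survive: links into a list */
		unsigned long *bitmap;	/* must survive: allocated elsewhere */
		int state;		/* everything else may be cleared */
	};

	static void node_reinit(struct node *n)
	{
		unsigned long *saved = n->bitmap;	/* save before the wipe */

		/* Clear everything after the leading list_head. */
		memset((char *)n + sizeof(struct list_head), 0,
		       sizeof(*n) - sizeof(struct list_head));
		n->bitmap = saved;			/* restore the pointer */
	}
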
@@ -4799,9 +4809,10 @@
 				 ((uint32_t) ndlp->nlp_rpi & 0xff));
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
 					 "0929 FIND node DID "
-					 "Data: x%p x%x x%x x%x\n",
+					 "Data: x%p x%x x%x x%x %p\n",
 					 ndlp, ndlp->nlp_DID,
-					 ndlp->nlp_flag, data1);
+					 ndlp->nlp_flag, data1,
+					 ndlp->active_rrqs_xri_bitmap);
 			return ndlp;
 		}
 	}
@@ -5618,8 +5629,13 @@
 
 	lpfc_initialize_node(vport, ndlp, did);
 	INIT_LIST_HEAD(&ndlp->nlp_listp);
-	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
 		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+		ndlp->active_rrqs_xri_bitmap =
+				mempool_alloc(vport->phba->active_rrq_pool,
+					      GFP_KERNEL);
+	}
 
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
@@ -5664,6 +5680,9 @@
 	/* free ndlp memory for final ndlp release */
 	if (NLP_CHK_FREE_REQ(ndlp)) {
 		kfree(ndlp->lat_data);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			mempool_free(ndlp->active_rrqs_xri_bitmap,
+				     ndlp->phba->active_rrq_pool);
 		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
 	}
 }
@@ -6170,10 +6189,6 @@
 
 		memcpy(&conn_entry->conn_rec, &conn_rec[i],
 			sizeof(struct lpfc_fcf_conn_rec));
-		conn_entry->conn_rec.vlan_tag =
-			conn_entry->conn_rec.vlan_tag;
-		conn_entry->conn_rec.flags =
-			conn_entry->conn_rec.flags;
 		list_add_tail(&conn_entry->list,
 			&phba->fcf_conn_rec_list);
 	}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 6f927d3..3d9438c 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -45,6 +45,7 @@
 #define LPFC_EXTRA_RING          1	/* ring 1 for other protocols */
 #define LPFC_ELS_RING            2	/* ring 2 for ELS commands */
 #define LPFC_FCP_NEXT_RING       3
+#define LPFC_FCP_OAS_RING        3
 
 #define SLI2_IOCB_CMD_R0_ENTRIES    172	/* SLI-2 FCP command ring entries */
 #define SLI2_IOCB_RSP_R0_ENTRIES    134	/* SLI-2 FCP response ring entries */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 5464b11..fd79f7d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2616,6 +2616,9 @@
 #define cfg_phwq_SHIFT				15
 #define cfg_phwq_MASK				0x00000001
 #define cfg_phwq_WORD				word12
+#define cfg_oas_SHIFT				25
+#define cfg_oas_MASK				0x00000001
+#define cfg_oas_WORD				word12
 #define cfg_loopbk_scope_SHIFT			28
 #define cfg_loopbk_scope_MASK			0x0000000f
 #define cfg_loopbk_scope_WORD			word12
@@ -3322,6 +3325,9 @@
 #define wqe_ebde_cnt_SHIFT    0
 #define wqe_ebde_cnt_MASK     0x0000000f
 #define wqe_ebde_cnt_WORD     word10
+#define wqe_oas_SHIFT         6
+#define wqe_oas_MASK          0x00000001
+#define wqe_oas_WORD          word10
 #define wqe_lenloc_SHIFT      7
 #define wqe_lenloc_MASK       0x00000003
 #define wqe_lenloc_WORD       word10
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 68c94cc..635eeb3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -80,6 +80,7 @@
 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
+static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
 
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1005,9 +1006,14 @@
 
 	phba = (struct lpfc_hba *)ptr;
 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
-	phba->hba_flag |= HBA_RRQ_ACTIVE;
+	if (!(phba->pport->load_flag & FC_UNLOADING))
+		phba->hba_flag |= HBA_RRQ_ACTIVE;
+	else
+		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
-	lpfc_worker_wake_up(phba);
+
+	if (!(phba->pport->load_flag & FC_UNLOADING))
+		lpfc_worker_wake_up(phba);
 }
 
 /**
@@ -1468,7 +1474,8 @@
  * for handling possible port resource change.
  **/
 static int
-lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
+lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
+			    bool en_rn_msg)
 {
 	int rc;
 	uint32_t intr_mode;
@@ -1480,9 +1487,10 @@
 	rc = lpfc_sli4_pdev_status_reg_wait(phba);
 	if (!rc) {
 		/* need reset: attempt for port recovery */
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2887 Reset Needed: Attempting Port "
-				"Recovery...\n");
+		if (en_rn_msg)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2887 Reset Needed: Attempting Port "
+					"Recovery...\n");
 		lpfc_offline_prep(phba, mbx_action);
 		lpfc_offline(phba);
 		/* release interrupt for possible resource change */
@@ -1522,6 +1530,7 @@
 	uint32_t reg_err1, reg_err2;
 	uint32_t uerrlo_reg, uemasklo_reg;
 	uint32_t pci_rd_rc1, pci_rd_rc2;
+	bool en_rn_msg = true;
 	int rc;
 
 	/* If the pci channel is offline, ignore possible errors, since
@@ -1572,10 +1581,12 @@
 			break;
 		}
 		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
-		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"3143 Port Down: Firmware Restarted\n");
-		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+					"3143 Port Down: Firmware Update "
+					"Detected\n");
+			en_rn_msg = false;
+		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
 			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"3144 Port Down: Debug Dump\n");
@@ -1585,7 +1596,8 @@
 					"3145 Port Down: Provisioning\n");
 
 		/* Check port status register for function reset */
-		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT);
+		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
+				en_rn_msg);
 		if (rc == 0) {
 			/* don't report event on forced debug dump */
 			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
@@ -4856,6 +4868,7 @@
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
 	int longs;
+	int fof_vectors = 0;
 
 	/* Get all the module params for configuring this host */
 	lpfc_get_cfgparam(phba);
@@ -5061,6 +5074,9 @@
 	rc = lpfc_sli4_read_config(phba);
 	if (unlikely(rc))
 		goto out_free_bsmbx;
+	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
 
 	/* IF Type 0 ports get initialized now. */
 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
@@ -5118,6 +5134,12 @@
 		}
 	}
 	mempool_free(mboxq, phba->mbox_mem_pool);
+
+	/* Verify OAS is supported */
+	lpfc_sli4_oas_verify(phba);
+	if (phba->cfg_fof)
+		fof_vectors = 1;
+
 	/* Verify all the SLI4 queues */
 	rc = lpfc_sli4_queue_verify(phba);
 	if (rc)
@@ -5159,7 +5181,8 @@
 
 	phba->sli4_hba.fcp_eq_hdl =
 			kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
-			    phba->cfg_fcp_io_channel), GFP_KERNEL);
+			    (fof_vectors + phba->cfg_fcp_io_channel)),
+			    GFP_KERNEL);
 	if (!phba->sli4_hba.fcp_eq_hdl) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2572 Failed allocate memory for "
@@ -5169,7 +5192,8 @@
 	}
 
 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
-				      phba->cfg_fcp_io_channel), GFP_KERNEL);
+				  (fof_vectors +
+				   phba->cfg_fcp_io_channel)), GFP_KERNEL);
 	if (!phba->sli4_hba.msix_entries) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2573 Failed allocate memory for msi-x "
@@ -5267,6 +5291,7 @@
 	kfree(phba->sli4_hba.cpu_map);
 	phba->sli4_hba.num_present_cpu = 0;
 	phba->sli4_hba.num_online_cpu = 0;
+	phba->sli4_hba.curr_disp_cpu = 0;
 
 	/* Free memory allocated for msi-x interrupt vector entries */
 	kfree(phba->sli4_hba.msix_entries);
@@ -5390,6 +5415,10 @@
 	/* Initialize FCF connection rec list */
 	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
 
+	/* Initialize OAS configuration list */
+	spin_lock_init(&phba->devicelock);
+	INIT_LIST_HEAD(&phba->luns);
+
 	return 0;
 }
 
@@ -6816,6 +6845,7 @@
 	int cfg_fcp_io_channel;
 	uint32_t cpu;
 	uint32_t i = 0;
+	int fof_vectors = phba->cfg_fof ? 1 : 0;
 
 	/*
 	 * Sanity check for configured queue parameters against the run-time
@@ -6832,6 +6862,7 @@
 	}
 	phba->sli4_hba.num_online_cpu = i;
 	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+	phba->sli4_hba.curr_disp_cpu = 0;
 
 	if (i < cfg_fcp_io_channel) {
 		lpfc_printf_log(phba,
@@ -6842,7 +6873,7 @@
 		cfg_fcp_io_channel = i;
 	}
 
-	if (cfg_fcp_io_channel >
+	if (cfg_fcp_io_channel + fof_vectors >
 	    phba->sli4_hba.max_cfg_param.max_eq) {
 		if (phba->sli4_hba.max_cfg_param.max_eq <
 		    LPFC_FCP_IO_CHAN_MIN) {
@@ -6859,7 +6890,8 @@
 				"available EQs: from %d to %d\n",
 				cfg_fcp_io_channel,
 				phba->sli4_hba.max_cfg_param.max_eq);
-		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
+		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
+			fof_vectors;
 	}
 
 	/* The actual number of FCP event queues adopted */
@@ -7070,6 +7102,9 @@
 	}
 	phba->sli4_hba.dat_rq = qdesc;
 
+	/* Create the Queues needed for Flash Optimized Fabric operations */
+	if (phba->cfg_fof)
+		lpfc_fof_queue_create(phba);
 	return 0;
 
 out_error:
@@ -7094,6 +7129,9 @@
 {
 	int idx;
 
+	if (phba->cfg_fof)
+		lpfc_fof_queue_destroy(phba);
+
 	if (phba->sli4_hba.hba_eq != NULL) {
 		/* Release HBA event queue */
 		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
@@ -7478,8 +7516,20 @@
 			phba->sli4_hba.hdr_rq->queue_id,
 			phba->sli4_hba.dat_rq->queue_id,
 			phba->sli4_hba.els_cq->queue_id);
+
+	if (phba->cfg_fof) {
+		rc = lpfc_fof_queue_setup(phba);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0549 Failed setup of FOF Queues: "
+					"rc = 0x%x\n", rc);
+			goto out_destroy_els_rq;
+		}
+	}
 	return 0;
 
+out_destroy_els_rq:
+	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
 out_destroy_els_wq:
 	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 out_destroy_mbx_wq:
@@ -7518,6 +7568,9 @@
 {
 	int fcp_qidx;
 
+	/* Unset the queues created for Flash Optimized Fabric operations */
+	if (phba->cfg_fof)
+		lpfc_fof_queue_destroy(phba);
 	/* Unset mailbox command work queue */
 	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
 	/* Unset ELS work queue */
@@ -8635,6 +8688,10 @@
 
 	/* Configure MSI-X capability structure */
 	vectors = phba->cfg_fcp_io_channel;
+	if (phba->cfg_fof) {
+		phba->sli4_hba.msix_entries[index].entry = index;
+		vectors++;
+	}
 enable_msix_vectors:
 	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
 			     vectors);
@@ -8664,7 +8721,15 @@
 		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
 		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
 		atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
-		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+		if (phba->cfg_fof && (index == (vectors - 1)))
+			rc = request_irq(
+				phba->sli4_hba.msix_entries[index].vector,
+				 &lpfc_sli4_fof_intr_handler, IRQF_SHARED,
+				 (char *)&phba->sli4_hba.handler_name[index],
+				 &phba->sli4_hba.fcp_eq_hdl[index]);
+		else
+			rc = request_irq(
+				phba->sli4_hba.msix_entries[index].vector,
 				 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
 				 (char *)&phba->sli4_hba.handler_name[index],
 				 &phba->sli4_hba.fcp_eq_hdl[index]);
@@ -8676,6 +8741,9 @@
 		}
 	}
 
+	if (phba->cfg_fof)
+		vectors--;
+
 	if (vectors != phba->cfg_fcp_io_channel) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3238 Reducing IO channels to match number of "
@@ -8721,7 +8789,10 @@
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
 			 &phba->sli4_hba.fcp_eq_hdl[index]);
 	}
-
+	if (phba->cfg_fof) {
+		free_irq(phba->sli4_hba.msix_entries[index].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index]);
+	}
 	/* Disable MSI-X */
 	pci_disable_msix(phba->pcidev);
 
@@ -8771,6 +8842,10 @@
 		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
 	}
 
+	if (phba->cfg_fof) {
+		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+	}
 	return 0;
 }
 
@@ -8853,6 +8928,12 @@
 				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
 					fcp_eq_in_use, 1);
 			}
+			if (phba->cfg_fof) {
+				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
+					fcp_eq_in_use, 1);
+			}
 		}
 	}
 	return intr_mode;
@@ -9163,6 +9244,7 @@
 		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
 	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
 	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
 	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
 	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
 	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
@@ -10796,6 +10878,169 @@
 	return;
 }
 
+/**
+ * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks to see if OAS is supported for this adapter. If
+ * supported, the Flash Optimized Fabric configuration flag (cfg_fof) is
+ * set.  Otherwise, the EnableXLane flag is cleared and the memory pool
+ * created for OAS device data is destroyed.
+ *
+ **/
+void
+lpfc_sli4_oas_verify(struct lpfc_hba *phba)
+{
+
+	if (!phba->cfg_EnableXLane)
+		return;
+
+	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
+		phba->cfg_fof = 1;
+	} else {
+		phba->cfg_EnableXLane = 0;
+		if (phba->device_data_mem_pool)
+			mempool_destroy(phba->device_data_mem_pool);
+		phba->device_data_mem_pool = NULL;
+	}
+
+	return;
+}
+
+/**
+ * lpfc_fof_queue_setup - Set up all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the fof queues for the FC HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ **/
+int
+lpfc_fof_queue_setup(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	int rc;
+
+	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
+	if (rc)
+		return -ENOMEM;
+
+	if (phba->cfg_EnableXLane) {
+
+		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
+				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
+		if (rc)
+			goto out_oas_cq;
+
+		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
+				    phba->sli4_hba.oas_cq, LPFC_FCP);
+		if (rc)
+			goto out_oas_wq;
+
+		phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
+		phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
+	}
+
+	return 0;
+
+out_oas_wq:
+	if (phba->cfg_EnableXLane)
+		lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
+out_oas_cq:
+	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
+	return rc;
+
+}
+
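
The error path above follows the usual kernel goto-unwind idiom: each label
undoes only the steps that had succeeded before the failure, in reverse order
of creation.  A generic sketch of the pattern, with hypothetical
create_*/destroy_* helpers:

	static int setup_queues(void)
	{
		int rc;

		rc = create_eq();
		if (rc)
			return rc;	/* nothing to undo yet */
		rc = create_cq();
		if (rc)
			goto undo_eq;
		rc = create_wq();
		if (rc)
			goto undo_cq;
		return 0;

	undo_cq:
		destroy_cq();
	undo_eq:
		destroy_eq();
		return rc;
	}
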
+/**
+ * lpfc_fof_queue_create - Create all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the fof queues for the FC HBA
+ * operation. For each SLI4 queue type, parameters such as the queue entry
+ * count (queue depth) should be taken from the module parameters. For now,
+ * we just use constant numbers as placeholders.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_fof_queue_create(struct lpfc_hba *phba)
+{
+	struct lpfc_queue *qdesc;
+
+	/* Create FOF EQ */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+				      phba->sli4_hba.eq_ecount);
+	if (!qdesc)
+		goto out_error;
+
+	phba->sli4_hba.fof_eq = qdesc;
+
+	if (phba->cfg_EnableXLane) {
+
+		/* Create OAS CQ */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+						      phba->sli4_hba.cq_ecount);
+		if (!qdesc)
+			goto out_error;
+
+		phba->sli4_hba.oas_cq = qdesc;
+
+		/* Create OAS WQ */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+					      phba->sli4_hba.wq_ecount);
+		if (!qdesc)
+			goto out_error;
+
+		phba->sli4_hba.oas_wq = qdesc;
+
+	}
+	return 0;
+
+out_error:
+	lpfc_fof_queue_destroy(phba);
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_fof_queue_destroy - Destroy all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 queues with the FC HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ **/
+int
+lpfc_fof_queue_destroy(struct lpfc_hba *phba)
+{
+	/* Release FOF Event queue */
+	if (phba->sli4_hba.fof_eq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
+		phba->sli4_hba.fof_eq = NULL;
+	}
+
+	/* Release OAS Completion queue */
+	if (phba->sli4_hba.oas_cq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
+		phba->sli4_hba.oas_cq = NULL;
+	}
+
+	/* Release OAS Work queue */
+	if (phba->sli4_hba.oas_wq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
+		phba->sli4_hba.oas_wq = NULL;
+	}
+	return 0;
+}
+
 static struct pci_device_id lpfc_id_table[] = {
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
 		PCI_ANY_ID, PCI_ANY_ID, },
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 812d0cd..ed419aa 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -38,10 +38,29 @@
 #include "lpfc_scsi.h"
 #include "lpfc.h"
 #include "lpfc_crtn.h"
+#include "lpfc_logmsg.h"
 
 #define LPFC_MBUF_POOL_SIZE     64      /* max elements in MBUF safety pool */
 #define LPFC_MEM_POOL_SIZE      64      /* max elem in non-DMA safety pool */
+#define LPFC_DEVICE_DATA_POOL_SIZE 64   /* max elements in device data pool */
 
+int
+lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
+	size_t bytes;
+	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+	if (max_xri <= 0)
+		return -ENOMEM;
+	bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
+		  sizeof(unsigned long);
+	phba->cfg_rrq_xri_bitmap_sz = bytes;
+	phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+							    bytes);
+	if (!phba->active_rrq_pool)
+		return -ENOMEM;
+	else
+		return 0;
+}
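
The byte count computed above is the standard round-up-to-whole-longs sizing
for a bitmap holding one bit per XRI.  A worked example, assuming a 64-bit
build (BITS_PER_LONG == 64) and a hypothetical max_xri of 1000:

	bytes = ((64 - 1 + 1000) / 64) * sizeof(unsigned long)
	      = (1063 / 64) * 8		/* integer division yields 16 */
	      = 128;			/* 16 longs = 1024 bits >= 1000 XRIs */
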
 
 /**
  * lpfc_mem_alloc - create and allocate all PCI and memory pools
@@ -146,6 +165,16 @@
 		phba->lpfc_drb_pool = NULL;
 	}
 
+	if (phba->cfg_EnableXLane) {
+		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
+					LPFC_DEVICE_DATA_POOL_SIZE,
+					sizeof(struct lpfc_device_data));
+		if (!phba->device_data_mem_pool)
+			goto fail_free_hrb_pool;
+	} else {
+		phba->device_data_mem_pool = NULL;
+	}
+
 	return 0;
  fail_free_hrb_pool:
 	pci_pool_destroy(phba->lpfc_hrb_pool);
@@ -188,6 +217,7 @@
 {
 	int i;
 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+	struct lpfc_device_data *device_data;
 
 	/* Free HBQ pools */
 	lpfc_sli_hbqbuf_free_all(phba);
@@ -209,6 +239,10 @@
 	/* Free NLP memory pool */
 	mempool_destroy(phba->nlp_mem_pool);
 	phba->nlp_mem_pool = NULL;
+	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
+		mempool_destroy(phba->active_rrq_pool);
+		phba->active_rrq_pool = NULL;
+	}
 
 	/* Free mbox memory pool */
 	mempool_destroy(phba->mbox_mem_pool);
@@ -227,6 +261,19 @@
 	pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
 	phba->lpfc_scsi_dma_buf_pool = NULL;
 
+	/* Free Device Data memory pool */
+	if (phba->device_data_mem_pool) {
+		/* Ensure all objects have been returned to the pool */
+		while (!list_empty(&phba->luns)) {
+			device_data = list_first_entry(&phba->luns,
+						       struct lpfc_device_data,
+						       listentry);
+			list_del(&device_data->listentry);
+			mempool_free(device_data, phba->device_data_mem_pool);
+		}
+		mempool_destroy(phba->device_data_mem_pool);
+	}
+	phba->device_data_mem_pool = NULL;
 	return;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index abc3612..c342f6a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -203,8 +203,6 @@
 int
 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
-	LIST_HEAD(completions);
-	LIST_HEAD(txcmplq_completions);
 	LIST_HEAD(abort_list);
 	struct lpfc_sli  *psli = &phba->sli;
 	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
@@ -216,32 +214,27 @@
 			 "Data: x%x x%x x%x\n",
 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
 			 ndlp->nlp_rpi);
-
+	/* Clean up all fabric IOs first. */
 	lpfc_fabric_abort_nport(ndlp);
 
-	/* First check the txq */
+	/*
+	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
+	 * of all ELS IOs that need an ABTS.  The IOs need to stay on the
+	 * txcmplq so that the abort operation completes them successfully.
+	 */
 	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-		/* Check to see if iocb matches the nport we are looking for */
-		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
-			/* It matches, so deque and call compl with anp error */
-			list_move_tail(&iocb->list, &completions);
-		}
-	}
-
-	/* Next check the txcmplq */
-	list_splice_init(&pring->txcmplq, &txcmplq_completions);
-	spin_unlock_irq(&phba->hbalock);
-
-	list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
-		/* Check to see if iocb matches the nport we are looking for */
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+		/* Add to abort_list on NDLP match. */
 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
 			list_add_tail(&iocb->dlist, &abort_list);
 	}
-	spin_lock_irq(&phba->hbalock);
-	list_splice(&txcmplq_completions, &pring->txcmplq);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
 	spin_unlock_irq(&phba->hbalock);
 
+	/* Abort the targeted IOs and remove them from the abort list. */
 	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
 			spin_lock_irq(&phba->hbalock);
 			list_del_init(&iocb->dlist);
@@ -249,9 +242,28 @@
 			spin_unlock_irq(&phba->hbalock);
 	}
 
+	INIT_LIST_HEAD(&abort_list);
+
+	/* Now process the txq */
+	spin_lock_irq(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+		/* Check to see if iocb matches the nport we are looking for */
+		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
+			list_del_init(&iocb->list);
+			list_add_tail(&iocb->list, &abort_list);
+		}
+	}
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
+	spin_unlock_irq(&phba->hbalock);
+
 	/* Cancel all the IOCBs from the completions list */
-	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
-			      IOERR_SLI_ABORTED);
+	lpfc_sli_cancel_iocbs(phba, &abort_list,
+			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 
 	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
 	return 0;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b2ede05..462453e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -68,6 +68,17 @@
 	__be32 ref_tag;         /* Target LBA or indirect LBA */
 };
 
+static struct lpfc_rport_data *
+lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
+
+	if (vport->phba->cfg_EnableXLane)
+		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
+	else
+		return (struct lpfc_rport_data *)sdev->hostdata;
+}
+
 static void
 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
 static void
@@ -304,9 +315,27 @@
 	unsigned long new_queue_depth, old_queue_depth;
 
 	old_queue_depth = sdev->queue_depth;
-	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+
+	switch (reason) {
+	case SCSI_QDEPTH_DEFAULT:
+		/* change request from sysfs, fall through */
+	case SCSI_QDEPTH_RAMP_UP:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+		break;
+	case SCSI_QDEPTH_QFULL:
+		if (scsi_track_queue_full(sdev, qdepth) == 0)
+			return sdev->queue_depth;
+
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+				 "0711 detected queue full - lun queue "
+				 "depth adjusted to %d.\n", sdev->queue_depth);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
 	new_queue_depth = sdev->queue_depth;
-	rdata = sdev->hostdata;
+	rdata = lpfc_rport_data_from_scsi_device(sdev);
 	if (rdata)
 		lpfc_send_sdev_queuedepth_change_event(phba, vport,
 						       rdata->pnode, sdev->lun,
@@ -377,50 +406,6 @@
 }
 
 /**
- * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
- * @phba: The Hba for which this call is being executed.
- *
- * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. This routine
- * post at most 1 event every 5 minute after last_ramp_up_time or
- * last_rsrc_error_time.  This routine wakes up worker thread of @phba
- * to process WORKER_RAM_DOWN_EVENT event.
- *
- * This routine should be called with no lock held.
- **/
-static inline void
-lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
-			uint32_t queue_depth)
-{
-	unsigned long flags;
-	struct lpfc_hba *phba = vport->phba;
-	uint32_t evt_posted;
-	atomic_inc(&phba->num_cmd_success);
-
-	if (vport->cfg_lun_queue_depth <= queue_depth)
-		return;
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (time_before(jiffies,
-			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
-	    time_before(jiffies,
-			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		return;
-	}
-	phba->last_ramp_up_time = jiffies;
-	spin_unlock_irqrestore(&phba->hbalock, flags);
-
-	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
-	if (!evt_posted)
-		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
-	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
-
-	if (!evt_posted)
-		lpfc_worker_wake_up(phba);
-	return;
-}
-
-/**
  * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
  * @phba: The Hba for which this call is being executed.
  *
@@ -472,41 +457,6 @@
 }
 
 /**
- * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
- * @phba: The Hba for which this call is being executed.
- *
- * This routine is called to  process WORKER_RAMP_UP_QUEUE event for worker
- * thread.This routine increases queue depth for all scsi device on each vport
- * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
- * num_cmd_success to zero.
- **/
-void
-lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
-{
-	struct lpfc_vport **vports;
-	struct Scsi_Host  *shost;
-	struct scsi_device *sdev;
-	int i;
-
-	vports = lpfc_create_vport_work_array(phba);
-	if (vports != NULL)
-		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
-			shost = lpfc_shost_from_vport(vports[i]);
-			shost_for_each_device(sdev, shost) {
-				if (vports[i]->cfg_lun_queue_depth <=
-				    sdev->queue_depth)
-					continue;
-				lpfc_change_queue_depth(sdev,
-							sdev->queue_depth+1,
-							SCSI_QDEPTH_RAMP_UP);
-			}
-		}
-	lpfc_destroy_vport_work_array(phba, vports);
-	atomic_set(&phba->num_rsrc_err, 0);
-	atomic_set(&phba->num_cmd_success, 0);
-}
-
-/**
  * lpfc_scsi_dev_block - set all scsi hosts to block state
  * @phba: Pointer to HBA context object.
  *
@@ -1502,7 +1452,7 @@
 	}
 
 	/* Next check if we need to match the remote NPortID or WWPN */
-	rdata = sc->device->hostdata;
+	rdata = lpfc_rport_data_from_scsi_device(sc->device);
 	if (rdata && rdata->pnode) {
 		ndlp = rdata->pnode;
 
@@ -3507,6 +3457,14 @@
 	 * we need to set word 4 of IOCB here
 	 */
 	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+
+	/*
+	 * If the OAS driver feature is enabled and the lun is enabled for
+	 * OAS, set the oas iocb related flags.
+	 */
+	if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *)
+		scsi_cmnd->device->hostdata)->oas_enabled)
+		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
 	return 0;
 }
 
@@ -4021,7 +3979,6 @@
 	struct lpfc_nodelist *pnode = rdata->pnode;
 	struct scsi_cmnd *cmd;
 	int result;
-	struct scsi_device *tmp_sdev;
 	int depth;
 	unsigned long flags;
 	struct lpfc_fast_path_event *fast_path_evt;
@@ -4266,32 +4223,6 @@
 		return;
 	}
 
-	if (!result)
-		lpfc_rampup_queue_depth(vport, queue_depth);
-
-	/*
-	 * Check for queue full.  If the lun is reporting queue full, then
-	 * back off the lun queue depth to prevent target overloads.
-	 */
-	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
-	    NLP_CHK_NODE_ACT(pnode)) {
-		shost_for_each_device(tmp_sdev, shost) {
-			if (tmp_sdev->id != scsi_id)
-				continue;
-			depth = scsi_track_queue_full(tmp_sdev,
-						      tmp_sdev->queue_depth-1);
-			if (depth <= 0)
-				continue;
-			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
-					 "0711 detected queue full - lun queue "
-					 "depth adjusted to %d.\n", depth);
-			lpfc_send_sdev_queuedepth_change_event(phba, vport,
-							       pnode,
-							       tmp_sdev->lun,
-							       depth+1, depth);
-		}
-	}
-
 	spin_lock_irqsave(&phba->hbalock, flags);
 	lpfc_cmd->pCmd = NULL;
 	spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -4492,6 +4423,8 @@
 	}
 	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
 	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
+	piocb->ulpPU = 0;
+	piocb->un.fcpi.fcpi_parm = 0;
 
 	/* ulpTimeout is only one byte */
 	if (lpfc_cmd->timeout > 0xff) {
@@ -4691,12 +4624,13 @@
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
-	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+	struct lpfc_rport_data *rdata;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_scsi_buf *lpfc_cmd;
 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
 	int err;
 
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 	err = fc_remote_port_chkready(rport);
 	if (err) {
 		cmnd->result = err;
@@ -4782,6 +4716,24 @@
 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
 	if (err) {
 		atomic_dec(&ndlp->cmd_pending);
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+				 "3376 FCP could not issue IOCB err %x "
+				 "FCP cmd x%x <%d/%d> "
+				 "sid: x%x did: x%x oxid: x%x "
+				 "Data: x%x x%x x%x x%x\n",
+				 err, cmnd->cmnd[0],
+				 cmnd->device ? cmnd->device->id : 0xffff,
+				 cmnd->device ? cmnd->device->lun : 0xffff,
+				 vport->fc_myDID, ndlp->nlp_DID,
+				 phba->sli_rev == LPFC_SLI_REV4 ?
+				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
+				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
+				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
+				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
+				 (uint32_t)
+				 (cmnd->request->timeout / 1000));
+
 		goto out_host_busy_free_buf;
 	}
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -5161,10 +5113,11 @@
 static int
 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
 {
-	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+	struct lpfc_rport_data *rdata;
 	struct lpfc_nodelist *pnode;
 	unsigned long later;
 
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 	if (!rdata) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
@@ -5182,7 +5135,7 @@
 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
 			return SUCCESS;
 		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-		rdata = cmnd->device->hostdata;
+		rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 		if (!rdata)
 			return FAILED;
 		pnode = rdata->pnode;
@@ -5254,13 +5207,14 @@
 {
 	struct Scsi_Host  *shost = cmnd->device->host;
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+	struct lpfc_rport_data *rdata;
 	struct lpfc_nodelist *pnode;
 	unsigned tgt_id = cmnd->device->id;
 	unsigned int lun_id = cmnd->device->lun;
 	struct lpfc_scsi_event_header scsi_event;
 	int status;
 
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 	if (!rdata) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 			"0798 Device Reset rport failure: rdata x%p\n", rdata);
@@ -5323,13 +5277,14 @@
 {
 	struct Scsi_Host  *shost = cmnd->device->host;
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+	struct lpfc_rport_data *rdata;
 	struct lpfc_nodelist *pnode;
 	unsigned tgt_id = cmnd->device->id;
 	unsigned int lun_id = cmnd->device->lun;
 	struct lpfc_scsi_event_header scsi_event;
 	int status;
 
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 	if (!rdata) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 			"0799 Target Reset rport failure: rdata x%p\n", rdata);
@@ -5529,11 +5484,45 @@
 	uint32_t num_to_alloc = 0;
 	int num_allocated = 0;
 	uint32_t sdev_cnt;
+	struct lpfc_device_data *device_data;
+	unsigned long flags;
+	struct lpfc_name target_wwpn;
 
 	if (!rport || fc_remote_port_chkready(rport))
 		return -ENXIO;
 
-	sdev->hostdata = rport->dd_data;
+	if (phba->cfg_EnableXLane) {
+
+		/*
+		 * Check to see if the device data structure for the lun
+		 * exists.  If not, create one.
+		 */
+
+		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
+		spin_lock_irqsave(&phba->devicelock, flags);
+		device_data = __lpfc_get_device_data(phba,
+						     &phba->luns,
+						     &vport->fc_portname,
+						     &target_wwpn,
+						     sdev->lun);
+		if (!device_data) {
+			spin_unlock_irqrestore(&phba->devicelock, flags);
+			device_data = lpfc_create_device_data(phba,
+							&vport->fc_portname,
+							&target_wwpn,
+							sdev->lun, true);
+			if (!device_data)
+				return -ENOMEM;
+			spin_lock_irqsave(&phba->devicelock, flags);
+			list_add_tail(&device_data->listentry, &phba->luns);
+		}
+		device_data->rport_data = rport->dd_data;
+		device_data->available = true;
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+		sdev->hostdata = device_data;
+	} else {
+		sdev->hostdata = rport->dd_data;
+	}
 	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
 
 	/*
@@ -5623,11 +5612,344 @@
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
+	unsigned long flags;
+	struct lpfc_device_data *device_data = sdev->hostdata;
+
 	atomic_dec(&phba->sdev_cnt);
+	if ((phba->cfg_EnableXLane) && (device_data)) {
+		spin_lock_irqsave(&phba->devicelock, flags);
+		device_data->available = false;
+		if (!device_data->oas_enabled)
+			lpfc_delete_device_data(phba, device_data);
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+	}
 	sdev->hostdata = NULL;
 	return;
 }
 
+/**
+ * lpfc_create_device_data - creates and initializes device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ * @atomic_create: Flag to indicate if memory should be allocated using the
+ *		  GFP_ATOMIC flag or not.
+ *
+ * This routine creates a device data structure which will contain identifying
+ * information for the device (host wwpn, target wwpn, lun), state of OAS,
+ * whether or not the corresponding lun is available to the system,
+ * and pointer to the rport data.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+			struct lpfc_name *target_wwpn, uint64_t lun,
+			bool atomic_create)
+{
+
+	struct lpfc_device_data *lun_info;
+	int memory_flags;
+
+	if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
+	    !(phba->cfg_EnableXLane))
+		return NULL;
+
+	/* Attempt to create the device data to contain lun info */
+
+	if (atomic_create)
+		memory_flags = GFP_ATOMIC;
+	else
+		memory_flags = GFP_KERNEL;
+	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
+	if (!lun_info)
+		return NULL;
+	INIT_LIST_HEAD(&lun_info->listentry);
+	lun_info->rport_data  = NULL;
+	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
+	       sizeof(struct lpfc_name));
+	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
+	       sizeof(struct lpfc_name));
+	lun_info->device_id.lun = lun;
+	lun_info->oas_enabled = false;
+	lun_info->available = false;
+	return lun_info;
+}
+
+/**
+ * lpfc_delete_device_data - frees a device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @lun_info: Pointer to device data structure to free.
+ *
+ * This routine frees the previously allocated device data structure passed.
+ *
+ **/
+void
+lpfc_delete_device_data(struct lpfc_hba *phba,
+			struct lpfc_device_data *lun_info)
+{
+
+	if (unlikely(!phba) || !lun_info  ||
+	    !(phba->cfg_EnableXLane))
+		return;
+
+	if (!list_empty(&lun_info->listentry))
+		list_del(&lun_info->listentry);
+	mempool_free(lun_info, phba->device_data_mem_pool);
+	return;
+}
+
+/**
+ * __lpfc_get_device_data - returns the device data for the specified lun
+ * @phba: Pointer to host bus adapter structure.
+ * @list: Pointer to the list to search.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ *
+ * This routine searches the list passed for the specified lun's device data.
+ * This function does not take any locks; it is the caller's responsibility
+ * to ensure the proper lock is held before calling the function.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
+		       struct lpfc_name *vport_wwpn,
+		       struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+	struct lpfc_device_data *lun_info;
+
+	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
+	    !phba->cfg_EnableXLane)
+		return NULL;
+
+	/* Check to see if the lun is already enabled for OAS. */
+
+	list_for_each_entry(lun_info, list, listentry) {
+		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+			    sizeof(struct lpfc_name)) == 0) &&
+		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+			    sizeof(struct lpfc_name)) == 0) &&
+		    (lun_info->device_id.lun == lun))
+			return lun_info;
+	}
+
+	return NULL;
+}
+
+/**
+ * lpfc_find_next_oas_lun - searches for the next oas lun
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @starting_lun: Pointer to the lun to start searching for
+ * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
+ * @found_target_wwpn: Pointer to the found lun's target wwpn information
+ * @found_lun: Pointer to the found lun.
+ * @found_lun_status: Pointer to status of the found lun.
+ *
+ * This routine searches the luns list for the specified lun
+ * or the first lun for the vport/target.  If the vport wwpn contains
+ * a zero value then a specific vport is not specified. In this case
+ * any vport which contains the lun will be considered a match.  If the
+ * target wwpn contains a zero value then a specific target is not specified.
+ * In this case any target which contains the lun will be considered a
+ * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
+ * are returned.  The function will also return the next lun if available.
+ * If no next lun is found, the starting_lun parameter will be set to
+ * NO_MORE_OAS_LUN.
+ *
+ * Return codes:
+ *   true - lun found
+ *   false - lun not found
+ **/
+bool
+lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
+		       struct lpfc_name *found_vport_wwpn,
+		       struct lpfc_name *found_target_wwpn,
+		       uint64_t *found_lun,
+		       uint32_t *found_lun_status)
+{
+
+	unsigned long flags;
+	struct lpfc_device_data *lun_info;
+	struct lpfc_device_id *device_id;
+	uint64_t lun;
+	bool found = false;
+
+	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+	    !starting_lun || !found_vport_wwpn ||
+	    !found_target_wwpn || !found_lun || !found_lun_status ||
+	    (*starting_lun == NO_MORE_OAS_LUN) ||
+	    !phba->cfg_EnableXLane)
+		return false;
+
+	lun = *starting_lun;
+	*found_lun = NO_MORE_OAS_LUN;
+	*starting_lun = NO_MORE_OAS_LUN;
+
+	/* Search for the lun, or the lun closest in value */
+
+	spin_lock_irqsave(&phba->devicelock, flags);
+	list_for_each_entry(lun_info, &phba->luns, listentry) {
+		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
+		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+			    sizeof(struct lpfc_name)) == 0)) &&
+		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
+		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+			    sizeof(struct lpfc_name)) == 0)) &&
+		    (lun_info->oas_enabled)) {
+			device_id = &lun_info->device_id;
+			if ((!found) &&
+			    ((lun == FIND_FIRST_OAS_LUN) ||
+			     (device_id->lun == lun))) {
+				*found_lun = device_id->lun;
+				memcpy(found_vport_wwpn,
+				       &device_id->vport_wwpn,
+				       sizeof(struct lpfc_name));
+				memcpy(found_target_wwpn,
+				       &device_id->target_wwpn,
+				       sizeof(struct lpfc_name));
+				if (lun_info->available)
+					*found_lun_status =
+						OAS_LUN_STATUS_EXISTS;
+				else
+					*found_lun_status = 0;
+				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
+					memset(vport_wwpn, 0x0,
+					       sizeof(struct lpfc_name));
+				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
+					memset(target_wwpn, 0x0,
+					       sizeof(struct lpfc_name));
+				found = true;
+			} else if (found) {
+				*starting_lun = device_id->lun;
+				memcpy(vport_wwpn, &device_id->vport_wwpn,
+				       sizeof(struct lpfc_name));
+				memcpy(target_wwpn, &device_id->target_wwpn,
+				       sizeof(struct lpfc_name));
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&phba->devicelock, flags);
+	return found;
+}
+
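
Because the routine reports the current match through the found_* parameters
and advances *starting_lun to the next candidate (or NO_MORE_OAS_LUN), a
caller can walk every OAS-enabled lun with a simple cursor loop.  A minimal
sketch, assuming OAS (cfg_EnableXLane) is enabled, the OAS_FIND_ANY_VPORT and
OAS_FIND_ANY_TARGET wildcard flags are set in cfg_oas_flags, and both wwpns
start zeroed to match any vport/target:

	struct lpfc_name vport_wwpn, target_wwpn, fnd_vport, fnd_target;
	uint64_t lun = FIND_FIRST_OAS_LUN;
	uint64_t found_lun;
	uint32_t status;

	memset(&vport_wwpn, 0, sizeof(vport_wwpn));	/* any vport */
	memset(&target_wwpn, 0, sizeof(target_wwpn));	/* any target */

	while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn, &lun,
				      &fnd_vport, &fnd_target, &found_lun,
				      &status)) {
		/* found_lun/fnd_vport/fnd_target describe this OAS lun;
		 * 'lun' now holds the next cursor, or NO_MORE_OAS_LUN,
		 * which makes the next call return false and end the loop.
		 */
	}
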
+/**
+ * lpfc_enable_oas_lun - enables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine enables a lun for OAS operations.  The routine does so as
+ * follows:
+ *
+ *   1) Checks to see if the device data for the lun has been created.
+ *   2) If found, sets the OAS enabled flag if not set and returns.
+ *   3) Otherwise, creates a device data structure.
+ *   4) If successfully created, marks the device data as belonging to an OAS
+ *   lun, marks the lun as not available, and adds it to the list of luns.
+ *
+ * Return codes:
+ *   false - Error
+ *   true - Success
+ **/
+bool
+lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+		    struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+	struct lpfc_device_data *lun_info;
+	unsigned long flags;
+
+	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+	    !phba->cfg_EnableXLane)
+		return false;
+
+	spin_lock_irqsave(&phba->devicelock, flags);
+
+	/* Check to see if the device data for the lun has been created */
+	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
+					  target_wwpn, lun);
+	if (lun_info) {
+		if (!lun_info->oas_enabled)
+			lun_info->oas_enabled = true;
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+		return true;
+	}
+
+	/* Create a lun info structure and add it to the list of luns */
+	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
+					   false);
+	if (lun_info) {
+		lun_info->oas_enabled = true;
+		lun_info->available = false;
+		list_add_tail(&lun_info->listentry, &phba->luns);
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+		return true;
+	}
+	spin_unlock_irqrestore(&phba->devicelock, flags);
+	return false;
+}
+
+/**
+ * lpfc_disable_oas_lun - disables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine disables a lun for OAS operations.  The routine does so as
+ * follows:
+ *
+ *   1) Checks to see if the device data for the lun is created.
+ *   2) If present, clears the flag indicating this lun is for OAS.
+ *   3) If the lun is not available to the system, the device data is
+ *   freed.
+ *
+ * Return codes:
+ *   false - Error
+ *   true - Success
+ **/
+bool
+lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+		     struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+	struct lpfc_device_data *lun_info;
+	unsigned long flags;
+
+	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+	    !phba->cfg_EnableXLane)
+		return false;
+
+	spin_lock_irqsave(&phba->devicelock, flags);
+
+	/* Check to see if the lun is available. */
+	lun_info = __lpfc_get_device_data(phba,
+					  &phba->luns, vport_wwpn,
+					  target_wwpn, lun);
+	if (lun_info) {
+		lun_info->oas_enabled = false;
+		if (!lun_info->available)
+			lpfc_delete_device_data(phba, lun_info);
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+		return true;
+	}
+
+	spin_unlock_irqrestore(&phba->devicelock, flags);
+	return false;
+}
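
Together, the enable/disable routines form a small API keyed by
(vport wwpn, target wwpn, lun).  A minimal sketch of toggling OAS for a
single lun, with hypothetical wwpn values:

	struct lpfc_name vport_wwpn, target_wwpn;
	uint64_t lun = 3;	/* hypothetical lun number */

	u64_to_wwn(0x10000000c9000001ULL, vport_wwpn.u.wwn);	/* hypothetical */
	u64_to_wwn(0x20000000c9000002ULL, target_wwpn.u.wwn);	/* hypothetical */

	if (lpfc_enable_oas_lun(phba, &vport_wwpn, &target_wwpn, lun)) {
		/* FCP commands to this lun now have LPFC_IO_OAS set and are
		 * routed to the OAS work queue.
		 */
	}

	/* Later: turn OAS back off; the tracking entry is freed if the
	 * scsi device is not currently available.
	 */
	lpfc_disable_oas_lun(phba, &vport_wwpn, &target_wwpn, lun);
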
 
 struct scsi_host_template lpfc_template = {
 	.module			= THIS_MODULE,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 852ff7d..0120bfc 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -41,6 +41,20 @@
 	struct lpfc_nodelist *pnode;	/* Pointer to the node structure. */
 };
 
+struct lpfc_device_id {
+	struct lpfc_name vport_wwpn;
+	struct lpfc_name target_wwpn;
+	uint64_t lun;
+};
+
+struct lpfc_device_data {
+	struct list_head listentry;
+	struct lpfc_rport_data *rport_data;
+	struct lpfc_device_id device_id;
+	bool oas_enabled;
+	bool available;
+};
+
 struct fcp_rsp {
 	uint32_t rspRsvd1;	/* FC Word 0, byte 0:3 */
 	uint32_t rspRsvd2;	/* FC Word 1, byte 0:3 */
@@ -166,3 +180,7 @@
 #define LPFC_SCSI_DMA_EXT_SIZE 264
 #define LPFC_BPL_SIZE          1024
 #define MDAC_DIRECT_CMD                  0x22
+
+#define FIND_FIRST_OAS_LUN		 0
+#define NO_MORE_OAS_LUN			-1
+#define NOT_OAS_ENABLED_LUN		NO_MORE_OAS_LUN
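
Note that NO_MORE_OAS_LUN is defined as -1 but is stored in and compared
against uint64_t lun variables; the assignment and the comparison both
convert the constant to unsigned the same way, so the sentinel behaves
consistently.  A short illustration:

	uint64_t lun = NO_MORE_OAS_LUN;		/* -1 becomes 0xFFFFFFFFFFFFFFFF */
	bool done = (lun == NO_MORE_OAS_LUN);	/* -1 converts identically here,
						 * so done is true */
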
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8f580fd..6bb51f8e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -635,7 +635,7 @@
 	if (!ndlp)
 		goto out;
 
-	if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
+	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
 		rrq->send_rrq = 0;
 		rrq->xritag = 0;
 		rrq->rrq_stop_time = 0;
@@ -678,7 +678,8 @@
 			next_time = rrq->rrq_stop_time;
 	}
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
-	if (!list_empty(&phba->active_rrq_list))
+	if ((!list_empty(&phba->active_rrq_list)) &&
+	    (!(phba->pport->load_flag & FC_UNLOADING)))
 		mod_timer(&phba->rrq_tmr, next_time);
 	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
 		list_del(&rrq->list);
@@ -792,7 +793,9 @@
 		list_del(&rrq->list);
 		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 	}
-	if (!list_empty(&phba->active_rrq_list))
+	if ((!list_empty(&phba->active_rrq_list)) &&
+	    (!(phba->pport->load_flag & FC_UNLOADING)))
 		mod_timer(&phba->rrq_tmr, next_time);
 }
 
@@ -813,7 +816,9 @@
 {
 	if (!ndlp)
 		return 0;
-	if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+	if (!ndlp->active_rrqs_xri_bitmap)
+		return 0;
+	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
 			return 1;
 	else
 		return 0;
@@ -863,7 +868,10 @@
 	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
 		goto out;
 
-	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+	if (!ndlp->active_rrqs_xri_bitmap)
+		goto out;
+
+	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
 		goto out;
 
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -1318,7 +1326,8 @@
 
 	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
-	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
+	   (!(piocb->vport->load_flag & FC_UNLOADING))) {
 		if (!piocb->vport)
 			BUG();
 		else
@@ -4971,12 +4980,19 @@
 					     LPFC_QUEUE_REARM);
 		} while (++fcp_eqidx < phba->cfg_fcp_io_channel);
 	}
+
+	if (phba->cfg_EnableXLane)
+		lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
+
 	if (phba->sli4_hba.hba_eq) {
 		for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
 		     fcp_eqidx++)
 			lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
 					     LPFC_QUEUE_REARM);
 	}
+
+	if (phba->cfg_fof)
+		lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
 }
 
 /**
@@ -8032,7 +8048,8 @@
 	struct lpfc_vector_map_info *cpup;
 	int chann, cpu;
 
-	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
+	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
+	    && phba->cfg_fcp_io_channel > 1) {
 		cpu = smp_processor_id();
 		if (cpu < phba->sli4_hba.num_present_cpu) {
 			cpup = phba->sli4_hba.cpu_map;
@@ -8250,6 +8267,14 @@
 		bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
 		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
 		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+		if (iocbq->iocb_flag & LPFC_IO_OAS) {
+			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
+			if (phba->cfg_XLanePriority) {
+				bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+				       (phba->cfg_XLanePriority << 1));
+			}
+		}
 		break;
 	case CMD_FCP_IREAD64_CR:
 		/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8271,6 +8296,14 @@
 		bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
 		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
 		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+		if (iocbq->iocb_flag & LPFC_IO_OAS) {
+			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
+			if (phba->cfg_XLanePriority) {
+				bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
+				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
+				       (phba->cfg_XLanePriority << 1));
+			}
+		}
 		break;
 	case CMD_FCP_ICMND64_CR:
 		/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8291,6 +8324,14 @@
 		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
 		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
 		       iocbq->iocb.ulpFCP2Rcvy);
+		if (iocbq->iocb_flag & LPFC_IO_OAS) {
+			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
+			if (phba->cfg_XLanePriority) {
+				bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
+				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
+				       (phba->cfg_XLanePriority << 1));
+			}
+		}
 		break;
 	case CMD_GEN_REQUEST64_CR:
 		/* For this command calculate the xmit length of the
@@ -8523,6 +8564,7 @@
 {
 	struct lpfc_sglq *sglq;
 	union lpfc_wqe wqe;
+	struct lpfc_queue *wq;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
 
 	if (piocb->sli4_xritag == NO_XRI) {
@@ -8575,11 +8617,14 @@
 		return IOCB_ERROR;
 
 	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
-		(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
-		if (unlikely(!phba->sli4_hba.fcp_wq))
-			return IOCB_ERROR;
-		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
-				     &wqe))
+	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+		if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
+			LPFC_IO_OAS))) {
+			wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
+		} else {
+			wq = phba->sli4_hba.oas_wq;
+		}
+		if (lpfc_sli4_wq_put(wq, &wqe))
 			return IOCB_ERROR;
 	} else {
 		if (unlikely(!phba->sli4_hba.els_wq))
@@ -8669,12 +8714,20 @@
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		if (piocb->iocb_flag &  LPFC_IO_FCP) {
-			if (unlikely(!phba->sli4_hba.fcp_wq))
-				return IOCB_ERROR;
-			idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
-			piocb->fcp_wqidx = idx;
-			ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
-
+			if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
+				LPFC_IO_OAS))) {
+				if (unlikely(!phba->sli4_hba.fcp_wq))
+					return IOCB_ERROR;
+				idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+				piocb->fcp_wqidx = idx;
+				ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+			} else {
+				if (unlikely(!phba->sli4_hba.oas_wq))
+					return IOCB_ERROR;
+				idx = 0;
+				piocb->fcp_wqidx = 0;
+				ring_number = LPFC_FCP_OAS_RING;
+			}
 			pring = &phba->sli.ring[ring_number];
 			spin_lock_irqsave(&pring->ring_lock, iflags);
 			rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
@@ -12132,6 +12185,175 @@
 	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 }
 
+
+/**
+ * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
+ *			     entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the Flash Optimized Fabric
+ * event queue.  It checks the MajorCode and MinorCode to determine whether
+ * this is a completion event on a completion queue; if not, an error is
+ * logged and the routine returns.  Otherwise, it gets the corresponding
+ * completion queue, processes all the entries on it, rearms the completion
+ * queue, and then returns.
+ **/
+static void
+lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+	struct lpfc_queue *cq;
+	struct lpfc_cqe *cqe;
+	bool workposted = false;
+	uint16_t cqid;
+	int ecount = 0;
+
+	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"9147 Not a valid completion "
+				"event: majorcode=x%x, minorcode=x%x\n",
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
+		return;
+	}
+
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+	/* Next check for OAS */
+	cq = phba->sli4_hba.oas_cq;
+	if (unlikely(!cq)) {
+		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"9148 OAS completion queue "
+					"does not exist\n");
+		return;
+	}
+
+	if (unlikely(cqid != cq->queue_id)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"9149 Miss-matched fast-path compl "
+				"queue id: eqcqid=%d, fcpcqid=%d\n",
+				cqid, cq->queue_id);
+		return;
+	}
+
+	/* Process all the entries to the OAS CQ */
+	while ((cqe = lpfc_sli4_cq_get(cq))) {
+		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+		if (!(++ecount % cq->entry_repost))
+			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+	}
+
+	/* Track the max number of CQEs processed in 1 EQ */
+	if (ecount > cq->CQ_max_cqe)
+		cq->CQ_max_cqe = ecount;
+
+	/* Catch the no cq entry condition */
+	if (unlikely(ecount == 0))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"9153 No entry from fast-path completion "
+				"queue fcpcqid=%d\n", cq->queue_id);
+
+	/* In any case, flush and re-arm the CQ */
+	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+	/* wake up worker thread if there are works to be done */
+	if (workposted)
+		lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
+ * IOCB ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. Flash Optimized Fabric ring events are handled in
+ * the interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that
+ * the EQ and CQ are mapped one-to-one, so the EQ index is equal to the
+ * CQ index.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba *phba;
+	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+	struct lpfc_queue *eq;
+	struct lpfc_eqe *eqe;
+	unsigned long iflag;
+	int ecount = 0;
+	uint32_t eqidx;
+
+	/* Get the driver's phba structure from the dev_id */
+	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+	phba = fcp_eq_hdl->phba;
+	eqidx = fcp_eq_hdl->idx;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/* Get to the EQ struct associated with this vector */
+	eq = phba->sli4_hba.fof_eq;
+	if (unlikely(!eq))
+		return IRQ_NONE;
+
+	/* Check device state for handling interrupt */
+	if (unlikely(lpfc_intr_state_check(phba))) {
+		eq->EQ_badstate++;
+		/* Check again for link_state with lock held */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (phba->link_state < LPFC_LINK_DOWN)
+			/* Flush, clear interrupt, and rearm the EQ */
+			lpfc_sli4_eq_flush(phba, eq);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return IRQ_NONE;
+	}
+
+	/*
+	 * Process all the events on the FCP fast-path EQ
+	 */
+	while ((eqe = lpfc_sli4_eq_get(eq))) {
+		lpfc_sli4_fof_handle_eqe(phba, eqe);
+		if (!(++ecount % eq->entry_repost))
+			lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
+		eq->EQ_processed++;
+	}
+
+	/* Track the max number of EQEs processed in 1 intr */
+	if (ecount > eq->EQ_max_eqe)
+		eq->EQ_max_eqe = ecount;
+
+	if (unlikely(ecount == 0)) {
+		eq->EQ_no_entry++;
+
+		if (phba->intr_type == MSIX)
+			/* MSI-X vector is dedicated, so no EQE is unexpected */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"9145 MSI-X interrupt with no EQE\n");
+		else {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"9146 ISR interrupt with no EQE\n");
+			/* A non-MSI-X interrupt line may be shared */
+			return IRQ_NONE;
+		}
+	}
+	/* Always clear and re-arm the fast-path EQ */
+	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+	return IRQ_HANDLED;
+}
+
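+/*
+ * Illustrative sketch (editorial, not part of this patch): in MSI-X mode
+ * the FOF handler above is expected to be wired to the extra vector
+ * reserved via LPFC_FOF_IO_CHAN_NUM, along the lines of:
+ *
+ *	rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+ *			 &lpfc_sli4_fof_intr_handler, 0,
+ *			 (char *)&phba->sli4_hba.handler_name[index],
+ *			 &phba->sli4_hba.fcp_eq_hdl[0]);
+ *
+ * In INTx/MSI mode it is instead invoked from lpfc_sli4_intr_handler()
+ * below whenever cfg_fof is set.
+ */
+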
 /**
  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
  * @irq: Interrupt number.
@@ -12287,6 +12509,13 @@
 			hba_handled |= true;
 	}
 
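+	/*
+	 * In INTx/MSI mode the FOF EQ shares the device interrupt, so the
+	 * FOF handler is polled here after the fast-path FCP EQs have been
+	 * serviced (see lpfc_sli4_fof_intr_handler above).
+	 */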
+	if (phba->cfg_fof) {
+		hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
+					&phba->sli4_hba.fcp_eq_hdl[0]);
+		if (hba_irq_rc == IRQ_HANDLED)
+			hba_handled |= true;
+	}
+
 	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
 } /* lpfc_sli4_intr_handler */
 
@@ -16544,7 +16773,7 @@
 {
 	LIST_HEAD(completions);
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-	struct lpfc_iocbq *piocbq = 0;
+	struct lpfc_iocbq *piocbq = NULL;
 	unsigned long iflags = 0;
 	char *fail_msg = NULL;
 	struct lpfc_sglq *sglq;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 6b0f247..6f04080 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -78,6 +78,8 @@
 #define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
 #define LPFC_FIP_ELS_ID_SHIFT	14
 
+#define LPFC_IO_OAS		0x10000 /* OAS FCP IO */
+
 	uint32_t drvrTimeout;	/* driver timeout in seconds */
 	uint32_t fcp_wqidx;	/* index to FCP work queue */
 	struct lpfc_vport *vport;/* virtual port pointer */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 298c8cd..9b8cda8 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -39,6 +39,10 @@
 #define LPFC_FCP_IO_CHAN_MIN       1
 #define LPFC_FCP_IO_CHAN_MAX       16
 
+/* Number of channels used for Flash Optimized Fabric (FOF) operations */
+
+#define LPFC_FOF_IO_CHAN_NUM       1
+
 /*
  * Provide the default FCF Record attributes used by the driver
  * when nonFIP mode is configured and there is no other default
@@ -399,6 +403,7 @@
 	uint32_t if_page_sz;
 	uint32_t rq_db_window;
 	uint32_t loopbk_scope;
+	uint32_t oas_supported;
 	uint32_t eq_pages_max;
 	uint32_t eqe_size;
 	uint32_t cq_pages_max;
@@ -439,6 +444,8 @@
 	uint8_t lnk_no;
 };
 
+#define LPFC_SLI4_HANDLER_CNT		(LPFC_FCP_IO_CHAN_MAX+ \
+					 LPFC_FOF_IO_CHAN_NUM)
 #define LPFC_SLI4_HANDLER_NAME_SZ	16
 
 /* Used for IRQ vector to CPU mapping */
@@ -507,7 +514,7 @@
 	struct lpfc_register sli_intf;
 	struct lpfc_pc_sli4_params pc_sli4_params;
 	struct msix_entry *msix_entries;
-	uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
+	uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
 	struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
 
 	/* Pointers to the constructed SLI4 queues */
@@ -527,6 +534,17 @@
 	uint32_t ulp0_mode;	/* ULP0 protocol mode */
 	uint32_t ulp1_mode;	/* ULP1 protocol mode */
 
+	struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
+
+	/* Optimized Access Storage specific queues/structures */
+
+	struct lpfc_queue *oas_cq; /* OAS completion queue */
+	struct lpfc_queue *oas_wq; /* OAS Work queue */
+	struct lpfc_sli_ring *oas_ring;
+	uint64_t oas_next_lun;
+	uint8_t oas_next_tgt_wwpn[8];
+	uint8_t oas_next_vpt_wwpn[8];
+
 	/* Setup information for various queue parameters */
 	int eq_esize;
 	int eq_ecount;
@@ -589,6 +607,7 @@
 	struct lpfc_vector_map_info *cpu_map;
 	uint16_t num_online_cpu;
 	uint16_t num_present_cpu;
+	uint16_t curr_disp_cpu;
 };
 
 enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e3094c4..e32cbec 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.43"
+#define LPFC_DRIVER_VERSION "8.3.45"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 816db12..b777051 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -531,13 +531,6 @@
 	int	target = 0;
 	int	ldrv_num = 0;   /* logical drive number */
 
-
-	/*
-	 * filter the internal and ioctl commands
-	 */
-	if((cmd->cmnd[0] == MEGA_INTERNAL_CMD))
-		return (scb_t *)cmd->host_scribble;
-
 	/*
 	 * We know what channels our logical drives are on - mega_find_card()
 	 */
@@ -1439,19 +1432,22 @@
 
 		cmdid = completed[i];
 
-		if( cmdid == CMDID_INT_CMDS ) { /* internal command */
+		/*
+		 * Only free SCBs for the commands coming down from the
+		 * mid-layer, not for those that were issued internally.
+		 *
+		 * For an internal command, restore the status returned by the
+		 * firmware so that the user can interpret it.
+		 */
+		if (cmdid == CMDID_INT_CMDS) {
 			scb = &adapter->int_scb;
-			cmd = scb->cmd;
-			mbox = (mbox_t *)scb->raw_mbox;
 
-			/*
-			 * Internal command interface do not fire the extended
-			 * passthru or 64-bit passthru
-			 */
-			pthru = scb->pthru;
+			list_del_init(&scb->list);
+			scb->state = SCB_FREE;
 
-		}
-		else {
+			adapter->int_status = status;
+			complete(&adapter->int_waitq);
+		} else {
 			scb = &adapter->scb_list[cmdid];
 
 			/*
@@ -1640,25 +1636,7 @@
 				cmd->result |= (DID_BAD_TARGET << 16)|status;
 		}
 
-		/*
-		 * Only free SCBs for the commands coming down from the
-		 * mid-layer, not for which were issued internally
-		 *
-		 * For internal command, restore the status returned by the
-		 * firmware so that user can interpret it.
-		 */
-		if( cmdid == CMDID_INT_CMDS ) { /* internal command */
-			cmd->result = status;
-
-			/*
-			 * Remove the internal command from the pending list
-			 */
-			list_del_init(&scb->list);
-			scb->state = SCB_FREE;
-		}
-		else {
-			mega_free_scb(adapter, scb);
-		}
+		mega_free_scb(adapter, scb);
 
 		/* Add Scsi_Command to end of completed queue */
 		list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
@@ -4133,23 +4111,15 @@
  * The last argument is the address of the passthru structure if the command
  * to be fired is a passthru command
  *
- * lockscope specifies whether the caller has already acquired the lock. Of
- * course, the caller must know which lock we are talking about.
- *
  * Note: parameter 'pthru' is null for non-passthru commands.
  */
 static int
 mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
 {
-	Scsi_Cmnd	*scmd;
-	struct	scsi_device *sdev;
+	unsigned long flags;
 	scb_t	*scb;
 	int	rval;
 
-	scmd = scsi_allocate_command(GFP_KERNEL);
-	if (!scmd)
-		return -ENOMEM;
-
 	/*
 	 * The internal commands share one command id and hence are
 	 * serialized. This is so because we want to reserve maximum number of
@@ -4160,73 +4130,45 @@
 	scb = &adapter->int_scb;
 	memset(scb, 0, sizeof(scb_t));
 
-	sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
-	scmd->device = sdev;
-
-	memset(adapter->int_cdb, 0, sizeof(adapter->int_cdb));
-	scmd->cmnd = adapter->int_cdb;
-	scmd->device->host = adapter->host;
-	scmd->host_scribble = (void *)scb;
-	scmd->cmnd[0] = MEGA_INTERNAL_CMD;
-
-	scb->state |= SCB_ACTIVE;
-	scb->cmd = scmd;
+	scb->idx = CMDID_INT_CMDS;
+	scb->state |= SCB_ACTIVE | SCB_PENDQ;
 
 	memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
 
 	/*
 	 * Is it a passthru command
 	 */
-	if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
-
+	if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
 		scb->pthru = pthru;
-	}
 
-	scb->idx = CMDID_INT_CMDS;
-
-	megaraid_queue_lck(scmd, mega_internal_done);
+	spin_lock_irqsave(&adapter->lock, flags);
+	list_add_tail(&scb->list, &adapter->pending_list);
+	/*
+	 * Check if the HBA is in quiescent state, e.g., during a
+	 * delete logical drive operation. If it is, don't run
+	 * the pending_list.
+	 */
+	if (atomic_read(&adapter->quiescent) == 0)
+		mega_runpendq(adapter);
+	spin_unlock_irqrestore(&adapter->lock, flags);
 
 	wait_for_completion(&adapter->int_waitq);
 
-	rval = scmd->result;
-	mc->status = scmd->result;
-	kfree(sdev);
+	mc->status = rval = adapter->int_status;
 
 	/*
 	 * Print a debug message for all failed commands. Applications can use
 	 * this information.
 	 */
-	if( scmd->result && trace_level ) {
+	if (rval && trace_level) {
 		printk("megaraid: cmd [%x, %x, %x] status:[%x]\n",
-			mc->cmd, mc->opcode, mc->subopcode, scmd->result);
+			mc->cmd, mc->opcode, mc->subopcode, rval);
 	}
 
 	mutex_unlock(&adapter->int_mtx);
-
-	scsi_free_command(GFP_KERNEL, scmd);
-
 	return rval;
 }
 
-
-/**
- * mega_internal_done()
- * @scmd - internal scsi command
- *
- * Callback routine for internal commands.
- */
-static void
-mega_internal_done(Scsi_Cmnd *scmd)
-{
-	adapter_t	*adapter;
-
-	adapter = (adapter_t *)scmd->device->host->hostdata;
-
-	complete(&adapter->int_waitq);
-
-}
-
-
 static struct scsi_host_template megaraid_template = {
 	.module				= THIS_MODULE,
 	.name				= "MegaRAID",
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 4d0ce4e..508d65e 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -853,10 +853,10 @@
 
 	u8	sglen;	/* f/w supported scatter-gather list length */
 
-	unsigned char int_cdb[MAX_COMMAND_SIZE];
 	scb_t			int_scb;
 	struct mutex		int_mtx;	/* To synchronize the internal
 						commands */
+	int			int_status;	/* status of internal cmd */
 	struct completion	int_waitq;	/* wait queue for internal
 						 cmds */
 
@@ -1004,7 +1004,6 @@
 static int mega_do_del_logdrv(adapter_t *, int);
 static void mega_get_max_sgl(adapter_t *);
 static int mega_internal_command(adapter_t *, megacmd_t *, mega_passthru *);
-static void mega_internal_done(Scsi_Cmnd *);
 static int mega_support_cluster(adapter_t *);
 #endif
 
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index dfffd0f..a706927 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -486,6 +486,8 @@
 
 	pthru32->dataxferaddr	= kioc->buf_paddr;
 	if (kioc->data_dir & UIOC_WR) {
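+		/*
+		 * Sanity-check the user-supplied transfer length against the
+		 * kernel bounce buffer so the copy_from_user() below cannot
+		 * overrun kioc->buf_vaddr.
+		 */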
+		if (pthru32->dataxferlen > kioc->xferlen)
+			return -EINVAL;
 		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
 						pthru32->dataxferlen)) {
 			return (-EFAULT);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 34452ea..32166c2 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"06.700.06.00-rc1"
-#define MEGASAS_RELDATE				"Aug. 31, 2013"
-#define MEGASAS_EXT_VERSION			"Sat. Aug. 31 17:00:00 PDT 2013"
+#define MEGASAS_VERSION				"06.803.01.00-rc1"
+#define MEGASAS_RELDATE				"Mar. 10, 2014"
+#define MEGASAS_EXT_VERSION			"Mon. Mar. 10 17:00:00 PDT 2014"
 
 /*
  * Device IDs
@@ -48,6 +48,7 @@
 #define	PCI_DEVICE_ID_LSI_SAS0073SKINNY		0x0073
 #define	PCI_DEVICE_ID_LSI_SAS0071SKINNY		0x0071
 #define	PCI_DEVICE_ID_LSI_FUSION		0x005b
+#define PCI_DEVICE_ID_LSI_PLASMA		0x002f
 #define PCI_DEVICE_ID_LSI_INVADER		0x005d
 #define PCI_DEVICE_ID_LSI_FURY			0x005f
 
@@ -559,7 +560,8 @@
 		u8 PCIE:1;
 		u8 iSCSI:1;
 		u8 SAS_3G:1;
-		u8 reserved_0:4;
+		u8 SRIOV:1;
+		u8 reserved_0:3;
 		u8 reserved_1[6];
 		u8 port_count;
 		u64 port_addr[8];
@@ -839,7 +841,12 @@
 
 	struct {                                /*7A4h */
 #if   defined(__BIG_ENDIAN_BITFIELD)
-		u32     reserved:11;
+		u32     reserved:5;
+		u32	activePassive:2;
+		u32	supportConfigAutoBalance:1;
+		u32	mpio:1;
+		u32	supportDataLDonSSCArray:1;
+		u32	supportPointInTimeProgress:1;
 		u32     supportUnevenSpans:1;
 		u32     dedicatedHotSparesLimited:1;
 		u32     headlessMode:1;
@@ -886,7 +893,12 @@
 
 
 		u32     supportUnevenSpans:1;
-		u32     reserved:11;
+		u32	supportPointInTimeProgress:1;
+		u32	supportDataLDonSSCArray:1;
+		u32	mpio:1;
+		u32	supportConfigAutoBalance:1;
+		u32	activePassive:2;
+		u32     reserved:5;
 #endif
 	} adapterOperations2;
 
@@ -914,8 +926,14 @@
 	} cluster;
 
 	char clusterId[16];                     /*7D4h */
+	struct {
+		u8  maxVFsSupported;            /*0x7E4*/
+		u8  numVFsEnabled;              /*0x7E5*/
+		u8  requestorId;                /*0x7E6 0:PF, 1:VF1, 2:VF2*/
+		u8  reserved;                   /*0x7E7*/
+	} iov;
 
-	u8          pad[0x800-0x7E4];           /*7E4 */
+	u8          pad[0x800-0x7E8];           /*0x7E8 pad to 2k */
 } __packed;
 
 /*
@@ -986,7 +1004,9 @@
 
 #define MFI_OB_INTR_STATUS_MASK			0x00000002
 #define MFI_POLL_TIMEOUT_SECS			60
-
+#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF	(5 * HZ)
+#define MEGASAS_OCR_SETTLE_TIME_VF		(1000 * 30)
+#define MEGASAS_ROUTINE_WAIT_TIME_VF		300
 #define MFI_REPLY_1078_MESSAGE_INTERRUPT	0x80000000
 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT	0x00000001
 #define MFI_GEN2_ENABLE_INTERRUPT_MASK		(0x00000001 | 0x00000004)
@@ -1347,9 +1367,15 @@
 union megasas_evt_class_locale {
 
 	struct {
+#ifndef __BIG_ENDIAN_BITFIELD
 		u16 locale;
 		u8 reserved;
 		s8 class;
+#else
+		s8 class;
+		u8 reserved;
+		u16 locale;
+#endif
 	} __attribute__ ((packed)) members;
 
 	u32 word;
@@ -1523,6 +1549,12 @@
 	dma_addr_t producer_h;
 	u32 *consumer;
 	dma_addr_t consumer_h;
+	struct MR_LD_VF_AFFILIATION *vf_affiliation;
+	dma_addr_t vf_affiliation_h;
+	struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111;
+	dma_addr_t vf_affiliation_111_h;
+	struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
+	dma_addr_t hb_host_mem_h;
 
 	u32 *reply_queue;
 	dma_addr_t reply_queue_h;
@@ -1598,10 +1630,73 @@
 	unsigned long bar;
 	long reset_flags;
 	struct mutex reset_mutex;
+	struct timer_list sriov_heartbeat_timer;
+	char skip_heartbeat_timer_del;
+	u8 requestorId;
+	u64 initiator_sas_address;
+	u64 ld_sas_address[64];
+	char PlasmaFW111;
+	char mpio;
 	int throttlequeuedepth;
 	u8 mask_interrupts;
 	u8 is_imr;
 };
+
+struct MR_LD_VF_MAP {
+	u32 size;
+	union MR_LD_REF ref;
+	u8 ldVfCount;
+	u8 reserved[6];
+	u8 policy[1];
+};
+
+struct MR_LD_VF_AFFILIATION {
+	u32 size;
+	u8 ldCount;
+	u8 vfCount;
+	u8 thisVf;
+	u8 reserved[9];
+	struct MR_LD_VF_MAP map[1];
+};
+
+/* Plasma 1.11 FW backward compatibility structures */
+#define IOV_111_OFFSET 0x7CE
+#define MAX_VIRTUAL_FUNCTIONS 8
+
+struct IOV_111 {
+	u8 maxVFsSupported;
+	u8 numVFsEnabled;
+	u8 requestorId;
+	u8 reserved[5];
+};
+
+struct MR_LD_VF_MAP_111 {
+	u8 targetId;
+	u8 reserved[3];
+	u8 policy[MAX_VIRTUAL_FUNCTIONS];
+};
+
+struct MR_LD_VF_AFFILIATION_111 {
+	u8 vdCount;
+	u8 vfCount;
+	u8 thisVf;
+	u8 reserved[5];
+	struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES];
+};
+
+struct MR_CTRL_HB_HOST_MEM {
+	struct {
+		u32 fwCounter;	/* Firmware heart beat counter */
+		struct {
+			u32 debugmode:1; /* 1=Firmware is in debug mode.
+					    Heart beat will not be updated. */
+			u32 reserved:31;
+		} debug;
+		u32 reserved_fw[6];
+		u32 driverCounter; /* Driver heart beat counter.  0x20 */
+		u32 reserved_driver[7];
+	} HB;
+	u8 pad[0x400-0x40];
+};
 
 enum {
 	MEGASAS_HBA_OPERATIONAL			= 0,
@@ -1609,6 +1704,7 @@
 	MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS	= 2,
 	MEGASAS_ADPRESET_SM_OPERATIONAL		= 3,
 	MEGASAS_HW_CRITICAL_ERROR		= 4,
+	MEGASAS_ADPRESET_SM_POLLING		= 5,
 	MEGASAS_ADPRESET_INPROG_SIGN		= 0xDEADDEAD,
 };
 
@@ -1728,7 +1824,7 @@
 		    struct IO_REQUEST_INFO *io_info,
 		    struct RAID_CONTEXT *pRAID_Context,
 		    struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
 struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
 u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
 u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3b7ad10..d84d02c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : 06.700.06.00-rc1
+ *  Version : 06.803.01.00-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
@@ -75,6 +75,10 @@
 module_param(msix_vectors, int, S_IRUGO);
 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
 
+static int allow_vf_ioctls;
+module_param(allow_vf_ioctls, int, S_IRUGO);
+MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
+
 static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
 module_param(throttlequeuedepth, int, S_IRUGO);
 MODULE_PARM_DESC(throttlequeuedepth,
@@ -122,6 +126,8 @@
 	/* xscale IOP */
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
 	/* Fusion */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
+	/* Plasma */
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
 	/* Invader */
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
@@ -132,7 +138,7 @@
 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
 
 static int megasas_mgmt_majorno;
-static struct megasas_mgmt_info megasas_mgmt_info;
+struct megasas_mgmt_info megasas_mgmt_info;
 static struct fasync_struct *megasas_async_queue;
 static DEFINE_MUTEX(megasas_async_queue_mutex);
 
@@ -171,10 +177,15 @@
 int
 megasas_sync_map_info(struct megasas_instance *instance);
 int
-wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+	int seconds);
 void megasas_reset_reply_desc(struct megasas_instance *instance);
-int megasas_reset_fusion(struct Scsi_Host *shost);
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout);
 void megasas_fusion_ocr_wq(struct work_struct *work);
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+					 int initial);
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+			     struct scsi_cmnd *scmd);
 
 void
 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -224,6 +235,7 @@
 	cmd->scmd = NULL;
 	cmd->frame_count = 0;
 	if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+	    (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
 	    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
 	    (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
 	    (reset_devices))
@@ -877,6 +889,7 @@
 int
 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
 {
+	int seconds;
 
 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
 
@@ -891,13 +904,18 @@
 	/*
 	 * Wait for cmd_status to change
 	 */
-	return wait_and_poll(instance, cmd);
+	if (instance->requestorId)
+		seconds = MEGASAS_ROUTINE_WAIT_TIME_VF;
+	else
+		seconds = MFI_POLL_TIMEOUT_SECS;
+	return wait_and_poll(instance, cmd, seconds);
 }
 
 /**
  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
  * @instance:			Adapter soft state
  * @cmd:			Command to be issued
+ * @timeout:			Timeout in seconds
  *
  * This function waits on an event for the command to be returned from ISR.
  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
@@ -905,13 +923,20 @@
  */
 static int
 megasas_issue_blocked_cmd(struct megasas_instance *instance,
-			  struct megasas_cmd *cmd)
+			  struct megasas_cmd *cmd, int timeout)
 {
+	int ret = 0;
+
 	cmd->cmd_status = ENODATA;
 
 	instance->instancet->issue_dcmd(instance, cmd);
-
-	wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA);
+	if (timeout) {
+		ret = wait_event_timeout(instance->int_cmd_wait_q,
+				cmd->cmd_status != ENODATA, timeout * HZ);
+		if (!ret)
+			return 1;
+	} else
+		wait_event(instance->int_cmd_wait_q,
+				cmd->cmd_status != ENODATA);
 
 	return 0;
 }
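+
+/*
+ * Usage note (editorial, not part of this patch): a @timeout of zero keeps
+ * the historical indefinite wait_event(); a non-zero @timeout bounds the
+ * wait and makes the wrapper return 1, e.g.
+ *
+ *	if (megasas_issue_blocked_cmd(instance, cmd, 30))
+ *		dev_err(&instance->pdev->dev,
+ *			"Command timed out from %s\n", __func__);
+ */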
@@ -920,18 +945,20 @@
  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
  * @instance:				Adapter soft state
  * @cmd_to_abort:			Previously issued cmd to be aborted
+ * @timeout:				Timeout in seconds
  *
 * MFI firmware can abort previously issued AEN command (automatic event
  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
  * cmd and waits for return status.
  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
  */
 static int
 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
-				struct megasas_cmd *cmd_to_abort)
+				struct megasas_cmd *cmd_to_abort, int timeout)
 {
 	struct megasas_cmd *cmd;
 	struct megasas_abort_frame *abort_fr;
+	int ret = 0;
 
 	cmd = megasas_get_cmd(instance);
 
@@ -957,10 +984,18 @@
 
 	instance->instancet->issue_dcmd(instance, cmd);
 
-	/*
-	 * Wait for this cmd to complete
-	 */
-	wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF);
+	if (timeout) {
+		ret = wait_event_timeout(instance->abort_cmd_wait_q,
+				cmd->cmd_status != ENODATA, timeout * HZ);
+		if (!ret) {
+			dev_err(&instance->pdev->dev, "Command timed out"
+				" from %s\n", __func__);
+			return 1;
+		}
+	} else
+		wait_event(instance->abort_cmd_wait_q,
+				cmd->cmd_status != ENODATA);
+
 	cmd->sync_cmd = 0;
 
 	megasas_return_cmd(instance, cmd);
@@ -1514,9 +1549,23 @@
 
 	spin_lock_irqsave(&instance->hba_lock, flags);
 
+	/* Check for an mpio path and adjust behavior */
+	if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+		if (megasas_check_mpio_paths(instance, scmd) ==
+		    (DID_RESET << 16)) {
+			spin_unlock_irqrestore(&instance->hba_lock, flags);
+			return SCSI_MLQUEUE_HOST_BUSY;
+		} else {
+			spin_unlock_irqrestore(&instance->hba_lock, flags);
+			scmd->result = DID_NO_CONNECT << 16;
+			done(scmd);
+			return 0;
+		}
+	}
+
 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
-		scmd->result = DID_ERROR << 16;
+		scmd->result = DID_NO_CONNECT << 16;
 		done(scmd);
 		return 0;
 	}
@@ -1641,9 +1690,14 @@
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
 		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
+		/* Flush */
+		readl(&instance->reg_set->doorbell);
+		if (instance->mpio && instance->requestorId)
+			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
 	} else {
 		writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
 	}
@@ -1730,6 +1784,25 @@
 	megasas_check_and_restore_queue_depth(instance);
 }
 
+/**
+ * megasas_start_timer - Initializes a timer object
+ * @instance:		Adapter soft state
+ * @timer:		timer object to be initialized
+ * @fn:			timer function
+ * @interval:		interval after which the timer function is called
+ *
+ */
+void megasas_start_timer(struct megasas_instance *instance,
+			struct timer_list *timer,
+			void *fn, unsigned long interval)
+{
+	init_timer(timer);
+	timer->expires = jiffies + interval;
+	timer->data = (unsigned long)instance;
+	timer->function = fn;
+	add_timer(timer);
+}
+
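+/*
+ * Usage in this patch (see megasas_init_fw() below): the SR-IOV heartbeat
+ * is armed with
+ *
+ *	megasas_start_timer(instance, &instance->sriov_heartbeat_timer,
+ *			    megasas_sriov_heartbeat_handler,
+ *			    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ */
+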
 static void
 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
 
@@ -1752,6 +1825,295 @@
 	process_fw_state_change_wq(&instance->work_init);
 }
 
+/* This function will get the current SR-IOV LD/VF affiliation */
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+	int initial)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
+	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
+	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
+	dma_addr_t new_affiliation_h;
+	dma_addr_t new_affiliation_111_h;
+	int ld, retval = 0;
+	u8 thisVf;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas: megasas_get_ld_vf_"
+		       "affiliation: Failed to get cmd for scsi%d.\n",
+			instance->host->host_no);
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	if (!instance->vf_affiliation && !instance->vf_affiliation_111) {
+		printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
+		       "affiliation for scsi%d.\n", instance->host->host_no);
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	if (initial)
+		if (instance->PlasmaFW111)
+			memset(instance->vf_affiliation_111, 0,
+			       sizeof(struct MR_LD_VF_AFFILIATION_111));
+		else
+			memset(instance->vf_affiliation, 0,
+			       (MAX_LOGICAL_DRIVES + 1) *
+			       sizeof(struct MR_LD_VF_AFFILIATION));
+	else {
+		if (instance->PlasmaFW111)
+			new_affiliation_111 =
+				pci_alloc_consistent(instance->pdev,
+						     sizeof(struct MR_LD_VF_AFFILIATION_111),
+						     &new_affiliation_111_h);
+		else
+			new_affiliation =
+				pci_alloc_consistent(instance->pdev,
+						     (MAX_LOGICAL_DRIVES + 1) *
+						     sizeof(struct MR_LD_VF_AFFILIATION),
+						     &new_affiliation_h);
+		if (!new_affiliation && !new_affiliation_111) {
+			printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
+			       "memory for new affiliation for scsi%d.\n",
+				instance->host->host_no);
+			megasas_return_cmd(instance, cmd);
+			return -ENOMEM;
+		}
+		if (instance->PlasmaFW111)
+			memset(new_affiliation_111, 0,
+			       sizeof(struct MR_LD_VF_AFFILIATION_111));
+		else
+			memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+			       sizeof(struct MR_LD_VF_AFFILIATION));
+	}
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_BOTH;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	if (instance->PlasmaFW111) {
+		dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
+		dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
+	} else {
+		dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
+			sizeof(struct MR_LD_VF_AFFILIATION);
+		dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
+	}
+
+	if (initial) {
+		if (instance->PlasmaFW111)
+			dcmd->sgl.sge32[0].phys_addr =
+			  instance->vf_affiliation_111_h;
+		else
+			dcmd->sgl.sge32[0].phys_addr =
+			  instance->vf_affiliation_h;
+	} else {
+		if (instance->PlasmaFW111)
+			dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
+		else
+			dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
+	}
+	if (instance->PlasmaFW111)
+		dcmd->sgl.sge32[0].length =
+		  sizeof(struct MR_LD_VF_AFFILIATION_111);
+	else
+		dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
+			sizeof(struct MR_LD_VF_AFFILIATION);
+
+	printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+	       "scsi%d\n", instance->host->host_no);
+
+	megasas_issue_blocked_cmd(instance, cmd, 0);
+
+	if (dcmd->cmd_status) {
+		printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
+		       " failed with status 0x%x for scsi%d.\n",
+		       dcmd->cmd_status, instance->host->host_no);
+		retval = 1; /* Do a scan if we couldn't get affiliation */
+		goto out;
+	}
+
+	if (!initial) {
+		if (instance->PlasmaFW111) {
+			if (!new_affiliation_111->vdCount) {
+				printk(KERN_WARNING "megasas: SR-IOV: Got new "
+				       "LD/VF affiliation for passive path "
+				       "for scsi%d.\n",
+					instance->host->host_no);
+				retval = 1;
+				goto out;
+			}
+			thisVf = new_affiliation_111->thisVf;
+			for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
+				if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) {
+					printk(KERN_WARNING "megasas: SR-IOV: "
+					       "Got new LD/VF affiliation "
+					       "for scsi%d.\n",
+						instance->host->host_no);
+					memcpy(instance->vf_affiliation_111,
+					       new_affiliation_111,
+					       sizeof(struct MR_LD_VF_AFFILIATION_111));
+					retval = 1;
+					goto out;
+				}
+		} else {
+			if (!new_affiliation->ldCount) {
+				printk(KERN_WARNING "megasas: SR-IOV: Got new "
+				       "LD/VF affiliation for passive "
+				       "path for scsi%d.\n",
+				       instance->host->host_no);
+				retval = 1;
+				goto out;
+			}
+			newmap = new_affiliation->map;
+			savedmap = instance->vf_affiliation->map;
+			thisVf = new_affiliation->thisVf;
+			for (ld = 0 ; ld < new_affiliation->ldCount; ld++) {
+				if (savedmap->policy[thisVf] !=
+				    newmap->policy[thisVf]) {
+					printk(KERN_WARNING "megasas: SR-IOV: "
+					       "Got new LD/VF affiliation "
+					       "for scsi%d.\n",
+						instance->host->host_no);
+					memcpy(instance->vf_affiliation,
+					       new_affiliation,
+					       new_affiliation->size);
+					retval = 1;
+					goto out;
+				}
+				savedmap = (struct MR_LD_VF_MAP *)
+					((unsigned char *)savedmap +
+					 savedmap->size);
+				newmap = (struct MR_LD_VF_MAP *)
+					((unsigned char *)newmap +
+					 newmap->size);
+			}
+		}
+	}
+out:
+	if (new_affiliation) {
+		if (instance->PlasmaFW111)
+			pci_free_consistent(instance->pdev,
+					    sizeof(struct MR_LD_VF_AFFILIATION_111),
+					    new_affiliation_111,
+					    new_affiliation_111_h);
+		else
+			pci_free_consistent(instance->pdev,
+					    (MAX_LOGICAL_DRIVES + 1) *
+					    sizeof(struct MR_LD_VF_AFFILIATION),
+					    new_affiliation, new_affiliation_h);
+	}
+	megasas_return_cmd(instance, cmd);
+
+	return retval;
+}
+
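+/*
+ * Note (editorial): MR_LD_VF_MAP entries are variable-length, so the
+ * comparison loop above walks the affiliation buffer using each entry's
+ * own 'size' field rather than sizeof(struct MR_LD_VF_MAP):
+ *
+ *	savedmap = (struct MR_LD_VF_MAP *)
+ *		((unsigned char *)savedmap + savedmap->size);
+ */
+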
+/* This function will tell FW to start the SR-IOV heartbeat */
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+					 int initial)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	int retval = 0;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: "
+		       "Failed to get cmd for scsi%d.\n",
+		       instance->host->host_no);
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	if (initial) {
+		instance->hb_host_mem =
+			pci_alloc_consistent(instance->pdev,
+					     sizeof(struct MR_CTRL_HB_HOST_MEM),
+					     &instance->hb_host_mem_h);
+		if (!instance->hb_host_mem) {
+			printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate"
+			       " memory for heartbeat host memory for "
+			       "scsi%d.\n", instance->host->host_no);
+			retval = -ENOMEM;
+			goto out;
+		}
+		memset(instance->hb_host_mem, 0,
+		       sizeof(struct MR_CTRL_HB_HOST_MEM));
+	}
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM);
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_BOTH;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM);
+	dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC;
+	dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h;
+	dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM);
+
+	printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
+	       instance->host->host_no);
+
+	if (!megasas_issue_polled(instance, cmd)) {
+		retval = 0;
+	} else {
+		printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+		       "_MEM_ALLOC DCMD timed out for scsi%d\n",
+		       instance->host->host_no);
+		retval = 1;
+		goto out;
+	}
+
+	if (dcmd->cmd_status) {
+		printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+		       "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n",
+		       dcmd->cmd_status,
+		       instance->host->host_no);
+		retval = 1;
+		goto out;
+	}
+
+out:
+	megasas_return_cmd(instance, cmd);
+
+	return retval;
+}
+
+/* Handler for SR-IOV heartbeat */
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
+{
+	struct megasas_instance *instance =
+		(struct megasas_instance *)instance_addr;
+
+	if (instance->hb_host_mem->HB.fwCounter !=
+	    instance->hb_host_mem->HB.driverCounter) {
+		instance->hb_host_mem->HB.driverCounter =
+			instance->hb_host_mem->HB.fwCounter;
+		mod_timer(&instance->sriov_heartbeat_timer,
+			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+	} else {
+		printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never "
+		       "completed for scsi%d\n", instance->host->host_no);
+		schedule_work(&instance->work_init);
+	}
+}
+
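+/*
+ * Note (editorial): the heartbeat is a shared-memory handshake.  Firmware
+ * periodically bumps HB.fwCounter; on each timer tick the VF driver echoes
+ * it into HB.driverCounter and re-arms the timer.  If fwCounter has not
+ * advanced since the previous tick, the firmware is presumed unresponsive
+ * and work_init is scheduled to start adapter recovery.
+ */
+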
 /**
  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
  * @instance:				Adapter soft state
@@ -2014,9 +2376,10 @@
 	 * First wait for all commands to complete
 	 */
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
-		ret = megasas_reset_fusion(scmd->device->host);
+		ret = megasas_reset_fusion(scmd->device->host, 1);
 	else
 		ret = megasas_generic_reset(scmd);
 
@@ -2731,6 +3094,8 @@
 				(instance->pdev->device ==
 				PCI_DEVICE_ID_LSI_FUSION) ||
 				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_PLASMA) ||
+				(instance->pdev->device ==
 				PCI_DEVICE_ID_LSI_INVADER) ||
 				(instance->pdev->device ==
 				PCI_DEVICE_ID_LSI_FURY)) {
@@ -2755,6 +3120,8 @@
 			    (instance->pdev->device ==
 			     PCI_DEVICE_ID_LSI_FUSION) ||
 			    (instance->pdev->device ==
+			     PCI_DEVICE_ID_LSI_PLASMA) ||
+			    (instance->pdev->device ==
 			     PCI_DEVICE_ID_LSI_INVADER) ||
 			    (instance->pdev->device ==
 			     PCI_DEVICE_ID_LSI_FURY)) {
@@ -2780,6 +3147,8 @@
 				(instance->pdev->device
 					== PCI_DEVICE_ID_LSI_FUSION) ||
 				(instance->pdev->device
+					== PCI_DEVICE_ID_LSI_PLASMA) ||
+				(instance->pdev->device
 					== PCI_DEVICE_ID_LSI_INVADER) ||
 				(instance->pdev->device
 					== PCI_DEVICE_ID_LSI_FURY)) {
@@ -2788,6 +3157,8 @@
 				if ((instance->pdev->device ==
 					PCI_DEVICE_ID_LSI_FUSION) ||
 					(instance->pdev->device ==
+					PCI_DEVICE_ID_LSI_PLASMA) ||
+					(instance->pdev->device ==
 					PCI_DEVICE_ID_LSI_INVADER) ||
 					(instance->pdev->device ==
 					PCI_DEVICE_ID_LSI_FURY)) {
@@ -3014,6 +3385,7 @@
 		cmd->frame->io.context = cpu_to_le32(cmd->index);
 		cmd->frame->io.pad_0 = 0;
 		if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+		    (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
 		    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
 			(instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
 		    (reset_devices))
@@ -3620,6 +3992,7 @@
 	struct megasas_ctrl_info *ctrl_info;
 	unsigned long bar_list;
 	int i, loop, fw_msix_count = 0;
+	struct IOV_111 *iovPtr;
 
 	/* Find first memory bar */
 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
@@ -3642,6 +4015,7 @@
 
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
+	case PCI_DEVICE_ID_LSI_PLASMA:
 	case PCI_DEVICE_ID_LSI_INVADER:
 	case PCI_DEVICE_ID_LSI_FURY:
 		instance->instancet = &megasas_instance_template_fusion;
@@ -3696,7 +4070,8 @@
 		scratch_pad_2 = readl
 			(&instance->reg_set->outbound_scratch_pad_2);
 		/* Check max MSI-X vectors */
-		if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) {
+		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+		    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) {
 			instance->msix_vectors = (scratch_pad_2
 				& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
 			fw_msix_count = instance->msix_vectors;
@@ -3763,7 +4138,10 @@
 
 	memset(instance->pd_list, 0 ,
 		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
-	megasas_get_pd_list(instance);
+	if (megasas_get_pd_list(instance) < 0) {
+		printk(KERN_ERR "megasas: failed to get PD list\n");
+		goto fail_init_adapter;
+	}
 
 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
 	if (megasas_ld_list_query(instance,
@@ -3807,6 +4185,7 @@
 		ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
 		/* adapterOperations2 are converted into CPU arch*/
 		le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
+		instance->mpio = ctrl_info->adapterOperations2.mpio;
 		instance->UnevenSpanSupport =
 			ctrl_info->adapterOperations2.supportUnevenSpans;
 		if (instance->UnevenSpanSupport) {
@@ -3819,6 +4198,20 @@
 				fusion->fast_path_io = 0;
 
 		}
+		if (ctrl_info->host_interface.SRIOV) {
+			if (!ctrl_info->adapterOperations2.activePassive)
+				instance->PlasmaFW111 = 1;
+
+			if (!instance->PlasmaFW111)
+				instance->requestorId =
+					ctrl_info->iov.requestorId;
+			else {
+				iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
+				instance->requestorId = iovPtr->requestorId;
+			}
+			printk(KERN_WARNING "megaraid_sas: I am VF "
+			       "requestorId %d\n", instance->requestorId);
+		}
 	}
 	instance->max_sectors_per_req = instance->max_num_sge *
 						PAGE_SIZE / 512;
@@ -3851,6 +4244,17 @@
 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
 		(unsigned long)instance);
 
+	/* Launch SR-IOV heartbeat timer */
+	if (instance->requestorId) {
+		if (!megasas_sriov_start_heartbeat(instance, 1))
+			megasas_start_timer(instance,
+					    &instance->sriov_heartbeat_timer,
+					    megasas_sriov_heartbeat_handler,
+					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+		else
+			instance->skip_heartbeat_timer_del = 1;
+	}
+
 	return 0;
 
 fail_init_adapter:
@@ -3933,16 +4337,19 @@
 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
 
-	megasas_issue_blocked_cmd(instance, cmd);
-
-	/*
-	 * Copy the data back into callers buffer
-	 */
-	eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
-	eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
-	eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
-	eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
-	eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
+	if (megasas_issue_blocked_cmd(instance, cmd, 30))
+		dev_err(&instance->pdev->dev, "Command timed out"
+			" from %s\n", __func__);
+	else {
+		/*
+		 * Copy the data back into callers buffer
+		 */
+		eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
+		eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
+		eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
+		eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
+		eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
+	}
 
 	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
 			    el_info, el_info_h);
@@ -4018,7 +4425,7 @@
 			instance->aen_cmd->abort_aen = 1;
 			ret_val = megasas_issue_blocked_abort_cmd(instance,
 								  instance->
-								  aen_cmd);
+								  aen_cmd, 30);
 
 			if (ret_val) {
 				printk(KERN_DEBUG "megasas: Failed to abort "
@@ -4160,6 +4567,7 @@
 
 	/* Fusion only supports host reset */
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
 		host->hostt->eh_device_reset_handler = NULL;
@@ -4197,6 +4605,19 @@
 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
 			goto fail_set_dma_mask;
 	}
+	/*
+	 * Ensure that all data structures are allocated in 32-bit
+	 * memory.
+	 */
+	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+		/* Try 32bit DMA mask and 32 bit Consistent dma mask */
+		if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+			&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+			dev_info(&pdev->dev, "set 32bit DMA mask"
+				" and 32 bit consistent mask\n");
+		else
+			goto fail_set_dma_mask;
+	}
 
 	return 0;
 
@@ -4212,7 +4633,7 @@
 static int megasas_probe_one(struct pci_dev *pdev,
 			     const struct pci_device_id *id)
 {
-	int rval, pos, i, j;
+	int rval, pos, i, j, cpu;
 	struct Scsi_Host *host;
 	struct megasas_instance *instance;
 	u16 control = 0;
@@ -4272,6 +4693,7 @@
 
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
+	case PCI_DEVICE_ID_LSI_PLASMA:
 	case PCI_DEVICE_ID_LSI_INVADER:
 	case PCI_DEVICE_ID_LSI_FURY:
 	{
@@ -4368,6 +4790,7 @@
 	instance->UnevenSpanSupport = 0;
 
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
@@ -4380,12 +4803,33 @@
 	if (megasas_init_fw(instance))
 		goto fail_init_mfi;
 
+	if (instance->requestorId) {
+		if (instance->PlasmaFW111) {
+			instance->vf_affiliation_111 =
+				pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
+						     &instance->vf_affiliation_111_h);
+			if (!instance->vf_affiliation_111)
+				printk(KERN_WARNING "megasas: Can't allocate "
+				       "memory for VF affiliation buffer\n");
+		} else {
+			instance->vf_affiliation =
+				pci_alloc_consistent(pdev,
+						     (MAX_LOGICAL_DRIVES + 1) *
+						     sizeof(struct MR_LD_VF_AFFILIATION),
+						     &instance->vf_affiliation_h);
+			if (!instance->vf_affiliation)
+				printk(KERN_WARNING "megasas: Can't allocate "
+				       "memory for VF affiliation buffer\n");
+		}
+	}
+
 retry_irq_register:
 	/*
 	 * Register IRQ
 	 */
 	if (instance->msix_vectors) {
-		for (i = 0 ; i < instance->msix_vectors; i++) {
+		cpu = cpumask_first(cpu_online_mask);
+		for (i = 0; i < instance->msix_vectors; i++) {
 			instance->irq_context[i].instance = instance;
 			instance->irq_context[i].MSIxIndex = i;
 			if (request_irq(instance->msixentry[i].vector,
@@ -4394,14 +4838,22 @@
 					&instance->irq_context[i])) {
 				printk(KERN_DEBUG "megasas: Failed to "
 				       "register IRQ for vector %d.\n", i);
-				for (j = 0 ; j < i ; j++)
+				for (j = 0; j < i; j++) {
+					irq_set_affinity_hint(
+						instance->msixentry[j].vector, NULL);
 					free_irq(
 						instance->msixentry[j].vector,
 						&instance->irq_context[j]);
+				}
 				/* Retry irq register for IO_APIC */
 				instance->msix_vectors = 0;
 				goto retry_irq_register;
 			}
+			if (irq_set_affinity_hint(instance->msixentry[i].vector,
+				get_cpu_mask(cpu)))
+				dev_err(&instance->pdev->dev, "Error setting"
+					" affinity hint for cpu %d\n", cpu);
+			cpu = cpumask_next(cpu, cpu_online_mask);
 		}
 	} else {
 		instance->irq_context[0].instance = instance;
@@ -4455,13 +4907,17 @@
 
 	instance->instancet->disable_intr(instance);
 	if (instance->msix_vectors)
-		for (i = 0 ; i < instance->msix_vectors; i++)
+		for (i = 0; i < instance->msix_vectors; i++) {
+			irq_set_affinity_hint(
+				instance->msixentry[i].vector, NULL);
 			free_irq(instance->msixentry[i].vector,
 				 &instance->irq_context[i]);
+		}
 	else
 		free_irq(instance->pdev->irq, &instance->irq_context[0]);
 fail_irq:
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		megasas_release_fusion(instance);
@@ -4522,7 +4978,9 @@
 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
 
-	megasas_issue_blocked_cmd(instance, cmd);
+	if (megasas_issue_blocked_cmd(instance, cmd, 30))
+		dev_err(&instance->pdev->dev, "Command timed out"
+			" from %s\n", __func__);
 
 	megasas_return_cmd(instance, cmd);
 
@@ -4549,10 +5007,11 @@
 		return;
 
 	if (instance->aen_cmd)
-		megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
+		megasas_issue_blocked_abort_cmd(instance,
+			instance->aen_cmd, 30);
 	if (instance->map_update_cmd)
 		megasas_issue_blocked_abort_cmd(instance,
-						instance->map_update_cmd);
+			instance->map_update_cmd, 30);
 	dcmd = &cmd->frame->dcmd;
 
 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4566,7 +5025,9 @@
 	dcmd->data_xfer_len = 0;
 	dcmd->opcode = cpu_to_le32(opcode);
 
-	megasas_issue_blocked_cmd(instance, cmd);
+	if (megasas_issue_blocked_cmd(instance, cmd, 30))
+		dev_err(&instance->pdev->dev, "Command timed out"
+			" from %s\n", __func__);
 
 	megasas_return_cmd(instance, cmd);
 
@@ -4590,6 +5051,10 @@
 	host = instance->host;
 	instance->unload = 1;
 
+	/* Shutdown SR-IOV heartbeat timer */
+	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+		del_timer_sync(&instance->sriov_heartbeat_timer);
+
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
 
@@ -4606,9 +5071,12 @@
 	instance->instancet->disable_intr(instance);
 
 	if (instance->msix_vectors)
-		for (i = 0 ; i < instance->msix_vectors; i++)
+		for (i = 0; i < instance->msix_vectors; i++) {
+			irq_set_affinity_hint(
+				instance->msixentry[i].vector, NULL);
 			free_irq(instance->msixentry[i].vector,
 				 &instance->irq_context[i]);
+		}
 	else
 		free_irq(instance->pdev->irq, &instance->irq_context[0]);
 	if (instance->msix_vectors)
@@ -4629,7 +5097,7 @@
 static int
 megasas_resume(struct pci_dev *pdev)
 {
-	int rval, i, j;
+	int rval, i, j, cpu;
 	struct Scsi_Host *host;
 	struct megasas_instance *instance;
 
@@ -4673,6 +5141,7 @@
 
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
+	case PCI_DEVICE_ID_LSI_PLASMA:
 	case PCI_DEVICE_ID_LSI_INVADER:
 	case PCI_DEVICE_ID_LSI_FURY:
 	{
@@ -4701,6 +5170,7 @@
 	 * Register IRQ
 	 */
 	if (instance->msix_vectors) {
+		cpu = cpumask_first(cpu_online_mask);
 		for (i = 0 ; i < instance->msix_vectors; i++) {
 			instance->irq_context[i].instance = instance;
 			instance->irq_context[i].MSIxIndex = i;
@@ -4710,12 +5180,21 @@
 					&instance->irq_context[i])) {
 				printk(KERN_DEBUG "megasas: Failed to "
 				       "register IRQ for vector %d.\n", i);
-				for (j = 0 ; j < i ; j++)
+				for (j = 0; j < i; j++) {
+					irq_set_affinity_hint(
+						instance->msixentry[j].vector, NULL);
 					free_irq(
 						instance->msixentry[j].vector,
 						&instance->irq_context[j]);
+				}
 				goto fail_irq;
 			}
+
+			if (irq_set_affinity_hint(instance->msixentry[i].vector,
+				get_cpu_mask(cpu)))
+				dev_err(&instance->pdev->dev, "Error setting"
+					" affinity hint for cpu %d\n", cpu);
+			cpu = cpumask_next(cpu, cpu_online_mask);
 		}
 	} else {
 		instance->irq_context[0].instance = instance;
@@ -4728,6 +5207,17 @@
 		}
 	}
 
+	/* Re-launch SR-IOV heartbeat timer */
+	if (instance->requestorId) {
+		if (!megasas_sriov_start_heartbeat(instance, 0))
+			megasas_start_timer(instance,
+					    &instance->sriov_heartbeat_timer,
+					    megasas_sriov_heartbeat_handler,
+					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+		else
+			instance->skip_heartbeat_timer_del = 1;
+	}
+
 	instance->instancet->enable_intr(instance);
 	instance->unload = 0;
 
@@ -4782,6 +5272,10 @@
 	host = instance->host;
 	fusion = instance->ctrl_context;
 
+	/* Shutdown SR-IOV heartbeat timer */
+	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+		del_timer_sync(&instance->sriov_heartbeat_timer);
+
 	scsi_remove_host(instance->host);
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4793,6 +5287,9 @@
 		instance->ev = NULL;
 	}
 
+	/* cancel all wait events */
+	wake_up_all(&instance->int_cmd_wait_q);
+
 	tasklet_kill(&instance->isr_tasklet);
 
 	/*
@@ -4811,9 +5308,12 @@
 	instance->instancet->disable_intr(instance);
 
 	if (instance->msix_vectors)
-		for (i = 0 ; i < instance->msix_vectors; i++)
+		for (i = 0; i < instance->msix_vectors; i++) {
+			irq_set_affinity_hint(
+				instance->msixentry[i].vector, NULL);
 			free_irq(instance->msixentry[i].vector,
 				 &instance->irq_context[i]);
+		}
 	else
 		free_irq(instance->pdev->irq, &instance->irq_context[0]);
 	if (instance->msix_vectors)
@@ -4821,6 +5321,7 @@
 
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
+	case PCI_DEVICE_ID_LSI_PLASMA:
 	case PCI_DEVICE_ID_LSI_INVADER:
 	case PCI_DEVICE_ID_LSI_FURY:
 		megasas_release_fusion(instance);
@@ -4847,6 +5348,24 @@
 	if (instance->evt_detail)
 		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
 				instance->evt_detail, instance->evt_detail_h);
+
+	if (instance->vf_affiliation)
+		pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+				    sizeof(struct MR_LD_VF_AFFILIATION),
+				    instance->vf_affiliation,
+				    instance->vf_affiliation_h);
+
+	if (instance->vf_affiliation_111)
+		pci_free_consistent(pdev,
+				    sizeof(struct MR_LD_VF_AFFILIATION_111),
+				    instance->vf_affiliation_111,
+				    instance->vf_affiliation_111_h);
+
+	if (instance->hb_host_mem)
+		pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+				    instance->hb_host_mem,
+				    instance->hb_host_mem_h);
+
 	scsi_host_put(host);
 
 	pci_disable_device(pdev);
@@ -4868,9 +5387,12 @@
 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
 	instance->instancet->disable_intr(instance);
 	if (instance->msix_vectors)
-		for (i = 0 ; i < instance->msix_vectors; i++)
+		for (i = 0; i < instance->msix_vectors; i++) {
+			irq_set_affinity_hint(
+				instance->msixentry[i].vector, NULL);
 			free_irq(instance->msixentry[i].vector,
 				 &instance->irq_context[i]);
+		}
 	else
 		free_irq(instance->pdev->irq, &instance->irq_context[0]);
 	if (instance->msix_vectors)
@@ -5045,7 +5567,7 @@
 	 * cmd to the SCSI mid-layer
 	 */
 	cmd->sync_cmd = 1;
-	megasas_issue_blocked_cmd(instance, cmd);
+	megasas_issue_blocked_cmd(instance, cmd, 0);
 	cmd->sync_cmd = 0;
 
 	/*
@@ -5132,6 +5654,16 @@
 		goto out_kfree_ioc;
 	}
 
+	/* Adjust ioctl wait time for VF mode */
+	if (instance->requestorId)
+		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+	/* Block ioctls in VF mode */
+	if (instance->requestorId && !allow_vf_ioctls) {
+		error = -ENODEV;
+		goto out_kfree_ioc;
+	}
+
 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
 		printk(KERN_ERR "Controller in crit error\n");
 		error = -ENODEV;
@@ -5441,7 +5973,7 @@
 	u16     pd_index = 0;
 	u16	ld_index = 0;
 	int     i, j, doscan = 0;
-	u32 seq_num;
+	u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
 	int error;
 
 	if (!instance) {
@@ -5449,6 +5981,23 @@
 		kfree(ev);
 		return;
 	}
+
+	/* Adjust event workqueue thread wait time for VF mode */
+	if (instance->requestorId)
+		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+	/* Don't run the event workqueue thread if OCR is running */
+	for (i = 0; i < wait_time; i++) {
+		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
+			break;
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			printk(KERN_NOTICE "megasas: %s waiting for "
+			       "controller reset to finish for scsi%d\n",
+			       __func__, instance->host->host_no);
+		}
+		msleep(1000);
+	}
+
 	instance->ev = NULL;
 	host = instance->host;
 	if (instance->evt_detail) {
@@ -5515,65 +6064,64 @@
 		case MR_EVT_LD_OFFLINE:
 		case MR_EVT_CFG_CLEARED:
 		case MR_EVT_LD_DELETED:
-			if (megasas_ld_list_query(instance,
-					MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
-				megasas_get_ld_list(instance);
-			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
-				for (j = 0;
-				j < MEGASAS_MAX_DEV_PER_CHANNEL;
-				j++) {
+			if (!instance->requestorId ||
+			    (instance->requestorId &&
+			     megasas_get_ld_vf_affiliation(instance, 0))) {
+				if (megasas_ld_list_query(instance,
+							  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+					megasas_get_ld_list(instance);
+				for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+					for (j = 0;
+					     j < MEGASAS_MAX_DEV_PER_CHANNEL;
+					     j++) {
 
-				ld_index =
-				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+						ld_index =
+							(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
 
-				sdev1 = scsi_device_lookup(host,
-					MEGASAS_MAX_PD_CHANNELS + i,
-					j,
-					0);
+						sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
 
-				if (instance->ld_ids[ld_index] != 0xff) {
-					if (sdev1) {
-						scsi_device_put(sdev1);
-					}
-				} else {
-					if (sdev1) {
-						scsi_remove_device(sdev1);
-						scsi_device_put(sdev1);
-					}
-				}
-				}
-			}
-			doscan = 0;
-			break;
-		case MR_EVT_LD_CREATED:
-			if (megasas_ld_list_query(instance,
-					MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
-				megasas_get_ld_list(instance);
-			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
-				for (j = 0;
-					j < MEGASAS_MAX_DEV_PER_CHANNEL;
-					j++) {
-					ld_index =
-					(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
-					sdev1 = scsi_device_lookup(host,
-						MEGASAS_MAX_PD_CHANNELS + i,
-						j, 0);
-
-					if (instance->ld_ids[ld_index] !=
-								0xff) {
-						if (!sdev1) {
-							scsi_add_device(host,
-						MEGASAS_MAX_PD_CHANNELS + i,
-								j, 0);
+						if (instance->ld_ids[ld_index]
+						    != 0xff) {
+							if (sdev1)
+								scsi_device_put(sdev1);
+						} else {
+							if (sdev1) {
+								scsi_remove_device(sdev1);
+								scsi_device_put(sdev1);
+							}
 						}
 					}
-					if (sdev1) {
-						scsi_device_put(sdev1);
+				}
+				doscan = 0;
+			}
+			break;
+		case MR_EVT_LD_CREATED:
+			if (!instance->requestorId ||
+			    (instance->requestorId &&
+			     megasas_get_ld_vf_affiliation(instance, 0))) {
+				if (megasas_ld_list_query(instance,
+							  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+					megasas_get_ld_list(instance);
+				for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+					for (j = 0;
+					     j < MEGASAS_MAX_DEV_PER_CHANNEL;
+					     j++) {
+						ld_index =
+							(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+						sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+
+						if (instance->ld_ids[ld_index]
+						    != 0xff) {
+							if (!sdev1)
+								scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+						}
+						if (sdev1)
+							scsi_device_put(sdev1);
 					}
 				}
+				doscan = 0;
 			}
-			doscan = 0;
 			break;
 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
 		case MR_EVT_FOREIGN_CFG_IMPORTED:
@@ -5591,50 +6139,55 @@
 	}
 
 	if (doscan) {
-		printk(KERN_INFO "scanning ...\n");
-		megasas_get_pd_list(instance);
-		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
-			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
-				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
-				sdev1 = scsi_device_lookup(host, i, j, 0);
-				if (instance->pd_list[pd_index].driveState ==
-							MR_PD_STATE_SYSTEM) {
-					if (!sdev1) {
-						scsi_add_device(host, i, j, 0);
-					}
-					if (sdev1)
-						scsi_device_put(sdev1);
-				} else {
-					if (sdev1) {
-						scsi_remove_device(sdev1);
-						scsi_device_put(sdev1);
+		printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n",
+		       instance->host->host_no);
+		if (megasas_get_pd_list(instance) == 0) {
+			for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+				for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+					pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+					sdev1 = scsi_device_lookup(host, i, j, 0);
+					if (instance->pd_list[pd_index].driveState ==
+					    MR_PD_STATE_SYSTEM) {
+						if (!sdev1) {
+							scsi_add_device(host, i, j, 0);
+						}
+						if (sdev1)
+							scsi_device_put(sdev1);
+					} else {
+						if (sdev1) {
+							scsi_remove_device(sdev1);
+							scsi_device_put(sdev1);
+						}
 					}
 				}
 			}
 		}
 
-		if (megasas_ld_list_query(instance,
-					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
-			megasas_get_ld_list(instance);
-		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
-			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
-				ld_index =
-				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+		if (!instance->requestorId ||
+		    (instance->requestorId &&
+		     megasas_get_ld_vf_affiliation(instance, 0))) {
+			if (megasas_ld_list_query(instance,
+						  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+				megasas_get_ld_list(instance);
+			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+				for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
+				     j++) {
+					ld_index =
+						(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
 
-				sdev1 = scsi_device_lookup(host,
-					MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-				if (instance->ld_ids[ld_index] != 0xff) {
-					if (!sdev1) {
-						scsi_add_device(host,
-						MEGASAS_MAX_PD_CHANNELS + i,
-								j, 0);
+					sdev1 = scsi_device_lookup(host,
+								   MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+					if (instance->ld_ids[ld_index]
+					    != 0xff) {
+						if (!sdev1)
+							scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+						else
+							scsi_device_put(sdev1);
 					} else {
-						scsi_device_put(sdev1);
-					}
-				} else {
-					if (sdev1) {
-						scsi_remove_device(sdev1);
-						scsi_device_put(sdev1);
+						if (sdev1) {
+							scsi_remove_device(sdev1);
+							scsi_device_put(sdev1);
+						}
 					}
 				}
 			}
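
[The LD teardown/rescan above follows a reconcile pattern: look up each (channel, target) slot, add devices the firmware now reports, remove the ones it no longer does, and always drop the reference taken by scsi_device_lookup(). A minimal userspace sketch of the same reconcile loop, with hypothetical names standing in for the SCSI midlayer calls:

#include <stdbool.h>
#include <stdio.h>

#define MAX_CHANNELS		2
#define MAX_DEV_PER_CHANNEL	4

/* Hypothetical stand-in for the host's view of attached devices. */
static bool host_attached[MAX_CHANNELS][MAX_DEV_PER_CHANNEL];

static void add_device(int c, int t)
{
	host_attached[c][t] = true;
	printf("attach %d:%d\n", c, t);
}

static void remove_device(int c, int t)
{
	host_attached[c][t] = false;
	printf("detach %d:%d\n", c, t);
}

/* Attach what the firmware reports and the host lacks; detach what the
 * host still has but the firmware dropped; leave agreeing slots alone. */
static void reconcile(const bool fw_present[MAX_CHANNELS][MAX_DEV_PER_CHANNEL])
{
	for (int c = 0; c < MAX_CHANNELS; c++)
		for (int t = 0; t < MAX_DEV_PER_CHANNEL; t++) {
			if (fw_present[c][t] && !host_attached[c][t])
				add_device(c, t);
			else if (!fw_present[c][t] && host_attached[c][t])
				remove_device(c, t);
		}
}
]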
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e24b6eb..081bfff 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -143,12 +143,12 @@
 
 u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
 {
-	return map->raidMap.ldSpanMap[ld].ldRaid.targetId;
+	return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
 }
 
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
 {
-	return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]);
+	return map->raidMap.ldTgtIdToLd[ldTgtId];
 }
 
 static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
@@ -975,7 +975,10 @@
 			regSize += stripSize;
 	}
 
-	pRAID_Context->timeoutValue     = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec);
+	pRAID_Context->timeoutValue =
+		cpu_to_le16(raid->fpIoTimeoutForLd ?
+			    raid->fpIoTimeoutForLd :
+			    map->raidMap.fpPdIoTimeoutSec);
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		pRAID_Context->regLockFlags = (isRead) ?
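[The MR_GetLDTgtId()/MR_TargetIdToLdGet() change above moves the byte swap to the field that actually needs one: targetId is a 16-bit little-endian firmware field, while ldTgtIdToLd[] holds single bytes. A userspace sketch of the same accessor discipline, using glibc's le16toh() in place of le16_to_cpu(); the struct layout here is illustrative:

#include <endian.h>
#include <stdint.h>

struct fw_ld_entry {
	uint16_t target_id;	/* little-endian as written by firmware */
};

struct fw_raid_map {
	struct fw_ld_entry ld[64];
	uint8_t tgt_to_ld[256];	/* single bytes: no swap possible or needed */
};

static uint16_t get_ld_tgt_id(const struct fw_raid_map *map, unsigned int ld)
{
	return le16toh(map->ld[ld].target_id);	/* swap 16-bit fields only */
}

static uint8_t tgt_id_to_ld(const struct fw_raid_map *map, unsigned int tgt)
{
	return map->tgt_to_ld[tgt];		/* u8 comes back as-is */
}
]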
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f655592..2260041 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -62,7 +62,8 @@
 		     struct megasas_cmd *cmd, u8 alt_status);
 int megasas_is_ldio(struct scsi_cmnd *cmd);
 int
-wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+	      int seconds);
 
 void
 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
@@ -81,6 +82,13 @@
 void megaraid_sas_kill_hba(struct megasas_instance *instance);
 
 extern u32 megasas_dbg_lvl;
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+				  int initial);
+void megasas_start_timer(struct megasas_instance *instance,
+			 struct timer_list *timer,
+			 void *fn, unsigned long interval);
+extern struct megasas_mgmt_info megasas_mgmt_info;
 extern int resetwaittime;
 
 /**
@@ -549,12 +557,13 @@
  * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
  */
 int
-wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd)
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+	int seconds)
 {
 	int i;
 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
 
-	u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
+	u32 msecs = seconds * 1000;
 
 	/*
 	 * Wait for cmd_status to change
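[wait_and_poll() now takes its timeout from the caller instead of hard-coding MFI_POLL_TIMEOUT_SECS, so slower paths such as SR-IOV recovery can wait longer. The shape of such a loop, as a self-contained sketch where done() is a hypothetical completion predicate:

#include <stdint.h>
#include <unistd.h>

static int poll_for_completion(int (*done)(void *), void *arg, int seconds)
{
	uint32_t msecs = (uint32_t)seconds * 1000;

	for (uint32_t i = 0; i < msecs; i += 20) {
		if (done(arg))
			return 0;	/* command completed */
		usleep(20 * 1000);	/* poll every 20 ms, like the driver */
	}
	return -1;			/* timed out */
}
]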
@@ -585,7 +594,7 @@
 	struct megasas_cmd *cmd;
 	u8 ret;
 	struct fusion_context *fusion;
-	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
 	int i;
 	struct megasas_header *frame_hdr;
 
@@ -644,18 +653,18 @@
 	/* Convert capability to LE32 */
 	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
 
-	init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle);
+	init_frame->queue_info_new_phys_addr_hi =
+		cpu_to_le32(upper_32_bits(ioc_init_handle));
+	init_frame->queue_info_new_phys_addr_lo =
+		cpu_to_le32(lower_32_bits(ioc_init_handle));
 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
 
-	req_desc =
-	  (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc;
-
-	req_desc->Words = 0;
-	req_desc->MFAIo.RequestFlags =
+	req_desc.Words = 0;
+	req_desc.MFAIo.RequestFlags =
 		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
 		 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-	cpu_to_le32s((u32 *)&req_desc->MFAIo);
-	req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr);
+	cpu_to_le32s((u32 *)&req_desc.MFAIo);
+	req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr);
 
 	/*
 	 * disable the intr before firing the init frame
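[The queue_info fix above is a classic truncation repair: only the low 32 bits of the 64-bit IOC-init handle were being programmed before. A sketch of the split, with local helpers mirroring the kernel's upper_32_bits()/lower_32_bits():

#include <stdint.h>

static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

struct init_frame_addr {
	uint32_t phys_addr_hi;	/* was never written before the fix */
	uint32_t phys_addr_lo;
};

static void program_addr(struct init_frame_addr *f, uint64_t dma_handle)
{
	f->phys_addr_hi = upper_32(dma_handle);
	f->phys_addr_lo = lower_32(dma_handle);
}
]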
@@ -669,10 +678,10 @@
 			break;
 	}
 
-	instance->instancet->fire_cmd(instance, req_desc->u.low,
-				      req_desc->u.high, instance->reg_set);
+	instance->instancet->fire_cmd(instance, req_desc.u.low,
+				      req_desc.u.high, instance->reg_set);
 
-	wait_and_poll(instance, cmd);
+	wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
 
 	frame_hdr = &cmd->frame->hdr;
 	if (frame_hdr->cmd_status != 0) {
@@ -723,7 +732,7 @@
 
 	if (!fusion) {
 		megasas_return_cmd(instance, cmd);
-		return 1;
+		return -ENXIO;
 	}
 
 	dcmd = &cmd->frame->dcmd;
@@ -1604,13 +1613,15 @@
 			MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 			(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
-			io_request->IoFlags |=
-				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+			io_request->IoFlags |= cpu_to_le16(
+				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
 			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 		cmd->request_desc->SCSIIO.DevHandle =
 			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+		cmd->request_desc->SCSIIO.MSIxIndex =
+			instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
 		/*
 		 * If the command is for the tape device, set the
 		 * FP timeout to the os layer timeout value.
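[The new MSIxIndex assignment spreads fast-path completions across the interrupt vectors by the submitting CPU. As a hypothetical helper, not driver code:

static unsigned int pick_msix_index(unsigned int cpu, unsigned int nvectors)
{
	/* hash completions over the vectors; vector 0 if MSI-X is off */
	return nvectors ? cpu % nvectors : 0;
}
]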
@@ -1770,7 +1781,8 @@
 
 	if (index >= instance->max_fw_cmds) {
 		printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for "
-		       "descriptor\n", index);
+		       "descriptor for scsi%d\n", index,
+			instance->host->host_no);
 		return NULL;
 	}
 	fusion = instance->ctrl_context;
@@ -2038,8 +2050,11 @@
 		/* If we didn't complete any commands, check for FW fault */
 		fw_state = instance->instancet->read_fw_status_reg(
 			instance->reg_set) & MFI_STATE_MASK;
-		if (fw_state == MFI_STATE_FAULT)
+		if (fw_state == MFI_STATE_FAULT) {
+			printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt "
+			       "for scsi%d\n", instance->host->host_no);
 			schedule_work(&instance->work_init);
+		}
 	}
 
 	return IRQ_HANDLED;
@@ -2210,9 +2225,10 @@
 }
 
 /* This function waits for outstanding commands on fusion to complete */
-int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
+int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+					int iotimeout, int *convert)
 {
-	int i, outstanding, retval = 0;
+	int i, outstanding, retval = 0, hb_seconds_missed = 0;
 	u32 fw_state;
 
 	for (i = 0; i < resetwaittime; i++) {
@@ -2221,10 +2237,40 @@
 			instance->reg_set) & MFI_STATE_MASK;
 		if (fw_state == MFI_STATE_FAULT) {
 			printk(KERN_WARNING "megasas: Found FW in FAULT state,"
-			       " will reset adapter.\n");
+			       " will reset adapter scsi%d.\n",
+				instance->host->host_no);
 			retval = 1;
 			goto out;
 		}
+		/* If SR-IOV VF mode & heartbeat timeout, don't wait */
+		if (instance->requestorId && !iotimeout) {
+			retval = 1;
+			goto out;
+		}
+
+		/* If SR-IOV VF mode & I/O timeout, check for HB timeout */
+		if (instance->requestorId && iotimeout) {
+			if (instance->hb_host_mem->HB.fwCounter !=
+			    instance->hb_host_mem->HB.driverCounter) {
+				instance->hb_host_mem->HB.driverCounter =
+					instance->hb_host_mem->HB.fwCounter;
+				hb_seconds_missed = 0;
+			} else {
+				hb_seconds_missed++;
+				if (hb_seconds_missed ==
+				    (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
+					printk(KERN_WARNING "megasas: SR-IOV:"
+					       " Heartbeat never completed "
+					       "while polling during I/O "
+					       "timeout handling for "
+					       "scsi%d.\n",
+					       instance->host->host_no);
+					*convert = 1;
+					retval = 1;
+					goto out;
+				}
+			}
+		}
 
 		outstanding = atomic_read(&instance->fw_outstanding);
 		if (!outstanding)
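[The SR-IOV branch above is a heartbeat watchdog: firmware keeps incrementing fwCounter, and the driver snapshots it into driverCounter once per polling second. A standalone model of the check, with field and limit names illustrative:

#include <stdint.h>

struct heartbeat {
	uint32_t fw_counter;	/* advanced by firmware */
	uint32_t drv_counter;	/* last value the driver saw */
};

/* Returns -1 once `limit` consecutive polls pass without a fresh beat. */
static int hb_poll_once(struct heartbeat *hb, int *missed, int limit)
{
	if (hb->fw_counter != hb->drv_counter) {
		hb->drv_counter = hb->fw_counter;	/* fresh beat */
		*missed = 0;
		return 0;
	}
	if (++(*missed) >= limit)
		return -1;	/* firmware presumed dead; escalate */
	return 0;
}
]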
@@ -2232,7 +2278,8 @@
 
 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
 			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
-			       "commands to complete\n", i, outstanding);
+			       "commands to complete for scsi%d\n", i,
+			       outstanding, instance->host->host_no);
 			megasas_complete_cmd_dpc_fusion(
 				(unsigned long)instance);
 		}
@@ -2241,7 +2288,8 @@
 
 	if (atomic_read(&instance->fw_outstanding)) {
 		printk("megaraid_sas: pending commands remain after waiting, "
-		       "will reset adapter.\n");
+		       "will reset adapter scsi%d.\n",
+		       instance->host->host_no);
 		retval = 1;
 	}
 out:
@@ -2263,10 +2311,34 @@
 		reply_desc->Words = ULLONG_MAX;
 }
 
-/* Core fusion reset function */
-int megasas_reset_fusion(struct Scsi_Host *shost)
+/* Check for a second path that is currently UP */
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+	struct scsi_cmnd *scmd)
 {
-	int retval = SUCCESS, i, j, retry = 0;
+	int i, j, retval = (DID_RESET << 16);
+
+	if (instance->mpio && instance->requestorId) {
+		for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
+			for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
+				if (megasas_mgmt_info.instance[i] &&
+				    (megasas_mgmt_info.instance[i] != instance) &&
+				    megasas_mgmt_info.instance[i]->mpio &&
+				    megasas_mgmt_info.instance[i]->requestorId
+				    &&
+				    (megasas_mgmt_info.instance[i]->ld_ids[j]
+				     == scmd->device->id)) {
+					    retval = (DID_NO_CONNECT << 16);
+					    goto out;
+				}
+	}
+out:
+	return retval;
+}
+
+/* Core fusion reset function */
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
+{
+	int retval = SUCCESS, i, j, retry = 0, convert = 0;
 	struct megasas_instance *instance;
 	struct megasas_cmd_fusion *cmd_fusion;
 	struct fusion_context *fusion;
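[megasas_check_mpio_paths() above decides how a command aborted by reset should complete: if some other managed adapter currently exposes the same LD, the command is failed outright so the multipath layer fails over at once; otherwise it is retried on this path. In SCSI host-byte terms (the values below match include/scsi/scsi.h):

#include <stdbool.h>

#define DID_NO_CONNECT	0x01	/* fail now: another path is available */
#define DID_RESET	0x08	/* retry here: this is the only path */

static int mpio_result(bool peer_path_up)
{
	return (peer_path_up ? DID_NO_CONNECT : DID_RESET) << 16;
}
]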
@@ -2277,28 +2349,39 @@
 	instance = (struct megasas_instance *)shost->hostdata;
 	fusion = instance->ctrl_context;
 
+	mutex_lock(&instance->reset_mutex);
+
 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
 		printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
-		       "returning FAILED.\n");
+		       "returning FAILED for scsi%d.\n",
+			instance->host->host_no);
 		return FAILED;
 	}
 
-	mutex_lock(&instance->reset_mutex);
+	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+		del_timer_sync(&instance->sriov_heartbeat_timer);
 	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
-	instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+	instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
 	instance->instancet->disable_intr(instance);
 	msleep(1000);
 
 	/* First try waiting for commands to complete */
-	if (megasas_wait_for_outstanding_fusion(instance)) {
+	if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
+						&convert)) {
+		instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
 		printk(KERN_WARNING "megaraid_sas: resetting fusion "
-		       "adapter.\n");
+		       "adapter scsi%d.\n", instance->host->host_no);
+		if (convert)
+			iotimeout = 0;
+
 		/* Now return commands back to the OS */
 		for (i = 0 ; i < instance->max_fw_cmds; i++) {
 			cmd_fusion = fusion->cmd_list[i];
 			if (cmd_fusion->scmd) {
 				scsi_dma_unmap(cmd_fusion->scmd);
-				cmd_fusion->scmd->result = (DID_RESET << 16);
+				cmd_fusion->scmd->result =
+					megasas_check_mpio_paths(instance,
+								 cmd_fusion->scmd);
 				cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
 				megasas_return_cmd_fusion(instance, cmd_fusion);
 				atomic_dec(&instance->fw_outstanding);
@@ -2313,13 +2396,67 @@
 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
 			/* Reset not supported, kill adapter */
 			printk(KERN_WARNING "megaraid_sas: Reset not supported"
-			       ", killing adapter.\n");
+			       ", killing adapter scsi%d.\n",
+				instance->host->host_no);
 			megaraid_sas_kill_hba(instance);
+			instance->skip_heartbeat_timer_del = 1;
 			instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
 			retval = FAILED;
 			goto out;
 		}
 
+		/* Let SR-IOV VF & PF sync up if there was a HB failure */
+		if (instance->requestorId && !iotimeout) {
+			msleep(MEGASAS_OCR_SETTLE_TIME_VF);
+			/* Look for a late HB update after VF settle time */
+			if (abs_state == MFI_STATE_OPERATIONAL &&
+			    (instance->hb_host_mem->HB.fwCounter !=
+			     instance->hb_host_mem->HB.driverCounter)) {
+					instance->hb_host_mem->HB.driverCounter =
+						instance->hb_host_mem->HB.fwCounter;
+					printk(KERN_WARNING "megasas: SR-IOV: "
+					       "Late FW heartbeat update for "
+					       "scsi%d.\n",
+					       instance->host->host_no);
+			} else {
+				/* In VF mode, first poll for FW ready */
+				for (i = 0;
+				     i < (MEGASAS_RESET_WAIT_TIME * 1000);
+				     i += 20) {
+					status_reg =
+						instance->instancet->
+						read_fw_status_reg(
+							instance->reg_set);
+					abs_state = status_reg &
+						MFI_STATE_MASK;
+					if (abs_state == MFI_STATE_READY) {
+						printk(KERN_WARNING "megasas"
+						       ": SR-IOV: FW was found "
+						       "to be in ready state "
+						       "for scsi%d.\n",
+						       instance->host->host_no);
+						break;
+					}
+					msleep(20);
+				}
+				if (abs_state != MFI_STATE_READY) {
+					printk(KERN_WARNING "megasas: SR-IOV: "
+					       "FW not in ready state after %d"
+					       " seconds for scsi%d, status_reg = "
+					       "0x%x.\n",
+					       MEGASAS_RESET_WAIT_TIME,
+					       instance->host->host_no,
+					       status_reg);
+					megaraid_sas_kill_hba(instance);
+					instance->skip_heartbeat_timer_del = 1;
+					instance->adprecovery =
+						MEGASAS_HW_CRITICAL_ERROR;
+					retval = FAILED;
+					goto out;
+				}
+			}
+		}
+
 		/* Now try to reset the chip */
 		for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
 			writel(MPI2_WRSEQ_FLUSH_KEY_VALUE,
@@ -2346,7 +2483,9 @@
 				readl(&instance->reg_set->fusion_host_diag);
 				if (retry++ == 100) {
 					printk(KERN_WARNING "megaraid_sas: "
-					       "Host diag unlock failed!\n");
+					       "Host diag unlock failed! "
+					       "for scsi%d\n",
+						instance->host->host_no);
 					break;
 				}
 			}
@@ -2368,7 +2507,8 @@
 				if (retry++ == 1000) {
 					printk(KERN_WARNING "megaraid_sas: "
 					       "Diag reset adapter never "
-					       "cleared!\n");
+					       "cleared for scsi%d!\n",
+						instance->host->host_no);
 					break;
 				}
 			}
@@ -2390,29 +2530,29 @@
 			if (abs_state <= MFI_STATE_FW_INIT) {
 				printk(KERN_WARNING "megaraid_sas: firmware "
 				       "state < MFI_STATE_FW_INIT, state = "
-				       "0x%x\n", abs_state);
+				       "0x%x for scsi%d\n", abs_state,
+					instance->host->host_no);
 				continue;
 			}
 
 			/* Wait for FW to become ready */
 			if (megasas_transition_to_ready(instance, 1)) {
 				printk(KERN_WARNING "megaraid_sas: Failed to "
-				       "transition controller to ready.\n");
+				       "transition controller to ready "
+				       "for scsi%d.\n",
+				       instance->host->host_no);
 				continue;
 			}
 
 			megasas_reset_reply_desc(instance);
 			if (megasas_ioc_init_fusion(instance)) {
 				printk(KERN_WARNING "megaraid_sas: "
-				       "megasas_ioc_init_fusion() failed!\n");
+				       "megasas_ioc_init_fusion() failed!"
+				       " for scsi%d\n",
+				       instance->host->host_no);
 				continue;
 			}
 
-			clear_bit(MEGASAS_FUSION_IN_RESET,
-				  &instance->reset_flags);
-			instance->instancet->enable_intr(instance);
-			instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
-
 			/* Re-fire management commands */
 			for (j = 0 ; j < instance->max_fw_cmds; j++) {
 				cmd_fusion = fusion->cmd_list[j];
@@ -2422,7 +2562,7 @@
 					instance->
 					cmd_list[cmd_fusion->sync_cmd_idx];
 					if (cmd_mfi->frame->dcmd.opcode ==
-					    MR_DCMD_LD_MAP_GET_INFO) {
+					    cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
 						megasas_return_cmd(instance,
 								   cmd_mfi);
 						megasas_return_cmd_fusion(
@@ -2433,11 +2573,15 @@
 							instance,
 							cmd_mfi->context.smid
 							-1);
-						if (!req_desc)
+						if (!req_desc) {
 							printk(KERN_WARNING
 							       "req_desc NULL"
-							       "\n");
-						else {
+							       " for scsi%d\n",
+								instance->host->host_no);
+							/* Return leaked MPT
+							   frame */
+							megasas_return_cmd_fusion(instance, cmd_fusion);
+						} else {
 							instance->instancet->
 							fire_cmd(instance,
 								 req_desc->
@@ -2451,6 +2595,11 @@
 				}
 			}
 
+			clear_bit(MEGASAS_FUSION_IN_RESET,
+				  &instance->reset_flags);
+			instance->instancet->enable_intr(instance);
+			instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+
 			/* Reset load balance info */
 			memset(fusion->load_balance_info, 0,
 			       sizeof(struct LD_LOAD_BALANCE_INFO)
@@ -2459,18 +2608,39 @@
 			if (!megasas_get_map_info(instance))
 				megasas_sync_map_info(instance);
 
+			/* Restart SR-IOV heartbeat */
+			if (instance->requestorId) {
+				if (!megasas_sriov_start_heartbeat(instance, 0))
+					megasas_start_timer(instance,
+							    &instance->sriov_heartbeat_timer,
+							    megasas_sriov_heartbeat_handler,
+							    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+				else
+					instance->skip_heartbeat_timer_del = 1;
+			}
+
 			/* Adapter reset completed successfully */
 			printk(KERN_WARNING "megaraid_sas: Reset "
-			       "successful.\n");
+			       "successful for scsi%d.\n",
+				instance->host->host_no);
 			retval = SUCCESS;
 			goto out;
 		}
 		/* Reset failed, kill the adapter */
 		printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
-		       "adapter.\n");
+		       "adapter scsi%d.\n", instance->host->host_no);
 		megaraid_sas_kill_hba(instance);
+		instance->skip_heartbeat_timer_del = 1;
+		instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
 		retval = FAILED;
 	} else {
+		/* For VF: Restart HB timer if we didn't OCR */
+		if (instance->requestorId) {
+			megasas_start_timer(instance,
+					    &instance->sriov_heartbeat_timer,
+					    megasas_sriov_heartbeat_handler,
+					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+		}
 		clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
 		instance->instancet->enable_intr(instance);
 		instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
@@ -2487,7 +2657,7 @@
 	struct megasas_instance *instance =
 		container_of(work, struct megasas_instance, work_init);
 
-	megasas_reset_fusion(instance->host);
+	megasas_reset_fusion(instance->host, 0);
 }
 
 struct megasas_instance_template megasas_instance_template_fusion = {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 35a5139..e76af54 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -485,6 +485,9 @@
 #define MAX_PHYSICAL_DEVICES 256
 #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
 #define MR_DCMD_LD_MAP_GET_INFO             0x0300e101
+#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC  0x010e8485   /* SR-IOV HB alloc*/
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111   0x03200200
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS       0x03150200
 
 struct MR_DEV_HANDLE_INFO {
 	u16     curDevHdl;
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 62f1a60..0d78a4d 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -453,7 +453,7 @@
 	    instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
 
 	if (instance->irq != SCSI_IRQ_NONE) 
-	    if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED,
+	    if (request_irq(instance->irq, pas16_intr, 0,
 			    "pas16", instance)) {
 		printk("scsi%d : IRQ%d not free, interrupts disabled\n", 
 		    instance->host_no, instance->irq);
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index a04b4ff..28b4e81 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -323,24 +323,17 @@
 	int offset;
 	char *str = buf;
 	int start = 0;
-#define IB_MEMMAP(c)		\
-		(*(u32 *)((u8 *)pm8001_ha->		\
-		memoryMap.region[IB].virt_ptr +		\
+#define IB_MEMMAP(c)	\
+		(*(u32 *)((u8 *)pm8001_ha->	\
+		memoryMap.region[IB].virt_ptr +	\
 		pm8001_ha->evtlog_ib_offset + (c)))
 
 	for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
-		if (pm8001_ha->chip_id != chip_8001)
-			str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
-		else
-			str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
+		str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
 		start = start + 4;
 	}
 	pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
-	if ((((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
-		&& (pm8001_ha->chip_id != chip_8001))
-		pm8001_ha->evtlog_ib_offset = 0;
-	if ((((pm8001_ha->evtlog_ib_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
-		&& (pm8001_ha->chip_id == chip_8001))
+	if (((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
 		pm8001_ha->evtlog_ib_offset = 0;
 
 	return str - buf;
@@ -363,24 +356,17 @@
 	int offset;
 	char *str = buf;
 	int start = 0;
-#define OB_MEMMAP(c)		\
-		(*(u32 *)((u8 *)pm8001_ha->		\
-		memoryMap.region[OB].virt_ptr +		\
+#define OB_MEMMAP(c)	\
+		(*(u32 *)((u8 *)pm8001_ha->	\
+		memoryMap.region[OB].virt_ptr +	\
 		pm8001_ha->evtlog_ob_offset + (c)))
 
 	for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
-		if (pm8001_ha->chip_id != chip_8001)
-			str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
-		else
-			str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
+		str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
 		start = start + 4;
 	}
 	pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
-	if ((((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
-			&& (pm8001_ha->chip_id != chip_8001))
-		pm8001_ha->evtlog_ob_offset = 0;
-	if ((((pm8001_ha->evtlog_ob_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
-			&& (pm8001_ha->chip_id == chip_8001))
+	if (((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
 		pm8001_ha->evtlog_ob_offset = 0;
 
 	return str - buf;
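[With the 8001-specific branch gone, the event-log offset handling in both show routines reduces to advance-and-wrap over a single queue size. The arithmetic, as a sketch with illustrative constants:

#include <stdint.h>

#define SYSFS_OFFSET	1024		/* bytes consumed per read */
#define QUEUE_SIZE	(64 * 1024)	/* one size for all chip families */

static uint32_t advance_log_offset(uint32_t off)
{
	off += SYSFS_OFFSET;
	return (off % QUEUE_SIZE) ? off : 0;	/* wrap at the queue end */
}
]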
@@ -466,7 +452,7 @@
 static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
-	u32 count;
+	ssize_t count;
 
 	count = pm80xx_get_fatal_dump(cdev, attr, buf);
 	return count;
@@ -484,7 +470,7 @@
 static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
-	u32 count;
+	ssize_t count;
 
 	count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf);
 	return count;
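[The u32 to ssize_t changes above are not cosmetic: sysfs show() routines report failure with negative values, and an unsigned local silently turns -EIO into a huge positive "length". A short standalone demonstration:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

static ssize_t backend(void) { return -EIO; }	/* a failing dump routine */

int main(void)
{
	uint32_t as_u32 = backend();	/* bug: becomes 4294967291 */
	ssize_t as_ssz = backend();	/* fix: stays -5 */

	printf("u32=%u ssize_t=%zd\n", as_u32, as_ssz);
	return 0;
}
]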
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 0a1296a..a97be01 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -644,7 +644,7 @@
 	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
 	/* 8081 controllers need BAR shift to access MPI space
 	 * as this is shared with BIOS data */
-	if (deviceid == 0x8081) {
+	if (deviceid == 0x8081 || deviceid == 0x0042) {
 		if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
 			PM8001_FAIL_DBG(pm8001_ha,
 				pm8001_printk("Shift Bar4 to 0x%x failed\n",
@@ -673,7 +673,7 @@
 	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
 		update_outbnd_queue_table(pm8001_ha, i);
 	/* 8081 controllers do not require these operations */
-	if (deviceid != 0x8081) {
+	if (deviceid != 0x8081 && deviceid != 0x0042) {
 		mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
 		/* 7->130ms, 34->500ms, 119->1.5s */
 		mpi_set_open_retry_interval_reg(pm8001_ha, 119);
@@ -701,7 +701,7 @@
 	u32 gst_len_mpistate;
 	u16 deviceid;
 	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
-	if (deviceid == 0x8081) {
+	if (deviceid == 0x8081 || deviceid == 0x0042) {
 		if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
 			PM8001_FAIL_DBG(pm8001_ha,
 				pm8001_printk("Shift Bar4 to 0x%x failed\n",
@@ -2502,11 +2502,7 @@
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*in order to force CPU ordering*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2522,11 +2518,7 @@
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2550,11 +2542,7 @@
 				IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/* ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2617,11 +2605,7 @@
 				    IO_DS_NON_OPERATIONAL);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2641,11 +2625,7 @@
 				    IO_DS_IN_ERROR);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2674,20 +2654,9 @@
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, status, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else if (t->uldd_task) {
+	} else {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/* ditto */
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
-	} else if (!t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/*ditto*/
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
+		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 	}
 }
 
@@ -2796,11 +2765,7 @@
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 			ts->resp = SAS_TASK_COMPLETE;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2909,20 +2874,9 @@
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, event, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else if (t->uldd_task) {
+	} else {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/* ditto */
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
-	} else if (!t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/*ditto*/
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
+		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 	}
 }
 
@@ -4467,23 +4421,11 @@
 					" stat 0x%x but aborted by upper layer "
 					"\n", task, ts->resp, ts->stat));
 				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
-			} else if (task->uldd_task) {
+			} else {
 				spin_unlock_irqrestore(&task->task_state_lock,
 							flags);
-				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
-				mb();/* ditto */
-				spin_unlock_irq(&pm8001_ha->lock);
-				task->task_done(task);
-				spin_lock_irq(&pm8001_ha->lock);
-				return 0;
-			} else if (!task->uldd_task) {
-				spin_unlock_irqrestore(&task->task_state_lock,
-							flags);
-				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
-				mb();/*ditto*/
-				spin_unlock_irq(&pm8001_ha->lock);
-				task->task_done(task);
-				spin_lock_irq(&pm8001_ha->lock);
+				pm8001_ccb_task_free_done(pm8001_ha, task,
+								ccb, tag);
 				return 0;
 			}
 		}
@@ -5020,7 +4962,7 @@
 	/* check max is 1 Mbytes */
 	if ((length > 0x100000) || (gsm_dump_offset & 3) ||
 		((gsm_dump_offset + length) > 0x1000000))
-			return 1;
+			return -EINVAL;
 
 	if (pm8001_ha->chip_id == chip_8001)
 		bar = 2;
@@ -5048,12 +4990,12 @@
 				gsm_base = GSM_BASE;
 				if (-1 == pm8001_bar4_shift(pm8001_ha,
 						(gsm_base + shift_value)))
-					return 1;
+					return -EIO;
 			} else {
 				gsm_base = 0;
 				if (-1 == pm80xx_bar4_shift(pm8001_ha,
 						(gsm_base + shift_value)))
-					return 1;
+					return -EIO;
 			}
 			gsm_dump_offset = (gsm_dump_offset + offset) &
 						0xFFFF0000;
@@ -5072,13 +5014,8 @@
 		direct_data += sprintf(direct_data, "%08x ", value);
 	}
 	/* Shift back to BAR4 original address */
-	if (pm8001_ha->chip_id == chip_8001) {
-		if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
-			return 1;
-	} else {
-		if (-1 == pm80xx_bar4_shift(pm8001_ha, 0))
-			return 1;
-	}
+	if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
+		return -EIO;
 	pm8001_ha->fatal_forensic_shift_offset += 1024;
 
 	if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000)
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 73a120d..c4f31b21 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -625,7 +625,7 @@
 	pm8001_ha->nvmd_completion = &completion;
 
 	if (pm8001_ha->chip_id == chip_8001) {
-		if (deviceid == 0x8081) {
+		if (deviceid == 0x8081 || deviceid == 0x0042) {
 			payload.minor_function = 4;
 			payload.length = 4096;
 		} else {
@@ -646,6 +646,9 @@
 			if (deviceid == 0x8081)
 				pm8001_ha->sas_addr[j] =
 					payload.func_specific[0x704 + i];
+			else if (deviceid == 0x0042)
+				pm8001_ha->sas_addr[j] =
+					payload.func_specific[0x010 + i];
 		} else
 			pm8001_ha->sas_addr[j] =
 					payload.func_specific[0x804 + i];
@@ -713,11 +716,9 @@
 	/* SPCv controllers supports 64 msi-x */
 	if (pm8001_ha->chip_id == chip_8001) {
 		number_of_intr = 1;
-		flag |= IRQF_DISABLED;
 	} else {
 		number_of_intr = PM8001_MAX_MSIX_VEC;
 		flag &= ~IRQF_SHARED;
-		flag |= IRQF_DISABLED;
 	}
 
 	max_entry = sizeof(pm8001_ha->msix_entries) /
@@ -1072,10 +1073,7 @@
  */
 static struct pci_device_id pm8001_pci_table[] = {
 	{ PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
-	{
-		PCI_DEVICE(0x117c, 0x0042),
-		.driver_data = chip_8001
-	},
+	{ PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
 	/* Support for SPC/SPCv/SPCve controllers */
 	{ PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
 	{ PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index f50ac44..8a44bc9 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -434,6 +434,7 @@
 		ccb->n_elem = n_elem;
 		ccb->ccb_tag = tag;
 		ccb->task = t;
+		ccb->device = pm8001_dev;
 		switch (t->task_proto) {
 		case SAS_PROTOCOL_SMP:
 			rc = pm8001_task_prep_smp(pm8001_ha, ccb);
@@ -865,13 +866,11 @@
 static void pm8001_dev_gone_notify(struct domain_device *dev)
 {
 	unsigned long flags = 0;
-	u32 tag;
 	struct pm8001_hba_info *pm8001_ha;
 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
 
 	pm8001_ha = pm8001_find_ha_by_dev(dev);
 	spin_lock_irqsave(&pm8001_ha->lock, flags);
-	pm8001_tag_alloc(pm8001_ha, &tag);
 	if (pm8001_dev) {
 		u32 device_id = pm8001_dev->device_id;
 
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 6c5fd5e..1ee06f2 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -708,5 +708,17 @@
 /* ctl shared API */
 extern struct device_attribute *pm8001_host_attrs[];
 
+static inline void
+pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
+			struct sas_task *task, struct pm8001_ccb_info *ccb,
+			u32 ccb_idx)
+{
+	pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
+	smp_mb(); /* in order to force CPU ordering */
+	spin_unlock(&pm8001_ha->lock);
+	task->task_done(task);
+	spin_lock(&pm8001_ha->lock);
+}
+
 #endif
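[pm8001_ccb_task_free_done() collapses the free/unlock/callback/relock sequence that the completion paths used to open-code; the removals in pm8001_hwi.c above and pm80xx_hwi.c below show how often. The pattern matters because task_done() may re-enter the LLDD and take the HBA lock itself. A pthread model of the helper, with the ccb free elided:

#include <pthread.h>

struct hba {
	pthread_mutex_t lock;	/* held by the completion path on entry */
};

static void task_free_done(struct hba *ha, void (*task_done)(void *),
			   void *task)
{
	/* ...free the ccb and tag here, as pm8001_ccb_task_free() does... */
	pthread_mutex_unlock(&ha->lock);	/* drop before the callback */
	task_done(task);	/* may re-enter the driver and relock */
	pthread_mutex_lock(&ha->lock);	/* restore the caller's invariant */
}
]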
 
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index c950dc5..d70587f 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -91,7 +91,6 @@
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 	void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
-	u32 status = 1;
 	u32 accum_len, reg_val, index, *temp;
 	unsigned long start;
 	u8 *direct_data;
@@ -111,13 +110,10 @@
 		direct_data = (u8 *)fatal_error_data;
 		pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
 		pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
-		pm8001_ha->forensic_info.data_buf.direct_offset = 0;
 		pm8001_ha->forensic_info.data_buf.read_len = 0;
 
 		pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
-	}
 
-	if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
 		/* start to get data */
 		/* Program the MEMBASE II Shifting Register with 0x00.*/
 		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
@@ -126,6 +122,7 @@
 		pm8001_ha->forensic_fatal_step = 0;
 		pm8001_ha->fatal_bar_loc = 0;
 	}
+
 	/* Read until accum_len is retrieved */
 	accum_len = pm8001_mr32(fatal_table_address,
 				MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
@@ -135,7 +132,7 @@
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("Possible PCI issue 0x%x not expected\n",
 				accum_len));
-		return status;
+		return -EIO;
 	}
 	if (accum_len == 0 || accum_len >= 0x100000) {
 		pm8001_ha->forensic_info.data_buf.direct_data +=
@@ -178,7 +175,6 @@
 			pm8001_ha->forensic_fatal_step = 1;
 			pm8001_ha->fatal_forensic_shift_offset = 0;
 			pm8001_ha->forensic_last_offset	= 0;
-			status = 0;
 			return (char *)pm8001_ha->
 				forensic_info.data_buf.direct_data -
 				(char *)buf;
@@ -194,7 +190,6 @@
 					forensic_info.data_buf.direct_data,
 					"%08x ", *(temp + index));
 			}
-			status = 0;
 			return (char *)pm8001_ha->
 				forensic_info.data_buf.direct_data -
 				(char *)buf;
@@ -214,7 +209,6 @@
 		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
 			pm8001_ha->fatal_forensic_shift_offset);
 		pm8001_ha->fatal_bar_loc = 0;
-		status = 0;
 		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
 			(char *)buf;
 	}
@@ -239,7 +233,7 @@
 			PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
 			" = 0x%x\n", reg_val));
-			return -1;
+			return -EIO;
 		}
 
 		/* Read the next 64K of the debug data. */
@@ -259,7 +253,6 @@
 			pm8001_ha->forensic_info.data_buf.direct_len =  0;
 			pm8001_ha->forensic_info.data_buf.direct_offset = 0;
 			pm8001_ha->forensic_info.data_buf.read_len = 0;
-			status = 0;
 		}
 	}
 
@@ -2175,11 +2168,7 @@
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*in order to force CPU ordering*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2195,11 +2184,7 @@
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2221,11 +2206,7 @@
 				IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/* ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2288,11 +2269,7 @@
 					IO_DS_NON_OPERATIONAL);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2312,11 +2289,7 @@
 					IO_DS_IN_ERROR);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2345,20 +2318,9 @@
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, status, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else if (t->uldd_task) {
+	} else {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/* ditto */
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
-	} else if (!t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/*ditto*/
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
+		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 	}
 }
 
@@ -2470,11 +2432,7 @@
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 			ts->resp = SAS_TASK_COMPLETE;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2596,20 +2554,9 @@
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, event, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else if (t->uldd_task) {
+	} else {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/* ditto */
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
-	} else if (!t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/*ditto*/
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
+		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 	}
 }
 
@@ -4304,23 +4251,11 @@
 					"\n", task, ts->resp, ts->stat));
 				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
 				return 0;
-			} else if (task->uldd_task) {
+			} else {
 				spin_unlock_irqrestore(&task->task_state_lock,
 							flags);
-				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
-				mb();/* ditto */
-				spin_unlock_irq(&pm8001_ha->lock);
-				task->task_done(task);
-				spin_lock_irq(&pm8001_ha->lock);
-				return 0;
-			} else if (!task->uldd_task) {
-				spin_unlock_irqrestore(&task->task_state_lock,
-							flags);
-				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
-				mb();/*ditto*/
-				spin_unlock_irq(&pm8001_ha->lock);
-				task->task_done(task);
-				spin_lock_irq(&pm8001_ha->lock);
+				pm8001_ccb_task_free_done(pm8001_ha, task,
+								ccb, tag);
 				return 0;
 			}
 		}
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index ff0fc7c..44def6b 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
 		qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-        qla_nx.o qla_mr.o qla_nx2.o qla_target.o
+		qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
 obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4a0d7c9..07befcf 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -147,6 +147,92 @@
 };
 
 static ssize_t
+qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj,
+			   struct bin_attribute *bin_attr,
+			   char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->fw_dump_template || !ha->fw_dump_template_len)
+		return 0;
+
+	ql_dbg(ql_dbg_user, vha, 0x70e2,
+	    "chunk <- off=%llx count=%zx\n", off, count);
+	return memory_read_from_buffer(buf, count, &off,
+	    ha->fw_dump_template, ha->fw_dump_template_len);
+}
+
+static ssize_t
+qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj,
+			    struct bin_attribute *bin_attr,
+			    char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t size;
+
+	if (off == 0) {
+		if (ha->fw_dump)
+			vfree(ha->fw_dump);
+		if (ha->fw_dump_template)
+			vfree(ha->fw_dump_template);
+
+		ha->fw_dump = NULL;
+		ha->fw_dump_len = 0;
+		ha->fw_dump_template = NULL;
+		ha->fw_dump_template_len = 0;
+
+		size = qla27xx_fwdt_template_size(buf);
+		ql_dbg(ql_dbg_user, vha, 0x70d1,
+		    "-> allocating fwdt (%x bytes)...\n", size);
+		ha->fw_dump_template = vmalloc(size);
+		if (!ha->fw_dump_template) {
+			ql_log(ql_log_warn, vha, 0x70d2,
+			    "Failed allocate fwdt (%x bytes).\n", size);
+			return -ENOMEM;
+		}
+		ha->fw_dump_template_len = size;
+	}
+
+	if (off + count > ha->fw_dump_template_len) {
+		count = ha->fw_dump_template_len - off;
+		ql_dbg(ql_dbg_user, vha, 0x70d3,
+		    "chunk -> truncating to %zx bytes.\n", count);
+	}
+
+	ql_dbg(ql_dbg_user, vha, 0x70d4,
+	    "chunk -> off=%llx count=%zx\n", off, count);
+	memcpy(ha->fw_dump_template + off, buf, count);
+
+	if (off + count == ha->fw_dump_template_len) {
+		size = qla27xx_fwdt_calculate_dump_size(vha);
+		ql_dbg(ql_dbg_user, vha, 0x70d5,
+		    "-> allocating fwdump (%x bytes)...\n", size);
+		ha->fw_dump = vmalloc(size);
+		if (!ha->fw_dump) {
+			ql_log(ql_log_warn, vha, 0x70d6,
+			    "Failed allocate fwdump (%x bytes).\n", size);
+			return -ENOMEM;
+		}
+		ha->fw_dump_len = size;
+	}
+
+	return count;
+}
+static struct bin_attribute sysfs_fw_dump_template_attr = {
+	.attr = {
+		.name = "fw_dump_template",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = 0,
+	.read = qla2x00_sysfs_read_fw_dump_template,
+	.write = qla2x00_sysfs_write_fw_dump_template,
+};
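+
+[The template writer above handles sysfs's chunked delivery: offset zero restarts the transfer and sizes the buffer from the template header, oversized chunks are clamped, and consuming the final byte triggers the dump-buffer allocation. A userspace model of the reassembly; the header-size helper is a stand-in for qla27xx_fwdt_template_size():
+
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+struct tmpl {
+	char *data;
+	size_t len;
+};
+
+/* Illustrative: pretend the template's total size is its first field. */
+static size_t template_size_of(const char *first_chunk)
+{
+	size_t sz;
+
+	memcpy(&sz, first_chunk, sizeof(sz));
+	return sz;
+}
+
+static ssize_t tmpl_write(struct tmpl *t, const char *buf,
+			  size_t off, size_t count)
+{
+	if (off == 0) {			/* restart: drop any old copy */
+		free(t->data);
+		t->len = template_size_of(buf);
+		t->data = malloc(t->len);
+		if (!t->data)
+			return -1;
+	}
+	if (off + count > t->len)
+		count = t->len - off;	/* clamp the final chunk */
+	memcpy(t->data + off, buf, count);
+	return (ssize_t)count;
+}
+
+A caller starts from a zeroed struct tmpl and feeds chunks in ascending offset order, exactly as sysfs does.]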
+
+static ssize_t
 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
 			 struct bin_attribute *bin_attr,
 			 char *buf, loff_t off, size_t count)
@@ -241,12 +327,17 @@
 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
 	struct qla_hw_data *ha = vha->hw;
+	ssize_t rval = 0;
 
 	if (ha->optrom_state != QLA_SREADING)
 		return 0;
 
-	return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
-					ha->optrom_region_size);
+	mutex_lock(&ha->optrom_mutex);
+	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
+	    ha->optrom_region_size);
+	mutex_unlock(&ha->optrom_mutex);
+
+	return rval;
 }
 
 static ssize_t
@@ -265,7 +356,9 @@
 	if (off + count > ha->optrom_region_size)
 		count = ha->optrom_region_size - off;
 
+	mutex_lock(&ha->optrom_mutex);
 	memcpy(&ha->optrom_buffer[off], buf, count);
+	mutex_unlock(&ha->optrom_mutex);
 
 	return count;
 }
@@ -288,10 +381,10 @@
 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
 	struct qla_hw_data *ha = vha->hw;
-
 	uint32_t start = 0;
 	uint32_t size = ha->optrom_size;
 	int val, valid;
+	ssize_t rval = count;
 
 	if (off)
 		return -EINVAL;
@@ -304,12 +397,14 @@
 	if (start > ha->optrom_size)
 		return -EINVAL;
 
+	mutex_lock(&ha->optrom_mutex);
 	switch (val) {
 	case 0:
 		if (ha->optrom_state != QLA_SREADING &&
-		    ha->optrom_state != QLA_SWRITING)
-			return -EINVAL;
-
+		    ha->optrom_state != QLA_SWRITING) {
+			rval = -EINVAL;
+			goto out;
+		}
 		ha->optrom_state = QLA_SWAITING;
 
 		ql_dbg(ql_dbg_user, vha, 0x7061,
@@ -320,8 +415,10 @@
 		ha->optrom_buffer = NULL;
 		break;
 	case 1:
-		if (ha->optrom_state != QLA_SWAITING)
-			return -EINVAL;
+		if (ha->optrom_state != QLA_SWAITING) {
+			rval = -EINVAL;
+			goto out;
+		}
 
 		ha->optrom_region_start = start;
 		ha->optrom_region_size = start + size > ha->optrom_size ?
@@ -335,13 +432,15 @@
 			    "(%x).\n", ha->optrom_region_size);
 
 			ha->optrom_state = QLA_SWAITING;
-			return -ENOMEM;
+			rval = -ENOMEM;
+			goto out;
 		}
 
 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
 			ql_log(ql_log_warn, vha, 0x7063,
 			    "HBA not online, failing NVRAM update.\n");
-			return -EAGAIN;
+			rval = -EAGAIN;
+			goto out;
 		}
 
 		ql_dbg(ql_dbg_user, vha, 0x7064,
@@ -353,8 +452,10 @@
 		    ha->optrom_region_start, ha->optrom_region_size);
 		break;
 	case 2:
-		if (ha->optrom_state != QLA_SWAITING)
-			return -EINVAL;
+		if (ha->optrom_state != QLA_SWAITING) {
+			rval = -EINVAL;
+			goto out;
+		}
 
 		/*
 		 * We need to be more restrictive on which FLASH regions are
@@ -388,7 +489,8 @@
 		if (!valid) {
 			ql_log(ql_log_warn, vha, 0x7065,
 			    "Invalid start region 0x%x/0x%x.\n", start, size);
-			return -EINVAL;
+			rval = -EINVAL;
+			goto out;
 		}
 
 		ha->optrom_region_start = start;
@@ -403,7 +505,8 @@
 			    "(%x)\n", ha->optrom_region_size);
 
 			ha->optrom_state = QLA_SWAITING;
-			return -ENOMEM;
+			rval = -ENOMEM;
+			goto out;
 		}
 
 		ql_dbg(ql_dbg_user, vha, 0x7067,
@@ -413,13 +516,16 @@
 		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
 		break;
 	case 3:
-		if (ha->optrom_state != QLA_SWRITING)
-			return -EINVAL;
+		if (ha->optrom_state != QLA_SWRITING) {
+			rval = -EINVAL;
+			goto out;
+		}
 
 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
 			ql_log(ql_log_warn, vha, 0x7068,
 			    "HBA not online, failing flash update.\n");
-			return -EAGAIN;
+			rval = -EAGAIN;
+			goto out;
 		}
 
 		ql_dbg(ql_dbg_user, vha, 0x7069,
@@ -430,9 +536,12 @@
 		    ha->optrom_region_start, ha->optrom_region_size);
 		break;
 	default:
-		return -EINVAL;
+		rval = -EINVAL;
 	}
-	return count;
+
+out:
+	mutex_unlock(&ha->optrom_mutex);
+	return rval;
 }
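[The optrom_ctl rework above converts every early return inside the critical section into "rval = ...; goto out;" so the new optrom_mutex is released on exactly one path. The skeleton of that idiom in pthread terms, with validate() and do_work() hypothetical:

#include <pthread.h>

static pthread_mutex_t optrom_mutex = PTHREAD_MUTEX_INITIALIZER;

static int guarded_op(int (*validate)(void), int (*do_work)(void))
{
	int rval = 0;

	pthread_mutex_lock(&optrom_mutex);
	if (validate()) {
		rval = -1;
		goto out;	/* never a bare return while locked */
	}
	rval = do_work();
out:
	pthread_mutex_unlock(&optrom_mutex);
	return rval;
}
]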
 
 static struct bin_attribute sysfs_optrom_ctl_attr = {
@@ -822,6 +931,7 @@
 	int is4GBp_only;
 } bin_file_entries[] = {
 	{ "fw_dump", &sysfs_fw_dump_attr, },
+	{ "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 },
 	{ "nvram", &sysfs_nvram_attr, },
 	{ "optrom", &sysfs_optrom_attr, },
 	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
@@ -847,6 +957,8 @@
 			continue;
 		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
 			continue;
+		if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+			continue;
 
 		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
 		    iter->attr);
@@ -1187,7 +1299,7 @@
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
 		return scnprintf(buf, PAGE_SIZE, "\n");
 
 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1391,6 +1503,37 @@
 	return scnprintf(buf, PAGE_SIZE, "%d\n", size);
 }
 
+static ssize_t
+qla2x00_allow_cna_fw_dump_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	if (!IS_P3P_TYPE(vha->hw))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+	else
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+		    vha->hw->allow_cna_fw_dump ? "true" : "false");
+}
+
+static ssize_t
+qla2x00_allow_cna_fw_dump_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int val = 0;
+
+	if (!IS_P3P_TYPE(vha->hw))
+		return -EINVAL;
+
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+
+	vha->hw->allow_cna_fw_dump = val != 0;
+
+	return strlen(buf);
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1432,6 +1575,9 @@
 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
+static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
+		   qla2x00_allow_cna_fw_dump_show,
+		   qla2x00_allow_cna_fw_dump_store);
 
 struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_driver_version,
@@ -1464,6 +1610,7 @@
 	&dev_attr_diag_requests,
 	&dev_attr_diag_megabytes,
 	&dev_attr_fw_dump_size,
+	&dev_attr_allow_cna_fw_dump,
 	NULL,
 };
 
@@ -1509,6 +1656,9 @@
 	case PORT_SPEED_16GB:
 		speed = FC_PORTSPEED_16GBIT;
 		break;
+	case PORT_SPEED_32GB:
+		speed = FC_PORTSPEED_32GBIT;
+		break;
 	}
 	fc_host_speed(shost) = speed;
 }
@@ -2160,6 +2310,9 @@
 	else if (IS_QLAFX00(ha))
 		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
 		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+	else if (IS_QLA27XX(ha))
+		speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
+		    FC_PORTSPEED_8GBIT;
 	else
 		speed = FC_PORTSPEED_1GBIT;
 	fc_host_supported_speeds(vha->host) = speed;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index f15d03e..71ff340 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1437,9 +1437,12 @@
 	if (ha->flags.nic_core_reset_hdlr_active)
 		return -EBUSY;
 
+	mutex_lock(&ha->optrom_mutex);
 	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
-	if (rval)
+	if (rval) {
+		mutex_unlock(&ha->optrom_mutex);
 		return rval;
+	}
 
 	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
 	    ha->optrom_region_start, ha->optrom_region_size);
@@ -1453,6 +1456,7 @@
 	vfree(ha->optrom_buffer);
 	ha->optrom_buffer = NULL;
 	ha->optrom_state = QLA_SWAITING;
+	mutex_unlock(&ha->optrom_mutex);
 	bsg_job->job_done(bsg_job);
 	return rval;
 }
@@ -1465,9 +1469,12 @@
 	struct qla_hw_data *ha = vha->hw;
 	int rval = 0;
 
+	mutex_lock(&ha->optrom_mutex);
 	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
-	if (rval)
+	if (rval) {
+		mutex_unlock(&ha->optrom_mutex);
 		return rval;
+	}
 
 	/* Set the isp82xx_no_md_cap not to capture minidump */
 	ha->flags.isp82xx_no_md_cap = 1;
@@ -1483,6 +1490,7 @@
 	vfree(ha->optrom_buffer);
 	ha->optrom_buffer = NULL;
 	ha->optrom_state = QLA_SWAITING;
+	mutex_unlock(&ha->optrom_mutex);
 	bsg_job->job_done(bsg_job);
 	return rval;
 }
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f6103f5..97255f7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,13 +11,15 @@
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes	|
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x015b       | 0x4b,0xba,0xfa |
- * |                              |                    | 0x0x015a	|
- * | Mailbox commands             |       0x1187       | 0x111a-0x111b  |
- * |                              |                    | 0x1155-0x1158  |
- * |                              |                    | 0x1018-0x1019  |
+ * | Module Init and Probe        |       0x017d       | 0x004b,0x0141	|
+ * |                              |                    | 0x0144,0x0146	|
+ * |                              |                    | 0x015b-0x0160	|
+ * |                              |                    | 0x016e-0x0170	|
+ * | Mailbox commands             |       0x1187       | 0x1018-0x1019	|
+ * |                              |                    | 0x10ca         |
  * |                              |                    | 0x1115-0x1116  |
- * |                              |                    | 0x10ca		|
+ * |                              |                    | 0x111a-0x111b	|
+ * |                              |                    | 0x1155-0x1158  |
  * | Device Discovery             |       0x2095       | 0x2020-0x2022, |
  * |                              |                    | 0x2011-0x2012, |
  * |                              |                    | 0x2016         |
@@ -32,18 +34,17 @@
  * |                              |                    | 0x5047,0x5052  |
  * |                              |                    | 0x5084,0x5075	|
  * |                              |                    | 0x503d,0x5044  |
+ * |                              |                    | 0x507b		|
  * | Timer Routines               |       0x6012       |                |
- * | User Space Interactions      |       0x70e1       | 0x7018,0x702e, |
- * |                              |                    | 0x7020,0x7024, |
- * |                              |                    | 0x7039,0x7045, |
- * |                              |                    | 0x7073-0x7075, |
- * |                              |                    | 0x707b,0x708c, |
- * |                              |                    | 0x70a5,0x70a6, |
- * |                              |                    | 0x70a8,0x70ab, |
- * |                              |                    | 0x70ad-0x70ae, |
- * |                              |                    | 0x70d1-0x70db, |
- * |                              |                    | 0x7047,0x703b	|
- * |                              |                    | 0x70de-0x70df, |
+ * | User Space Interactions      |       0x70e2       | 0x7018,0x702e  |
+ * |				  |		       | 0x7020,0x7024  |
+ * |                              |                    | 0x7039,0x7045  |
+ * |                              |                    | 0x7073-0x7075  |
+ * |                              |                    | 0x70a5-0x70a6  |
+ * |                              |                    | 0x70a8,0x70ab  |
+ * |                              |                    | 0x70ad-0x70ae  |
+ * |                              |                    | 0x70d7-0x70db  |
+ * |                              |                    | 0x70de-0x70df  |
  * | Task Management              |       0x803d       | 0x8025-0x8026  |
  * |                              |                    | 0x800b,0x8039  |
  * | AER/EEH                      |       0x9011       |		|
@@ -59,7 +60,11 @@
  * |                              |                    | 0xb13c-0xb140  |
  * |                              |                    | 0xb149		|
  * | MultiQ                       |       0xc00c       |		|
- * | Misc                         |       0xd010       |		|
+ * | Misc                         |       0xd2ff       | 0xd017-0xd019	|
+ * |                              |                    | 0xd020		|
+ * |                              |                    | 0xd02e-0xd0ff	|
+ * |                              |                    | 0xd101-0xd1fe	|
+ * |                              |                    | 0xd212-0xd2fe	|
  * | Target Mode		  |	  0xe070       | 0xe021		|
  * | Target Mode Management	  |	  0xf072       | 0xf002-0xf003	|
  * |                              |                    | 0xf046-0xf049  |
@@ -104,7 +109,87 @@
 	return ptr + (rsp->length * sizeof(response_t));
 }
 
-static int
+int
+qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
+	uint32_t ram_dwords, void **nxt)
+{
+	int rval;
+	uint32_t cnt, stat, timer, dwords, idx;
+	uint16_t mb0, mb1;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	dma_addr_t dump_dma = ha->gid_list_dma;
+	uint32_t *dump = (uint32_t *)ha->gid_list;
+
+	rval = QLA_SUCCESS;
+	mb0 = 0;
+
+	WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
+	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+	dwords = qla2x00_gid_list_size(ha) / 4;
+	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
+	    cnt += dwords, addr += dwords) {
+		if (cnt + dwords > ram_dwords)
+			dwords = ram_dwords - cnt;
+
+		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
+		WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+
+		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
+		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
+		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+
+		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
+		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
+
+		WRT_REG_WORD(&reg->mailbox9, 0);
+		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+
+		ha->flags.mbox_int = 0;
+		for (timer = 6000000; timer; timer--) {
+			/* Check for pending interrupts. */
+			stat = RD_REG_DWORD(&reg->host_status);
+			if (stat & HSRX_RISC_INT) {
+				stat &= 0xff;
+
+				if (stat == 0x1 || stat == 0x2 ||
+				    stat == 0x10 || stat == 0x11) {
+					set_bit(MBX_INTERRUPT,
+					    &ha->mbx_cmd_flags);
+
+					mb0 = RD_REG_WORD(&reg->mailbox0);
+					mb1 = RD_REG_WORD(&reg->mailbox1);
+
+					WRT_REG_DWORD(&reg->hccr,
+					    HCCRX_CLR_RISC_INT);
+					RD_REG_DWORD(&reg->hccr);
+					break;
+				}
+
+				/* Clear this intr; it wasn't a mailbox intr */
+				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+				RD_REG_DWORD(&reg->hccr);
+			}
+			udelay(5);
+		}
+		ha->flags.mbox_int = 1;
+
+		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+			rval = mb0 & MBS_MASK;
+			for (idx = 0; idx < dwords; idx++)
+				ram[cnt + idx] = IS_QLA27XX(ha) ?
+				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);
+		} else {
+			rval = QLA_FUNCTION_FAILED;
+		}
+	}
+
+	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
+	return rval;
+}
+
+int
 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
     uint32_t ram_dwords, void **nxt)
 {
@@ -139,6 +224,7 @@
 		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
 		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
 
+		ha->flags.mbox_int = 0;
 		for (timer = 6000000; timer; timer--) {
 			/* Check for pending interrupts. */
 			stat = RD_REG_DWORD(&reg->host_status);
@@ -164,11 +250,13 @@
 			}
 			udelay(5);
 		}
+		ha->flags.mbox_int = 1;
 
 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
 			rval = mb0 & MBS_MASK;
 			for (idx = 0; idx < dwords; idx++)
-				ram[cnt + idx] = swab32(dump[idx]);
+				ram[cnt + idx] = IS_QLA27XX(ha) ?
+				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);
 		} else {
 			rval = QLA_FUNCTION_FAILED;
 		}
@@ -208,7 +296,7 @@
 	return buf;
 }
 
-static inline int
+int
 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
 {
 	int rval = QLA_SUCCESS;
@@ -227,7 +315,7 @@
 	return rval;
 }
 
-static int
+int
 qla24xx_soft_reset(struct qla_hw_data *ha)
 {
 	int rval = QLA_SUCCESS;
@@ -537,7 +625,7 @@
 	struct qla2xxx_mq_chain *mq = ptr;
 	device_reg_t __iomem *reg;
 
-	if (!ha->mqenable || IS_QLA83XX(ha))
+	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		return ptr;
 
 	mq = ptr;
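
A note on the pair of dump helpers above: both are written so that successive
dump regions can be chained through their nxt out-parameter, which on success
points just past the last dword written (and is NULL on failure).  A minimal,
hypothetical calling sketch follows; the addresses and lengths are
illustrative only, and real callers would presumably take them from the 27xx
firmware dump template rather than hard-coding them:

	/* Editor's sketch, not part of the patch. */
	static int dump_two_regions(struct qla_hw_data *ha, uint32_t *buf)
	{
		void *nxt = NULL;
		int rval;

		/* 0x1000 dwords of MPI RAM at 0x20000 (illustrative). */
		rval = qla27xx_dump_mpi_ram(ha, 0x20000, buf, 0x1000, &nxt);
		if (rval != QLA_SUCCESS)
			return rval;

		/* nxt == &buf[0x1000]: first free slot for the next region. */
		return qla24xx_dump_ram(ha, 0x100000, nxt, 0x1000, &nxt);
	}
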
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 35e20b4..cc96104 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -348,3 +348,10 @@
 #define ql_dbg_tgt	0x00004000 /* Target mode */
 #define ql_dbg_tgt_mgt	0x00002000 /* Target mode management */
 #define ql_dbg_tgt_tmr	0x00001000 /* Target mode task management */
+
+extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+	uint32_t, void **);
+extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+	uint32_t, void **);
+extern int qla24xx_pause_risc(struct device_reg_24xx __iomem *);
+extern int qla24xx_soft_reset(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 266724b..6a10613 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -654,7 +654,7 @@
 		struct device_reg_25xxmq isp25mq;
 		struct device_reg_82xx isp82;
 		struct device_reg_fx00 ispfx00;
-} device_reg_t;
+} __iomem device_reg_t;
 
 #define ISP_REQ_Q_IN(ha, reg) \
 	(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
@@ -808,7 +808,7 @@
 					   Notification */
 #define MBA_FW_POLL_STATE	0x8600  /* Firmware in poll diagnostic state */
 #define MBA_FW_RESET_FCT	0x8502	/* Firmware reset factory defaults */
-
+#define MBA_FW_INIT_INPROGRESS	0x8500	/* Firmware boot in progress */
 /* 83XX FCoE specific */
 #define MBA_IDC_AEN		0x8200  /* FCoE: NIC Core state change AEN */
 
@@ -938,6 +938,7 @@
  */
 #define MBC_WRITE_SERDES		0x3	/* Write serdes word. */
 #define MBC_READ_SERDES			0x4	/* Read serdes word. */
+#define MBC_LOAD_DUMP_MPI_RAM		0x5	/* Load/Dump MPI RAM. */
 #define MBC_SERDES_PARAMS		0x10	/* Serdes Tx Parameters. */
 #define MBC_GET_IOCB_STATUS		0x12	/* Get IOCB status command. */
 #define MBC_PORT_PARAMS			0x1A	/* Port iDMA Parameters. */
@@ -1197,30 +1198,6 @@
 	uint8_t  reserved_3[26];
 } init_cb_t;
 
-
-struct init_cb_fx {
-	uint16_t	version;
-	uint16_t	reserved_1[13];
-	__le16		request_q_outpointer;
-	__le16		response_q_inpointer;
-	uint16_t	reserved_2[2];
-	__le16		response_q_length;
-	__le16		request_q_length;
-	uint16_t	reserved_3[2];
-	__le32		request_q_address[2];
-	__le32		response_q_address[2];
-	uint16_t	reserved_4[4];
-	uint8_t		response_q_msivec;
-	uint8_t		reserved_5[19];
-	uint16_t	interrupt_delay_timer;
-	uint16_t	reserved_6;
-	uint32_t	fwoptions1;
-	uint32_t	fwoptions2;
-	uint32_t	fwoptions3;
-	uint8_t		reserved_7[24];
-};
-
-
 /*
  * Get Link Status mailbox command return buffer.
  */
@@ -2172,6 +2149,7 @@
 #define FDMI_PORT_SPEED_4GB		0x8
 #define FDMI_PORT_SPEED_8GB		0x10
 #define FDMI_PORT_SPEED_16GB		0x20
+#define FDMI_PORT_SPEED_32GB		0x40
 #define FDMI_PORT_SPEED_UNKNOWN		0x8000
 
 struct ct_fdmi_port_attr {
@@ -2680,7 +2658,7 @@
 #define QLA_MQ_SIZE 32
 #define QLA_MAX_QUEUES 256
 #define ISP_QUE_REG(ha, id) \
-	((ha->mqenable || IS_QLA83XX(ha)) ? \
+	((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \
 	 ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
 	 ((void __iomem *)ha->iobase))
 #define QLA_REQ_QUE_ID(tag) \
@@ -2818,7 +2796,6 @@
 		uint32_t	fac_supported		:1;
 
 		uint32_t	chip_reset_done		:1;
-		uint32_t	port0			:1;
 		uint32_t	running_gold_fw		:1;
 		uint32_t	eeh_busy		:1;
 		uint32_t	cpu_affinity_enabled	:1;
@@ -2849,7 +2826,7 @@
 	spinlock_t	hardware_lock ____cacheline_aligned;
 	int		bars;
 	int		mem_only;
-	device_reg_t __iomem *iobase;           /* Base I/O address */
+	device_reg_t *iobase;           /* Base I/O address */
 	resource_size_t pio_address;
 
 #define MIN_IOBASE_LEN          0x100
@@ -2868,8 +2845,8 @@
 	uint32_t		rsp_que_off;
 
 	/* Multi queue data structs */
-	device_reg_t __iomem *mqiobase;
-	device_reg_t __iomem *msixbase;
+	device_reg_t *mqiobase;
+	device_reg_t *msixbase;
 	uint16_t        msix_count;
 	uint8_t         mqenable;
 	struct req_que **req_q_map;
@@ -2905,6 +2882,7 @@
 #define PORT_SPEED_4GB  0x03
 #define PORT_SPEED_8GB  0x04
 #define PORT_SPEED_16GB 0x05
+#define PORT_SPEED_32GB 0x06
 #define PORT_SPEED_10GB	0x13
 	uint16_t	link_data_rate;         /* F/W operating speed */
 
@@ -2928,6 +2906,7 @@
 #define PCI_DEVICE_ID_QLOGIC_ISP8001	0x8001
 #define PCI_DEVICE_ID_QLOGIC_ISP8031	0x8031
 #define PCI_DEVICE_ID_QLOGIC_ISP2031	0x2031
+#define PCI_DEVICE_ID_QLOGIC_ISP2071	0x2071
 	uint32_t	device_type;
 #define DT_ISP2100                      BIT_0
 #define DT_ISP2200                      BIT_1
@@ -2948,7 +2927,8 @@
 #define DT_ISP8031			BIT_16
 #define DT_ISPFX00			BIT_17
 #define DT_ISP8044			BIT_18
-#define DT_ISP_LAST			(DT_ISP8044 << 1)
+#define DT_ISP2071			BIT_19
+#define DT_ISP_LAST			(DT_ISP2071 << 1)
 
 #define DT_T10_PI                       BIT_25
 #define DT_IIDMA                        BIT_26
@@ -2978,6 +2958,7 @@
 #define IS_QLA2031(ha)	(DT_MASK(ha) & DT_ISP2031)
 #define IS_QLA8031(ha)	(DT_MASK(ha) & DT_ISP8031)
 #define IS_QLAFX00(ha)	(DT_MASK(ha) & DT_ISPFX00)
+#define IS_QLA2071(ha)	(DT_MASK(ha) & DT_ISP2071)
 
 #define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
 			IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2986,6 +2967,7 @@
 #define IS_QLA25XX(ha)  (IS_QLA2532(ha))
 #define IS_QLA83XX(ha)	(IS_QLA2031(ha) || IS_QLA8031(ha))
 #define IS_QLA84XX(ha)  (IS_QLA8432(ha))
+#define IS_QLA27XX(ha)  (IS_QLA2071(ha))
 #define IS_QLA24XX_TYPE(ha)     (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
 				IS_QLA84XX(ha))
 #define IS_CNA_CAPABLE(ha)	(IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -2994,11 +2976,13 @@
 #define IS_QLA2XXX_MIDTYPE(ha)	(IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
 				IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
 				IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
-				IS_QLA8044(ha))
+				IS_QLA8044(ha) || IS_QLA27XX(ha))
 #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
 #define IS_NOPOLLING_TYPE(ha)	(IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
-#define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha))
-#define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha))
+#define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+				IS_QLA27XX(ha))
+#define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+				IS_QLA27XX(ha))
 #define IS_ALOGIO_CAPABLE(ha)	(IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
 
 #define IS_T10_PI_CAPABLE(ha)   ((ha)->device_type & DT_T10_PI)
@@ -3008,7 +2992,8 @@
 #define IS_OEM_001(ha)          ((ha)->device_type & DT_OEM_001)
 #define HAS_EXTENDED_IDS(ha)    ((ha)->device_type & DT_EXTENDED_IDS)
 #define IS_CT6_SUPPORTED(ha)	((ha)->device_type & DT_CT6_SUPPORTED)
-#define IS_MQUE_CAPABLE(ha)	((ha)->mqenable || IS_QLA83XX(ha))
+#define IS_MQUE_CAPABLE(ha)	((ha)->mqenable || IS_QLA83XX(ha) || \
+				IS_QLA27XX(ha))
 #define IS_BIDI_CAPABLE(ha)	((IS_QLA25XX(ha) || IS_QLA2031(ha)))
 /* Bit 21 of fw_attributes decides the MCTP capabilities */
 #define IS_MCTP_CAPABLE(ha)	(IS_QLA2031(ha) && \
@@ -3133,6 +3118,9 @@
 	uint16_t	fw_xcb_count;
 	uint16_t	fw_iocb_count;
 
+	uint32_t	fw_shared_ram_start;
+	uint32_t	fw_shared_ram_end;
+
 	uint16_t	fw_options[16];         /* slots: 1,2,3,10,11 */
 	uint8_t		fw_seriallink_options[4];
 	uint16_t	fw_seriallink_options24[4];
@@ -3141,6 +3129,9 @@
 	uint32_t	mpi_capabilities;
 	uint8_t		phy_version[3];
 
+	/* Firmware dump template */
+	void		*fw_dump_template;
+	uint32_t	fw_dump_template_len;
 	/* Firmware dump information. */
 	struct qla2xxx_fw_dump *fw_dump;
 	uint32_t	fw_dump_len;
@@ -3183,6 +3174,7 @@
 #define QLA_SWRITING	2
 	uint32_t	optrom_region_start;
 	uint32_t	optrom_region_size;
+	struct mutex	optrom_mutex;
 
 /* PCI expansion ROM image information. */
 #define ROM_CODE_TYPE_BIOS	0
@@ -3309,6 +3301,7 @@
 	struct mr_data_fx00 mr;
 
 	struct qlt_hw_data tgt;
+	int	allow_cna_fw_dump;
 };
 
 /*
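
An aside on the device-type plumbing in the qla_def.h hunk above: each ISP
family owns exactly one bit of ha->device_type, so wiring up the ISP2071
amounts to claiming BIT_19 and bumping the DT_ISP_LAST sentinel that bounds
the type space.  A hedged expansion of the new check, assuming DT_MASK()
masks device_type down to the type bits below DT_ISP_LAST (i.e. strips the
capability flags such as DT_T10_PI):

	/* IS_QLA27XX(ha) -> IS_QLA2071(ha) -> DT_MASK(ha) & DT_ISP2071 */
	static inline int is_qla27xx_expanded(struct qla_hw_data *ha)
	{
		return !!((ha->device_type & (DT_ISP_LAST - 1)) & DT_ISP2071);
	}
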
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 792a292..32ab809 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -114,7 +114,8 @@
 {
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+	    !IS_QLA27XX(ha))
 		goto out;
 	if (!ha->fce)
 		goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 610d3aa9..3a7353e 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1378,6 +1378,10 @@
 #define FLT_REG_NVRAM_0		0x15
 #define FLT_REG_VPD_1		0x16
 #define FLT_REG_NVRAM_1		0x17
+#define FLT_REG_VPD_2		0xD4
+#define FLT_REG_NVRAM_2		0xD5
+#define FLT_REG_VPD_3		0xD6
+#define FLT_REG_NVRAM_3		0xD7
 #define FLT_REG_FDT		0x1a
 #define FLT_REG_FLT		0x1c
 #define FLT_REG_HW_EVENT_0	0x1d
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1f42662..e665e81 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -330,6 +330,7 @@
     dma_addr_t);
 
 extern int qla24xx_abort_command(srb_t *);
+extern int qla24xx_async_abort_command(srb_t *);
 extern int
 qla24xx_abort_target(struct fc_port *, unsigned int, int);
 extern int
@@ -511,6 +512,16 @@
 extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla8044_fw_dump(scsi_qla_host_t *, int);
+
+extern void qla27xx_fwdump(scsi_qla_host_t *, int);
+extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *);
+extern int qla27xx_fwdt_template_valid(void *);
+extern ulong qla27xx_fwdt_template_size(void *);
+extern const void *qla27xx_fwdt_template_default(void);
+extern ulong qla27xx_fwdt_template_default_size(void);
+
 extern void qla2x00_dump_regs(scsi_qla_host_t *);
 extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
 extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
@@ -594,7 +605,6 @@
 extern irqreturn_t qlafx00_intr_handler(int, void *);
 extern void qlafx00_enable_intrs(struct qla_hw_data *);
 extern void qlafx00_disable_intrs(struct qla_hw_data *);
-extern int qlafx00_abort_command(srb_t *);
 extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
 extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
 extern int qlafx00_start_scsi(srb_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index cd47f1b..e377f9d2 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1532,6 +1532,10 @@
 	if (IS_CNA_CAPABLE(ha))
 		eiter->a.sup_speed = __constant_cpu_to_be32(
 		    FDMI_PORT_SPEED_10GB);
+	else if (IS_QLA27XX(ha))
+		eiter->a.sup_speed = __constant_cpu_to_be32(
+		    FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB|
+		    FDMI_PORT_SPEED_8GB);
 	else if (IS_QLA25XX(ha))
 		eiter->a.sup_speed = __constant_cpu_to_be32(
 		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
@@ -1580,6 +1584,10 @@
 		eiter->a.cur_speed =
 		    __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
 		break;
+	case PORT_SPEED_32GB:
+		eiter->a.cur_speed =
+		    __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB);
+		break;
 	default:
 		eiter->a.cur_speed =
 		    __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
@@ -1889,6 +1897,9 @@
 			case BIT_10:
 				list[i].fp_speed = PORT_SPEED_16GB;
 				break;
+			case BIT_8:
+				list[i].fp_speed = PORT_SPEED_32GB;
+				break;
 			}
 
 			ql_dbg(ql_dbg_disc, vha, 0x205b,
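
Taken together, the qla_gs.c hunks teach the FDMI registration code about
32Gb: an ISP27xx advertises 8/16/32Gb in its supported-speed mask, translates
an operating rate of PORT_SPEED_32GB (0x06) into FDMI_PORT_SPEED_32GB (0x40),
and decodes the firmware's BIT_8 port capability into PORT_SPEED_32GB.  A
condensed sketch of the rate translation (hypothetical helper; the constants
are the ones added in the qla_def.h hunk above):

	/* Editor's sketch: firmware link rate -> FDMI current-speed value. */
	static uint32_t fdmi_cur_speed(uint16_t link_data_rate)
	{
		switch (link_data_rate) {
		case PORT_SPEED_32GB:
			return FDMI_PORT_SPEED_32GB;
		default:
			return FDMI_PORT_SPEED_UNKNOWN;
		}
	}
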
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e7e5f4f..38aeb54 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -271,56 +271,46 @@
 }
 
 static void
-qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
+qla2x00_tmf_iocb_timeout(void *data)
+{
+	srb_t *sp = (srb_t *)data;
+	struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+	tmf->u.tmf.comp_status = CS_TIMEOUT;
+	complete(&tmf->u.tmf.comp);
+}
+
+static void
+qla2x00_tmf_sp_done(void *data, void *ptr, int res)
 {
 	srb_t *sp = (srb_t *)ptr;
-	struct srb_iocb *iocb = &sp->u.iocb_cmd;
-	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
-	uint32_t flags;
-	uint16_t lun;
-	int rval;
-
-	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
-		flags = iocb->u.tmf.flags;
-		lun = (uint16_t)iocb->u.tmf.lun;
-
-		/* Issue Marker IOCB */
-		rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
-			vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
-			flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
-
-		if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
-			ql_dbg(ql_dbg_taskm, vha, 0x8030,
-			    "TM IOCB failed (%x).\n", rval);
-		}
-	}
-	sp->free(sp->fcport->vha, sp);
+	struct srb_iocb *tmf = &sp->u.iocb_cmd;
+	complete(&tmf->u.tmf.comp);
 }
 
 int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 	uint32_t tag)
 {
 	struct scsi_qla_host *vha = fcport->vha;
+	struct srb_iocb *tm_iocb;
 	srb_t *sp;
-	struct srb_iocb *tcf;
-	int rval;
+	int rval = QLA_FUNCTION_FAILED;
 
-	rval = QLA_FUNCTION_FAILED;
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
+	tm_iocb = &sp->u.iocb_cmd;
 	sp->type = SRB_TM_CMD;
 	sp->name = "tmf";
-	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
-	tcf = &sp->u.iocb_cmd;
-	tcf->u.tmf.flags = tm_flags;
-	tcf->u.tmf.lun = lun;
-	tcf->u.tmf.data = tag;
-	tcf->timeout = qla2x00_async_iocb_timeout;
-	sp->done = qla2x00_async_tm_cmd_done;
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+	tm_iocb->u.tmf.flags = flags;
+	tm_iocb->u.tmf.lun = lun;
+	tm_iocb->u.tmf.data = tag;
+	sp->done = qla2x00_tmf_sp_done;
+	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
+	init_completion(&tm_iocb->u.tmf.comp);
 
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS)
@@ -330,14 +320,121 @@
 	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
 	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
-	return rval;
+
+	wait_for_completion(&tm_iocb->u.tmf.comp);
+
+	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
+	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
+		ql_dbg(ql_dbg_taskm, vha, 0x8030,
+		    "TM IOCB failed (%x).\n", rval);
+	}
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
+		flags = tm_iocb->u.tmf.flags;
+		lun = (uint16_t)tm_iocb->u.tmf.lun;
+
+		/* Issue Marker IOCB */
+		qla2x00_marker(vha, vha->hw->req_q_map[0],
+		    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+	}
 
 done_free_sp:
-	sp->free(fcport->vha, sp);
+	sp->free(vha, sp);
 done:
 	return rval;
 }
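
The task-management rework just above (and the abort path that follows)
trades a done-callback that carried all the post-processing for a synchronous
wait in the issuing context, built on the stock <linux/completion.h>
primitives.  Stripped to the bare idiom it looks like the hypothetical
wrapper below; every identifier in it comes from this patch except the
wrapper's own name:

	static int issue_and_wait(srb_t *sp)
	{
		struct srb_iocb *iocb = &sp->u.iocb_cmd;

		/* Arm the completion before the IOCB is started; both
		 * sp->done and the IOCB timeout handler finish with
		 * complete(), and the timeout handler records CS_TIMEOUT
		 * first so the waiter can tell the wakeups apart. */
		init_completion(&iocb->u.tmf.comp);

		if (qla2x00_start_sp(sp) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		wait_for_completion(&iocb->u.tmf.comp);

		return iocb->u.tmf.comp_status == CS_COMPLETE ?
		    QLA_SUCCESS : QLA_FUNCTION_FAILED;
	}
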
 
+static void
+qla24xx_abort_iocb_timeout(void *data)
+{
+	srb_t *sp = (srb_t *)data;
+	struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+	abt->u.abt.comp_status = CS_TIMEOUT;
+	complete(&abt->u.abt.comp);
+}
+
+static void
+qla24xx_abort_sp_done(void *data, void *ptr, int res)
+{
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+	complete(&abt->u.abt.comp);
+}
+
+static int
+qla24xx_async_abort_cmd(srb_t *cmd_sp)
+{
+	scsi_qla_host_t *vha = cmd_sp->fcport->vha;
+	fc_port_t *fcport = cmd_sp->fcport;
+	struct srb_iocb *abt_iocb;
+	srb_t *sp;
+	int rval = QLA_FUNCTION_FAILED;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	abt_iocb = &sp->u.iocb_cmd;
+	sp->type = SRB_ABT_CMD;
+	sp->name = "abort";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+	sp->done = qla24xx_abort_sp_done;
+	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
+	init_completion(&abt_iocb->u.abt.comp);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_async, vha, 0x507c,
+	    "Abort command issued - hdl=%x, target_id=%x\n",
+	    cmd_sp->handle, fcport->tgt_id);
+
+	wait_for_completion(&abt_iocb->u.abt.comp);
+
+	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+done_free_sp:
+	sp->free(vha, sp);
+done:
+	return rval;
+}
+
+int
+qla24xx_async_abort_command(srb_t *sp)
+{
+	unsigned long   flags = 0;
+
+	uint32_t	handle;
+	fc_port_t	*fcport = sp->fcport;
+	struct scsi_qla_host *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = vha->req;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
+		if (req->outstanding_cmds[handle] == sp)
+			break;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if (handle == req->num_outstanding_cmds) {
+		/* Command not found. */
+		return QLA_FUNCTION_FAILED;
+	}
+	if (sp->type == SRB_FXIOCB_DCMD)
+		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
+		    FXDISC_ABORT_IOCTL);
+
+	return qla24xx_async_abort_cmd(sp);
+}
+
 void
 qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
@@ -1379,7 +1476,12 @@
 	}
 
 	ha->fw_dumped = 0;
-	fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+	req_q_size = rsp_q_size = 0;
+
+	if (IS_QLA27XX(ha))
+		goto try_fce;
+
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
 		fixed_size = sizeof(struct qla2100_fw_dump);
 	} else if (IS_QLA23XX(ha)) {
@@ -1395,6 +1497,7 @@
 			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
 		else
 			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
 		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
 		    sizeof(uint32_t);
 		if (ha->mqenable) {
@@ -1412,9 +1515,16 @@
 		if (ha->tgt.atio_ring)
 			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
 		/* Allocate memory for Fibre Channel Event Buffer. */
-		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+		    !IS_QLA27XX(ha))
 			goto try_eft;
 
+try_fce:
+		if (ha->fce)
+			dma_free_coherent(&ha->pdev->dev,
+			    FCE_SIZE, ha->fce, ha->fce_dma);
+
+		/* Allocate memory for Fibre Channel Event Buffer. */
 		tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
 		    GFP_KERNEL);
 		if (!tc) {
@@ -1442,7 +1552,12 @@
 		ha->flags.fce_enabled = 1;
 		ha->fce_dma = tc_dma;
 		ha->fce = tc;
+
 try_eft:
+		if (ha->eft)
+			dma_free_coherent(&ha->pdev->dev,
+			    EFT_SIZE, ha->eft, ha->eft_dma);
+
 		/* Allocate memory for Extended Trace Buffer. */
 		tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
 		    GFP_KERNEL);
@@ -1469,15 +1584,28 @@
 		ha->eft_dma = tc_dma;
 		ha->eft = tc;
 	}
+
 cont_alloc:
+	if (IS_QLA27XX(ha)) {
+		if (!ha->fw_dump_template) {
+			ql_log(ql_log_warn, vha, 0x00ba,
+			    "Failed missing fwdump template\n");
+			return;
+		}
+		dump_size = qla27xx_fwdt_calculate_dump_size(vha);
+		ql_dbg(ql_dbg_init, vha, 0x00fa,
+		    "-> allocating fwdump (%x bytes)...\n", dump_size);
+		goto allocate;
+	}
+
 	req_q_size = req->length * sizeof(request_t);
 	rsp_q_size = rsp->length * sizeof(response_t);
-
 	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
 	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
 	ha->chain_offset = dump_size;
 	dump_size += mq_size + fce_size;
 
+allocate:
 	ha->fw_dump = vmalloc(dump_size);
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0x00c4,
@@ -1499,10 +1627,13 @@
 		}
 		return;
 	}
+	ha->fw_dump_len = dump_size;
 	ql_dbg(ql_dbg_init, vha, 0x00c5,
 	    "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
 
-	ha->fw_dump_len = dump_size;
+	if (IS_QLA27XX(ha))
+		return;
+
 	ha->fw_dump->signature[0] = 'Q';
 	ha->fw_dump->signature[1] = 'L';
 	ha->fw_dump->signature[2] = 'G';
@@ -1718,9 +1849,6 @@
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	}
 
-	if (IS_QLA83XX(ha))
-		goto skip_fac_check;
-
 	if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
 		uint32_t size;
 
@@ -1733,8 +1861,8 @@
 			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
 			    ha->fw_major_version, ha->fw_minor_version,
 			    ha->fw_subminor_version);
-skip_fac_check:
-			if (IS_QLA83XX(ha)) {
+
+			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 				ha->flags.fac_supported = 0;
 				rval = QLA_SUCCESS;
 			}
@@ -1933,7 +2061,7 @@
 	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
 	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
 
-	if (ha->mqenable || IS_QLA83XX(ha)) {
+	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
 		icb->rid = __constant_cpu_to_le16(rid);
 		if (ha->flags.msix_enabled) {
@@ -4792,13 +4920,14 @@
 	nv = ha->nvram;
 
 	/* Determine NVRAM starting address. */
-	if (ha->flags.port0) {
+	if (ha->port_no == 0) {
 		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
 		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
 	} else {
 		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
 		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
 	}
+
 	ha->nvram_size = sizeof(struct nvram_24xx);
 	ha->vpd_size = FA_NVRAM_VPD_SIZE;
 
@@ -4842,7 +4971,7 @@
 		nv->exchange_count = __constant_cpu_to_le16(0);
 		nv->hard_address = __constant_cpu_to_le16(124);
 		nv->port_name[0] = 0x21;
-		nv->port_name[1] = 0x00 + ha->port_no;
+		nv->port_name[1] = 0x00 + ha->port_no + 1;
 		nv->port_name[2] = 0x00;
 		nv->port_name[3] = 0xe0;
 		nv->port_name[4] = 0x8b;
@@ -5117,6 +5246,99 @@
 		segments--;
 	}
 
+	if (!IS_QLA27XX(ha))
+		return rval;
+
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	ql_dbg(ql_dbg_init, vha, 0x0161,
+	    "Loading fwdump template from %x\n", faddr);
+	qla24xx_read_flash_data(vha, dcode, faddr, 7);
+	risc_size = be32_to_cpu(dcode[2]);
+	ql_dbg(ql_dbg_init, vha, 0x0162,
+	    "-> array size %x dwords\n", risc_size);
+	if (risc_size == 0 || risc_size == ~0)
+		goto default_template;
+
+	dlen = (risc_size - 8) * sizeof(*dcode);
+	ql_dbg(ql_dbg_init, vha, 0x0163,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x0164,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto default_template;
+	}
+
+	faddr += 7;
+	risc_size -= 8;
+	dcode = ha->fw_dump_template;
+	qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = le32_to_cpu(dcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(dcode)) {
+		ql_log(ql_log_warn, vha, 0x0165,
+		    "Failed fwdump template validate\n");
+		goto default_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(dcode);
+	ql_dbg(ql_dbg_init, vha, 0x0166,