Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "14 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: revert x86_64 and arm64 ELF_ET_DYN_BASE base changes
  mm/vmalloc.c: don't unconditionally use __GFP_HIGHMEM
  mm/mempolicy: fix use after free when calling get_mempolicy
  mm/cma_debug.c: fix stack corruption due to sprintf usage
  signal: don't remove SIGNAL_UNKILLABLE for traced tasks
  mm, oom: fix potential data corruption when oom_reaper races with writer
  mm: fix double mmap_sem unlock on MMF_UNSTABLE enforced SIGBUS
  slub: fix per memcg cache leak on css offline
  mm: discard memblock data later
  test_kmod: fix description for -s and -c parameters
  kmod: fix wait on recursive loop
  wait: add wait_event_killable_timeout()
  kernel/watchdog: fix Kconfig constraints for perf hardlockup watchdog
  mm: memcontrol: fix NULL pointer crash in test_clear_page_writeback()
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index 0c3354c..76944e3 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -36,12 +36,18 @@
 	for (queue = 0; queue < set->nr_hw_queues; queue++) {
 		mask = pci_irq_get_affinity(pdev, queue);
 		if (!mask)
-			return -EINVAL;
+			goto fallback;
 
 		for_each_cpu(cpu, mask)
 			set->mq_map[cpu] = queue;
 	}
 
 	return 0;
+
+fallback:
+	WARN_ON_ONCE(set->nr_hw_queues > 1);
+	for_each_possible_cpu(cpu)
+		set->mq_map[cpu] = 0;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
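
The blk-mq-pci hunk above turns a hard failure into graceful degradation: when pci_irq_get_affinity() returns no mask for a queue (the device may not be using managed IRQ affinity), every CPU is mapped to queue 0 instead of failing the whole operation, with WARN_ON_ONCE() flagging the unexpected multi-queue case. A minimal userspace sketch of the same fallback pattern follows; tag_set, queue_affinity() and NR_CPUS here are illustrative stand-ins, not the kernel's types:

    #define NR_CPUS 8

    struct tag_set {
        int nr_hw_queues;
        int mq_map[NR_CPUS];
    };

    /* stand-in for pci_irq_get_affinity(): 0 means "no mask available" */
    static unsigned int queue_affinity(int queue)
    {
        return queue == 0 ? 0x0fu : 0;  /* pretend queue 1 has no mask */
    }

    static int map_queues(struct tag_set *set)
    {
        for (int queue = 0; queue < set->nr_hw_queues; queue++) {
            unsigned int mask = queue_affinity(queue);

            if (!mask)
                goto fallback;
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (mask & (1u << cpu))
                    set->mq_map[cpu] = queue;
        }
        return 0;

    fallback:
        /* degrade to a single queue rather than failing the mapping */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            set->mq_map[cpu] = 0;
        return 0;
    }
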
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 535cbdf..4603b11 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -360,12 +360,12 @@
 		return ERR_PTR(ret);
 
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+	blk_queue_exit(q);
 
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);
 
 	blk_mq_put_ctx(alloc_data.ctx);
-	blk_queue_exit(q);
 
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
@@ -411,12 +411,11 @@
 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+	blk_queue_exit(q);
 
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);
 
-	blk_queue_exit(q);
-
 	return rq;
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
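
Both blk-mq hunks fix the same leak: blk_queue_enter() takes a usage reference on the queue before the allocation, but the old code returned ERR_PTR(-EWOULDBLOCK) without calling blk_queue_exit(), so a failed blk_mq_get_request() leaked the reference. Moving blk_queue_exit() to immediately after the allocation releases it on both paths. A hedged sketch of the pattern; queue, ref_get/ref_put and try_alloc are stand-ins, not the kernel API:

    struct queue { int usage_refs; };

    static void ref_get(struct queue *q) { q->usage_refs++; }
    static void ref_put(struct queue *q) { q->usage_refs--; }

    /* stand-in allocator that may fail and return NULL */
    static void *try_alloc(struct queue *q) { (void)q; return 0; }

    static void *alloc_request(struct queue *q)
    {
        void *rq;

        ref_get(q);        /* what blk_queue_enter() did earlier */
        rq = try_alloc(q);
        ref_put(q);        /* now released on success AND failure */

        return rq;         /* NULL no longer leaks the reference */
    }
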
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 98e34e4..2468c28 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2075,9 +2075,9 @@
 			/*
 			 * Get the bios in the request so we can re-queue them.
 			 */
-			if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
-			    req_op(shadow[i].request) == REQ_OP_DISCARD ||
-			    req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
+			if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+			    req_op(shadow[j].request) == REQ_OP_DISCARD ||
+			    req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
 			    shadow[j].request->cmd_flags & REQ_FUA) {
 				/*
 				 * Flush operations don't contain bios, so
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a5dfab6..221468f 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -537,10 +537,11 @@
 	}
 	up_read(&lists_rwsem);
 
-	mutex_unlock(&device_mutex);
-
 	ib_device_unregister_rdmacg(device);
 	ib_device_unregister_sysfs(device);
+
+	mutex_unlock(&device_mutex);
+
 	ib_cache_cleanup_one(device);
 
 	ib_security_destroy_port_pkey_list(device);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index c023e2c..5e530d2 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -1153,7 +1153,6 @@
 		kref_get(&file->ref);
 		mutex_unlock(&uverbs_dev->lists_mutex);
 
-		ib_uverbs_event_handler(&file->event_handler, &event);
 
 		mutex_lock(&file->cleanup_mutex);
 		ucontext = file->ucontext;
@@ -1170,6 +1169,7 @@
 			 * for example due to freeing the resources
 			 * (e.g mmput).
 			 */
+			ib_uverbs_event_handler(&file->event_handler, &event);
 			ib_dev->disassociate_ucontext(ucontext);
 			mutex_lock(&file->cleanup_mutex);
 			ib_uverbs_cleanup_ucontext(file, ucontext, true);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 5332f06..c2fba76 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -661,7 +661,7 @@
 	rhp = php->rhp;
 
 	if (mr_type != IB_MR_TYPE_MEM_REG ||
-	    max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl &&
+	    max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
 					 use_dsgl))
 		return ERR_PTR(-EINVAL);
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index f78a733..d545302 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -64,8 +64,10 @@
 	} else {
 		u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);
 
-		if (!dmac)
+		if (!dmac) {
+			kfree(ah);
 			return ERR_PTR(-EINVAL);
+		}
 		memcpy(ah->av.mac, dmac, ETH_ALEN);
 	}
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 9ec1ae9..a49ff2e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -130,20 +130,32 @@
 	u64 base = 0;
 	u32 i, j;
 	u32 k = 0;
-	u32 low;
 
 	/* copy base values in obj_info */
-	for (i = I40IW_HMC_IW_QP, j = 0;
-			i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+		if ((i == I40IW_HMC_IW_SRQ) ||
+			(i == I40IW_HMC_IW_FSIMC) ||
+			(i == I40IW_HMC_IW_FSIAV)) {
+			info[i].base = 0;
+			info[i].cnt = 0;
+			continue;
+		}
 		get_64bit_val(buf, j, &temp);
 		info[i].base = RS_64_1(temp, 32) * 512;
 		if (info[i].base > base) {
 			base = info[i].base;
 			k = i;
 		}
-		low = (u32)(temp);
-		if (low)
-			info[i].cnt = low;
+		if (i == I40IW_HMC_IW_APBVT_ENTRY) {
+			info[i].cnt = 1;
+			continue;
+		}
+		if (i == I40IW_HMC_IW_QP)
+			info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+		else if (i == I40IW_HMC_IW_CQ)
+			info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+		else
+			info[i].cnt = (u32)(temp);
 	}
 	size = info[k].cnt * info[k].size + info[k].base;
 	if (size & 0x1FFFFF)
@@ -155,6 +167,31 @@
 }
 
 /**
+ * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
+ * @buf: ptr to fpm query buffer
+ * @buf_idx: index into buf
+ * @obj_info: ptr to i40iw_hmc_obj_info struct
+ * @rsrc_idx: resource index into obj_info
+ *
+ * Decode a 64 bit value from fpm query buffer into max count and size
+ */
+static u64 i40iw_sc_decode_fpm_query(u64 *buf,
+					    u32 buf_idx,
+					    struct i40iw_hmc_obj_info *obj_info,
+					    u32 rsrc_idx)
+{
+	u64 temp;
+	u32 size;
+
+	get_64bit_val(buf, buf_idx, &temp);
+	obj_info[rsrc_idx].max_cnt = (u32)temp;
+	size = (u32)RS_64_1(temp, 32);
+	obj_info[rsrc_idx].size = LS_64_1(1, size);
+
+	return temp;
+}
+
+/**
  * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
  * @buf: ptr to fpm query buffer
  * @info: ptr to i40iw_hmc_obj_info struct
@@ -168,9 +205,9 @@
 				struct i40iw_hmc_info *hmc_info,
 				struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
 {
-	u64 temp;
 	struct i40iw_hmc_obj_info *obj_info;
-	u32 i, j, size;
+	u64 temp;
+	u32 size;
 	u16 max_pe_sds;
 
 	obj_info = hmc_info->hmc_obj;
@@ -185,41 +222,52 @@
 	hmc_fpm_misc->max_sds = max_pe_sds;
 	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
 
-	for (i = I40IW_HMC_IW_QP, j = 8;
-	     i <= I40IW_HMC_IW_ARP; i++, j += 8) {
-		get_64bit_val(buf, j, &temp);
-		if (i == I40IW_HMC_IW_QP)
-			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
-		else if (i == I40IW_HMC_IW_CQ)
-			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
-		else
-			obj_info[i].max_cnt = (u32)temp;
+	get_64bit_val(buf, 8, &temp);
+	obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+	size = (u32)RS_64_1(temp, 32);
+	obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);
 
-		size = (u32)RS_64_1(temp, 32);
-		obj_info[i].size = ((u64)1 << size);
-	}
-	for (i = I40IW_HMC_IW_MR, j = 48;
-			i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
-		get_64bit_val(buf, j, &temp);
-		obj_info[i].max_cnt = (u32)temp;
-		size = (u32)RS_64_1(temp, 32);
-		obj_info[i].size = LS_64_1(1, size);
-	}
+	get_64bit_val(buf, 16, &temp);
+	obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+	size = (u32)RS_64_1(temp, 32);
+	obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);
 
-	get_64bit_val(buf, 120, &temp);
-	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
-	get_64bit_val(buf, 120, &temp);
-	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
-	get_64bit_val(buf, 120, &temp);
-	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+	i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
+	i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);
+
+	obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
+	obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+	i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
+	i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);
+
 	get_64bit_val(buf, 64, &temp);
+	obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
+	obj_info[I40IW_HMC_IW_XFFL].size = 4;
 	hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
 	if (!hmc_fpm_misc->xf_block_size)
 		return I40IW_ERR_INVALID_SIZE;
+
+	i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);
+
 	get_64bit_val(buf, 80, &temp);
+	obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
+	obj_info[I40IW_HMC_IW_Q1FL].size = 4;
 	hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
 	if (!hmc_fpm_misc->q1_block_size)
 		return I40IW_ERR_INVALID_SIZE;
+
+	i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);
+
+	get_64bit_val(buf, 112, &temp);
+	obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
+	obj_info[I40IW_HMC_IW_PBLE].size = 8;
+
+	get_64bit_val(buf, 120, &temp);
+	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
+	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
+	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+
 	return 0;
 }
 
@@ -3392,13 +3440,6 @@
 		hmc_info->sd_table.sd_entry = virt_mem.va;
 	}
 
-	/* fill size of objects which are fixed */
-	hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
-	hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
-	hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
-	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
-	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
-
 	return ret_code;
 }
 
@@ -4840,7 +4881,7 @@
 {
 	u8 fcn_id = vsi->fcn_id;
 
-	if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
+	if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
 		vsi->dev->fcn_id_array[fcn_id] = false;
 	i40iw_hw_stats_stop_timer(vsi);
 }
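
The i40iw rework above factors the repeated FPM-query decode into i40iw_sc_decode_fpm_query(): each 64-bit word carries the object count in its low 32 bits and log2 of the object size in its high 32 bits (per the replaced open-coded version, RS_64_1(temp, 32) is a right shift and LS_64_1(1, size) a left shift). A self-contained sketch of that decode using plain shifts:

    #include <stdint.h>

    struct obj_info {
        uint32_t max_cnt;
        uint64_t size;
    };

    /* low 32 bits: max object count; high 32 bits: log2(object size) */
    static void decode_fpm_word(uint64_t word, struct obj_info *info)
    {
        uint32_t size_shift = (uint32_t)(word >> 32);

        info->max_cnt = (uint32_t)word;
        info->size = (uint64_t)1 << size_shift;
    }
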
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index a39ac12..2ebaadb 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -1507,8 +1507,8 @@
 	I40IW_CQ0_ALIGNMENT_MASK =		(256 - 1),
 	I40IW_HOST_CTX_ALIGNMENT_MASK =		(4 - 1),
 	I40IW_SHADOWAREA_MASK =			(128 - 1),
-	I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK =	0,
-	I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK =	0
+	I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK =	(4 - 1),
+	I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK =	(4 - 1)
 };
 
 enum i40iw_alignment {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 71050c5..7f5583d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -685,7 +685,7 @@
 	cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
 	tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
 	ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
-				     I40IW_CQ0_ALIGNMENT_MASK);
+				     I40IW_CQ0_ALIGNMENT);
 	if (ret)
 		return ret;
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
index 91c4217..f7013f1 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_status.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_status.h
@@ -62,7 +62,7 @@
 	I40IW_ERR_INVALID_ALIGNMENT = -23,
 	I40IW_ERR_FLUSHED_QUEUE = -24,
 	I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
-	I40IW_ERR_INVALID_IMM_DATA_SIZE = -26,
+	I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
 	I40IW_ERR_TIMEOUT = -27,
 	I40IW_ERR_OPCODE_MISMATCH = -28,
 	I40IW_ERR_CQP_COMPL_ERROR = -29,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index b0d3a0e..1060725 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -435,7 +435,7 @@
 
 	op_info = &info->op.inline_rdma_write;
 	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
-		return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
 
 	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
 	if (ret_code)
@@ -511,7 +511,7 @@
 
 	op_info = &info->op.inline_send;
 	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
-		return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
 
 	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
 	if (ret_code)
@@ -784,7 +784,7 @@
 	get_64bit_val(cqe, 0, &qword0);
 	get_64bit_val(cqe, 16, &qword2);
 
-	info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);
+	info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);
 
 	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
 
@@ -1187,7 +1187,7 @@
 							 u8 *wqe_size)
 {
 	if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
-		return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
 
 	if (data_size <= 16)
 		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 69bda61..90aa326 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -65,13 +65,28 @@
 	struct pvrdma_dev *dev = to_vdev(ibcq->device);
 	struct pvrdma_cq *cq = to_vcq(ibcq);
 	u32 val = cq->cq_handle;
+	unsigned long flags;
+	int has_data = 0;
 
 	val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 		PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
 
+	spin_lock_irqsave(&cq->cq_lock, flags);
+
 	pvrdma_write_uar_cq(dev, val);
 
-	return 0;
+	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+		unsigned int head;
+
+		has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+						    cq->ibcq.cqe, &head);
+		if (unlikely(has_data == PVRDMA_INVALID_IDX))
+			dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+	}
+
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+	return has_data;
 }
 
 /**
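
The pvrdma change implements the IB_CQ_REPORT_MISSED_EVENTS contract for req_notify_cq: return a positive value when completions were already queued at arm time, so the caller knows to poll again instead of sleeping and missing them, and inspect the ring under cq_lock so the check cannot race with the interrupt path. A userspace sketch of the arm-then-check shape; struct cq and the head/tail ring are stand-ins:

    #include <pthread.h>
    #include <stdbool.h>

    struct cq {
        pthread_mutex_t lock;
        unsigned int head, tail;   /* stand-in completion ring indices */
    };

    /* returns 1 if completions were already pending when the CQ was
     * armed (caller should poll again), 0 otherwise */
    static int arm_cq(struct cq *cq, bool report_missed)
    {
        int has_data = 0;

        pthread_mutex_lock(&cq->lock);
        /* ... tell the device to raise an event on the next CQE ... */
        if (report_missed)
            has_data = cq->head != cq->tail;
        pthread_mutex_unlock(&cq->lock);

        return has_data;
    }
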
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 2e582a2..5f5cd30 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -794,7 +794,8 @@
 		int i;
 
 		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
-			if (opt_tokens[i].token & ~allowed_opts) {
+			if ((opt_tokens[i].token & opts->mask) &&
+			    (opt_tokens[i].token & ~allowed_opts)) {
 				pr_warn("invalid parameter '%s'\n",
 					opt_tokens[i].pattern);
 			}
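
The fabrics hunk narrows the warning: the old loop complained about every token outside allowed_opts, even options the user never passed; the fix warns only when a token is both set in opts->mask and disallowed. A small sketch of that two-mask filter, with illustrative names:

    #include <stdio.h>

    static void warn_invalid_opts(unsigned int given, unsigned int allowed,
                                  const char *const names[], int n)
    {
        for (int i = 0; i < n; i++) {
            unsigned int bit = 1u << i;

            /* warn only for options actually given AND not allowed */
            if ((bit & given) && (bit & ~allowed))
                printf("invalid parameter '%s'\n", names[i]);
        }
    }
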
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 74a124a..925467b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -801,6 +801,7 @@
 		return;
 	}
 
+	nvmeq->cqe_seen = 1;
 	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
 	nvme_end_request(req, cqe->status, cqe->result);
 }
@@ -830,10 +831,8 @@
 		consumed++;
 	}
 
-	if (consumed) {
+	if (consumed)
 		nvme_ring_cq_doorbell(nvmeq);
-		nvmeq->cqe_seen = 1;
-	}
 }
 
 static irqreturn_t nvme_irq(int irq, void *data)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 2d7a98a..a53bb66 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -199,12 +199,6 @@
 	copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
 	copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
 
-	memset(id->mn, ' ', sizeof(id->mn));
-	strncpy((char *)id->mn, "Linux", sizeof(id->mn));
-
-	memset(id->fr, ' ', sizeof(id->fr));
-	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
-
 	id->rab = 6;
 
 	/*
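
The admin-cmd hunk removes leftovers: the memset()/strncpy() pairs redundantly overwrote fields that copy_and_pad() had just filled. NVMe identify strings are fixed width, space padded, and not NUL terminated, which is exactly what a copy_and_pad()-style helper provides; a sketch of that behavior (a plausible reconstruction, not necessarily the kernel's exact code):

    #include <string.h>

    /* copy up to dlen bytes of src, space-pad the rest, no NUL */
    static void copy_and_pad(char *dst, size_t dlen,
                             const char *src, size_t slen)
    {
        size_t n = slen < dlen ? slen : dlen;

        memcpy(dst, src, n);
        memset(dst + n, ' ', dlen - n);
    }

For example, copy_and_pad(mn, 40, "Linux", 5) yields "Linux" followed by 35 spaces, with no terminating NUL.
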
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 1b7f252..309c84a 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -394,7 +394,7 @@
 static struct nvmet_fc_ls_iod *
 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
 {
-	static struct nvmet_fc_ls_iod *iod;
+	struct nvmet_fc_ls_iod *iod;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
@@ -471,7 +471,7 @@
 static struct nvmet_fc_fcp_iod *
 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
 {
-	static struct nvmet_fc_fcp_iod *fod;
+	struct nvmet_fc_fcp_iod *fod;
 
 	lockdep_assert_held(&queue->qlock);
 
@@ -704,7 +704,7 @@
 {
 	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
 	struct nvmet_fc_fcp_iod *fod = queue->fod;
-	struct nvmet_fc_defer_fcp_req *deferfcp;
+	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
 	unsigned long flags;
 	int i, writedataactive;
 	bool disconnect;
@@ -735,7 +735,8 @@
 	}
 
 	/* Cleanup defer'ed IOs in queue */
-	list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {
+	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
+				req_list) {
 		list_del(&deferfcp->req_list);
 		kfree(deferfcp);
 	}
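
Two distinct bugs are fixed in nvmet-fc. First, the iod/fod pointers were mistakenly declared static, turning what should be a per-call local into a single instance shared by all callers. Second, the defer-list cleanup freed entries while walking with list_for_each_entry(), whose loop step dereferences the node just freed; list_for_each_entry_safe() caches the next entry before the body runs. The same idea on a plain singly linked list:

    #include <stdlib.h>

    struct node { struct node *next; };

    static void free_all(struct node *head)
    {
        struct node *n, *next;

        for (n = head; n; n = next) {
            next = n->next;  /* saved BEFORE the node is freed */
            free(n);
        }
    }
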
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 4da69db..1bdd02a 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -10,8 +10,7 @@
 	unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
 	unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
 
-	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
-		((bfn1 == bfn2) || ((bfn1+1) == bfn2));
+	return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
 #else
 	/*
 	 * XXX: Add support for merging bio_vec when using different page
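
Under Xen, pages that are contiguous in pseudo-physical space need not be contiguous in machine (backend) frames, so bio_vec merging must compare backend frame numbers. The new test asks whether vec2 begins at the machine frame where vec1's data ends; PFN_DOWN() is a right shift by PAGE_SHIFT. A sketch of the arithmetic, assuming 4 KiB pages:

    #include <stdbool.h>

    #define PAGE_SHIFT 12  /* assuming 4 KiB pages */

    static bool frames_mergeable(unsigned long bfn1, unsigned int offset,
                                 unsigned int len, unsigned long bfn2)
    {
        /* vec2 must start in the machine frame where vec1's data ends */
        return bfn1 + ((offset + len) >> PAGE_SHIFT) == bfn2;
    }
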
diff --git a/fs/iomap.c b/fs/iomap.c
index 0392661..59cc98a 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -278,7 +278,7 @@
 		unsigned long bytes;	/* Bytes to write to page */
 
 		offset = (pos & (PAGE_SIZE - 1));
-		bytes = min_t(unsigned long, PAGE_SIZE - offset, length);
+		bytes = min_t(loff_t, PAGE_SIZE - offset, length);
 
 		rpage = __iomap_read_page(inode, pos);
 		if (IS_ERR(rpage))
@@ -373,7 +373,7 @@
 		unsigned offset, bytes;
 
 		offset = pos & (PAGE_SIZE - 1); /* Within page */
-		bytes = min_t(unsigned, PAGE_SIZE - offset, count);
+		bytes = min_t(loff_t, PAGE_SIZE - offset, count);
 
 		if (IS_DAX(inode))
 			status = iomap_dax_zero(pos, offset, bytes, iomap);
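
Both iomap hunks fix a truncation bug: min_t(unsigned long, ...) (32-bit on 32-bit kernels) and min_t(unsigned, ...) chop the 64-bit length/count down before the comparison, so a remaining count just over 4 GiB can truncate to a tiny value and the loop makes the wrong-sized step. Doing the min in loff_t keeps the full width. A worked example of the failure:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t count = (1LL << 32) + 1;  /* just over 4 GiB remaining */
        uint32_t room = 4096;             /* PAGE_SIZE - offset */

        /* buggy: count is truncated to 32 bits (== 1) before comparing */
        uint32_t bad = room < (uint32_t)count ? room : (uint32_t)count;

        /* fixed: compare in the wide type, like min_t(loff_t, ...) */
        int64_t good = room < count ? room : count;

        printf("bad=%u good=%lld\n", bad, (long long)good); /* 1 vs 4096 */
        return 0;
    }
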
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index ffd5a15..abf5bea 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1246,13 +1246,13 @@
 
 			/* free inodes to the left? */
 			if (useleft && trec.ir_freecount) {
-				rec = trec;
 				xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
 				cur = tcur;
 
 				pag->pagl_leftrec = trec.ir_startino;
 				pag->pagl_rightrec = rec.ir_startino;
 				pag->pagl_pagino = pagino;
+				rec = trec;
 				goto alloc_inode;
 			}
 
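The xfs_ialloc hunk is an ordering fix: the old code copied trec into rec first, so the later pag->pagl_rightrec = rec.ir_startino stored the left record's start inode in both hints; moving the assignment after that line preserves the original right-record value. In miniature:

    struct irec { unsigned long startino; };

    static void take_left(struct irec *rec, const struct irec *trec,
                          unsigned long *leftrec, unsigned long *rightrec)
    {
        *leftrec = trec->startino;
        *rightrec = rec->startino;  /* must read rec BEFORE it is clobbered */
        *rec = *trec;               /* now safe to overwrite */
    }
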
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 0053bcf..4ebd0ba 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -749,9 +749,20 @@
 		return 0;
 	}
 
+	/*
+	 * During the second phase of log recovery, we need iget and
+	 * iput to behave like they do for an active filesystem.
+	 * xfs_fs_drop_inode needs to be able to prevent the deletion
+	 * of inodes before we're done replaying log items on those
+	 * inodes.  Turn it off immediately after recovery finishes
+	 * so that we don't leak the quota inodes if subsequent mount
+	 * activities fail.
+	 */
+	mp->m_super->s_flags |= MS_ACTIVE;
 	error = xlog_recover_finish(mp->m_log);
 	if (!error)
 		xfs_log_work_queue(mp);
+	mp->m_super->s_flags &= ~MS_ACTIVE;
 
 	return error;
 }
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 40d4e8b..ea7d4b4 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -945,15 +945,6 @@
 	}
 
 	/*
-	 * During the second phase of log recovery, we need iget and
-	 * iput to behave like they do for an active filesystem.
-	 * xfs_fs_drop_inode needs to be able to prevent the deletion
-	 * of inodes before we're done replaying log items on those
-	 * inodes.
-	 */
-	mp->m_super->s_flags |= MS_ACTIVE;
-
-	/*
 	 * Finish recovering the file system.  This part needed to be delayed
 	 * until after the root and real-time bitmap inodes were consistently
 	 * read in.
@@ -1028,12 +1019,13 @@
  out_quota:
 	xfs_qm_unmount_quotas(mp);
  out_rtunmount:
-	mp->m_super->s_flags &= ~MS_ACTIVE;
 	xfs_rtunmount_inodes(mp);
  out_rele_rip:
 	IRELE(rip);
 	cancel_delayed_work_sync(&mp->m_reclaim_work);
 	xfs_reclaim_inodes(mp, SYNC_WAIT);
+	/* Clean out dquots that might be in memory after quotacheck. */
+	xfs_qm_unmount(mp);
  out_log_dealloc:
 	mp->m_flags |= XFS_MOUNT_UNMOUNTING;
 	xfs_log_mount_cancel(mp);