Merge tag 'afs-fixes-20171201' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Pull AFS fixes from David Howells:
 "Two fix patches for the AFS filesystem:

   - Fix the refcounting on permit caching.

   - AFS inode (afs_vnode) fields need resetting after allocation
     because they're only initialised when slab pages are obtained from
     the page allocator"

* tag 'afs-fixes-20171201' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  afs: Properly reset afs_vnode (inode) fields
  afs: Fix permit refcounting
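
As background for the second fix: a slab cache constructor runs only when the cache carves objects out of freshly allocated pages, not on every kmem_cache_alloc(), so a recycled object still carries whatever field values its previous owner left behind. A minimal userspace sketch of that recycling behaviour (hypothetical names, not the AFS code):

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy object cache mimicking slab semantics: the "constructor" runs
     * only when a brand-new object is carved out, never on recycling. */
    struct obj { int flags; struct obj *free_next; };

    static struct obj *freelist;

    static struct obj *cache_alloc(void)
    {
        struct obj *o = freelist;

        if (o) {                        /* recycled: ctor is NOT re-run */
            freelist = o->free_next;
            return o;
        }
        o = malloc(sizeof(*o));         /* fresh page: run the ctor */
        o->flags = 0;
        return o;
    }

    static void cache_free(struct obj *o)
    {
        o->free_next = freelist;        /* stale o->flags survives here */
        freelist = o;
    }

    int main(void)
    {
        struct obj *a = cache_alloc();

        a->flags = 42;                  /* previous owner dirties the object */
        cache_free(a);

        struct obj *b = cache_alloc();  /* same object, recycled */
        printf("flags = %d\n", b->flags);   /* prints 42, not 0 */
        /* hence: per-instance fields must be reset after every allocation */
        return 0;
    }
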
diff --git a/block/bio.c b/block/bio.c
index 228229f..8bfdea5 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1819,7 +1819,7 @@
 struct bio *bio_split(struct bio *bio, int sectors,
 		      gfp_t gfp, struct bio_set *bs)
 {
-	struct bio *split = NULL;
+	struct bio *split;
 
 	BUG_ON(sectors <= 0);
 	BUG_ON(sectors >= bio_sectors(bio));
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e54be40..870484e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -450,12 +450,9 @@
 		ret = wbt_init(q);
 		if (ret)
 			return ret;
-
-		rwb = q->rq_wb;
-		if (!rwb)
-			return -EINVAL;
 	}
 
+	rwb = q->rq_wb;
 	if (val == -1)
 		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
 	else if (val >= 0)
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index b252da0..ae8de978 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -178,12 +178,11 @@
 
 		if (wbt_is_read(stat))
 			wb_timestamp(rwb, &rwb->last_comp);
-		wbt_clear_state(stat);
 	} else {
 		WARN_ON_ONCE(stat == rwb->sync_cookie);
 		__wbt_done(rwb, wbt_stat_to_mask(stat));
-		wbt_clear_state(stat);
 	}
+	wbt_clear_state(stat);
 }
 
 /*
@@ -482,7 +481,7 @@
 
 	/*
 	 * At this point we know it's a buffered write. If this is
-	 * kswapd trying to free memory, or REQ_SYNC is set, set, then
+	 * kswapd trying to free memory, or REQ_SYNC is set, then
 	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
 	 * that. If the write is marked as a background write, then use
 	 * the idle limit, or go to normal if we haven't had competing
@@ -723,8 +722,6 @@
 		init_waitqueue_head(&rwb->rq_wait[i].wait);
 	}
 
-	rwb->wc = 1;
-	rwb->queue_depth = RWB_DEF_DEPTH;
 	rwb->last_comp = rwb->last_issue = jiffies;
 	rwb->queue = q;
 	rwb->win_nsec = RWB_WINDOW_NSEC;
diff --git a/block/genhd.c b/block/genhd.c
index c2223f1..96a66f6 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -671,10 +671,13 @@
 		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
 		disk->flags |= GENHD_FL_NO_PART_SCAN;
 	} else {
+		int ret;
+
 		/* Register BDI before referencing it from bdev */
 		disk_to_dev(disk)->devt = devt;
-		bdi_register_owner(disk->queue->backing_dev_info,
-				disk_to_dev(disk));
+		ret = bdi_register_owner(disk->queue->backing_dev_info,
+						disk_to_dev(disk));
+		WARN_ON(ret);
 		blk_register_region(disk_devt(disk), disk->minors, NULL,
 				    exact_match, exact_lock, disk);
 	}
@@ -1389,7 +1392,7 @@
 
 	if (minors > DISK_MAX_PARTS) {
 		printk(KERN_ERR
-			"block: can't allocated more than %d partitions\n",
+			"block: can't allocate more than %d partitions\n",
 			DISK_MAX_PARTS);
 		minors = DISK_MAX_PARTS;
 	}
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index c61960d..ccb9975 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -471,7 +471,6 @@
 {
 	struct nullb_device *dev = to_nullb_device(item);
 
-	badblocks_exit(&dev->badblocks);
 	null_free_device_storage(dev, false);
 	null_free_dev(dev);
 }
@@ -582,6 +581,10 @@
 
 static void null_free_dev(struct nullb_device *dev)
 {
+	if (!dev)
+		return;
+
+	badblocks_exit(&dev->badblocks);
 	kfree(dev);
 }
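
null_free_dev() above now follows the kfree(NULL) convention: a destructor that tolerates NULL (and owns the badblocks_exit() teardown) gives every caller a single unconditional cleanup path. A generic userspace sketch of the idiom (hypothetical names):

    #include <stdlib.h>

    struct device_ctx { char *storage; };

    /* NULL-tolerant destructor, in the style of kfree(NULL) being a no-op */
    static void ctx_free(struct device_ctx *ctx)
    {
        if (!ctx)
            return;             /* callers never need their own NULL check */
        free(ctx->storage);     /* free(NULL) is likewise defined as a no-op */
        free(ctx);
    }

    int main(void)
    {
        struct device_ctx *ctx = calloc(1, sizeof(*ctx));

        /* ... setup may fail and leave ctx NULL or half-built ... */
        ctx_free(ctx);          /* one cleanup call covers every exit path */
        ctx_free(NULL);         /* also safe */
        return 0;
    }
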
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5afaf60..0b14b53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -717,7 +717,7 @@
 			  struct amdgpu_queue_mgr *mgr);
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 			 struct amdgpu_queue_mgr *mgr,
-			 int hw_ip, int instance, int ring,
+			 u32 hw_ip, u32 instance, u32 ring,
 			 struct amdgpu_ring **out_ring);
 
 /*
@@ -1572,18 +1572,14 @@
 	/* sdma */
 	struct amdgpu_sdma		sdma;
 
-	union {
-		struct {
-			/* uvd */
-			struct amdgpu_uvd		uvd;
+	/* uvd */
+	struct amdgpu_uvd		uvd;
 
-			/* vce */
-			struct amdgpu_vce		vce;
-		};
+	/* vce */
+	struct amdgpu_vce		vce;
 
-		/* vcn */
-		struct amdgpu_vcn		vcn;
-	};
+	/* vcn */
+	struct amdgpu_vcn		vcn;
 
 	/* firmwares */
 	struct amdgpu_firmware		firmware;
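
The amdgpu.h hunk above un-unions uvd/vce from vcn: a union is only safe while at most one member is live, and if any path ever touches both, writes to one silently clobber the other. A toy demonstration of the hazard (not the driver structures):

    #include <stdio.h>

    struct uvd { int ring; };
    struct vcn { int ring; };

    int main(void)
    {
        union {
            struct uvd uvd;
            struct vcn vcn;
        } u;

        u.uvd.ring = 1;
        u.vcn.ring = 2;                      /* overlays the same storage */
        printf("uvd.ring = %d\n", u.uvd.ring);  /* prints 2: uvd state clobbered */
        return 0;
    }
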
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 47d1c13..1e3e9be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -379,29 +379,50 @@
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct cik_sdma_rlc_registers *m;
+	unsigned long end_jiffies;
 	uint32_t sdma_base_addr;
+	uint32_t data;
 
 	m = get_sdma_mqd(mqd);
 	sdma_base_addr = get_sdma_base_addr(m);
 
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
-			m->sdma_rlc_virtual_addr);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
 
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
-			m->sdma_rlc_rb_base);
-
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
-			m->sdma_rlc_rb_base_hi);
-
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
-			m->sdma_rlc_rb_rptr_addr_lo);
-
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
-			m->sdma_rlc_rb_rptr_addr_hi);
+	end_jiffies = msecs_to_jiffies(2000) + jiffies;
+	while (true) {
+		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+			break;
+		if (time_after(jiffies, end_jiffies))
+			return -ETIME;
+		usleep_range(500, 1000);
+	}
+	if (m->sdma_engine_id) {
+		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+				RESUME_CTX, 0);
+		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+	} else {
+		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+				RESUME_CTX, 0);
+		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+	}
 
 	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
-			m->sdma_rlc_doorbell);
-
+				m->sdma_rlc_doorbell);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+				m->sdma_rlc_virtual_addr);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+			m->sdma_rlc_rb_base_hi);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+			m->sdma_rlc_rb_rptr_addr_lo);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+			m->sdma_rlc_rb_rptr_addr_hi);
 	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
 			m->sdma_rlc_rb_cntl);
 
@@ -574,9 +595,9 @@
 	}
 
 	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
 
 	return 0;
 }
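
The reworked SDMA load path above waits for the RLC context-status register to report idle before reprogramming, using the standard bounded-poll idiom: take a deadline, poll, back off briefly, and give up with -ETIME once the deadline passes. A userspace analogue of that loop (hypothetical hw_is_idle()):

    #include <errno.h>
    #include <stdbool.h>
    #include <time.h>
    #include <unistd.h>

    static bool hw_is_idle(void)
    {
        /* stand-in for RREG32(...CONTEXT_STATUS) & IDLE_MASK */
        return true;
    }

    /* Poll until idle or until the deadline passes, mirroring the kernel's
     * time_after(jiffies, end_jiffies) pattern in the hunk above. */
    static int poll_idle(int timeout_ms)
    {
        struct timespec ts;
        long long now, deadline;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        deadline = ts.tv_sec * 1000LL + ts.tv_nsec / 1000000 + timeout_ms;

        for (;;) {
            if (hw_is_idle())
                return 0;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            now = ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
            if (now > deadline)
                return -ETIME;      /* gave up, like the hunk's error path */
            usleep(500);            /* back off, like usleep_range(500, 1000) */
        }
    }

    int main(void)
    {
        return poll_idle(2000) ? 1 : 0;
    }
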
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a57cec7..57abf7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -409,6 +409,10 @@
 		if (candidate->robj == validated)
 			break;
 
+		/* We can't move pinned BOs here */
+		if (bo->pin_count)
+			continue;
+
 		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 
 		/* Check if this BO is in one of the domains we need space for */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2c57437..3573ecd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1837,9 +1837,6 @@
 		adev->ip_blocks[i].status.hw = false;
 	}
 
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
-		amdgpu_ucode_fini_bo(adev);
-
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.sw)
 			continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index ec96bb1..c2f414f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -536,7 +536,7 @@
 	{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	/* Raven */
-	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 
 	{0, 0, 0}
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 033fba2..5f5aa5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -164,6 +164,9 @@
 		ret = adev->powerplay.ip_funcs->hw_fini(
 					adev->powerplay.pp_handle);
 
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+		amdgpu_ucode_fini_bo(adev);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 7714f4a..447d446 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -442,6 +442,8 @@
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		return 0;
 
+	amdgpu_ucode_fini_bo(adev);
+
 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 190e28c..93d8661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -63,7 +63,7 @@
 
 static int amdgpu_identity_map(struct amdgpu_device *adev,
 			       struct amdgpu_queue_mapper *mapper,
-			       int ring,
+			       u32 ring,
 			       struct amdgpu_ring **out_ring)
 {
 	switch (mapper->hw_ip) {
@@ -121,7 +121,7 @@
 
 static int amdgpu_lru_map(struct amdgpu_device *adev,
 			  struct amdgpu_queue_mapper *mapper,
-			  int user_ring, bool lru_pipe_order,
+			  u32 user_ring, bool lru_pipe_order,
 			  struct amdgpu_ring **out_ring)
 {
 	int r, i, j;
@@ -208,7 +208,7 @@
  */
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 			 struct amdgpu_queue_mgr *mgr,
-			 int hw_ip, int instance, int ring,
+			 u32 hw_ip, u32 instance, u32 ring,
 			 struct amdgpu_ring **out_ring)
 {
 	int r, ip_num_rings;
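
The int -> u32 change for hw_ip/instance/ring above is a hardening fix: these indexes ultimately arrive from userspace, and with a signed type a negative value can slip past a plain upper-bound check, while an unsigned type wraps it into a huge value that the same check rejects. A compact illustration:

    #include <stdio.h>
    #include <stdbool.h>

    #define NUM_RINGS 8

    /* BAD: "ring < NUM_RINGS" alone lets a negative value through */
    static bool valid_signed(int ring)
    {
        return ring < NUM_RINGS;        /* valid_signed(-1) == true! */
    }

    /* GOOD: as unsigned, -1 wraps to 0xffffffff and fails the same check */
    static bool valid_unsigned(unsigned int ring)
    {
        return ring < NUM_RINGS;
    }

    int main(void)
    {
        printf("signed   -1: %s\n", valid_signed(-1) ? "accepted" : "rejected");
        printf("unsigned -1: %s\n", valid_unsigned(-1) ? "accepted" : "rejected");
        return 0;
    }
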
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 793b1470..a296f7bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1023,22 +1023,101 @@
 	{mmPA_SC_RASTER_CONFIG_1, true},
 };
 
-static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
-					  u32 se_num, u32 sh_num,
-					  u32 reg_offset)
+
+static uint32_t cik_get_register_value(struct amdgpu_device *adev,
+				       bool indexed, u32 se_num,
+				       u32 sh_num, u32 reg_offset)
 {
-	uint32_t val;
+	if (indexed) {
+		uint32_t val;
+		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
 
-	mutex_lock(&adev->grbm_idx_mutex);
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+		switch (reg_offset) {
+		case mmCC_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+		case mmGC_USER_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+		case mmPA_SC_RASTER_CONFIG:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+		case mmPA_SC_RASTER_CONFIG_1:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+		}
 
-	val = RREG32(reg_offset);
+		mutex_lock(&adev->grbm_idx_mutex);
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-	return val;
+		val = RREG32(reg_offset);
+
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+		return val;
+	} else {
+		unsigned idx;
+
+		switch (reg_offset) {
+		case mmGB_ADDR_CONFIG:
+			return adev->gfx.config.gb_addr_config;
+		case mmMC_ARB_RAMCFG:
+			return adev->gfx.config.mc_arb_ramcfg;
+		case mmGB_TILE_MODE0:
+		case mmGB_TILE_MODE1:
+		case mmGB_TILE_MODE2:
+		case mmGB_TILE_MODE3:
+		case mmGB_TILE_MODE4:
+		case mmGB_TILE_MODE5:
+		case mmGB_TILE_MODE6:
+		case mmGB_TILE_MODE7:
+		case mmGB_TILE_MODE8:
+		case mmGB_TILE_MODE9:
+		case mmGB_TILE_MODE10:
+		case mmGB_TILE_MODE11:
+		case mmGB_TILE_MODE12:
+		case mmGB_TILE_MODE13:
+		case mmGB_TILE_MODE14:
+		case mmGB_TILE_MODE15:
+		case mmGB_TILE_MODE16:
+		case mmGB_TILE_MODE17:
+		case mmGB_TILE_MODE18:
+		case mmGB_TILE_MODE19:
+		case mmGB_TILE_MODE20:
+		case mmGB_TILE_MODE21:
+		case mmGB_TILE_MODE22:
+		case mmGB_TILE_MODE23:
+		case mmGB_TILE_MODE24:
+		case mmGB_TILE_MODE25:
+		case mmGB_TILE_MODE26:
+		case mmGB_TILE_MODE27:
+		case mmGB_TILE_MODE28:
+		case mmGB_TILE_MODE29:
+		case mmGB_TILE_MODE30:
+		case mmGB_TILE_MODE31:
+			idx = (reg_offset - mmGB_TILE_MODE0);
+			return adev->gfx.config.tile_mode_array[idx];
+		case mmGB_MACROTILE_MODE0:
+		case mmGB_MACROTILE_MODE1:
+		case mmGB_MACROTILE_MODE2:
+		case mmGB_MACROTILE_MODE3:
+		case mmGB_MACROTILE_MODE4:
+		case mmGB_MACROTILE_MODE5:
+		case mmGB_MACROTILE_MODE6:
+		case mmGB_MACROTILE_MODE7:
+		case mmGB_MACROTILE_MODE8:
+		case mmGB_MACROTILE_MODE9:
+		case mmGB_MACROTILE_MODE10:
+		case mmGB_MACROTILE_MODE11:
+		case mmGB_MACROTILE_MODE12:
+		case mmGB_MACROTILE_MODE13:
+		case mmGB_MACROTILE_MODE14:
+		case mmGB_MACROTILE_MODE15:
+			idx = (reg_offset - mmGB_MACROTILE_MODE0);
+			return adev->gfx.config.macrotile_mode_array[idx];
+		default:
+			return RREG32(reg_offset);
+		}
+	}
 }
 
 static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -1048,13 +1127,13 @@
 
 	*value = 0;
 	for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+		bool indexed = cik_allowed_read_registers[i].grbm_indexed;
+
 		if (reg_offset != cik_allowed_read_registers[i].reg_offset)
 			continue;
 
-		*value = cik_allowed_read_registers[i].grbm_indexed ?
-			 cik_read_indexed_register(adev, se_num,
-						   sh_num, reg_offset) :
-			 RREG32(reg_offset);
+		*value = cik_get_register_value(adev, indexed, se_num, sh_num,
+						reg_offset);
 		return 0;
 	}
 	return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 5c8a7a4..419ba0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1819,6 +1819,22 @@
 							adev->gfx.config.backend_enable_mask,
 							num_rb_pipes);
 	}
+
+	/* cache the values for userspace */
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+			adev->gfx.config.rb_config[i][j].rb_backend_disable =
+				RREG32(mmCC_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].raster_config =
+				RREG32(mmPA_SC_RASTER_CONFIG);
+			adev->gfx.config.rb_config[i][j].raster_config_1 =
+				RREG32(mmPA_SC_RASTER_CONFIG_1);
+		}
+	}
+	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 1eb4d79..0450ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1175,7 +1175,7 @@
 
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
+	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
 	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
 }
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 6c5a9ca..f744cae 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -24,6 +24,7 @@
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/device.h>
+#include <linux/printk.h>
 #include "kfd_priv.h"
 
 #define KFD_DRIVER_AUTHOR	"AMD Inc. and others"
@@ -132,7 +133,7 @@
 	kfd_process_destroy_wq();
 	kfd_topology_shutdown();
 	kfd_chardev_exit();
-	dev_info(kfd_device, "Removed module\n");
+	pr_info("amdkfd: Removed module\n");
 }
 
 module_init(kfd_module_init);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 4859d26..4728fad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -202,8 +202,8 @@
 	struct cik_sdma_rlc_registers *m;
 
 	m = get_sdma_mqd(mqd);
-	m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
-			SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+	m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+			<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
 			q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
 			1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
 			6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
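
The "- 1" above matters because the RB_SIZE field encodes log2 of the ring size in dwords, while ffs() reports a 1-based bit position, so the old encoding was off by one (double the real size). A worked check:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
        unsigned int queue_size = 4096;                          /* bytes */
        unsigned int dwords = queue_size / sizeof(unsigned int); /* 1024 */

        printf("ffs(%u)     = %d (1-based bit position)\n", dwords, ffs(dwords));
        printf("ffs(%u) - 1 = %d = log2(%u)\n", dwords, ffs(dwords) - 1, dwords);
        /* RB_SIZE wants log2(dwords) = 10, so the "- 1" is required */
        return 0;
    }
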
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 2bec902..a3f1e62 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -191,6 +191,24 @@
 
 	switch (type) {
 	case KFD_QUEUE_TYPE_SDMA:
+		if (dev->dqm->queue_count >=
+			CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+			pr_err("Over-subscription is not allowed for SDMA.\n");
+			retval = -EPERM;
+			goto err_create_queue;
+		}
+
+		retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+		if (retval != 0)
+			goto err_create_queue;
+		pqn->q = q;
+		pqn->kq = NULL;
+		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
+						&q->properties.vmid);
+		pr_debug("DQM returned %d for create_queue\n", retval);
+		print_queue(q);
+		break;
+
 	case KFD_QUEUE_TYPE_COMPUTE:
 		/* check if there is over subscription */
 		if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 889ed24..f71fe6d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -520,7 +520,8 @@
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		aconnector = to_amdgpu_dm_connector(connector);
-		if (aconnector->dc_link->type == dc_connection_mst_branch) {
+		if (aconnector->dc_link->type == dc_connection_mst_branch &&
+		    aconnector->mst_mgr.aux) {
 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
 					aconnector, aconnector->base.base.id);
 
@@ -677,6 +678,10 @@
 
 		mutex_lock(&aconnector->hpd_lock);
 		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+
+		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+			aconnector->fake_enable = false;
+
 		aconnector->dc_sink = NULL;
 		amdgpu_dm_update_connector_after_detect(aconnector);
 		mutex_unlock(&aconnector->hpd_lock);
@@ -711,7 +716,6 @@
 
 	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
 
-	drm_atomic_state_put(adev->dm.cached_state);
 	adev->dm.cached_state = NULL;
 
 	amdgpu_dm_irq_resume_late(adev);
@@ -2704,7 +2708,7 @@
 			.link = aconnector->dc_link,
 			.sink_signal = SIGNAL_TYPE_VIRTUAL
 	};
-	struct edid *edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+	struct edid *edid;
 
 	if (!aconnector->base.edid_blob_ptr ||
 		!aconnector->base.edid_blob_ptr->data) {
@@ -2716,6 +2720,8 @@
 		return;
 	}
 
+	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
 	aconnector->edid = edid;
 
 	aconnector->dc_em_sink = dc_link_add_remote_sink(
@@ -4193,13 +4199,13 @@
 		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
 				dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
 
+		if (!dm_new_crtc_state->stream)
+			continue;
+
 		status = dc_stream_get_status(dm_new_crtc_state->stream);
 		WARN_ON(!status);
 		WARN_ON(!status->plane_count);
 
-		if (!dm_new_crtc_state->stream)
-			continue;
-
 		/*TODO How it works with MPO ?*/
 		if (!dc_commit_planes_to_stream(
 				dm->dc,
@@ -4253,7 +4259,7 @@
 	drm_atomic_helper_commit_hw_done(state);
 
 	if (wait_for_vblank)
-		drm_atomic_helper_wait_for_vblanks(dev, state);
+		drm_atomic_helper_wait_for_flip_done(dev, state);
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 }
@@ -4332,9 +4338,11 @@
 		return;
 
 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
-	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+	if (!disconnected_acrtc)
+		return;
 
-	if (!disconnected_acrtc || !acrtc_state->stream)
+	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+	if (!acrtc_state->stream)
 		return;
 
 	/*
@@ -4455,7 +4463,7 @@
 			}
 		}
 
-		if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+		if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
 				dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
 
 			new_crtc_state->mode_changed = false;
@@ -4709,7 +4717,8 @@
 		}
 	} else {
 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-			if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+			if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+					!new_crtc_state->color_mgmt_changed)
 				continue;
 
 			if (!new_crtc_state->enable)
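
Several amdgpu_dm hunks above are the same shape of fix: a pointer was used before the check that guards it (edid_blob_ptr->data read in an initializer ahead of the NULL test, dc_stream_get_status() called before the !stream continue, acrtc_state derived before the !disconnected_acrtc test). The safe ordering is mechanical, sketched here with hypothetical types:

    #include <stdio.h>
    #include <stddef.h>

    struct blob { const char *data; };

    /* GOOD ordering: validate the pointer first, dereference second.
     * The broken pattern did "edid = blob->data;" in the declaration,
     * before the NULL test could run. */
    static const char *get_data(const struct blob *blob)
    {
        const char *data;

        if (!blob || !blob->data)
            return NULL;
        data = blob->data;      /* safe: guarded above */
        return data;
    }

    int main(void)
    {
        struct blob b = { "edid bytes" };
        const char *d = get_data(&b);

        printf("%s\n", d ? d : "(null)");
        printf("%s\n", get_data(NULL) ? "?" : "(null)");
        return 0;
    }
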
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
index 785b943..6e43168 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -75,6 +75,9 @@
 		if (signal == signal_type_info_tbl[i].type)
 			break;
 
+	if (i == NUM_ELEMENTS(signal_type_info_tbl))
+		goto fail;
+
 	dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
 			signal_type_info_tbl[i].name,
 			link->link_index);
@@ -96,6 +99,8 @@
 
 	dm_logger_append(&entry, "^\n");
 	dm_helpers_dc_conn_log(ctx, &entry, event);
+
+fail:
 	dm_logger_close(&entry);
 
 	va_end(args);
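
The log_helpers fix above closes a classic linear-search hole: when the loop falls through without a match, the index equals the table size, and using it would read one past the array, hence the new i == NUM_ELEMENTS(...) guard and the matching goto. The general pattern:

    #include <stdio.h>
    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct entry { int type; const char *name; };

    static const struct entry tbl[] = {
        { 1, "HDMI" }, { 2, "DP" },
    };

    static const char *type_name(int type)
    {
        size_t i;

        for (i = 0; i < ARRAY_SIZE(tbl); i++)
            if (tbl[i].type == type)
                break;

        if (i == ARRAY_SIZE(tbl))   /* fell through: no match */
            return "unknown";       /* tbl[i] here would be out of bounds */
        return tbl[i].name;
    }

    int main(void)
    {
        printf("%s %s\n", type_name(2), type_name(9));
        return 0;
    }
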
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index aaaebd0..86e6438 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -249,7 +249,7 @@
 	struct graphics_object_id *dest_object_id)
 {
 	uint32_t number;
-	uint16_t *id;
+	uint16_t *id = NULL;
 	ATOM_OBJECT *object;
 	struct bios_parser *bp = BP_FROM_DCB(dcb);
 
@@ -260,7 +260,7 @@
 
 	number = get_dest_obj_list(bp, object, &id);
 
-	if (number <= index)
+	if (number <= index || !id)
 		return BP_RESULT_BADINPUT;
 
 	*dest_object_id = object_id_from_bios_object_id(id[index]);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index fe63f58..7240db2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -121,6 +121,10 @@
 			goto failed_alloc;
 		}
 
+		link->link_index = dc->link_count;
+		dc->links[dc->link_count] = link;
+		dc->link_count++;
+
 		link->ctx = dc->ctx;
 		link->dc = dc;
 		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
@@ -129,6 +133,13 @@
 		link->link_id.enum_id = ENUM_ID_1;
 		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
 
+		if (!link->link_enc) {
+			BREAK_TO_DEBUGGER();
+			goto failed_alloc;
+		}
+
+		link->link_status.dpcd_caps = &link->dpcd_caps;
+
 		enc_init.ctx = dc->ctx;
 		enc_init.channel = CHANNEL_ID_UNKNOWN;
 		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
@@ -138,10 +149,6 @@
 		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
 		enc_init.encoder.enum_id = ENUM_ID_1;
 		virtual_link_encoder_construct(link->link_enc, &enc_init);
-
-		link->link_index = dc->link_count;
-		dc->links[dc->link_count] = link;
-		dc->link_count++;
 	}
 
 	return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 0602610..e27ed4a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -480,22 +480,6 @@
 		sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
 		detect_dp_sink_caps(link);
 
-		/* DP active dongles */
-		if (is_dp_active_dongle(link)) {
-			link->type = dc_connection_active_dongle;
-			if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
-				/*
-				 * active dongle unplug processing for short irq
-				 */
-				link_disconnect_sink(link);
-				return;
-			}
-
-			if (link->dpcd_caps.dongle_type !=
-			DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
-				*converter_disable_audio = true;
-			}
-		}
 		if (is_mst_supported(link)) {
 			sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
 			link->type = dc_connection_mst_branch;
@@ -535,6 +519,22 @@
 				sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
 			}
 		}
+
+		if (link->type != dc_connection_mst_branch &&
+			is_dp_active_dongle(link)) {
+			/* DP active dongles */
+			link->type = dc_connection_active_dongle;
+			if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+				/*
+				 * active dongle unplug processing for short irq
+				 */
+				link_disconnect_sink(link);
+				return;
+			}
+
+			if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+				*converter_disable_audio = true;
+		}
 	} else {
 		/* DP passive dongles */
 		sink_caps->signal = dp_passive_dongle_detection(link->ddc,
@@ -1801,12 +1801,75 @@
 		link->link_enc->funcs->disable_output(link->link_enc, signal, link);
 }
 
+bool dp_active_dongle_validate_timing(
+		const struct dc_crtc_timing *timing,
+		const struct dc_dongle_caps *dongle_caps)
+{
+	unsigned int required_pix_clk = timing->pix_clk_khz;
+
+	if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+		dongle_caps->extendedCapValid == false)
+		return true;
+
+	/* Check Pixel Encoding */
+	switch (timing->pixel_encoding) {
+	case PIXEL_ENCODING_RGB:
+	case PIXEL_ENCODING_YCBCR444:
+		break;
+	case PIXEL_ENCODING_YCBCR422:
+		if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
+			return false;
+		break;
+	case PIXEL_ENCODING_YCBCR420:
+		if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+			return false;
+		break;
+	default:
+		/* Invalid Pixel Encoding */
+		return false;
+	}
+
+
+	/* Check Color Depth and Pixel Clock */
+	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+		required_pix_clk /= 2;
+
+	switch (timing->display_color_depth) {
+	case COLOR_DEPTH_666:
+	case COLOR_DEPTH_888:
+		/* 888 and 666 should always be supported */
+		break;
+	case COLOR_DEPTH_101010:
+		if (dongle_caps->dp_hdmi_max_bpc < 10)
+			return false;
+		required_pix_clk = required_pix_clk * 10 / 8;
+		break;
+	case COLOR_DEPTH_121212:
+		if (dongle_caps->dp_hdmi_max_bpc < 12)
+			return false;
+		required_pix_clk = required_pix_clk * 12 / 8;
+		break;
+
+	case COLOR_DEPTH_141414:
+	case COLOR_DEPTH_161616:
+	default:
+		/* These color depths are currently not supported */
+		return false;
+	}
+
+	if (required_pix_clk > dongle_caps->dp_hdmi_max_pixel_clk)
+		return false;
+
+	return true;
+}
+
 enum dc_status dc_link_validate_mode_timing(
 		const struct dc_stream_state *stream,
 		struct dc_link *link,
 		const struct dc_crtc_timing *timing)
 {
 	uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
+	struct dc_dongle_caps *dongle_caps = &link->link_status.dpcd_caps->dongle_caps;
 
 	/* A hack to avoid failing any modes for EDID override feature on
 	 * topology change such as lower quality cable for DP or different dongle
@@ -1814,8 +1877,13 @@
 	if (link->remote_sinks[0])
 		return DC_OK;
 
+	/* Passive Dongle */
 	if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
-		return DC_EXCEED_DONGLE_MAX_CLK;
+		return DC_EXCEED_DONGLE_CAP;
+
+	/* Active Dongle */
+	if (!dp_active_dongle_validate_timing(timing, dongle_caps))
+		return DC_EXCEED_DONGLE_CAP;
 
 	switch (stream->signal) {
 	case SIGNAL_TYPE_EDP:
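
In dp_active_dongle_validate_timing() above, the required pixel clock scales with colour depth relative to 8 bpc (x10/8 for 10 bpc, x12/8 for 12 bpc) and is halved for YCbCr 4:2:0 before being compared against dp_hdmi_max_pixel_clk. A worked check against a hypothetical 300 MHz dongle cap:

    #include <stdio.h>

    int main(void)
    {
        unsigned int pix_clk_khz = 297000;  /* e.g. 4K@30 HDMI */
        unsigned int max_khz = 300000;      /* hypothetical dongle cap */

        unsigned int at8  = pix_clk_khz;            /* 8 bpc: unchanged */
        unsigned int at10 = pix_clk_khz * 10 / 8;   /* 371250: over the cap */
        unsigned int at12 = pix_clk_khz * 12 / 8;   /* 445500: over the cap */

        printf("8  bpc: %u kHz, ok=%d\n", at8,  at8  <= max_khz);
        printf("10 bpc: %u kHz, ok=%d\n", at10, at10 <= max_khz);
        printf("12 bpc: %u kHz, ok=%d\n", at12, at12 <= max_khz);
        return 0;
    }
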
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index ced4248..e6bf05d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1512,7 +1512,7 @@
 	struct dc_link *link,
 	union hpd_irq_data *hpd_irq_dpcd_data)
 {
-	uint8_t irq_reg_rx_power_state;
+	uint8_t irq_reg_rx_power_state = 0;
 	enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
 	union lane_status lane_status;
 	uint32_t lane;
@@ -1524,60 +1524,55 @@
 
 	if (link->cur_link_settings.lane_count == 0)
 		return return_code;
-	/*1. Check that we can handle interrupt: Not in FS DOS,
-	 *  Not in "Display Timeout" state, Link is trained.
-	 */
 
-	dpcd_result = core_link_read_dpcd(link,
-		DP_SET_POWER,
-		&irq_reg_rx_power_state,
-		sizeof(irq_reg_rx_power_state));
+	/*1. Check that Link Status changed, before re-training.*/
 
-	if (dpcd_result != DC_OK) {
-		irq_reg_rx_power_state = DP_SET_POWER_D0;
-		dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
-			"%s: DPCD read failed to obtain power state.\n",
-			__func__);
+	/*parse lane status*/
+	for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+		/* check status of lanes 0,1
+		 * changed DpcdAddress_Lane01Status (0x202)
+		 */
+		lane_status.raw = get_nibble_at_index(
+			&hpd_irq_dpcd_data->bytes.lane01_status.raw,
+			lane);
+
+		if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+			!lane_status.bits.CR_DONE_0 ||
+			!lane_status.bits.SYMBOL_LOCKED_0) {
+			/* if channel equalization, clock recovery
+			 * or symbol lock has been dropped on any
+			 * lane, treat the link as dropped: the DP
+			 * sink status has changed
+			 */
+			sink_status_changed = true;
+			break;
+		}
 	}
 
-	if (irq_reg_rx_power_state == DP_SET_POWER_D0) {
+	/* Check interlane align.*/
+	if (sink_status_changed ||
+		!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
 
-		/*2. Check that Link Status changed, before re-training.*/
+		dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+			"%s: Link Status changed.\n", __func__);
 
-		/*parse lane status*/
-		for (lane = 0;
-			lane < link->cur_link_settings.lane_count;
-			lane++) {
+		return_code = true;
 
-			/* check status of lanes 0,1
-			 * changed DpcdAddress_Lane01Status (0x202)*/
-			lane_status.raw = get_nibble_at_index(
-				&hpd_irq_dpcd_data->bytes.lane01_status.raw,
-				lane);
+		/*2. Check that we can handle interrupt: Not in FS DOS,
+		 *  Not in "Display Timeout" state, Link is trained.
+		 */
+		dpcd_result = core_link_read_dpcd(link,
+			DP_SET_POWER,
+			&irq_reg_rx_power_state,
+			sizeof(irq_reg_rx_power_state));
 
-			if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
-				!lane_status.bits.CR_DONE_0 ||
-				!lane_status.bits.SYMBOL_LOCKED_0) {
-				/* if one of the channel equalization, clock
-				 * recovery or symbol lock is dropped
-				 * consider it as (link has been
-				 * dropped) dp sink status has changed*/
-				sink_status_changed = true;
-				break;
-			}
-
-		}
-
-		/* Check interlane align.*/
-		if (sink_status_changed ||
-			!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.
-			INTERLANE_ALIGN_DONE) {
-
+		if (dpcd_result != DC_OK) {
 			dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
-				"%s: Link Status changed.\n",
+				"%s: DPCD read failed to obtain power state.\n",
 				__func__);
-
-			return_code = true;
+		} else {
+			if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+				return_code = false;
 		}
 	}
 
@@ -2062,6 +2057,24 @@
 			(dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
 }
 
+static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
+{
+	switch (bpc) {
+	case DOWN_STREAM_MAX_8BPC:
+		return 8;
+	case DOWN_STREAM_MAX_10BPC:
+		return 10;
+	case DOWN_STREAM_MAX_12BPC:
+		return 12;
+	case DOWN_STREAM_MAX_16BPC:
+		return 16;
+	default:
+		break;
+	}
+
+	return -1;
+}
+
 static void get_active_converter_info(
 	uint8_t data, struct dc_link *link)
 {
@@ -2131,7 +2144,8 @@
 					hdmi_caps.bits.YCrCr420_CONVERSION;
 
 				link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
-					hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT;
+					translate_dpcd_max_bpc(
+						hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
 
 				link->dpcd_caps.dongle_caps.extendedCapValid = true;
 			}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index d1cdf9f..b7422d3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -516,13 +516,11 @@
 			right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
 
 		if (right_view) {
-			data->viewport.width /= 2;
-			data->viewport_c.width /= 2;
-			data->viewport.x +=  data->viewport.width;
-			data->viewport_c.x +=  data->viewport_c.width;
+			data->viewport.x +=  data->viewport.width / 2;
+			data->viewport_c.x +=  data->viewport_c.width / 2;
 			/* Ceil offset pipe */
-			data->viewport.width += data->viewport.width % 2;
-			data->viewport_c.width += data->viewport_c.width % 2;
+			data->viewport.width = (data->viewport.width + 1) / 2;
+			data->viewport_c.width = (data->viewport_c.width + 1) / 2;
 		} else {
 			data->viewport.width /= 2;
 			data->viewport_c.width /= 2;
@@ -580,14 +578,12 @@
 	if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
 		pipe_ctx->plane_state) {
 		if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
-			pipe_ctx->plane_res.scl_data.recout.height /= 2;
-			pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height;
+			pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2;
 			/* Floor primary pipe, ceil 2ndary pipe */
-			pipe_ctx->plane_res.scl_data.recout.height += pipe_ctx->plane_res.scl_data.recout.height % 2;
+			pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
 		} else {
-			pipe_ctx->plane_res.scl_data.recout.width /= 2;
-			pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
-			pipe_ctx->plane_res.scl_data.recout.width += pipe_ctx->plane_res.scl_data.recout.width % 2;
+			pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2;
+			pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
 		}
 	} else if (pipe_ctx->bottom_pipe &&
 			pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
@@ -856,6 +852,7 @@
 	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
 	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
 
+
 	/* Taps calculations */
 	if (pipe_ctx->plane_res.xfm != NULL)
 		res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
@@ -864,16 +861,21 @@
 	if (pipe_ctx->plane_res.dpp != NULL)
 		res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
 				pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
-
 	if (!res) {
 		/* Try 24 bpp linebuffer */
 		pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
 
-		res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
-			pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+		if (pipe_ctx->plane_res.xfm != NULL)
+			res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+					pipe_ctx->plane_res.xfm,
+					&pipe_ctx->plane_res.scl_data,
+					&plane_state->scaling_quality);
 
-		res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
-			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+		if (pipe_ctx->plane_res.dpp != NULL)
+			res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+					pipe_ctx->plane_res.dpp,
+					&pipe_ctx->plane_res.scl_data,
+					&plane_state->scaling_quality);
 	}
 
 	if (res)
@@ -991,8 +993,10 @@
 
 	head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
 
-	if (!head_pipe)
+	if (!head_pipe) {
 		ASSERT(0);
+		return NULL;
+	}
 
 	if (!head_pipe->plane_state)
 		return head_pipe;
@@ -1447,11 +1451,16 @@
 
 static struct audio *find_first_free_audio(
 		struct resource_context *res_ctx,
-		const struct resource_pool *pool)
+		const struct resource_pool *pool,
+		enum engine_id id)
 {
 	int i;
 	for (i = 0; i < pool->audio_count; i++) {
 		if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
+			/* we have enough audio endpoints, find the matching instance */
+			if (id != i)
+				continue;
+
 			return pool->audios[i];
 		}
 	}
@@ -1700,7 +1709,7 @@
 	    dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
 	    stream->audio_info.mode_count) {
 		pipe_ctx->stream_res.audio = find_first_free_audio(
-		&context->res_ctx, pool);
+		&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
 
 		/*
 		 * Audio assigned in order first come first get.
@@ -1765,13 +1774,16 @@
 	enum dc_status result = DC_ERROR_UNEXPECTED;
 	int i, j;
 
+	if (!new_ctx)
+		return DC_ERROR_UNEXPECTED;
+
 	if (dc->res_pool->funcs->validate_global) {
 			result = dc->res_pool->funcs->validate_global(dc, new_ctx);
 			if (result != DC_OK)
 				return result;
 	}
 
-	for (i = 0; new_ctx && i < new_ctx->stream_count; i++) {
+	for (i = 0; i < new_ctx->stream_count; i++) {
 		struct dc_stream_state *stream = new_ctx->streams[i];
 
 		for (j = 0; j < dc->res_pool->pipe_count; j++) {
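
The viewport and recout hunks above replace a halving sequence that could lose or duplicate a row/column for odd sizes with an explicit floor/ceil split: the first pipe takes width/2, the second takes (width + 1)/2 starting at x + width/2, so the two halves always tile the original exactly. For an odd width of 5:

    #include <stdio.h>

    int main(void)
    {
        int width = 5;                  /* odd on purpose */
        int first    = width / 2;       /* floor: 2 */
        int second   = (width + 1) / 2; /* ceil:  3 */
        int second_x = width / 2;       /* second half starts where the first ends */

        printf("first=%d second=%d (sum=%d, second_x=%d)\n",
               first, second, first + second, second_x);
        return 0;
    }
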
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index b00a604..e230cc4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -263,7 +263,6 @@
 		struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
 		struct mem_input *mi = pipe_ctx->plane_res.mi;
 		struct hubp *hubp = pipe_ctx->plane_res.hubp;
-		struct transform *xfm = pipe_ctx->plane_res.xfm;
 		struct dpp *dpp = pipe_ctx->plane_res.dpp;
 		struct dc_cursor_position pos_cpy = *position;
 		struct dc_cursor_mi_param param = {
@@ -294,11 +293,11 @@
 		if (mi != NULL && mi->funcs->set_cursor_position != NULL)
 			mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
 
-		if (hubp != NULL && hubp->funcs->set_cursor_position != NULL)
-			hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+		if (!hubp)
+			continue;
 
-		if (xfm != NULL && xfm->funcs->set_cursor_position != NULL)
-			xfm->funcs->set_cursor_position(xfm, &pos_cpy, &param, hubp->curs_attr.width);
+		if (hubp->funcs->set_cursor_position != NULL)
+			hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
 
 		if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
 			dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 81c40f8..0df9ecb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -352,11 +352,11 @@
 	uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
 
 	set_reg_field_value(value, 1,
-			AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
-			CLOCK_GATING_DISABLE);
-		set_reg_field_value(value, 1,
-			AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
-			AUDIO_ENABLED);
+			    AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+			    CLOCK_GATING_DISABLE);
+	set_reg_field_value(value, 1,
+			    AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+			    AUDIO_ENABLED);
 
 	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
 	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 4fd49a1..e42b6eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -87,6 +87,9 @@
 	 */
 	uint32_t max_retries = 50;
 
+	/* we need to turn on the clock before programming the AFMT block */
+	REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
 	if (REG(AFMT_VBI_PACKET_CONTROL1)) {
 		if (packet_index >= 8)
 			ASSERT(0);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 1229a33..07ff8d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -991,6 +991,16 @@
 	struct dc_link *link = stream->sink->link;
 	struct dc *dc = pipe_ctx->stream->ctx->dc;
 
+	if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+		pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+			pipe_ctx->stream_res.stream_enc);
+
+	if (dc_is_dp_signal(pipe_ctx->stream->signal))
+		pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+			pipe_ctx->stream_res.stream_enc);
+
+	pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+			pipe_ctx->stream_res.stream_enc, true);
 	if (pipe_ctx->stream_res.audio) {
 		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
 
@@ -1015,18 +1025,6 @@
 		 */
 	}
 
-	if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
-		pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
-			pipe_ctx->stream_res.stream_enc);
-
-	if (dc_is_dp_signal(pipe_ctx->stream->signal))
-		pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
-			pipe_ctx->stream_res.stream_enc);
-
-	pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
-			pipe_ctx->stream_res.stream_enc, true);
-
-
 	/* blank at encoder level */
 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
 		if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP)
@@ -1774,6 +1772,10 @@
 	if (pipe_ctx->stream->sink->link->psr_enabled)
 		return DC_ERROR_UNEXPECTED;
 
+	/* Nothing to compress */
+	if (!pipe_ctx->plane_state)
+		return DC_ERROR_UNEXPECTED;
+
 	/* Only for non-linear tiling */
 	if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
 		return DC_ERROR_UNEXPECTED;
@@ -1868,8 +1870,10 @@
 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
 
-			/* disable already, no need to disable again */
-			if (pipe_ctx->stream && !pipe_ctx->stream->dpms_off)
+			/* Disable if the new stream is null. Otherwise, if the
+			 * stream is already disabled, no need to disable again.
+			 */
+			if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off)
 				core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE);
 
 			pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index db96d2b..61adb81 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -1037,11 +1037,13 @@
 	struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv),
 						 GFP_KERNEL);
 
-	if ((dce110_tgv == NULL) ||
-		(dce110_xfmv == NULL) ||
-		(dce110_miv == NULL) ||
-		(dce110_oppv == NULL))
-			return false;
+	if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) {
+		kfree(dce110_tgv);
+		kfree(dce110_xfmv);
+		kfree(dce110_miv);
+		kfree(dce110_oppv);
+		return false;
+	}
 
 	dce110_opp_v_construct(dce110_oppv, ctx);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 67ac737..4befce6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -1112,10 +1112,7 @@
 	enum signal_type signal)
 {
 	uint32_t h_blank;
-	uint32_t h_back_porch;
-	uint32_t hsync_offset = timing->h_border_right +
-			timing->h_front_porch;
-	uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+	uint32_t h_back_porch, hsync_offset, h_sync_start;
 
 	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
 
@@ -1124,6 +1121,9 @@
 	if (!timing)
 		return false;
 
+	hsync_offset = timing->h_border_right + timing->h_front_porch;
+	h_sync_start = timing->h_addressable + hsync_offset;
+
 	/* Currently we don't support 3D, so block all 3D timings */
 	if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE)
 		return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 4c4bd72..9fc8f82 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -912,11 +912,13 @@
 	struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
 	struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
 
-	if (!head_pipe)
+	if (!head_pipe) {
 		ASSERT(0);
+		return NULL;
+	}
 
 	if (!idle_pipe)
-		return false;
+		return NULL;
 
 	idle_pipe->stream = head_pipe->stream;
 	idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
index c7333cd..fced178 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
@@ -496,9 +496,6 @@
 		timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
 		return false;
 
-	if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
-		tg->ctx->dc->debug.disable_stereo_support)
-		return false;
 	/* Temporarily blocking interlacing mode until it's supported */
 	if (timing->flags.INTERLACE == 1)
 		return false;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index 01df856..94fc310 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -38,7 +38,7 @@
 	DC_FAIL_DETACH_SURFACES = 8,
 	DC_FAIL_SURFACE_VALIDATE = 9,
 	DC_NO_DP_LINK_BANDWIDTH = 10,
-	DC_EXCEED_DONGLE_MAX_CLK = 11,
+	DC_EXCEED_DONGLE_CAP = 11,
 	DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED = 12,
 	DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
 	DC_FAIL_SCALING = 14,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index 7c08bc6..ea88997 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -259,13 +259,6 @@
 			struct transform *xfm_base,
 			const struct dc_cursor_attributes *attr);
 
-	void (*set_cursor_position)(
-			struct transform *xfm_base,
-			const struct dc_cursor_position *pos,
-			const struct dc_cursor_mi_param *param,
-			uint32_t width
-			);
-
 };
 
 const uint16_t *get_filter_2tap_16p(void);
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 72b22b8..5a5427b 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -317,9 +317,8 @@
 				       formats, ARRAY_SIZE(formats),
 				       NULL,
 				       DRM_PLANE_TYPE_PRIMARY, NULL);
-	if (ret) {
+	if (ret)
 		return ERR_PTR(ret);
-	}
 
 	drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
 	hdlcd->plane = plane;
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 764d0c8..0afb53b 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/console.h>
 #include <linux/list.h>
 #include <linux/of_graph.h>
 #include <linux/of_reserved_mem.h>
@@ -354,7 +355,7 @@
 err_free:
 	drm_mode_config_cleanup(drm);
 	dev_set_drvdata(dev, NULL);
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 
 	return ret;
 }
@@ -379,7 +380,7 @@
 	pm_runtime_disable(drm->dev);
 	of_reserved_mem_device_release(drm->dev);
 	drm_mode_config_cleanup(drm);
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 	drm->dev_private = NULL;
 	dev_set_drvdata(dev, NULL);
 }
@@ -432,9 +433,11 @@
 		return 0;
 
 	drm_kms_helper_poll_disable(drm);
+	drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
 
 	hdlcd->state = drm_atomic_helper_suspend(drm);
 	if (IS_ERR(hdlcd->state)) {
+		drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
 		drm_kms_helper_poll_enable(drm);
 		return PTR_ERR(hdlcd->state);
 	}
@@ -451,8 +454,8 @@
 		return 0;
 
 	drm_atomic_helper_resume(drm, hdlcd->state);
+	drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
 	drm_kms_helper_poll_enable(drm);
-	pm_runtime_set_active(dev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 3615d18..904fff8 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -65,8 +65,8 @@
 	/* We rely on firmware to set mclk to a sensible level. */
 	clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
 
-	hwdev->modeset(hwdev, &vm);
-	hwdev->leave_config_mode(hwdev);
+	hwdev->hw->modeset(hwdev, &vm);
+	hwdev->hw->leave_config_mode(hwdev);
 	drm_crtc_vblank_on(crtc);
 }
 
@@ -77,8 +77,12 @@
 	struct malidp_hw_device *hwdev = malidp->dev;
 	int err;
 
+	/* always disable planes on the CRTC that is being turned off */
+	drm_atomic_helper_disable_planes_on_crtc(old_state, false);
+
 	drm_crtc_vblank_off(crtc);
-	hwdev->enter_config_mode(hwdev);
+	hwdev->hw->enter_config_mode(hwdev);
+
 	clk_disable_unprepare(hwdev->pxlclk);
 
 	err = pm_runtime_put(crtc->dev->dev);
@@ -319,7 +323,7 @@
 
 mclk_calc:
 	drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
-	ret = hwdev->se_calc_mclk(hwdev, s, &vm);
+	ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm);
 	if (ret < 0)
 		return -EINVAL;
 	return 0;
@@ -475,7 +479,7 @@
 	struct malidp_hw_device *hwdev = malidp->dev;
 
 	malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
-			     hwdev->map.de_irq_map.vsync_irq);
+			     hwdev->hw->map.de_irq_map.vsync_irq);
 	return 0;
 }
 
@@ -485,7 +489,7 @@
 	struct malidp_hw_device *hwdev = malidp->dev;
 
 	malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
-			      hwdev->map.de_irq_map.vsync_irq);
+			      hwdev->hw->map.de_irq_map.vsync_irq);
 }
 
 static const struct drm_crtc_funcs malidp_crtc_funcs = {
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index b894466..91f2b01 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -47,10 +47,10 @@
 	 * directly.
 	 */
 	malidp_hw_write(hwdev, gamma_write_mask,
-			hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
+			hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
 	for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
 		malidp_hw_write(hwdev, data[i],
-				hwdev->map.coeffs_base +
+				hwdev->hw->map.coeffs_base +
 				MALIDP_COEF_TABLE_DATA);
 }
 
@@ -103,7 +103,7 @@
 			for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
 				malidp_hw_write(hwdev,
 						mc->coloradj_coeffs[i],
-						hwdev->map.coeffs_base +
+						hwdev->hw->map.coeffs_base +
 						MALIDP_COLOR_ADJ_COEF + 4 * i);
 
 		malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
@@ -120,8 +120,8 @@
 	struct malidp_hw_device *hwdev = malidp->dev;
 	struct malidp_se_config *s = &cs->scaler_config;
 	struct malidp_se_config *old_s = &old_cs->scaler_config;
-	u32 se_control = hwdev->map.se_base +
-			 ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+	u32 se_control = hwdev->hw->map.se_base +
+			 ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
 			 0x10 : 0xC);
 	u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
 	u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
@@ -135,7 +135,7 @@
 		return;
 	}
 
-	hwdev->se_set_scaling_coeffs(hwdev, s, old_s);
+	hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s);
 	val = malidp_hw_read(hwdev, se_control);
 	val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;
 
@@ -170,9 +170,9 @@
 	int ret;
 
 	atomic_set(&malidp->config_valid, 0);
-	hwdev->set_config_valid(hwdev);
+	hwdev->hw->set_config_valid(hwdev);
 	/* don't wait for config_valid flag if we are in config mode */
-	if (hwdev->in_config_mode(hwdev))
+	if (hwdev->hw->in_config_mode(hwdev))
 		return 0;
 
 	ret = wait_event_interruptible_timeout(malidp->wq,
@@ -455,7 +455,7 @@
 	struct malidp_hw_device *hwdev = malidp->dev;
 
 	/* we can only suspend if the hardware is in config mode */
-	WARN_ON(!hwdev->in_config_mode(hwdev));
+	WARN_ON(!hwdev->hw->in_config_mode(hwdev));
 
 	hwdev->pm_suspended = true;
 	clk_disable_unprepare(hwdev->mclk);
@@ -500,11 +500,7 @@
 	if (!hwdev)
 		return -ENOMEM;
 
-	/*
-	 * copy the associated data from malidp_drm_of_match to avoid
-	 * having to keep a reference to the OF node after binding
-	 */
-	memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
+	hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
 	malidp->dev = hwdev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -568,13 +564,13 @@
 		goto query_hw_fail;
 	}
 
-	ret = hwdev->query_hw(hwdev);
+	ret = hwdev->hw->query_hw(hwdev);
 	if (ret) {
 		DRM_ERROR("Invalid HW configuration\n");
 		goto query_hw_fail;
 	}
 
-	version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
+	version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
 	DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
 		 (version >> 12) & 0xf, (version >> 8) & 0xf);
 
@@ -589,7 +585,7 @@
 
 	for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
 		out_depth = (out_depth << 8) | (output_width[i] & 0xf);
-	malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
+	malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
 
 	atomic_set(&malidp->config_valid, 0);
 	init_waitqueue_head(&malidp->wq);
@@ -671,7 +667,7 @@
 		malidp_runtime_pm_suspend(dev);
 	drm->dev_private = NULL;
 	dev_set_drvdata(dev, NULL);
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 alloc_fail:
 	of_reserved_mem_device_release(dev);
 
@@ -704,7 +700,7 @@
 		malidp_runtime_pm_suspend(dev);
 	drm->dev_private = NULL;
 	dev_set_drvdata(dev, NULL);
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 	of_reserved_mem_device_release(dev);
 }
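
The recurring hwdev->map -> hwdev->hw->map rewrite across the malidp files reflects a data-layout change: instead of memcpy()ing the per-SoC match data into each device, the device now keeps a pointer to one shared, read-only struct malidp_hw descriptor, while mutable state stays in malidp_hw_device. A minimal sketch of the shape (hypothetical types):

    #include <stddef.h>
    #include <stdbool.h>

    /* One shared, read-only descriptor per SoC family */
    struct hw_ops {
        unsigned int dc_base;
        int (*query_hw)(void *priv);
    };

    /* Per-device state keeps only a pointer to its descriptor */
    struct hw_device {
        const struct hw_ops *hw;    /* was: a memcpy'd mutable copy */
        bool pm_suspended;          /* mutable state stays per-device */
    };

    static const struct hw_ops soc_500 = { .dc_base = 0x0, .query_hw = NULL };

    int main(void)
    {
        struct hw_device dev = { .hw = &soc_500 };

        /* every access goes via dev.hw->..., e.g. dev.hw->dc_base;
         * two devices of the same family share one descriptor */
        return (int)dev.hw->dc_base;
    }
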
 
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 17bca99..2bfb542 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -183,7 +183,7 @@
 
 	malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
 	while (count) {
-		status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+		status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
 		if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
 			break;
 		/*
@@ -203,7 +203,7 @@
 	malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
 	malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
 	while (count) {
-		status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+		status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
 		if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
 			break;
 		usleep_range(100, 1000);
@@ -216,7 +216,7 @@
 {
 	u32 status;
 
-	status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+	status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
 	if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
 		return true;
 
@@ -407,7 +407,7 @@
 
 	malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
 	while (count) {
-		status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+		status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
 		if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
 			break;
 		/*
@@ -427,7 +427,7 @@
 	malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
 	malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
 	while (count) {
-		status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+		status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
 		if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
 			break;
 		usleep_range(100, 1000);
@@ -440,7 +440,7 @@
 {
 	u32 status;
 
-	status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+	status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
 	if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
 		return true;
 
@@ -616,7 +616,7 @@
 	return 0;
 }
 
-const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
+const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
 	[MALIDP_500] = {
 		.map = {
 			.coeffs_base = MALIDP500_COEFFS_BASE,
@@ -751,7 +751,7 @@
 {
 	u32 base = malidp_get_block_base(hwdev, block);
 
-	if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
+	if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
 		malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
 	else
 		malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
@@ -762,12 +762,14 @@
 	struct drm_device *drm = arg;
 	struct malidp_drm *malidp = drm->dev_private;
 	struct malidp_hw_device *hwdev;
+	struct malidp_hw *hw;
 	const struct malidp_irq_map *de;
 	u32 status, mask, dc_status;
 	irqreturn_t ret = IRQ_NONE;
 
 	hwdev = malidp->dev;
-	de = &hwdev->map.de_irq_map;
+	hw = hwdev->hw;
+	de = &hw->map.de_irq_map;
 
 	/*
 	 * if we are suspended it is likely that we were invoked because
@@ -778,8 +780,8 @@
 		return IRQ_NONE;
 
 	/* first handle the config valid IRQ */
-	dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
-	if (dc_status & hwdev->map.dc_irq_map.vsync_irq) {
+	dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
+	if (dc_status & hw->map.dc_irq_map.vsync_irq) {
 		/* we have a page flip event */
 		atomic_set(&malidp->config_valid, 1);
 		malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
@@ -832,11 +834,11 @@
 
 	/* first enable the DC block IRQs */
 	malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
-			     hwdev->map.dc_irq_map.irq_mask);
+			     hwdev->hw->map.dc_irq_map.irq_mask);
 
 	/* now enable the DE block IRQs */
 	malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
-			     hwdev->map.de_irq_map.irq_mask);
+			     hwdev->hw->map.de_irq_map.irq_mask);
 
 	return 0;
 }
@@ -847,9 +849,9 @@
 	struct malidp_hw_device *hwdev = malidp->dev;
 
 	malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
-			      hwdev->map.de_irq_map.irq_mask);
+			      hwdev->hw->map.de_irq_map.irq_mask);
 	malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
-			      hwdev->map.dc_irq_map.irq_mask);
+			      hwdev->hw->map.dc_irq_map.irq_mask);
 }
 
 static irqreturn_t malidp_se_irq(int irq, void *arg)
@@ -857,6 +859,8 @@
 	struct drm_device *drm = arg;
 	struct malidp_drm *malidp = drm->dev_private;
 	struct malidp_hw_device *hwdev = malidp->dev;
+	struct malidp_hw *hw = hwdev->hw;
+	const struct malidp_irq_map *se = &hw->map.se_irq_map;
 	u32 status, mask;
 
 	/*
@@ -867,12 +871,12 @@
 	if (hwdev->pm_suspended)
 		return IRQ_NONE;
 
-	status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
-	if (!(status & hwdev->map.se_irq_map.irq_mask))
+	status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
+	if (!(status & se->irq_mask))
 		return IRQ_NONE;
 
-	mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ);
-	status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+	mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
+	status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
 	status &= mask;
 	/* ToDo: status decoding and firing up of VSYNC and page flip events */
 
@@ -905,7 +909,7 @@
 	}
 
 	malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
-			     hwdev->map.se_irq_map.irq_mask);
+			     hwdev->hw->map.se_irq_map.irq_mask);
 
 	return 0;
 }
@@ -916,5 +920,5 @@
 	struct malidp_hw_device *hwdev = malidp->dev;
 
 	malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
-			      hwdev->map.se_irq_map.irq_mask);
+			      hwdev->hw->map.se_irq_map.irq_mask);
 }
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 849ad9a..b0690eb 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -120,18 +120,14 @@
 /* Unlike DP550/650, DP500 has 3 stride registers in its video layer. */
 #define MALIDP_DEVICE_LV_HAS_3_STRIDES	BIT(0)
 
-struct malidp_hw_device {
-	const struct malidp_hw_regmap map;
-	void __iomem *regs;
+struct malidp_hw_device;
 
-	/* APB clock */
-	struct clk *pclk;
-	/* AXI clock */
-	struct clk *aclk;
-	/* main clock for display core */
-	struct clk *mclk;
-	/* pixel clock for display core */
-	struct clk *pxlclk;
+/*
+ * Static structure containing hardware-specific data and pointers to
+ * functions that behave differently between various versions of the IP.
+ */
+struct malidp_hw {
+	const struct malidp_hw_regmap map;
 
 	/*
 	 * Validate the driver instance against the hardware bits
@@ -182,15 +178,6 @@
 			     struct videomode *vm);
 
 	u8 features;
-
-	u8 min_line_size;
-	u16 max_line_size;
-
-	/* track the device PM state */
-	bool pm_suspended;
-
-	/* size of memory used for rotating layers, up to two banks available */
-	u32 rotation_memory[2];
 };
 
 /* Supported variants of the hardware */
@@ -202,7 +189,33 @@
 	MALIDP_MAX_DEVICES
 };
 
-extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
+extern const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES];
+
+/*
+ * Structure used by the driver during runtime operation.
+ */
+struct malidp_hw_device {
+	struct malidp_hw *hw;
+	void __iomem *regs;
+
+	/* APB clock */
+	struct clk *pclk;
+	/* AXI clock */
+	struct clk *aclk;
+	/* main clock for display core */
+	struct clk *mclk;
+	/* pixel clock for display core */
+	struct clk *pxlclk;
+
+	u8 min_line_size;
+	u16 max_line_size;
+
+	/* track the device PM state */
+	bool pm_suspended;
+
+	/* size of memory used for rotating layers, up to two banks available */
+	u32 rotation_memory[2];
+};
 
 static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
 {
@@ -240,9 +253,9 @@
 {
 	switch (block) {
 	case MALIDP_SE_BLOCK:
-		return hwdev->map.se_base;
+		return hwdev->hw->map.se_base;
 	case MALIDP_DC_BLOCK:
-		return hwdev->map.dc_base;
+		return hwdev->hw->map.dc_base;
 	}
 
 	return 0;
@@ -275,7 +288,7 @@
 static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
 					 unsigned int pitch)
 {
-	return !(pitch & (hwdev->map.bus_align_bytes - 1));
+	return !(pitch & (hwdev->hw->map.bus_align_bytes - 1));
 }
 
 /* U16.16 */
@@ -308,8 +321,8 @@
 	};
 	u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) |
 		  MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL);
-	u32 image_enh = hwdev->map.se_base +
-			((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+	u32 image_enh = hwdev->hw->map.se_base +
+			((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
 			 0x10 : 0xC) + MALIDP_SE_IMAGE_ENH;
 	u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0;
 	int i;
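
The header hunks above split the old all-in-one malidp_hw_device into a const, per-IP-version descriptor (struct malidp_hw, holding the register map and the version-specific function pointers) and a runtime structure that merely points at it, which is why every hwdev->map access elsewhere in this series becomes hwdev->hw->map. A minimal userspace sketch of the same static-descriptor/runtime-state split; all names below are illustrative, not from the patch:

    #include <stdio.h>

    struct chip_ops {                    /* const per-version descriptor */
        const char *name;
        int (*query)(void);
    };

    static int query_v500(void) { return 500; }

    static const struct chip_ops chip_table[] = {
        { .name = "dp500", .query = query_v500 },
    };

    struct chip_dev {                    /* runtime state, one per instance */
        const struct chip_ops *ops;      /* points into the const table */
        int pm_suspended;
    };

    int main(void)
    {
        struct chip_dev dev = { .ops = &chip_table[0] };

        /* mirrors the hwdev->hw->query_hw(hwdev) shape in the patch */
        printf("%s: %d\n", dev.ops->name, dev.ops->query());
        return 0;
    }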
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 94e7e3f..e741979 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -57,7 +57,7 @@
 	struct malidp_plane *mp = to_malidp_plane(plane);
 
 	if (mp->base.fb)
-		drm_framebuffer_unreference(mp->base.fb);
+		drm_framebuffer_put(mp->base.fb);
 
 	drm_plane_helper_disable(plane);
 	drm_plane_cleanup(plane);
@@ -185,8 +185,9 @@
 
 	fb = state->fb;
 
-	ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
-					    fb->format->format);
+	ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
+					     mp->layer->id,
+					     fb->format->format);
 	if (ms->format == MALIDP_INVALID_FORMAT_ID)
 		return -EINVAL;
 
@@ -211,7 +212,7 @@
 	 * third plane stride register.
 	 */
 	if (ms->n_planes == 3 &&
-	    !(mp->hwdev->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
+	    !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
 	    (state->fb->pitches[1] != state->fb->pitches[2]))
 		return -EINVAL;
 
@@ -229,9 +230,9 @@
 	if (state->rotation & MALIDP_ROTATED_MASK) {
 		int val;
 
-		val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
-						 state->crtc_w,
-						 fb->format->format);
+		val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
+						     state->crtc_w,
+						     fb->format->format);
 		if (val < 0)
 			return val;
 
@@ -251,7 +252,7 @@
 		return;
 
 	if (num_planes == 3)
-		num_strides = (mp->hwdev->features &
+		num_strides = (mp->hwdev->hw->features &
 			       MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
 
 	for (i = 0; i < num_strides; ++i)
@@ -264,13 +265,11 @@
 				   struct drm_plane_state *old_state)
 {
 	struct malidp_plane *mp;
-	const struct malidp_hw_regmap *map;
 	struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
 	u32 src_w, src_h, dest_w, dest_h, val;
 	int i;
 
 	mp = to_malidp_plane(plane);
-	map = &mp->hwdev->map;
 
 	/* convert src values from Q16 fixed point to integer */
 	src_w = plane->state->src_w >> 16;
@@ -363,7 +362,7 @@
 int malidp_de_planes_init(struct drm_device *drm)
 {
 	struct malidp_drm *malidp = drm->dev_private;
-	const struct malidp_hw_regmap *map = &malidp->dev->map;
+	const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
 	struct malidp_plane *plane = NULL;
 	enum drm_plane_type plane_type;
 	unsigned long crtcs = 1 << drm->mode_config.num_crtc;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index b4efcba..d034b2c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -372,9 +372,18 @@
 };
 
 #ifdef CONFIG_DRM_I2C_ADV7511_CEC
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
-		     unsigned int offset);
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
 void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
+#else
+static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+{
+	unsigned int offset = adv7511->type == ADV7533 ?
+						ADV7533_REG_CEC_OFFSET : 0;
+
+	regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+		     ADV7511_CEC_CTRL_POWER_DOWN);
+	return 0;
+}
 #endif
 
 #ifdef CONFIG_DRM_I2C_ADV7533
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index b33d730..a20a45c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -300,18 +300,21 @@
 	return 0;
 }
 
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
-		     unsigned int offset)
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
 {
+	unsigned int offset = adv7511->type == ADV7533 ?
+						ADV7533_REG_CEC_OFFSET : 0;
 	int ret = adv7511_cec_parse_dt(dev, adv7511);
 
 	if (ret)
-		return ret;
+		goto err_cec_parse_dt;
 
 	adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
 		adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
-	if (IS_ERR(adv7511->cec_adap))
-		return PTR_ERR(adv7511->cec_adap);
+	if (IS_ERR(adv7511->cec_adap)) {
+		ret = PTR_ERR(adv7511->cec_adap);
+		goto err_cec_alloc;
+	}
 
 	regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
 	/* cec soft reset */
@@ -329,9 +332,18 @@
 		     ((adv7511->cec_clk_freq / 750000) - 1) << 2);
 
 	ret = cec_register_adapter(adv7511->cec_adap, dev);
-	if (ret) {
-		cec_delete_adapter(adv7511->cec_adap);
-		adv7511->cec_adap = NULL;
-	}
-	return ret;
+	if (ret)
+		goto err_cec_register;
+	return 0;
+
+err_cec_register:
+	cec_delete_adapter(adv7511->cec_adap);
+	adv7511->cec_adap = NULL;
+err_cec_alloc:
+	dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
+		 ret);
+err_cec_parse_dt:
+	regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+		     ADV7511_CEC_CTRL_POWER_DOWN);
+	return ret == -EPROBE_DEFER ? ret : 0;
 }
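
The reworked adv7511_cec_init() above unwinds through labelled error paths so that any CEC failure powers the block down, and only -EPROBE_DEFER is propagated to the caller; other errors are logged and swallowed so the HDMI bridge still probes without CEC. A small runnable sketch of that optional-feature unwind shape; the helper names are illustrative stand-ins, and 517 is assumed to match the kernel's EPROBE_DEFER value:

    #include <stdio.h>

    #define EPROBE_DEFER 517             /* assumed kernel value */

    static int parse_dt(void)    { return -22; }  /* pretend -EINVAL */
    static void power_down(void) { puts("CEC powered down"); }

    static int cec_init(void)
    {
        int ret = parse_dt();

        if (ret)
            goto err_parse;
        return 0;

    err_parse:
        printf("CEC init failed (%d), disabling CEC\n", ret);
        power_down();
        /* only probe deferral is fatal to the caller */
        return ret == -EPROBE_DEFER ? ret : 0;
    }

    int main(void) { return cec_init(); }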
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 0e14f15..efa29db 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1084,7 +1084,6 @@
 	struct device *dev = &i2c->dev;
 	unsigned int main_i2c_addr = i2c->addr << 1;
 	unsigned int edid_i2c_addr = main_i2c_addr + 4;
-	unsigned int offset;
 	unsigned int val;
 	int ret;
 
@@ -1192,24 +1191,16 @@
 	if (adv7511->type == ADV7511)
 		adv7511_set_link_config(adv7511, &link_config);
 
+	ret = adv7511_cec_init(dev, adv7511);
+	if (ret)
+		goto err_unregister_cec;
+
 	adv7511->bridge.funcs = &adv7511_bridge_funcs;
 	adv7511->bridge.of_node = dev->of_node;
 
 	drm_bridge_add(&adv7511->bridge);
 
 	adv7511_audio_init(dev, adv7511);
-
-	offset = adv7511->type == ADV7533 ? ADV7533_REG_CEC_OFFSET : 0;
-
-#ifdef CONFIG_DRM_I2C_ADV7511_CEC
-	ret = adv7511_cec_init(dev, adv7511, offset);
-	if (ret)
-		goto err_unregister_cec;
-#else
-	regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
-		     ADV7511_CEC_CTRL_POWER_DOWN);
-#endif
-
 	return 0;
 
 err_unregister_cec:
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
index 0903ba5..75b0d3f 100644
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ b/drivers/gpu/drm/bridge/lvds-encoder.c
@@ -13,13 +13,37 @@
 
 #include <linux/of_graph.h>
 
+struct lvds_encoder {
+	struct drm_bridge bridge;
+	struct drm_bridge *panel_bridge;
+};
+
+static int lvds_encoder_attach(struct drm_bridge *bridge)
+{
+	struct lvds_encoder *lvds_encoder = container_of(bridge,
+							 struct lvds_encoder,
+							 bridge);
+
+	return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
+				 bridge);
+}
+
+static struct drm_bridge_funcs funcs = {
+	.attach = lvds_encoder_attach,
+};
+
 static int lvds_encoder_probe(struct platform_device *pdev)
 {
 	struct device_node *port;
 	struct device_node *endpoint;
 	struct device_node *panel_node;
 	struct drm_panel *panel;
-	struct drm_bridge *bridge;
+	struct lvds_encoder *lvds_encoder;
+
+	lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder),
+				    GFP_KERNEL);
+	if (!lvds_encoder)
+		return -ENOMEM;
 
 	/* Locate the panel DT node. */
 	port = of_graph_get_port_by_id(pdev->dev.of_node, 1);
@@ -49,20 +73,30 @@
 		return -EPROBE_DEFER;
 	}
 
-	bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
-	if (IS_ERR(bridge))
-		return PTR_ERR(bridge);
+	lvds_encoder->panel_bridge =
+		devm_drm_panel_bridge_add(&pdev->dev,
+					  panel, DRM_MODE_CONNECTOR_LVDS);
+	if (IS_ERR(lvds_encoder->panel_bridge))
+		return PTR_ERR(lvds_encoder->panel_bridge);
 
-	platform_set_drvdata(pdev, bridge);
+	/* The panel_bridge bridge is attached to the panel's of_node,
+	 * but we need a bridge attached to our of_node for our user
+	 * to look up.
+	 */
+	lvds_encoder->bridge.of_node = pdev->dev.of_node;
+	lvds_encoder->bridge.funcs = &funcs;
+	drm_bridge_add(&lvds_encoder->bridge);
+
+	platform_set_drvdata(pdev, lvds_encoder);
 
 	return 0;
 }
 
 static int lvds_encoder_remove(struct platform_device *pdev)
 {
-	struct drm_bridge *bridge = platform_get_drvdata(pdev);
+	struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
 
-	drm_bridge_remove(bridge);
+	drm_bridge_remove(&lvds_encoder->bridge);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index bf14214..b72259b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -138,6 +138,7 @@
 	struct device *dev;
 	struct clk *isfr_clk;
 	struct clk *iahb_clk;
+	struct clk *cec_clk;
 	struct dw_hdmi_i2c *i2c;
 
 	struct hdmi_data_info hdmi_data;
@@ -2382,6 +2383,26 @@
 		goto err_isfr;
 	}
 
+	hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec");
+	if (PTR_ERR(hdmi->cec_clk) == -ENOENT) {
+		hdmi->cec_clk = NULL;
+	} else if (IS_ERR(hdmi->cec_clk)) {
+		ret = PTR_ERR(hdmi->cec_clk);
+		if (ret != -EPROBE_DEFER)
+			dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n",
+				ret);
+
+		hdmi->cec_clk = NULL;
+		goto err_iahb;
+	} else {
+		ret = clk_prepare_enable(hdmi->cec_clk);
+		if (ret) {
+			dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n",
+				ret);
+			goto err_iahb;
+		}
+	}
+
 	/* Product and revision IDs */
 	hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8)
 		      | (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0);
@@ -2518,6 +2539,8 @@
 		cec_notifier_put(hdmi->cec_notifier);
 
 	clk_disable_unprepare(hdmi->iahb_clk);
+	if (hdmi->cec_clk)
+		clk_disable_unprepare(hdmi->cec_clk);
 err_isfr:
 	clk_disable_unprepare(hdmi->isfr_clk);
 err_res:
@@ -2541,6 +2564,8 @@
 
 	clk_disable_unprepare(hdmi->iahb_clk);
 	clk_disable_unprepare(hdmi->isfr_clk);
+	if (hdmi->cec_clk)
+		clk_disable_unprepare(hdmi->cec_clk);
 
 	if (hdmi->i2c)
 		i2c_del_adapter(&hdmi->i2c->adap);
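
The dw-hdmi hunk above treats the "cec" clock as optional: devm_clk_get() returning -ENOENT means "not wired up in DT, carry on", any other error aborts the probe, and every later clk call is guarded by a NULL check. A userspace sketch of that optional-resource pattern, with the kernel's ERR_PTR encoding reimplemented here purely for illustration:

    #include <stdio.h>

    #define ENOENT 2
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define PTR_ERR(ptr)  ((long)(ptr))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-4095)

    static void *get_clk(int present)
    {
        return present ? (void *)0x1000 : ERR_PTR(-ENOENT);
    }

    int main(void)
    {
        void *clk = get_clk(0);

        if (PTR_ERR(clk) == -ENOENT)
            clk = NULL;                 /* optional: absent is fine */
        else if (IS_ERR(clk))
            return (int)-PTR_ERR(clk);  /* real error: abort */

        if (clk)                        /* guard every later use */
            puts("clock enabled");
        else
            puts("no cec clock, continuing");
        return 0;
    }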
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8571cfd..8636e7e 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -97,7 +97,7 @@
 #define DP0_ACTIVEVAL		0x0650
 #define DP0_SYNCVAL		0x0654
 #define DP0_MISC		0x0658
-#define TU_SIZE_RECOMMENDED		(0x3f << 16) /* LSCLK cycles per TU */
+#define TU_SIZE_RECOMMENDED		(63) /* LSCLK cycles per TU */
 #define BPC_6				(0 << 5)
 #define BPC_8				(1 << 5)
 
@@ -318,7 +318,7 @@
 				tmp = (tmp << 8) | buf[i];
 			i++;
 			if (((i % 4) == 0) || (i == size)) {
-				tc_write(DP0_AUXWDATA(i >> 2), tmp);
+				tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
 				tmp = 0;
 			}
 		}
@@ -603,8 +603,15 @@
 	ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
 	if (ret < 0)
 		goto err_dpcd_read;
-	if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
-		goto err_dpcd_inval;
+	if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
+		dev_dbg(tc->dev, "Falling back to 2.7 Gbps rate\n");
+		tc->link.base.rate = 270000;
+	}
+
+	if (tc->link.base.num_lanes > 2) {
+		dev_dbg(tc->dev, "Falling back to 2 lanes\n");
+		tc->link.base.num_lanes = 2;
+	}
 
 	ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
 	if (ret < 0)
@@ -637,9 +644,6 @@
 err_dpcd_read:
 	dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
 	return ret;
-err_dpcd_inval:
-	dev_err(tc->dev, "invalid DPCD\n");
-	return -EINVAL;
 }
 
 static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
@@ -655,6 +659,14 @@
 	int lower_margin = mode->vsync_start - mode->vdisplay;
 	int vsync_len = mode->vsync_end - mode->vsync_start;
 
+	/*
+	 * Recommended maximum number of symbols transferred in a transfer unit:
+	 * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+	 *              (output active video bandwidth in bytes))
+	 * Must be less than tu_size.
+	 */
+	max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
 	dev_dbg(tc->dev, "set mode %dx%d\n",
 		mode->hdisplay, mode->vdisplay);
 	dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
@@ -664,13 +676,18 @@
 	dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
 
 
-	/* LCD Ctl Frame Size */
-	tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
+	/*
+	 * LCD Ctl Frame Size
+	 * The datasheet is not clear about VSDELAY in the DPI case;
+	 * assume no delay is needed when DPI is the source of the
+	 * sync signals.
+	 */
+	tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
 		 OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
-	tc_write(HTIM01, (left_margin << 16) |		/* H back porch */
-			 (hsync_len << 0));		/* Hsync */
-	tc_write(HTIM02, (right_margin << 16) |		/* H front porch */
-			 (mode->hdisplay << 0));	/* width */
+	tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
+			 (ALIGN(hsync_len, 2) << 0));	 /* Hsync */
+	tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) |  /* H front porch */
+			 (ALIGN(mode->hdisplay, 2) << 0)); /* width */
 	tc_write(VTIM01, (upper_margin << 16) |		/* V back porch */
 			 (vsync_len << 0));		/* Vsync */
 	tc_write(VTIM02, (lower_margin << 16) |		/* V front porch */
@@ -689,7 +706,7 @@
 	/* DP Main Stream Attributes */
 	vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
 	tc_write(DP0_VIDSYNCDELAY,
-		 (0x003e << 16) |	/* thresh_dly */
+		 (max_tu_symbol << 16) |	/* thresh_dly */
 		 (vid_sync_dly << 0));
 
 	tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
@@ -705,14 +722,8 @@
 	tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
 		 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
 
-	/*
-	 * Recommended maximum number of symbols transferred in a transfer unit:
-	 * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
-	 *              (output active video bandwidth in bytes))
-	 * Must be less than tu_size.
-	 */
-	max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
-	tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
+	tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
+			   BPC_8);
 
 	return 0;
 err:
@@ -808,8 +819,6 @@
 	unsigned int rate;
 	u32 dp_phy_ctrl;
 	int timeout;
-	bool aligned;
-	bool ready;
 	u32 value;
 	int ret;
 	u8 tmp[8];
@@ -954,16 +963,15 @@
 		ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
 		if (ret < 0)
 			goto err_dpcd_read;
-		ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
-				     DP_CHANNEL_EQ_BITS));      /* Lane0 */
-		aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
-	} while ((--timeout) && !(ready && aligned));
+	} while ((--timeout) &&
+		 !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes)));
 
 	if (timeout == 0) {
 		/* Read DPCD 0x200-0x201 */
 		ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
 		if (ret < 0)
 			goto err_dpcd_read;
+		dev_err(dev, "channel(s) EQ not ok\n");
 		dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
 		dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
 			 tmp[1]);
@@ -974,10 +982,6 @@
 		dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
 			 tmp[6]);
 
-		if (!ready)
-			dev_err(dev, "Lane0/1 not ready\n");
-		if (!aligned)
-			dev_err(dev, "Lane0/1 not aligned\n");
 		return -EAGAIN;
 	}
 
@@ -1099,7 +1103,10 @@
 static int tc_connector_mode_valid(struct drm_connector *connector,
 				   struct drm_display_mode *mode)
 {
-	/* Accept any mode */
+	/* DPI interface clock limitation: up to 154 MHz */
+	if (mode->clock > 154000)
+		return MODE_CLOCK_HIGH;
+
 	return MODE_OK;
 }
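
Two of the tc358767 fixes above are pure index/field arithmetic: the AUX write loop flushes tmp when i has already been incremented past the last byte written, so the register index must come from i - 1, and TU_SIZE_RECOMMENDED is now the raw value 63 with the << 16 applied only at the DP0_MISC write instead of being baked into the constant. A quick runnable check of both:

    #include <stdio.h>

    int main(void)
    {
        /* AUXWDATA register index for a flush after byte i-1 */
        for (int i = 4; i <= 16; i += 4)
            printf("i=%2d old reg=%d new reg=%d\n",
                   i, i >> 2, (i - 1) >> 2);   /* old skips reg 0 */

        unsigned tu_size = 63;                 /* TU_SIZE_RECOMMENDED */
        unsigned max_tu_symbol = tu_size - 1;
        unsigned dp0_misc = (max_tu_symbol << 23) | (tu_size << 16);
        printf("DP0_MISC=0x%08x\n", dp0_misc); /* each field placed once */
        return 0;
    }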
 
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 71d712f..b16f1d6 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1225,7 +1225,7 @@
 		return;
 
 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
-		if (!new_crtc_state->active || !new_crtc_state->planes_changed)
+		if (!new_crtc_state->active)
 			continue;
 
 		ret = drm_crtc_vblank_get(crtc);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0737400..e561663 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1809,6 +1809,10 @@
 
 	if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
 		DRM_INFO("Cannot find any crtc or sizes\n");
+
+		/* First time: disable all CRTCs. */
+		if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master))
+			restore_fbdev_mode(fb_helper);
 		return -EAGAIN;
 	}
 
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 3c31843..3551208 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -282,6 +282,7 @@
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 				    int type, unsigned int resolution)
 {
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
 	if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -307,6 +308,7 @@
 	port->type = type;
 
 	emulate_monitor_status_change(vgpu);
+	vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 4427be1..940cdaa 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -496,6 +496,12 @@
 		goto err_unpin_mm;
 	}
 
+	ret = intel_gvt_generate_request(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to generate request\n");
+		goto err_unpin_mm;
+	}
+
 	ret = prepare_shadow_batch_buffer(workload);
 	if (ret) {
 		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2801d70..8e33114 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -311,9 +311,9 @@
 
 #define GTT_HAW 46
 
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
 
 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 {
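
The mask change above fixes an off-by-one: with GTT_HAW = 46 the hardware address width covers bits 0..45, so a 4K-page address mask must span bits 12..45 (34 bits), not 12..46. A runnable check of the width each formula produces (the popcount builtin assumes GCC/Clang):

    #include <stdio.h>

    #define GTT_HAW 46

    int main(void)
    {
        unsigned long old_4k = (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12);
        unsigned long new_4k = (((1UL << (GTT_HAW - 12)) - 1) << 12);

        printf("old: %d bits (top bit 46, beyond HAW)\n",
               __builtin_popcountl(old_4k));
        printf("new: %d bits (bits 12..45)\n",
               __builtin_popcountl(new_4k));
        return 0;
    }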
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a5bed2e..44cd5ff 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1381,40 +1381,6 @@
 	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
 }
 
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
-		void *p_data, unsigned int bytes)
-{
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	u32 v = *(u32 *)p_data;
-
-	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
-		return intel_vgpu_default_mmio_write(vgpu,
-				offset, p_data, bytes);
-
-	switch (offset) {
-	case 0x4ddc:
-		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-		vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
-		break;
-	case 0x42080:
-		/* bypass WaCompressedResourceDisplayNewHashMode */
-		vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
-		break;
-	case 0xe194:
-		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-		vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
-		break;
-	case 0x7014:
-		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-		vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
@@ -1671,8 +1637,8 @@
 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
 		NULL, NULL);
-	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
-		 skl_misc_ctl_write);
+	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		 NULL, NULL);
 	MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2564,8 +2530,7 @@
 	MMIO_D(0x6e570, D_BDW_PLUS);
 	MMIO_D(0x65f10, D_BDW_PLUS);
 
-	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
-		 skl_misc_ctl_write);
+	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2615,8 +2580,8 @@
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
-	MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+	MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
 	MMIO_D(0x45504, D_SKL_PLUS);
 	MMIO_D(0x45520, D_SKL_PLUS);
 	MMIO_D(0x46000, D_SKL_PLUS);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f6ded47..3ac1dc9 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -140,9 +140,10 @@
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	enum intel_engine_id ring_id = req->engine->id;
 	struct intel_vgpu_workload *workload;
+	unsigned long flags;
 
 	if (!is_gvt_request(req)) {
-		spin_lock_bh(&scheduler->mmio_context_lock);
+		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
 		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
 		    scheduler->engine_owner[ring_id]) {
 			/* Switch ring from vGPU to host. */
@@ -150,7 +151,7 @@
 					      NULL, ring_id);
 			scheduler->engine_owner[ring_id] = NULL;
 		}
-		spin_unlock_bh(&scheduler->mmio_context_lock);
+		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
 
 		return NOTIFY_OK;
 	}
@@ -161,7 +162,7 @@
 
 	switch (action) {
 	case INTEL_CONTEXT_SCHEDULE_IN:
-		spin_lock_bh(&scheduler->mmio_context_lock);
+		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
 		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
 			/* Switch ring from host to vGPU or vGPU to vGPU. */
 			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
@@ -170,7 +171,7 @@
 		} else
 			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
 				      ring_id, workload->vgpu->id);
-		spin_unlock_bh(&scheduler->mmio_context_lock);
+		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
@@ -253,7 +254,6 @@
 	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct drm_i915_gem_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_ring *ring;
 	int ret;
@@ -299,6 +299,26 @@
 	ret = populate_shadow_context(workload);
 	if (ret)
 		goto err_unpin;
+	workload->shadowed = true;
+	return 0;
+
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+	return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+	struct drm_i915_gem_request *rq;
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+	int ret;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
@@ -313,14 +333,11 @@
 	ret = copy_workload_to_ring_buffer(workload);
 	if (ret)
 		goto err_unpin;
-	workload->shadowed = true;
 	return 0;
 
 err_unpin:
 	engine->context_unpin(engine, shadow_ctx);
-err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
 	return ret;
 }
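
The scheduler.c hunks above swap spin_lock_bh() for spin_lock_irqsave(), which suggests the shadow-context notifier can run in contexts where interrupts may already be disabled: _bh only masks softirqs, while irqsave records the caller's interrupt state and restores exactly that state on unlock. A stub sketch of why the flags must be saved rather than blindly re-enabled; the lock primitives here are fakes for illustration only:

    #include <stdio.h>

    static int irqs_enabled = 1;

    static void lock_irqsave(long *flags)
    {
        *flags = irqs_enabled;   /* remember the caller's state */
        irqs_enabled = 0;
    }

    static void unlock_irqrestore(long flags)
    {
        irqs_enabled = flags;    /* restore it, don't force-enable */
    }

    static void notifier(void)
    {
        long flags;

        lock_irqsave(&flags);
        /* ... switch engine mmio ownership ... */
        unlock_irqrestore(flags);
    }

    int main(void)
    {
        irqs_enabled = 0;        /* caller already has IRQs off */
        notifier();
        printf("irqs_enabled=%d (still off, as the caller expects)\n",
               irqs_enabled);
        return 0;
    }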
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2d694f6..b9f8722 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -142,4 +142,7 @@
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
 
 void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gemfs.c b/drivers/gpu/drm/i915/i915_gemfs.c
index e299385..888b7d3 100644
--- a/drivers/gpu/drm/i915/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/i915_gemfs.c
@@ -52,7 +52,8 @@
 
 	if (has_transparent_hugepage()) {
 		struct super_block *sb = gemfs->mnt_sb;
-		char options[] = "huge=within_size";
+		/* FIXME: Disabled until we get W/A for read BW issue. */
+		char options[] = "huge=never";
 		int flags = 0;
 		int err;
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7bc60c8..6c7f8bc 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1736,7 +1736,7 @@
 int intel_backlight_device_register(struct intel_connector *connector);
 void intel_backlight_device_unregister(struct intel_connector *connector);
 #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
+static inline int intel_backlight_device_register(struct intel_connector *connector)
 {
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index b8af351..ea96682 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -697,10 +697,8 @@
 
 	/* Due to peculiar init order wrt hpd handling this is separate. */
 	if (drm_fb_helper_initial_config(&ifbdev->helper,
-					 ifbdev->preferred_bpp)) {
+					 ifbdev->preferred_bpp))
 		intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
-		intel_fbdev_fini(to_i915(ifbdev->helper.dev));
-	}
 }
 
 void intel_fbdev_initial_config_async(struct drm_device *dev)
@@ -800,7 +798,11 @@
 {
 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
-	if (ifbdev)
+	if (!ifbdev)
+		return;
+
+	intel_fbdev_sync(ifbdev);
+	if (ifbdev->vma)
 		drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index eb58271..49fdf09 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -438,7 +438,9 @@
 gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
 {
 	return (i + 1 < num &&
-		!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+		msgs[i].addr == msgs[i + 1].addr &&
+		!(msgs[i].flags & I2C_M_RD) &&
+		(msgs[i].len == 1 || msgs[i].len == 2) &&
 		(msgs[i + 1].flags & I2C_M_RD));
 }
 
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 93c7e3f..17d2f3a 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -133,9 +133,16 @@
 			plane_disabling = true;
 	}
 
-	if (plane_disabling) {
-		drm_atomic_helper_wait_for_vblanks(dev, state);
+	/*
+	 * The flip done wait is only strictly required by imx-drm if a deferred
+	 * plane disable is in-flight. As the core requires blocking commits
+	 * to wait for the flip, it is done here unconditionally. This keeps the
+	 * workitem around a bit longer than required for the majority of
+	 * non-blocking commits, but we accept that for the sake of simplicity.
+	 */
+	drm_atomic_helper_wait_for_flip_done(dev, state);
 
+	if (plane_disabling) {
 		for_each_old_plane_in_state(state, plane, old_plane_state, i)
 			ipu_plane_disable_deferred(plane);
 
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index c226da1..a349cb6 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -35,6 +35,7 @@
 
 config DRM_OMAP_PANEL_DPI
 	tristate "Generic DPI panel"
+	depends on BACKLIGHT_CLASS_DEVICE
 	help
 	  Driver for generic DPI panels.
 
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index daf286f..ca1e3b48 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -566,8 +566,8 @@
 }
 
 static const struct soc_device_attribute dpi_soc_devices[] = {
-	{ .family = "OMAP3[456]*" },
-	{ .family = "[AD]M37*" },
+	{ .machine = "OMAP3[456]*" },
+	{ .machine = "[AD]M37*" },
 	{ /* sentinel */ }
 };
 
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index d86873f..e626edd 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -352,7 +352,7 @@
 {
 	const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
 			 CEC_CAP_PASSTHROUGH | CEC_CAP_RC;
-	unsigned int ret;
+	int ret;
 
 	core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core,
 		"omap4", caps, CEC_MAX_LOG_ADDRS);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 62e4511..b06f995 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -886,25 +886,36 @@
 	bool audio_use_mclk;
 };
 
-static const struct hdmi4_features hdmi4_es1_features = {
+static const struct hdmi4_features hdmi4430_es1_features = {
 	.cts_swmode = false,
 	.audio_use_mclk = false,
 };
 
-static const struct hdmi4_features hdmi4_es2_features = {
+static const struct hdmi4_features hdmi4430_es2_features = {
 	.cts_swmode = true,
 	.audio_use_mclk = false,
 };
 
-static const struct hdmi4_features hdmi4_es3_features = {
+static const struct hdmi4_features hdmi4_features = {
 	.cts_swmode = true,
 	.audio_use_mclk = true,
 };
 
 static const struct soc_device_attribute hdmi4_soc_devices[] = {
-	{ .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
-	{ .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
-	{ .family = "OMAP4",			  .data = &hdmi4_es3_features },
+	{
+		.machine = "OMAP4430",
+		.revision = "ES1.?",
+		.data = &hdmi4430_es1_features,
+	},
+	{
+		.machine = "OMAP4430",
+		.revision = "ES2.?",
+		.data = &hdmi4430_es2_features,
+	},
+	{
+		.family = "OMAP4",
+		.data = &hdmi4_features,
+	},
 	{ /* sentinel */ }
 };
 
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 1dd3daf..c60a85e 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -638,7 +638,8 @@
 		match = of_match_node(dmm_of_match, dev->dev.of_node);
 		if (!match) {
 			dev_err(&dev->dev, "failed to find matching device node\n");
-			return -ENODEV;
+			ret = -ENODEV;
+			goto fail;
 		}
 
 		omap_dmm->plat_data = match->data;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 898f9a0..a651191 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -5451,28 +5451,6 @@
 	WREG32(VM_INVALIDATE_REQUEST, 0x1);
 }
 
-static void cik_pcie_init_compute_vmid(struct radeon_device *rdev)
-{
-	int i;
-	uint32_t sh_mem_bases, sh_mem_config;
-
-	sh_mem_bases = 0x6000 | 0x6000 << 16;
-	sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
-	sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
-
-	mutex_lock(&rdev->srbm_mutex);
-	for (i = 8; i < 16; i++) {
-		cik_srbm_select(rdev, 0, 0, 0, i);
-		/* CP and shaders */
-		WREG32(SH_MEM_CONFIG, sh_mem_config);
-		WREG32(SH_MEM_APE1_BASE, 1);
-		WREG32(SH_MEM_APE1_LIMIT, 0);
-		WREG32(SH_MEM_BASES, sh_mem_bases);
-	}
-	cik_srbm_select(rdev, 0, 0, 0, 0);
-	mutex_unlock(&rdev->srbm_mutex);
-}
-
 /**
  * cik_pcie_gart_enable - gart enable
  *
@@ -5586,8 +5564,6 @@
 	cik_srbm_select(rdev, 0, 0, 0, 0);
 	mutex_unlock(&rdev->srbm_mutex);
 
-	cik_pcie_init_compute_vmid(rdev);
-
 	cik_pcie_gart_tlb_flush(rdev);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(rdev->mc.gtt_size >> 20),
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index b15755b..b1fe063 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -1285,8 +1285,6 @@
 		goto err_pllref;
 	}
 
-	pm_runtime_enable(dev);
-
 	dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
 	dsi->dsi_host.dev = dev;
 	ret = mipi_dsi_host_register(&dsi->dsi_host);
@@ -1301,6 +1299,7 @@
 	}
 
 	dev_set_drvdata(dev, dsi);
+	pm_runtime_enable(dev);
 	return 0;
 
 err_mipi_dsi_host:
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b0551aa..8d7172e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1062,7 +1062,6 @@
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
 
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
 {
 	unsigned i, j;
@@ -1133,7 +1132,6 @@
 	ttm_pool_unpopulate(&tt->ttm);
 }
 EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
-#endif
 
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 {
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index a27d852..a0cc1bc 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -490,7 +490,7 @@
 		if (b == -1)
 			goto err;
 
-		k->ptr[i] = PTR(ca->buckets[b].gen,
+		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
 				bucket_to_sector(c, b),
 				ca->sb.nr_this_dev);
 
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 11c5503..81e8dc3 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -807,7 +807,10 @@
 	c->shrink.scan_objects = bch_mca_scan;
 	c->shrink.seeks = 4;
 	c->shrink.batch = c->btree_pages * 2;
-	register_shrinker(&c->shrink);
+
+	if (register_shrinker(&c->shrink))
+		pr_warn("bcache: %s: could not register shrinker\n",
+			__func__);
 
 	return 0;
 }
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 41c238f..f9d3917 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -585,7 +585,7 @@
 		return false;
 
 	for (i = 0; i < KEY_PTRS(l); i++)
-		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+		if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
 		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
 			return false;
 
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 02a98dd..a87165c 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -170,6 +170,11 @@
 		 * find a sequence of buckets with valid journal entries
 		 */
 		for (i = 0; i < ca->sb.njournal_buckets; i++) {
+			/*
+			 * We must try the index l computed from i == 0
+			 * first, for correctness: the journal buckets form
+			 * a circular buffer which might have wrapped around.
+			 */
 			l = (i * 2654435769U) % ca->sb.njournal_buckets;
 
 			if (test_bit(l, bitmap))
@@ -507,7 +512,7 @@
 			continue;
 
 		ja->cur_idx = next;
-		k->ptr[n++] = PTR(0,
+		k->ptr[n++] = MAKE_PTR(0,
 				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
 				  ca->sb.nr_this_dev);
 	}
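
The probe above uses l = (i * 2654435769U) % njournal_buckets; the multiplier is 0x9E3779B9, the 32-bit golden-ratio constant used for multiplicative hashing, so successive i values typically visit the buckets in a scattered, pseudo-random order (repeats, when the count shares a factor with the constant, are skipped via the bitmap in the real code), and i == 0 always probes bucket 0 first, which the new comment says matters when the circular journal has wrapped. A runnable look at the probe order, with an arbitrary illustrative bucket count:

    #include <stdio.h>

    int main(void)
    {
        unsigned n = 11;                  /* illustrative bucket count */

        for (unsigned i = 0; i < n; i++)
            printf("i=%u -> bucket %u\n", i,
                   (unsigned)((i * 2654435769U) % n));
        return 0;
    }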
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 3a7aed7..643c3021 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -708,16 +708,15 @@
 {
 	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
-	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	/*
-	 * If cache device is dirty (dc->has_dirty is non-zero), then
-	 * recovery a failed read request from cached device may get a
-	 * stale data back. So read failure recovery is only permitted
-	 * when cache device is clean.
+	 * If the read request hit dirty data (s->read_dirty_data is
+	 * true), then recovering a failed read request from the cached
+	 * device may return stale data. So read failure recovery is
+	 * only permitted when the read request hit clean data in the
+	 * cache device, or when a cache read race happened.
 	 */
-	if (s->recoverable &&
-	    (dc && !atomic_read(&dc->has_dirty))) {
+	if (s->recoverable && !s->read_dirty_data) {
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
 
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ea80ff4..ccfa98a 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -122,6 +122,10 @@
 	struct device_attribute force_ro;
 	struct device_attribute power_ro_lock;
 	int	area_type;
+
+	/* debugfs files (only in main mmc_blk_data) */
+	struct dentry *status_dentry;
+	struct dentry *ext_csd_dentry;
 };
 
 /* Device type for RPMB character devices */
@@ -233,9 +237,14 @@
 
 	/* Dispatch locking to the block layer */
 	req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
+	if (IS_ERR(req)) {
+		count = PTR_ERR(req);
+		goto out_put;
+	}
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
 	blk_execute_rq(mq->queue, NULL, req, 0);
 	ret = req_to_mmc_queue_req(req)->drv_op_result;
+	blk_put_request(req);
 
 	if (!ret) {
 		pr_info("%s: Locking boot partition ro until next power on\n",
@@ -248,7 +257,7 @@
 				set_disk_ro(part_md->disk, 1);
 			}
 	}
-
+out_put:
 	mmc_blk_put(md);
 	return count;
 }
@@ -624,6 +633,10 @@
 	req = blk_get_request(mq->queue,
 		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
 		__GFP_RECLAIM);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto cmd_done;
+	}
 	idatas[0] = idata;
 	req_to_mmc_queue_req(req)->drv_op =
 		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
@@ -691,6 +704,10 @@
 	req = blk_get_request(mq->queue,
 		idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
 		__GFP_RECLAIM);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto cmd_err;
+	}
 	req_to_mmc_queue_req(req)->drv_op =
 		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
 	req_to_mmc_queue_req(req)->drv_op_data = idata;
@@ -2550,6 +2567,8 @@
 
 	/* Ask the block layer about the card status */
 	req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
 	blk_execute_rq(mq->queue, NULL, req, 0);
 	ret = req_to_mmc_queue_req(req)->drv_op_result;
@@ -2557,6 +2576,7 @@
 		*val = ret;
 		ret = 0;
 	}
+	blk_put_request(req);
 
 	return ret;
 }
@@ -2583,10 +2603,15 @@
 
 	/* Ask the block layer for the EXT CSD */
 	req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto out_free;
+	}
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
 	req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
 	blk_execute_rq(mq->queue, NULL, req, 0);
 	err = req_to_mmc_queue_req(req)->drv_op_result;
+	blk_put_request(req);
 	if (err) {
 		pr_err("FAILED %d\n", err);
 		goto out_free;
@@ -2632,7 +2657,7 @@
 	.llseek		= default_llseek,
 };
 
-static int mmc_blk_add_debugfs(struct mmc_card *card)
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
 {
 	struct dentry *root;
 
@@ -2642,28 +2667,53 @@
 	root = card->debugfs_root;
 
 	if (mmc_card_mmc(card) || mmc_card_sd(card)) {
-		if (!debugfs_create_file("status", S_IRUSR, root, card,
-					 &mmc_dbg_card_status_fops))
+		md->status_dentry =
+			debugfs_create_file("status", S_IRUSR, root, card,
+					    &mmc_dbg_card_status_fops);
+		if (!md->status_dentry)
 			return -EIO;
 	}
 
 	if (mmc_card_mmc(card)) {
-		if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
-					 &mmc_dbg_ext_csd_fops))
+		md->ext_csd_dentry =
+			debugfs_create_file("ext_csd", S_IRUSR, root, card,
+					    &mmc_dbg_ext_csd_fops);
+		if (!md->ext_csd_dentry)
 			return -EIO;
 	}
 
 	return 0;
 }
 
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+				   struct mmc_blk_data *md)
+{
+	if (!card->debugfs_root)
+		return;
+
+	if (!IS_ERR_OR_NULL(md->status_dentry)) {
+		debugfs_remove(md->status_dentry);
+		md->status_dentry = NULL;
+	}
+
+	if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
+		debugfs_remove(md->ext_csd_dentry);
+		md->ext_csd_dentry = NULL;
+	}
+}
 
 #else
 
-static int mmc_blk_add_debugfs(struct mmc_card *card)
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
 {
 	return 0;
 }
 
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+				   struct mmc_blk_data *md)
+{
+}
+
 #endif /* CONFIG_DEBUG_FS */
 
 static int mmc_blk_probe(struct mmc_card *card)
@@ -2703,7 +2753,7 @@
 	}
 
 	/* Add two debugfs entries */
-	mmc_blk_add_debugfs(card);
+	mmc_blk_add_debugfs(card, md);
 
 	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
 	pm_runtime_use_autosuspend(&card->dev);
@@ -2729,6 +2779,7 @@
 {
 	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
 
+	mmc_blk_remove_debugfs(card, md);
 	mmc_blk_remove_parts(card, md);
 	pm_runtime_get_sync(&card->dev);
 	mmc_claim_host(card->host);
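
The block.c hunks above all add the same missing step: blk_get_request() returns an ERR_PTR on failure, so each driver-op dispatch must check IS_ERR() before touching the request and call blk_put_request() when done. A compact sketch of that acquire/check/release shape; the queue and request machinery is stubbed here for illustration:

    #include <stdio.h>

    #define IS_ERR(p)   ((unsigned long)(p) >= (unsigned long)-4095)
    #define PTR_ERR(p)  ((long)(p))

    struct request { int result; };

    static struct request *blk_get_request(int fail)
    {
        static struct request rq;
        return fail ? (struct request *)(long)-12 /* -ENOMEM */ : &rq;
    }

    static void blk_put_request(struct request *rq) { (void)rq; }

    static int get_card_status(int *val, int fail)
    {
        struct request *req = blk_get_request(fail);
        int ret;

        if (IS_ERR(req))
            return (int)PTR_ERR(req);   /* the added check */
        /* ... execute and read back the result ... */
        ret = req->result;
        *val = ret;
        blk_put_request(req);           /* the added release */
        return ret;
    }

    int main(void)
    {
        int val = 0;
        printf("ok path: %d, failure path: %d\n",
               get_card_status(&val, 0), get_card_status(&val, 1));
        return 0;
    }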
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index a4b49e2..7586ff2 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -157,6 +157,9 @@
 		return ret;
 
 	ret = host->bus_ops->suspend(host);
+	if (ret)
+		pm_generic_resume(dev);
+
 	return ret;
 }
 
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 01e459a..0f4a7d7 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -314,4 +314,5 @@
 void mmc_remove_card_debugfs(struct mmc_card *card)
 {
 	debugfs_remove_recursive(card->debugfs_root);
+	card->debugfs_root = NULL;
 }
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index a552f61..d209fb4 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -781,7 +781,7 @@
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
 MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
-MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
 MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
 	card->ext_csd.device_life_time_est_typ_a,
 	card->ext_csd.device_life_time_est_typ_b);
@@ -791,7 +791,7 @@
 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
 
 static ssize_t mmc_fwrev_show(struct device *dev,
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 45bf78f..62b84dd 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -675,7 +675,7 @@
 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 
 
 static ssize_t mmc_dsr_show(struct device *dev,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 3fb7d2e..c283291 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -29,6 +29,9 @@
 #define CORE_VERSION_MAJOR_MASK		(0xf << CORE_VERSION_MAJOR_SHIFT)
 #define CORE_VERSION_MINOR_MASK		0xff
 
+#define CORE_MCI_GENERICS		0x70
+#define SWITCHABLE_SIGNALING_VOLTAGE	BIT(29)
+
 #define CORE_HC_MODE		0x78
 #define HC_MODE_EN		0x1
 #define CORE_POWER		0x0
@@ -1028,12 +1031,23 @@
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
 	bool done = false;
+	u32 val;
 
 	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
 			mmc_hostname(host->mmc), __func__, req_type,
 			msm_host->curr_pwr_state, msm_host->curr_io_level);
 
 	/*
+	 * The power interrupt will not be generated for signal voltage
+	 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
+	 */
+	val = readl(msm_host->core_mem + CORE_MCI_GENERICS);
+	if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
+	    !(val & SWITCHABLE_SIGNALING_VOLTAGE))
+		return;
+
+	/*
 	 * The IRQ for request type IO High/LOW will be generated when -
 	 * there is a state change in 1.8V enable bit (bit 3) of
 	 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 2f14334..e9290a3 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
+#include <linux/swiotlb.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
@@ -3651,17 +3652,6 @@
 	spin_lock_init(&host->lock);
 
 	/*
-	 * Maximum number of segments. Depends on if the hardware
-	 * can do scatter/gather or not.
-	 */
-	if (host->flags & SDHCI_USE_ADMA)
-		mmc->max_segs = SDHCI_MAX_SEGS;
-	else if (host->flags & SDHCI_USE_SDMA)
-		mmc->max_segs = 1;
-	else /* PIO */
-		mmc->max_segs = SDHCI_MAX_SEGS;
-
-	/*
 	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
 	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
 	 * is less anyway.
@@ -3669,6 +3659,24 @@
 	mmc->max_req_size = 524288;
 
 	/*
+	 * Maximum number of segments. Depends on if the hardware
+	 * can do scatter/gather or not.
+	 */
+	if (host->flags & SDHCI_USE_ADMA) {
+		mmc->max_segs = SDHCI_MAX_SEGS;
+	} else if (host->flags & SDHCI_USE_SDMA) {
+		mmc->max_segs = 1;
+		if (swiotlb_max_segment()) {
+			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
+						IO_TLB_SEGSIZE;
+			mmc->max_req_size = min(mmc->max_req_size,
+						max_req_size);
+		}
+	} else { /* PIO */
+		mmc->max_segs = SDHCI_MAX_SEGS;
+	}
+
+	/*
 	 * Maximum segment size. Could be one segment with the maximum number
 	 * of bytes. When doing hardware scatter/gather, each entry cannot
 	 * be larger than 64 KiB though.
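
The SDMA branch above caps max_req_size when the transfer may go through a swiotlb bounce buffer: a single mapping cannot exceed IO_TLB_SEGSIZE slots of (1 << IO_TLB_SHIFT) bytes. With the kernel's usual values (IO_TLB_SHIFT = 11, IO_TLB_SEGSIZE = 128, stated here as an assumption) that is 256 KiB, below the 512 KiB SDMA boundary default:

    #include <stdio.h>

    #define IO_TLB_SHIFT   11   /* assumed default: 2 KiB slots */
    #define IO_TLB_SEGSIZE 128  /* assumed default: slots per segment */

    int main(void)
    {
        unsigned int max_req_size = 524288;   /* SDMA boundary, 512 KiB */
        unsigned int swiotlb_cap = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

        if (swiotlb_cap < max_req_size)
            max_req_size = swiotlb_cap;
        printf("max_req_size = %u KiB\n", max_req_size / 1024);
        return 0;
    }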
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 25da74d..f837d66 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1449,19 +1449,19 @@
 	int srcu_idx, ret;
 	u8 data[16] = { 0, };
 
+	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+	if (unlikely(!ns))
+		return -EWOULDBLOCK;
+
 	put_unaligned_le64(key, &data[0]);
 	put_unaligned_le64(sa_key, &data[8]);
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = op;
-	c.common.nsid = cpu_to_le32(head->ns_id);
+	c.common.nsid = cpu_to_le32(ns->head->ns_id);
 	c.common.cdw10[0] = cpu_to_le32(cdw10);
 
-	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
-	if (unlikely(!ns))
-		ret = -EWOULDBLOCK;
-	else
-		ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
 	nvme_put_ns_from_disk(head, srcu_idx);
 	return ret;
 }
@@ -2961,8 +2961,6 @@
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
-	struct nvme_ns_head *head = ns->head;
-
 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
 		return;
 
@@ -2980,15 +2978,14 @@
 
 	mutex_lock(&ns->ctrl->subsys->lock);
 	nvme_mpath_clear_current_path(ns);
-	if (head)
-		list_del_rcu(&ns->siblings);
+	list_del_rcu(&ns->siblings);
 	mutex_unlock(&ns->ctrl->subsys->lock);
 
 	mutex_lock(&ns->ctrl->namespaces_mutex);
 	list_del_init(&ns->list);
 	mutex_unlock(&ns->ctrl->namespaces_mutex);
 
-	synchronize_srcu(&head->srcu);
+	synchronize_srcu(&ns->head->srcu);
 	nvme_put_ns(ns);
 }
 
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 42232e7..9ba6149 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -156,4 +156,34 @@
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 
+static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
+		struct request *rq)
+{
+	struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+	/*
+	 * We cannot accept any other command until the connect command has
+	 * completed, so only allow connect to pass.
+	 */
+	if (!blk_rq_is_passthrough(rq) ||
+	    cmd->common.opcode != nvme_fabrics_command ||
+	    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+		/*
+		 * Reconnecting state means transport disruption, which can
+		 * take a long time and might even fail permanently; fail
+		 * fast to give upper layers a chance to fail over.
+		 * Deleting state means that the ctrl will never accept
+		 * commands again; fail it permanently.
+		 */
+		if (ctrl->state == NVME_CTRL_RECONNECTING ||
+		    ctrl->state == NVME_CTRL_DELETING) {
+			nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+			return BLK_STS_IOERR;
+		}
+		return BLK_STS_RESOURCE; /* try again later */
+	}
+
+	return BLK_STS_OK;
+}
+
 #endif /* _NVME_FABRICS_H */
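
nvmf_check_init_req() above encodes a small decision table: until a queue is live only the fabrics connect command may pass, and anything else either fails fast (reconnecting or deleting controllers) or is asked to retry; the fc.c hunks that follow show the real call site. A runnable rendering of that table, with the block status codes modeled as a plain enum purely for illustration:

    #include <stdio.h>

    enum blk_status { BLK_STS_OK, BLK_STS_RESOURCE, BLK_STS_IOERR };
    enum ctrl_state { CTRL_LIVE, CTRL_RECONNECTING, CTRL_DELETING };

    static enum blk_status check_init_req(int is_connect_cmd,
                                          enum ctrl_state state)
    {
        if (is_connect_cmd)
            return BLK_STS_OK;              /* connect always passes */
        if (state == CTRL_RECONNECTING || state == CTRL_DELETING)
            return BLK_STS_IOERR;           /* fail fast / permanently */
        return BLK_STS_RESOURCE;            /* try again later */
    }

    int main(void)
    {
        printf("connect while reconnecting: %d\n",
               check_init_req(1, CTRL_RECONNECTING));   /* 0: OK */
        printf("read while reconnecting:    %d\n",
               check_init_req(0, CTRL_RECONNECTING));   /* 2: IOERR */
        printf("read on not-yet-live queue: %d\n",
               check_init_req(0, CTRL_LIVE));           /* 1: retry */
        return 0;
    }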
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7ab0be5..0a8af4d 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -31,7 +31,8 @@
 
 
 enum nvme_fc_queue_flags {
-	NVME_FC_Q_CONNECTED = (1 << 0),
+	NVME_FC_Q_CONNECTED = 0,
+	NVME_FC_Q_LIVE,
 };
 
 #define NVMEFC_QUEUE_DELAY	3		/* ms units */
@@ -1927,6 +1928,7 @@
 	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
 		return;
 
+	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
 	/*
 	 * Current implementation never disconnects a single queue.
 	 * It always terminates a whole association. So there is never
@@ -1934,7 +1936,6 @@
 	 */
 
 	queue->connection_id = 0;
-	clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
 }
 
 static void
@@ -2013,6 +2014,8 @@
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
 			break;
+
+		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
 	}
 
 	return ret;
@@ -2320,6 +2323,14 @@
 	return BLK_STS_RESOURCE;
 }
 
+static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
+		struct request *rq)
+{
+	if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
+		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+	return BLK_STS_OK;
+}
+
 static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 			const struct blk_mq_queue_data *bd)
@@ -2335,6 +2346,10 @@
 	u32 data_len;
 	blk_status_t ret;
 
+	ret = nvme_fc_is_ready(queue, rq);
+	if (unlikely(ret))
+		return ret;
+
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
 		return ret;
@@ -2727,6 +2742,8 @@
 	if (ret)
 		goto out_disconnect_admin_queue;
 
+	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
 	/*
 	 * Check controller capabilities
 	 *
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 78d9215..1218a9f 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -131,7 +131,7 @@
 		bio->bi_opf |= REQ_NVME_MPATH;
 		ret = direct_make_request(bio);
 	} else if (!list_empty_careful(&head->list)) {
-		dev_warn_ratelimited(dev, "no path available - requeing I/O\n");
+		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
 
 		spin_lock_irq(&head->requeue_lock);
 		bio_list_add(&head->requeue_list, bio);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index c0873a6..ea1aa52 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -114,7 +114,7 @@
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
  * found empirically.
  */
-#define NVME_QUIRK_DELAY_AMOUNT		2000
+#define NVME_QUIRK_DELAY_AMOUNT		2300
 
 enum nvme_ctrl_state {
 	NVME_CTRL_NEW,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a11cfd4..f5800c3c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1759,6 +1759,7 @@
 			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
 			dev->host_mem_descs, dev->host_mem_descs_dma);
 	dev->host_mem_descs = NULL;
+	dev->nr_host_mem_descs = 0;
 }
 
 static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
@@ -1787,7 +1788,7 @@
 	if (!bufs)
 		goto out_free_descs;
 
-	for (size = 0; size < preferred; size += len) {
+	for (size = 0; size < preferred && i < max_entries; size += len) {
 		dma_addr_t dma_addr;
 
 		len = min_t(u64, chunk_size, preferred - size);
@@ -2428,7 +2429,7 @@
 	return -ENODEV;
 }
 
-static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 {
 	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
 		/*
@@ -2443,6 +2444,14 @@
 		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
 		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
 			return NVME_QUIRK_NO_DEEPEST_PS;
+	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
+		/*
+		 * Samsung SSD 960 EVO drops off the PCIe bus after system
+		 * suspend on a Ryzen board, ASUS PRIME B350M-A.
+		 */
+		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
+		    dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
+			return NVME_QUIRK_NO_APST;
 	}
 
 	return 0;
@@ -2482,7 +2491,7 @@
 	if (result)
 		goto unmap;
 
-	quirks |= check_dell_samsung_bug(pdev);
+	quirks |= check_vendor_combination_bug(pdev);
 
 	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
 			quirks);
@@ -2665,6 +2674,8 @@
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
+		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
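
The host-memory-buffer loop above gains a second bound so the descriptor index can no longer run past the array when the preferred size is large and the chunk size small. A self-contained userspace illustration of the same two-bound pattern (all names illustrative):

#include <stddef.h>

#define MAX_ENTRIES 4

static size_t fill_chunks(size_t preferred, size_t chunk_size,
			  size_t lens[MAX_ENTRIES])
{
	size_t size = 0, len, i = 0;

	/* Stop at the preferred total *or* a full descriptor array,
	 * whichever comes first. */
	for (; size < preferred && i < MAX_ENTRIES; size += len, i++) {
		len = chunk_size < preferred - size ?
				chunk_size : preferred - size;
		lens[i] = len;
	}
	return i;	/* number of descriptors actually used */
}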
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 4f9bf2f..37af565 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <rdma/mr_pool.h>
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/atomic.h>
@@ -59,6 +60,9 @@
 	struct nvme_request	req;
 	struct ib_mr		*mr;
 	struct nvme_rdma_qe	sqe;
+	union nvme_result	result;
+	__le16			status;
+	refcount_t		ref;
 	struct ib_sge		sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
 	u32			num_sge;
 	int			nents;
@@ -73,11 +77,11 @@
 enum nvme_rdma_queue_flags {
 	NVME_RDMA_Q_ALLOCATED		= 0,
 	NVME_RDMA_Q_LIVE		= 1,
+	NVME_RDMA_Q_TR_READY		= 2,
 };
 
 struct nvme_rdma_queue {
 	struct nvme_rdma_qe	*rsp_ring;
-	atomic_t		sig_count;
 	int			queue_size;
 	size_t			cmnd_capsule_len;
 	struct nvme_rdma_ctrl	*ctrl;
@@ -258,32 +262,6 @@
 	return ret;
 }
 
-static int nvme_rdma_reinit_request(void *data, struct request *rq)
-{
-	struct nvme_rdma_ctrl *ctrl = data;
-	struct nvme_rdma_device *dev = ctrl->device;
-	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-	int ret = 0;
-
-	if (WARN_ON_ONCE(!req->mr))
-		return 0;
-
-	ib_dereg_mr(req->mr);
-
-	req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
-			ctrl->max_fr_pages);
-	if (IS_ERR(req->mr)) {
-		ret = PTR_ERR(req->mr);
-		req->mr = NULL;
-		goto out;
-	}
-
-	req->mr->need_inval = false;
-
-out:
-	return ret;
-}
-
 static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
 		struct request *rq, unsigned int hctx_idx)
 {
@@ -293,9 +271,6 @@
 	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
 	struct nvme_rdma_device *dev = queue->device;
 
-	if (req->mr)
-		ib_dereg_mr(req->mr);
-
 	nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
 			DMA_TO_DEVICE);
 }
@@ -317,21 +292,9 @@
 	if (ret)
 		return ret;
 
-	req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
-			ctrl->max_fr_pages);
-	if (IS_ERR(req->mr)) {
-		ret = PTR_ERR(req->mr);
-		goto out_free_qe;
-	}
-
 	req->queue = queue;
 
 	return 0;
-
-out_free_qe:
-	nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
-			DMA_TO_DEVICE);
-	return -ENOMEM;
 }
 
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -428,10 +391,23 @@
 
 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 {
-	struct nvme_rdma_device *dev = queue->device;
-	struct ib_device *ibdev = dev->dev;
+	struct nvme_rdma_device *dev;
+	struct ib_device *ibdev;
 
-	rdma_destroy_qp(queue->cm_id);
+	if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
+		return;
+
+	dev = queue->device;
+	ibdev = dev->dev;
+
+	ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
+
+	/*
+	 * The cm_id object might have been destroyed during the RDMA
+	 * connection establishment error flow (to avoid receiving further
+	 * cma events), so the QP must not be destroyed via the rdma_cm API.
+	 */
+	ib_destroy_qp(queue->qp);
 	ib_free_cq(queue->ib_cq);
 
 	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
@@ -440,6 +416,12 @@
 	nvme_rdma_dev_put(dev);
 }
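
From this point in the file, MRs come from a per-QP pool (rdma/mr_pool.h) sized to the queue depth, rather than one MR allocated per request for the request's whole lifetime. The lifecycle, condensed from the hunks in this file:

	/* once per queue, after the QP exists */
	ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
			      queue->queue_size, IB_MR_TYPE_MEM_REG,
			      nvme_rdma_get_max_fr_pages(ibdev));

	/* per I/O that needs registration */
	req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
	/* ... and on unmap: */
	ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);

	/* once per queue, before the QP goes away */
	ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);

Because the pool holds queue_size MRs, ib_mr_pool_get() should never come back empty in normal operation, which is why the map path below only WARNs and returns -EAGAIN if it does.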
 
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+{
+	return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+		     ibdev->attrs.max_fast_reg_page_list_len);
+}
+
 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 {
 	struct ib_device *ibdev;
@@ -482,8 +464,24 @@
 		goto out_destroy_qp;
 	}
 
+	ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
+			      queue->queue_size,
+			      IB_MR_TYPE_MEM_REG,
+			      nvme_rdma_get_max_fr_pages(ibdev));
+	if (ret) {
+		dev_err(queue->ctrl->ctrl.device,
+			"failed to initialize MR pool sized %d for QID %d\n",
+			queue->queue_size, idx);
+		goto out_destroy_ring;
+	}
+
+	set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
+
 	return 0;
 
+out_destroy_ring:
+	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+			    sizeof(struct nvme_completion), DMA_FROM_DEVICE);
 out_destroy_qp:
 	rdma_destroy_qp(queue->cm_id);
 out_destroy_ib_cq:
@@ -510,7 +508,6 @@
 		queue->cmnd_capsule_len = sizeof(struct nvme_command);
 
 	queue->queue_size = queue_size;
-	atomic_set(&queue->sig_count, 0);
 
 	queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
 			RDMA_PS_TCP, IB_QPT_RC);
@@ -546,6 +543,7 @@
 
 out_destroy_cm_id:
 	rdma_destroy_id(queue->cm_id);
+	nvme_rdma_destroy_queue_ib(queue);
 	return ret;
 }
 
@@ -756,8 +754,7 @@
 
 	ctrl->device = ctrl->queues[0].device;
 
-	ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
-		ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+	ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
 
 	if (new) {
 		ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
@@ -771,10 +768,6 @@
 			error = PTR_ERR(ctrl->ctrl.admin_q);
 			goto out_free_tagset;
 		}
-	} else {
-		error = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
-		if (error)
-			goto out_free_queue;
 	}
 
 	error = nvme_rdma_start_queue(ctrl, 0);
@@ -854,10 +847,6 @@
 			goto out_free_tag_set;
 		}
 	} else {
-		ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
-		if (ret)
-			goto out_free_io_queues;
-
 		blk_mq_update_nr_hw_queues(&ctrl->tag_set,
 			ctrl->ctrl.queue_count - 1);
 	}
@@ -1018,8 +1007,18 @@
 
 static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	if (unlikely(wc->status != IB_WC_SUCCESS))
+	struct nvme_rdma_request *req =
+		container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
+	struct request *rq = blk_mq_rq_from_pdu(req);
+
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
 		nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
+		return;
+	}
+
+	if (refcount_dec_and_test(&req->ref))
+		nvme_end_request(rq, req->status, req->result);
 }
 
 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
@@ -1030,7 +1029,7 @@
 		.opcode		    = IB_WR_LOCAL_INV,
 		.next		    = NULL,
 		.num_sge	    = 0,
-		.send_flags	    = 0,
+		.send_flags	    = IB_SEND_SIGNALED,
 		.ex.invalidate_rkey = req->mr->rkey,
 	};
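
The local-invalidate WR goes from unsignaled to IB_SEND_SIGNALED because its completion handler now helps finish the request (it drops one of the two references the map path takes); an unsignaled WR would never invoke nvme_rdma_inv_rkey_done() on success. The wiring implied by the container_of() in that handler, sketched here (its exact placement in the function is assumed):

	req->reg_cqe.done = nvme_rdma_inv_rkey_done;
	wr.wr_cqe = &req->reg_cqe;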
 
@@ -1044,22 +1043,15 @@
 		struct request *rq)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 	struct nvme_rdma_device *dev = queue->device;
 	struct ib_device *ibdev = dev->dev;
-	int res;
 
 	if (!blk_rq_bytes(rq))
 		return;
 
-	if (req->mr->need_inval && test_bit(NVME_RDMA_Q_LIVE, &req->queue->flags)) {
-		res = nvme_rdma_inv_rkey(queue, req);
-		if (unlikely(res < 0)) {
-			dev_err(ctrl->ctrl.device,
-				"Queueing INV WR for rkey %#x failed (%d)\n",
-				req->mr->rkey, res);
-			nvme_rdma_error_recovery(queue->ctrl);
-		}
+	if (req->mr) {
+		ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+		req->mr = NULL;
 	}
 
 	ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
@@ -1118,12 +1110,18 @@
 	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
 	int nr;
 
+	req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
+	if (WARN_ON_ONCE(!req->mr))
+		return -EAGAIN;
+
 	/*
 	 * Align the MR to a 4K page size to match the ctrl page size and
 	 * the block virtual boundary.
 	 */
 	nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
 	if (unlikely(nr < count)) {
+		ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+		req->mr = NULL;
 		if (nr < 0)
 			return nr;
 		return -EINVAL;
@@ -1142,8 +1140,6 @@
 			     IB_ACCESS_REMOTE_READ |
 			     IB_ACCESS_REMOTE_WRITE;
 
-	req->mr->need_inval = true;
-
 	sg->addr = cpu_to_le64(req->mr->iova);
 	put_unaligned_le24(req->mr->length, sg->length);
 	put_unaligned_le32(req->mr->rkey, sg->key);
@@ -1163,7 +1159,7 @@
 
 	req->num_sge = 1;
 	req->inline_data = false;
-	req->mr->need_inval = false;
+	refcount_set(&req->ref, 2); /* send and recv completions */
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
 
@@ -1200,25 +1196,24 @@
 
 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	if (unlikely(wc->status != IB_WC_SUCCESS))
+	struct nvme_rdma_qe *qe =
+		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
+	struct nvme_rdma_request *req =
+		container_of(qe, struct nvme_rdma_request, sqe);
+	struct request *rq = blk_mq_rq_from_pdu(req);
+
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
 		nvme_rdma_wr_error(cq, wc, "SEND");
-}
+		return;
+	}
 
-/*
- * We want to signal completion at least every queue depth/2.  This returns the
- * largest power of two that is not above half of (queue size + 1) to optimize
- * (avoid divisions).
- */
-static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
-{
-	int limit = 1 << ilog2((queue->queue_size + 1) / 2);
-
-	return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
+	if (refcount_dec_and_test(&req->ref))
+		nvme_end_request(rq, req->status, req->result);
 }
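
With sends posted signaled unconditionally (see the post_send hunk below), each request has two completions that can race: the SEND completion and the response (or local-invalidate) completion. Both done handlers above therefore share one pattern, condensed here:

	refcount_set(&req->ref, 2);	/* send and recv completions */

	/* in each completion path: */
	if (refcount_dec_and_test(&req->ref))
		nvme_end_request(rq, req->status, req->result);

Whichever completion fires last ends the request, which closes the window where the block layer could recycle the request while its SEND was still outstanding.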
 
 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
 		struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
-		struct ib_send_wr *first, bool flush)
+		struct ib_send_wr *first)
 {
 	struct ib_send_wr wr, *bad_wr;
 	int ret;
@@ -1227,31 +1222,12 @@
 	sge->length = sizeof(struct nvme_command);
 	sge->lkey   = queue->device->pd->local_dma_lkey;
 
-	qe->cqe.done = nvme_rdma_send_done;
-
 	wr.next       = NULL;
 	wr.wr_cqe     = &qe->cqe;
 	wr.sg_list    = sge;
 	wr.num_sge    = num_sge;
 	wr.opcode     = IB_WR_SEND;
-	wr.send_flags = 0;
-
-	/*
-	 * Unsignalled send completions are another giant desaster in the
-	 * IB Verbs spec:  If we don't regularly post signalled sends
-	 * the send queue will fill up and only a QP reset will rescue us.
-	 * Would have been way to obvious to handle this in hardware or
-	 * at least the RDMA stack..
-	 *
-	 * Always signal the flushes. The magic request used for the flush
-	 * sequencer is not allocated in our driver's tagset and it's
-	 * triggered to be freed by blk_cleanup_queue(). So we need to
-	 * always mark it as signaled to ensure that the "wr_cqe", which is
-	 * embedded in request's payload, is not freed when __ib_process_cq()
-	 * calls wr_cqe->done().
-	 */
-	if (nvme_rdma_queue_sig_limit(queue) || flush)
-		wr.send_flags |= IB_SEND_SIGNALED;
+	wr.send_flags = IB_SEND_SIGNALED;
 
 	if (first)
 		first->next = &wr;
@@ -1301,6 +1277,12 @@
 	return queue->ctrl->tag_set.tags[queue_idx - 1];
 }
 
+static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	if (unlikely(wc->status != IB_WC_SUCCESS))
+		nvme_rdma_wr_error(cq, wc, "ASYNC");
+}
+
 static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
@@ -1319,10 +1301,12 @@
 	cmd->common.flags |= NVME_CMD_SGL_METABUF;
 	nvme_rdma_set_sg_null(cmd);
 
+	sqe->cqe.done = nvme_rdma_async_done;
+
 	ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
 			DMA_TO_DEVICE);
 
-	ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
+	ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
 	WARN_ON_ONCE(ret);
 }
 
@@ -1343,14 +1327,34 @@
 	}
 	req = blk_mq_rq_to_pdu(rq);
 
-	if (rq->tag == tag)
-		ret = 1;
+	req->status = cqe->status;
+	req->result = cqe->result;
 
-	if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
-	    wc->ex.invalidate_rkey == req->mr->rkey)
-		req->mr->need_inval = false;
+	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
+		if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
+			dev_err(queue->ctrl->ctrl.device,
+				"Bogus remote invalidation for rkey %#x\n",
+				req->mr->rkey);
+			nvme_rdma_error_recovery(queue->ctrl);
+		}
+	} else if (req->mr) {
+		ret = nvme_rdma_inv_rkey(queue, req);
+		if (unlikely(ret < 0)) {
+			dev_err(queue->ctrl->ctrl.device,
+				"Queueing INV WR for rkey %#x failed (%d)\n",
+				req->mr->rkey, ret);
+			nvme_rdma_error_recovery(queue->ctrl);
+		}
+		/* the local invalidation completion will end the request */
+		return 0;
+	}
 
-	nvme_end_request(rq, cqe->status, cqe->result);
+	if (refcount_dec_and_test(&req->ref)) {
+		if (rq->tag == tag)
+			ret = 1;
+		nvme_end_request(rq, req->status, req->result);
+	}
+
 	return ret;
 }
 
@@ -1591,31 +1595,11 @@
  * We cannot accept any other command until the Connect command has completed.
  */
 static inline blk_status_t
-nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
+nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
 {
-	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
-		struct nvme_command *cmd = nvme_req(rq)->cmd;
-
-		if (!blk_rq_is_passthrough(rq) ||
-		    cmd->common.opcode != nvme_fabrics_command ||
-		    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
-			/*
-			 * reconnecting state means transport disruption, which
-			 * can take a long time and even might fail permanently,
-			 * fail fast to give upper layers a chance to failover.
-			 * deleting state means that the ctrl will never accept
-			 * commands again, fail it permanently.
-			 */
-			if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
-			    queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
-				nvme_req(rq)->status = NVME_SC_ABORT_REQ;
-				return BLK_STS_IOERR;
-			}
-			return BLK_STS_RESOURCE; /* try again later */
-		}
-	}
-
-	return 0;
+	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
+		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+	return BLK_STS_OK;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1627,14 +1611,13 @@
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_rdma_qe *sqe = &req->sqe;
 	struct nvme_command *c = sqe->data;
-	bool flush = false;
 	struct ib_device *dev;
 	blk_status_t ret;
 	int err;
 
 	WARN_ON_ONCE(rq->tag < 0);
 
-	ret = nvme_rdma_queue_is_ready(queue, rq);
+	ret = nvme_rdma_is_ready(queue, rq);
 	if (unlikely(ret))
 		return ret;
 
@@ -1656,13 +1639,13 @@
 		goto err;
 	}
 
+	sqe->cqe.done = nvme_rdma_send_done;
+
 	ib_dma_sync_single_for_device(dev, sqe->dma,
 			sizeof(struct nvme_command), DMA_TO_DEVICE);
 
-	if (req_op(rq) == REQ_OP_FLUSH)
-		flush = true;
 	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-			req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
+			req->mr ? &req->reg_wr.wr : NULL);
 	if (unlikely(err)) {
 		nvme_rdma_unmap_data(queue, rq);
 		goto err;
@@ -1810,7 +1793,6 @@
 	.submit_async_event	= nvme_rdma_submit_async_event,
 	.delete_ctrl		= nvme_rdma_delete_ctrl,
 	.get_address		= nvmf_get_address,
-	.reinit_request		= nvme_rdma_reinit_request,
 };
 
 static inline bool
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 664d301..5fd8603 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -533,15 +533,15 @@
 
 	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 
+	/* release the queue lookup reference on the completed IO */
+	nvmet_fc_tgt_q_put(queue);
+
 	spin_lock_irqsave(&queue->qlock, flags);
 	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
 				struct nvmet_fc_defer_fcp_req, req_list);
 	if (!deferfcp) {
 		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
 		spin_unlock_irqrestore(&queue->qlock, flags);
-
-		/* Release reference taken at queue lookup and fod allocation */
-		nvmet_fc_tgt_q_put(queue);
 		return;
 	}
 
@@ -760,6 +760,9 @@
 		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
 				deferfcp->fcp_req);
 
+		/* release the queue lookup reference */
+		nvmet_fc_tgt_q_put(queue);
+
 		kfree(deferfcp);
 
 		spin_lock_irqsave(&queue->qlock, flags);
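
Both target/fc.c hunks make the queue-lookup reference drop unconditional: every completed fod now puts the reference exactly once, including the deferred-command path that previously held on to it. The pairing, condensed (the lookup helper name is taken from this file and shown for orientation):

	queue = nvmet_fc_find_target_queue(tgtport, connection_id); /* +1 */
	/* ... fod runs to completion ... */
	nvmet_fc_tgt_q_put(queue);	/* -1, once per completed fod */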
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 96d3904..1e21b28 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -52,10 +52,15 @@
 	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
 }
 
+enum nvme_loop_queue_flags {
+	NVME_LOOP_Q_LIVE	= 0,
+};
+
 struct nvme_loop_queue {
 	struct nvmet_cq		nvme_cq;
 	struct nvmet_sq		nvme_sq;
 	struct nvme_loop_ctrl	*ctrl;
+	unsigned long		flags;
 };
 
 static struct nvmet_port *nvmet_loop_port;
@@ -144,6 +149,14 @@
 	return BLK_EH_HANDLED;
 }
 
+static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
+		struct request *rq)
+{
+	if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
+		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+	return BLK_STS_OK;
+}
+
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
@@ -153,6 +166,10 @@
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 	blk_status_t ret;
 
+	ret = nvme_loop_is_ready(queue, req);
+	if (unlikely(ret))
+		return ret;
+
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
 	if (ret)
 		return ret;
@@ -267,6 +284,7 @@
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
@@ -297,8 +315,10 @@
 {
 	int i;
 
-	for (i = 1; i < ctrl->ctrl.queue_count; i++)
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
 		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+	}
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -338,6 +358,7 @@
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
 			return ret;
+		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
 	}
 
 	return 0;
@@ -380,6 +401,8 @@
 	if (error)
 		goto out_cleanup_queue;
 
+	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+
 	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
 	if (error) {
 		dev_err(ctrl->ctrl.device,
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 38a2b47..5938113 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -59,11 +59,20 @@
 void ttm_pool_unpopulate(struct ttm_tt *ttm);
 
 /**
+ * Populates and DMA maps pages to fulfill a ttm_dma_populate() request
+ */
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
+
+/**
+ * Unpopulates and DMA unmaps pages as part of a
+ * ttm_dma_unpopulate() request
+ */
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
+
+/**
  * Output the state of pools to debugfs file
  */
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
 
-
 #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 /**
  * Initialize pool allocator.
@@ -83,17 +92,6 @@
 int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 
-
-/**
- * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
- */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
-
-/**
- * Unpopulates and DMA unmaps pages as part of a
- * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
-
 #else
 static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
 					  unsigned max_pages)
@@ -116,16 +114,6 @@
 				      struct device *dev)
 {
 }
-
-static inline int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
-	return -ENOMEM;
-}
-
-static inline void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
-}
-
 #endif
 
 #endif
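
The two prototypes move above the #if so they exist in every configuration; previously, a kernel built without CONFIG_SWIOTLB and CONFIG_INTEL_IOMMU saw inline stubs that returned -ENOMEM. A minimal caller sketch under the new layout (the ttm_dma_tt setup is elided and driver-specific):

	struct ttm_dma_tt *dma_tt = /* driver-owned */;
	int ret;

	ret = ttm_populate_and_map_pages(dev, dma_tt);
	if (ret)
		return ret;
	/* ... pages are populated and DMA mapped ... */
	ttm_unmap_and_unpopulate_pages(dev, dma_tt);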
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 90fc490..821f71a 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -91,7 +91,7 @@
 
 #define PTR_CHECK_DEV			((1 << PTR_DEV_BITS) - 1)
 
-#define PTR(gen, offset, dev)						\
+#define MAKE_PTR(gen, offset, dev)					\
 	((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
 
 /* Bkey utility code */
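
The rename to MAKE_PTR avoids clashing with the generic-sounding PTR name; usage is unchanged apart from the spelling. An illustrative packing (values arbitrary; dev lands in the top bits, gen in the low byte):

	__u64 ptr = MAKE_PTR(/* gen */ 1, /* offset */ 4096, /* dev */ 0);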
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 731d0df..6e80501 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -233,29 +233,29 @@
 };
 
 struct kfd_ioctl_set_scratch_backing_va_args {
-	uint64_t va_addr;	/* to KFD */
-	uint32_t gpu_id;	/* to KFD */
-	uint32_t pad;
+	__u64 va_addr;	/* to KFD */
+	__u32 gpu_id;	/* to KFD */
+	__u32 pad;
 };
 
 struct kfd_ioctl_get_tile_config_args {
 	/* to KFD: pointer to tile array */
-	uint64_t tile_config_ptr;
+	__u64 tile_config_ptr;
 	/* to KFD: pointer to macro tile array */
-	uint64_t macro_tile_config_ptr;
+	__u64 macro_tile_config_ptr;
 	/* to KFD: array size allocated by user mode
 	 * from KFD: array size filled by kernel
 	 */
-	uint32_t num_tile_configs;
+	__u32 num_tile_configs;
 	/* to KFD: array size allocated by user mode
 	 * from KFD: array size filled by kernel
 	 */
-	uint32_t num_macro_tile_configs;
+	__u32 num_macro_tile_configs;
 
-	uint32_t gpu_id;		/* to KFD */
-	uint32_t gb_addr_config;	/* from KFD */
-	uint32_t num_banks;		/* from KFD */
-	uint32_t num_ranks;		/* from KFD */
+	__u32 gpu_id;		/* to KFD */
+	__u32 gb_addr_config;	/* from KFD */
+	__u32 num_banks;		/* from KFD */
+	__u32 num_ranks;		/* from KFD */
 	/* struct size can be extended later if needed
 	 * without breaking ABI compatibility
 	 */
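
uint64_t and uint32_t come from <stdint.h>, which an exported UAPI header cannot assume is included; the kernel's __u64/__u32 from <linux/types.h> are the convention for exported headers and keep the ABI layout explicit. The shape of the convention, as a hypothetical new ioctl struct:

#include <linux/types.h>

struct example_ioctl_args {
	__u64 buf_ptr;	/* to kernel: userspace pointer carried as a u64 */
	__u32 buf_len;	/* to kernel */
	__u32 pad;	/* explicit padding keeps sizeof identical on all ABIs */
};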
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 206e0e2..987d9a9a 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -591,7 +591,7 @@
 		return ret;
 
 	if (copy_to_user(arg, &buts, sizeof(buts))) {
-		blk_trace_remove(q);
+		__blk_trace_remove(q);
 		return -EFAULT;
 	}
 	return 0;
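
This call site (and the one in the next hunk) already runs with q->blk_trace_mutex held, so it must use the unlocked __blk_trace_remove() variant; plain blk_trace_remove() takes the mutex itself and would deadlock here. A sketch of the assumed locked/unlocked split, following the usual kernel __-prefix pattern:

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_remove(q);	/* worker; caller holds the mutex */
	mutex_unlock(&q->blk_trace_mutex);
	return ret;
}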
@@ -637,7 +637,7 @@
 		return ret;
 
 	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
-		blk_trace_remove(q);
+		__blk_trace_remove(q);
 		return -EFAULT;
 	}
 
@@ -872,7 +872,7 @@
  *
  **/
 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-			      u32 what, int error, union kernfs_node_id *cgid)
+			      u32 what, int error)
 {
 	struct blk_trace *bt = q->blk_trace;
 
@@ -880,22 +880,21 @@
 		return;
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid);
+			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
+			blk_trace_bio_get_cgid(q, bio));
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
 				     struct request_queue *q, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
 				       struct request_queue *q, struct bio *bio,
 				       int error)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
@@ -903,8 +902,7 @@
 					struct request *rq,
 					struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0,
-			 blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 }
 
 static void blk_add_trace_bio_frontmerge(void *ignore,
@@ -912,15 +910,13 @@
 					 struct request *rq,
 					 struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 }
 
 static void blk_add_trace_bio_queue(void *ignore,
 				    struct request_queue *q, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 }
 
 static void blk_add_trace_getrq(void *ignore,
@@ -928,8 +924,7 @@
 				struct bio *bio, int rw)
 {
 	if (bio)
-		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0,
-				  blk_trace_bio_get_cgid(q, bio));
+		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
 	else {
 		struct blk_trace *bt = q->blk_trace;
 
@@ -945,8 +940,7 @@
 				  struct bio *bio, int rw)
 {
 	if (bio)
-		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0,
-				  blk_trace_bio_get_cgid(q, bio));
+		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
 	else {
 		struct blk_trace *bt = q->blk_trace;
 
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 74b52df..84b2dc7 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -113,11 +113,23 @@
 	.release	= single_release,
 };
 
-static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
+static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 {
+	if (!bdi_debug_root)
+		return -ENOMEM;
+
 	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
+	if (!bdi->debug_dir)
+		return -ENOMEM;
+
 	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
 					       bdi, &bdi_debug_stats_fops);
+	if (!bdi->debug_stats) {
+		debugfs_remove(bdi->debug_dir);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
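
bdi_debug_register() now reports failure instead of leaving half-created debugfs state behind, removing the directory itself if the stats file cannot be created; the registration path further down consumes the return value and unwinds the device. The error-handling shape at the caller, condensed from the hunk below:

	if (bdi_debug_register(bdi, dev_name(dev))) {
		device_destroy(bdi_class, dev->devt);
		return -ENOMEM;
	}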
 
 static void bdi_debug_unregister(struct backing_dev_info *bdi)
@@ -129,9 +141,10 @@
 static inline void bdi_debug_init(void)
 {
 }
-static inline void bdi_debug_register(struct backing_dev_info *bdi,
+static inline int bdi_debug_register(struct backing_dev_info *bdi,
 				      const char *name)
 {
+	return 0;
 }
 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 {
@@ -869,10 +882,13 @@
 	if (IS_ERR(dev))
 		return PTR_ERR(dev);
 
+	if (bdi_debug_register(bdi, dev_name(dev))) {
+		device_destroy(bdi_class, dev->devt);
+		return -ENOMEM;
+	}
 	cgwb_bdi_register(bdi);
 	bdi->dev = dev;
 
-	bdi_debug_register(bdi, dev_name(dev));
 	set_bit(WB_registered, &bdi->wb.state);
 
 	spin_lock_bh(&bdi_lock);