drm/radeon: Add flushing code for parisc architecture

This adds the cache flushing code needed to use a Radeon RV280 PCI
card on a C8000 PA-RISC workstation. PA-RISC is not cache-coherent
for PCI DMA, so the GART table entries written by
r100_pci_gart_set_page() and the command ring contents have to be
flushed out of the CPU data cache before the GPU reads them.

Signed-off-by: Helge Deller <deller@gmx.de>
Cc: stable@vger.kernel.org
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 7051c9c..a84eba2f 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -174,6 +174,8 @@ drm_clflush_virt_range(void *addr, unsigned long length)
 
 	if (wbinvd_on_all_cpus())
 		pr_err("Timed out waiting for cache flush\n");
+#elif defined(CONFIG_PARISC)
+	flush_kernel_dcache_range((unsigned long) addr, length);
 #else
 	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
 #endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index affa9e0..f9941b9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -642,6 +642,12 @@ void r100_hpd_fini(struct radeon_device *rdev)
  */
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
 {
+#if defined(CONFIG_PARISC)
+	/* write back the gtt[] entries set by r100_pci_gart_set_page() */
+	dma_sync_single_for_device(&rdev->pdev->dev, rdev->gart.table_addr,
+		rdev->gart.table_size, DMA_TO_DEVICE);
+#endif
+
 	/* TODO: can we do somethings here ? */
 	/* It seems hw only cache one entry so we should discard this
 	 * entry otherwise if first GPU GART read hit this entry it
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index e6534fa..645c466 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -29,6 +29,7 @@
 
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
+#include <drm/drm_cache.h>
 
 #include "radeon.h"
 
@@ -177,6 +178,11 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
 		radeon_ring_write(ring, ring->nop);
 	}
 	mb();
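+	/* parisc: write ring contents back to memory so the GPU sees them */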
+	if (IS_ENABLED(CONFIG_PARISC))
+		drm_clflush_virt_range((void *)&ring->ring[0],
+					ring->wptr * sizeof(uint32_t));
+
 	/* If we are emitting the HDP flush via MMIO, we need to do it after
 	 * all CPU writes to VRAM finished.
 	 */