--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -377,6 +377,9 @@ out:
return nr_free;
}
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc);
+
/**
* Callback for mm to request pool to reduce number of page held.
*
@@ -388,8 +391,13 @@ out:
*
* This code is crying out for a shrinker per pool....
*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+#else
+static int ttm_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
+#endif
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned i;
@@ -408,7 +416,12 @@ ttm_pool_shrink_scan(struct shrinker *sh
shrink_pages = ttm_page_pool_free(pool, nr_free);
freed += nr_free - shrink_pages;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
return freed;
+#else
+ /* return estimated number of unused pages in pool */
+ return ttm_pool_shrink_count(shrink, sc);
+#endif
}
@@ -426,8 +439,12 @@ ttm_pool_shrink_count(struct shrinker *s
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
manager->mm_shrink.count_objects = ttm_pool_shrink_count;
manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
+#else
+ manager->mm_shrink.shrink = ttm_pool_mm_shrink;
+#endif
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -987,6 +987,9 @@ void ttm_dma_unpopulate(struct ttm_dma_t
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+static unsigned long
+ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc);
+
/**
* Callback for mm to request pool to reduce number of page held.
*
@@ -1000,8 +1003,14 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
* I'm getting sadder as I hear more pathetical whimpers about needing per-pool
* shrinkers
*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+#else
+#define SHRINK_STOP 0
+static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
+#endif
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned idx = 0;
@@ -1034,7 +1043,12 @@ ttm_dma_pool_shrink_scan(struct shrinker
nr_free, shrink_pages);
}
mutex_unlock(&_manager->lock);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
return freed;
+#else
+ /* return estimated number of unused pages in pool */
+ return ttm_dma_pool_shrink_count(shrink, sc);
+#endif
}
static unsigned long
@@ -1052,8 +1066,12 @@ ttm_dma_pool_shrink_count(struct shrinke
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
+#else
+ manager->mm_shrink.shrink = ttm_dma_pool_mm_shrink;
+#endif
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
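
For readers outside the kernel tree, here is a minimal standalone sketch of the two shrinker interfaces this patch bridges. The structs and the pool bookkeeping below are simplified stand-ins, not the kernel's <linux/shrinker.h> definitions, and the names (demo_scan, pool_pages, ...) are hypothetical. Newer kernels (the #if branch) call separate count_objects/scan_objects hooks, with scan returning the number of objects actually freed; the legacy single .shrink hook (the #else branch) is expected to free pages when nr_to_scan is non-zero and to return an estimate of what remains, which is why the #else paths above finish by calling the *_shrink_count() helper.

/* Standalone model of the old vs. new shrinker callback shapes.
 * Simplified illustration only, NOT the kernel's <linux/shrinker.h>. */
#include <stdio.h>

struct shrink_control {
	unsigned long nr_to_scan;	/* how many objects mm asks us to drop */
};

/* Split interface (the #if branch): count and scan are separate hooks. */
struct shrinker_new {
	unsigned long (*count_objects)(struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrink_control *sc);
	int seeks;
};

/* Legacy interface (the #else branch): one hook scans and reports what remains. */
struct shrinker_old {
	int (*shrink)(struct shrink_control *sc);
	int seeks;
};

/* Hypothetical pool state standing in for the TTM page pools. */
static unsigned long pool_pages = 128;

static unsigned long demo_count(struct shrink_control *sc)
{
	(void)sc;
	return pool_pages;		/* freeable pages currently cached */
}

static unsigned long demo_scan(struct shrink_control *sc)
{
	unsigned long freed = sc->nr_to_scan < pool_pages ?
			      sc->nr_to_scan : pool_pages;

	pool_pages -= freed;
	return freed;			/* new API: pages actually freed */
}

/* Legacy wrapper in the same shape as the #else branches above:
 * free if asked, then report the remaining estimate. */
static int demo_shrink(struct shrink_control *sc)
{
	if (sc->nr_to_scan)
		demo_scan(sc);
	return (int)demo_count(sc);	/* old API: estimate of what is left */
}

int main(void)
{
	struct shrink_control sc = { .nr_to_scan = 32 };
	struct shrinker_new snew = { demo_count, demo_scan, 1 };
	struct shrinker_old sold = { demo_shrink, 1 };

	printf("new API freed %lu pages\n", snew.scan_objects(&sc));
	printf("old API reports %d pages still cached\n", sold.shrink(&sc));
	return 0;
}

Built as ordinary userspace C, this prints the freed count for the split interface and the remaining estimate for the legacy one; the real callbacks of course operate on the TTM pool manager state under its lock rather than a plain counter.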