path: root/patches/collateral-evolutions/drm/14-shrinkers-api/drivers_gpu_drm_ttm.patch
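
This patch adapts the TTM page-pool shrinkers to the shrinker API rework
that landed in Linux 3.12, which split the single ->shrink() callback
into ->count_objects() (report how many objects could be freed) and
->scan_objects() (free up to sc->nr_to_scan objects and return how many
were actually freed). On older kernels the two halves are folded back
into a single compat callback that scans first and then returns the
estimated count of unused pages, which is what the old ->shrink()
contract expects.

A minimal sketch of the two API shapes, assuming a hypothetical object
pool (the demo_* names are illustrative placeholders, not part of this
patch; locking is omitted for brevity):

#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/shrinker.h>	/* struct shrinker, struct shrink_control */

static unsigned long demo_pool_size = 128;	/* freeable objects */

static unsigned long demo_free(unsigned long nr)
{
	unsigned long freed = min(nr, demo_pool_size);

	demo_pool_size -= freed;
	return freed;				/* objects actually freed */
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
/* >= 3.12: counting and scanning are separate callbacks. */
static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return demo_pool_size;
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	return demo_free(sc->nr_to_scan);
}
#else
/* < 3.12: one callback does both jobs; nr_to_scan == 0 is a pure
 * query, and the return value is always the remaining freeable count. */
static int demo_shrink(struct shrinker *s, struct shrink_control *sc)
{
	if (sc->nr_to_scan)
		demo_free(sc->nr_to_scan);
	return demo_pool_size;
}
#endif

static struct shrinker demo_shrinker = {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
#else
	.shrink		= demo_shrink,
#endif
	.seeks		= DEFAULT_SEEKS,
};
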
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -377,6 +377,9 @@ out:
 	return nr_free;
 }
 
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc);
+
 /**
  * Callback for mm to request pool to reduce number of page held.
  *
@@ -388,8 +391,13 @@ out:
  *
  * This code is crying out for a shrinker per pool....
  */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
 static unsigned long
 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+#else
+static int ttm_pool_mm_shrink(struct shrinker *shrink,
+			      struct shrink_control *sc)
+#endif
 {
 	static atomic_t start_pool = ATOMIC_INIT(0);
 	unsigned i;
@@ -408,7 +416,12 @@ ttm_pool_shrink_scan(struct shrinker *sh
 		shrink_pages = ttm_page_pool_free(pool, nr_free);
 		freed += nr_free - shrink_pages;
 	}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
 	return freed;
+#else
+	/* return estimated number of unused pages in pool */
+	return ttm_pool_shrink_count(shrink, sc);
+#endif
 }
 
 
@@ -426,8 +439,12 @@ ttm_pool_shrink_count(struct shrinker *s
 
 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
 	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
 	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
+#else
+	manager->mm_shrink.shrink = ttm_pool_mm_shrink;
+#endif
 	manager->mm_shrink.seeks = 1;
 	register_shrinker(&manager->mm_shrink);
 }
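
The DMA-pool file below additionally needs a stand-in for SHRINK_STOP,
which only exists on kernels that have the count/scan API: the shared
scan body apparently returns SHRINK_STOP on an early-exit path to tell
the VM to stop scanning. The compat branch defines it as 0 so that, on
older kernels, the same return reads as "no freeable pages" under the
old ->shrink() contract, which ends the scan just the same.
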
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -987,6 +987,9 @@ void ttm_dma_unpopulate(struct ttm_dma_t
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
 
+static unsigned long
+ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc);
+
 /**
  * Callback for mm to request pool to reduce number of page held.
  *
@@ -1000,8 +1003,14 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
  * shrinkers
  */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
 static unsigned long
 ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+#else
+#define SHRINK_STOP 0
+static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
+				  struct shrink_control *sc)
+#endif
 {
 	static atomic_t start_pool = ATOMIC_INIT(0);
 	unsigned idx = 0;
@@ -1034,7 +1043,12 @@ ttm_dma_pool_shrink_scan(struct shrinker
 			 nr_free, shrink_pages);
 	}
 	mutex_unlock(&_manager->lock);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
 	return freed;
+#else
+	/* return estimated number of unused pages in pool */
+	return ttm_dma_pool_shrink_count(shrink, sc);
+#endif
 }
 
 static unsigned long
@@ -1052,8 +1066,12 @@ ttm_dma_pool_shrink_count(struct shrinke
 
 static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
 	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
 	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
+#else
+	manager->mm_shrink.shrink = ttm_dma_pool_mm_shrink;
+#endif
 	manager->mm_shrink.seeks = 1;
 	register_shrinker(&manager->mm_shrink);
 }
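
In both files only the callback wiring is made version-dependent: the
shrinker is still registered the same way via
register_shrinker(&manager->mm_shrink) with .seeks = 1, and the matching
unregister_shrinker() calls in the teardown paths (not part of this
diff) are unaffected.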