author     Rebecca Schultz Zavin <rebecca@android.com>        2013-12-13 14:24:39 -0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2013-12-14 08:55:41 -0800
commit     ea313b5f88ed7119f79ad3f6b85e9620971b9875 (patch)
tree       2d2e68ecab9c1114d013791d2fe49f78d68f6828 /drivers/staging/android/ion/ion_heap.c
parent     da4aab37836b8e45e31c7d557c6d6a2c623d8eea (diff)
gpu: ion: Also shrink memory cached in the deferred free list
When the system is low on memory, we want to shrink any cached system
memory ion is holding.  Previously we were shrinking memory in the page
pools, but not in the deferred free list.  This patch makes it possible
to shrink both.  It also moves the shrinker code into the heaps so they
can correctly manage any caches they might contain.

Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
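The freelist helpers added below are what make that possible: each heap gets a deferred free list whose size can be queried and which can be drained synchronously when memory is tight. As a rough sketch only (not part of this commit), a heap shrinker built on those helpers might look roughly like the following; it assumes a struct shrinker member embedded in struct ion_heap, uses the count_objects/scan_objects shrinker interface, and the example_* names are hypothetical:

    static unsigned long example_heap_shrink_count(struct shrinker *shrinker,
                                                   struct shrink_control *sc)
    {
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);

        /* report how many pages' worth of buffers sit on the free list */
        return ion_heap_freelist_size(heap) / PAGE_SIZE;
    }

    static unsigned long example_heap_shrink_scan(struct shrinker *shrinker,
                                                  struct shrink_control *sc)
    {
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);

        /* destroy up to nr_to_scan pages' worth of deferred buffers */
        return ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
                PAGE_SIZE;
    }

A heap would point .count_objects/.scan_objects at callbacks like these and call register_shrinker() when it is created; heaps with page pools would fold their pooled pages into the same counts, which is how both kinds of cached memory become shrinkable.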
Diffstat (limited to 'drivers/staging/android/ion/ion_heap.c')
-rw-r--r--   drivers/staging/android/ion/ion_heap.c   107
1 file changed, 107 insertions(+), 0 deletions(-)
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 11066a2abb4b..f541bff26435 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -15,7 +15,11 @@
*/
#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
@@ -130,6 +134,109 @@ end:
return ret;
}
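+/*
+ * Free the pages backing a buffer.  Buffers that fault their pages into
+ * userspace are freed one page at a time; everything else is released
+ * as a single higher-order allocation.
+ */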
+void ion_heap_free_page(struct ion_buffer *buffer, struct page *page,
+ unsigned int order)
+{
+ int i;
+
+ if (!ion_buffer_fault_user_mappings(buffer)) {
+ __free_pages(page, order);
+ return;
+ }
+ for (i = 0; i < (1 << order); i++)
+ __free_page(page + i);
+}
+
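+/*
+ * Queue a buffer on the heap's deferred free list and wake the free
+ * thread; the buffer is destroyed later by ion_heap_deferred_free().
+ */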
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+ rt_mutex_lock(&heap->lock);
+ list_add(&buffer->list, &heap->free_list);
+ heap->free_list_size += buffer->size;
+ rt_mutex_unlock(&heap->lock);
+ wake_up(&heap->waitqueue);
+}
+
+size_t ion_heap_freelist_size(struct ion_heap *heap)
+{
+ size_t size;
+
+ rt_mutex_lock(&heap->lock);
+ size = heap->free_list_size;
+ rt_mutex_unlock(&heap->lock);
+
+ return size;
+}
+
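+/*
+ * Synchronously destroy up to @size bytes of buffers from the deferred
+ * free list; a @size of 0 drains the whole list.  Returns the number of
+ * bytes actually drained.
+ */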
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+ struct ion_buffer *buffer, *tmp;
+ size_t total_drained = 0;
+
+ if (ion_heap_freelist_size(heap) == 0)
+ return 0;
+
+ rt_mutex_lock(&heap->lock);
+ if (size == 0)
+ size = heap->free_list_size;
+
+ list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
+ if (total_drained >= size)
+ break;
+ list_del(&buffer->list);
+ heap->free_list_size -= buffer->size;
+ total_drained += buffer->size;
+ ion_buffer_destroy(buffer);
+ }
+ rt_mutex_unlock(&heap->lock);
+
+ return total_drained;
+}
+
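+/*
+ * Body of the per-heap free thread: sleep until buffers are queued on
+ * the free list, then pop and destroy them one at a time, dropping the
+ * heap lock before each destruction.
+ */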
+int ion_heap_deferred_free(void *data)
+{
+ struct ion_heap *heap = data;
+
+ while (true) {
+ struct ion_buffer *buffer;
+
+ wait_event_freezable(heap->waitqueue,
+ ion_heap_freelist_size(heap) > 0);
+
+ rt_mutex_lock(&heap->lock);
+ if (list_empty(&heap->free_list)) {
+ rt_mutex_unlock(&heap->lock);
+ continue;
+ }
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ heap->free_list_size -= buffer->size;
+ rt_mutex_unlock(&heap->lock);
+ ion_buffer_destroy(buffer);
+ }
+
+ return 0;
+}
+
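+/*
+ * Set up the deferred free machinery for a heap: the free list, its
+ * lock and waitqueue, and a low-priority (SCHED_IDLE) kthread to do
+ * the freeing.
+ */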
+int ion_heap_init_deferred_free(struct ion_heap *heap)
+{
+ struct sched_param param = { .sched_priority = 0 };
+
+ INIT_LIST_HEAD(&heap->free_list);
+ heap->free_list_size = 0;
+ rt_mutex_init(&heap->lock);
+ init_waitqueue_head(&heap->waitqueue);
+ heap->task = kthread_run(ion_heap_deferred_free, heap,
+ "%s", heap->name);
+ if (IS_ERR(heap->task)) {
+ pr_err("%s: creating thread for deferred free failed\n",
+ __func__);
+ return PTR_RET(heap->task);
+ }
+ sched_setscheduler(heap->task, SCHED_IDLE, &param);
+ return 0;
+}
+
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_heap *heap = NULL;