Diffstat (limited to 'drivers/video/tegra/nvmap/nvmap_handle.c')
-rw-r--r--	drivers/video/tegra/nvmap/nvmap_handle.c	115
1 file changed, 48 insertions(+), 67 deletions(-)
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index 539b7ce9801f..05046ed8ba79 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -30,30 +30,21 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+#include <linux/shrinker.h>
+#include <linux/moduleparam.h>
+#include <linux/nvmap.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/pgtable.h>
#include <mach/iovmm.h>
-#include <mach/nvmap.h>
-
-#include <linux/vmstat.h>
-#include <linux/swap.h>
-#include <linux/shrinker.h>
-#include <linux/moduleparam.h>
+#include <trace/events/nvmap.h>
#include "nvmap.h"
#include "nvmap_mru.h"
#include "nvmap_common.h"
-#define PRINT_CARVEOUT_CONVERSION 0
-#if PRINT_CARVEOUT_CONVERSION
-#define PR_INFO pr_info
-#else
-#define PR_INFO(...)
-#endif
-
#define NVMAP_SECURE_HEAPS (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM | \
NVMAP_HEAP_CARVEOUT_VPR)
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
@@ -66,6 +57,9 @@
* preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
* the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN (PAGE_SIZE * 2)
+
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+
#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
static bool enable_pp = 1;
static int pool_size[NVMAP_NUM_POOLS];
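A note on PAGELIST_VMALLOC_MIN above: page arrays at or below the threshold are small enough to come from kmalloc, while larger ones go through vmalloc so they do not pressure the kmalloc slabs. A minimal sketch of that pattern, mirroring the altalloc()/altfree() helpers that appear later in this file (the bodies here are illustrative, not the commit's exact code; linux/slab.h and linux/vmalloc.h are already included above):

	/* Sketch: kmalloc for small page arrays, vmalloc for large ones. */
	static inline void *altalloc(size_t len)
	{
		if (len >= PAGELIST_VMALLOC_MIN)
			return vmalloc(len);
		return kmalloc(len, GFP_KERNEL);
	}

	/* The free side must match whichever allocator was used. */
	static inline void altfree(void *ptr, size_t len)
	{
		if (!ptr)
			return;
		if (len >= PAGELIST_VMALLOC_MIN)
			vfree(ptr);
		else
			kfree(ptr);
	}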
@@ -377,6 +371,7 @@ int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags)
int i;
static int reg = 1;
struct sysinfo info;
+ int highmem_pages = 0;
typedef int (*set_pages_array) (struct page **pages, int addrinarray);
set_pages_array s_cpa[] = {
set_pages_array_uc,
@@ -395,14 +390,16 @@ int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags)
return 0;
si_meminfo(&info);
- if (!pool_size[flags]) {
+ if (!pool_size[flags] && !CONFIG_NVMAP_PAGE_POOL_SIZE)
/* Use 3/8th of total ram for page pools.
* 1/8th for uc, 1/8th for wc and 1/8th for iwb.
*/
pool->max_pages = info.totalram >> 3;
- }
+ else
+ pool->max_pages = CONFIG_NVMAP_PAGE_POOL_SIZE;
+
if (pool->max_pages <= 0 || pool->max_pages >= info.totalram)
- pool->max_pages = NVMAP_DEFAULT_PAGE_POOL_SIZE;
+ goto fail;
pool_size[flags] = pool->max_pages;
pr_info("nvmap %s page pool size=%d pages",
s_memtype_str[flags], pool->max_pages);
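To make the sizing comment concrete: with 1 GiB of RAM and 4 KiB pages, info.totalram is about 262144 pages, so info.totalram >> 3 yields 32768 pages (128 MiB) per pool; the uc, wc and iwb pools are sized independently, so together they can hold 3/8 of system RAM, as the comment says. A non-zero CONFIG_NVMAP_PAGE_POOL_SIZE overrides that heuristic, and any result outside (0, totalram) now fails initialization instead of silently falling back to a default.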
@@ -425,7 +422,14 @@ int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags)
__free_page(page);
goto do_cpa;
}
+ if (PageHighMem(page))
+ highmem_pages++;
}
+ si_meminfo(&info);
+ pr_info("nvmap pool = %s, highmem=%d, pool_size=%d, "
+ "totalram=%lu, freeram=%lu, totalhigh=%lu, freehigh=%lu",
+ s_memtype_str[flags], highmem_pages, pool->max_pages,
+ info.totalram, info.freeram, info.totalhigh, info.freehigh);
do_cpa:
(*s_cpa[flags])(pool->page_array, pool->npages);
nvmap_page_pool_unlock(pool);
@@ -436,6 +440,7 @@ fail:
vfree(pool->page_array);
return -ENOMEM;
}
+#endif
static inline void *altalloc(size_t len)
{
@@ -460,7 +465,9 @@ void _nvmap_handle_free(struct nvmap_handle *h)
{
struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
unsigned int i, nr_page, page_index = 0;
+#ifdef CONFIG_NVMAP_PAGE_POOLS
struct nvmap_page_pool *pool = NULL;
+#endif
if (nvmap_handle_remove(h->dev, h) != 0)
return;
@@ -481,6 +488,7 @@ void _nvmap_handle_free(struct nvmap_handle *h)
nvmap_mru_remove(share, h);
+#ifdef CONFIG_NVMAP_PAGE_POOLS
if (h->flags < NVMAP_NUM_POOLS)
pool = &share->pools[h->flags];
@@ -490,6 +498,7 @@ void _nvmap_handle_free(struct nvmap_handle *h)
break;
page_index++;
}
+#endif
if (page_index == nr_page)
goto skip_attr_restore;
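The loop above is the recycling half of the pool scheme: pages accepted back by the per-flags pool keep their current cache attributes, so page_index counts how many pages can skip the attribute-restore step (hence skip_attr_restore when every page was taken back). A condensed sketch, assuming a pool-return helper (called nvmap_page_pool_release() here; the helper's name and exact contract are not visible in these hunks, so treat them as assumptions) that returns false once the pool is full:

	/* Hand pages back to the pool until it refuses one. */
	while (page_index < nr_page) {
		if (!nvmap_page_pool_release(pool, h->pgalloc.pages[page_index]))
			break;	/* pool full: free the rest normally */
		page_index++;
	}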
@@ -538,12 +547,14 @@ static int handle_page_alloc(struct nvmap_client *client,
struct nvmap_handle *h, bool contiguous)
{
size_t size = PAGE_ALIGN(h->size);
- struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
unsigned int nr_page = size >> PAGE_SHIFT;
pgprot_t prot;
unsigned int i = 0, page_index = 0;
struct page **pages;
+#ifdef CONFIG_NVMAP_PAGE_POOLS
struct nvmap_page_pool *pool = NULL;
+ struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
+#endif
pages = altalloc(nr_page * sizeof(*pages));
if (!pages)
@@ -562,6 +573,7 @@ static int handle_page_alloc(struct nvmap_client *client,
pages[i] = nth_page(page, i);
} else {
+#ifdef CONFIG_NVMAP_PAGE_POOLS
if (h->flags < NVMAP_NUM_POOLS)
pool = &share->pools[h->flags];
@@ -572,7 +584,7 @@ static int handle_page_alloc(struct nvmap_client *client,
break;
page_index++;
}
-
+#endif
for (; i < nr_page; i++) {
pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP,
PAGE_SIZE);
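Allocation is the mirror image: draw as many pages as possible from the matching pool, then fall back to nvmap_alloc_pages_exact() for the remainder. A condensed sketch, again assuming a pool-drain helper (nvmap_page_pool_alloc(), an assumed name) that returns NULL when the pool is empty:

	/* Pool first... */
	for (page_index = 0; page_index < nr_page; page_index++) {
		pages[page_index] = nvmap_page_pool_alloc(pool);
		if (!pages[page_index])
			break;
	}
	/* ...then the page allocator for whatever is left. */
	for (i = page_index; i < nr_page; i++) {
		pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP, PAGE_SIZE);
		if (!pages[i])
			goto fail;
	}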
@@ -625,36 +637,19 @@ fail:
static void alloc_handle(struct nvmap_client *client,
struct nvmap_handle *h, unsigned int type)
{
+ unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
+ unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;
+
BUG_ON(type & (type - 1));
#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
-#define __NVMAP_HEAP_CARVEOUT (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_CARVEOUT_VPR)
-#define __NVMAP_HEAP_IOVMM (NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_GENERIC)
- if (type & NVMAP_HEAP_CARVEOUT_GENERIC) {
-#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
- if (h->size <= PAGE_SIZE) {
- PR_INFO("###CARVEOUT CONVERTED TO SYSMEM "
- "0x%x bytes %s(%d)###\n",
- h->size, current->comm, current->pid);
- goto sysheap;
- }
-#endif
- PR_INFO("###CARVEOUT CONVERTED TO IOVM "
- "0x%x bytes %s(%d)###\n",
- h->size, current->comm, current->pid);
- }
-#else
-#define __NVMAP_HEAP_CARVEOUT NVMAP_HEAP_CARVEOUT_MASK
-#define __NVMAP_HEAP_IOVMM NVMAP_HEAP_IOVMM
+ /* Convert generic carveout requests to iovmm requests. */
+ carveout_mask &= ~NVMAP_HEAP_CARVEOUT_GENERIC;
+ iovmm_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
#endif
- if (type & __NVMAP_HEAP_CARVEOUT) {
+ if (type & carveout_mask) {
struct nvmap_heap_block *b;
-#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
- PR_INFO("###IRAM REQUEST RETAINED "
- "0x%x bytes %s(%d)###\n",
- h->size, current->comm, current->pid);
-#endif
/* Protect handle from relocation */
nvmap_usecount_inc(h);
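The two local masks replace the old __NVMAP_HEAP_* macros and make the conversion explicit: clearing NVMAP_HEAP_CARVEOUT_GENERIC from carveout_mask and ORing it into iovmm_mask means a generic-carveout request fails the (type & carveout_mask) test and lands in the iovmm branch instead. Traced through with the masks from this function:

	/* With CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM: */
	carveout_mask &= ~NVMAP_HEAP_CARVEOUT_GENERIC;
	iovmm_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;

	/* For type == NVMAP_HEAP_CARVEOUT_GENERIC:
	 *   type & carveout_mask == 0  ->  carveout branch skipped
	 *   type & iovmm_mask    != 0  ->  iovmm branch taken
	 */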
@@ -668,7 +663,7 @@ static void alloc_handle(struct nvmap_client *client,
}
nvmap_usecount_dec(h);
- } else if (type & __NVMAP_HEAP_IOVMM) {
+ } else if (type & iovmm_mask) {
size_t reserved = PAGE_ALIGN(h->size);
int commit = 0;
int ret;
@@ -692,10 +687,6 @@ static void alloc_handle(struct nvmap_client *client,
}
} else if (type & NVMAP_HEAP_SYSMEM) {
-#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM) && \
- defined(CONFIG_NVMAP_ALLOW_SYSMEM)
-sysheap:
-#endif
if (handle_page_alloc(client, h, true) == 0) {
BUG_ON(!h->pgalloc.contig);
h->heap_pgalloc = true;
@@ -730,10 +721,6 @@ static const unsigned int heap_policy_large[] = {
0,
};
-/* Do not override single page policy if there is not much space to
-avoid invoking system oom killer. */
-#define NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD 50000000
-
int nvmap_alloc_handle_id(struct nvmap_client *client,
unsigned long id, unsigned int heap_mask,
size_t align, unsigned int flags)
@@ -751,6 +738,7 @@ int nvmap_alloc_handle_id(struct nvmap_client *client,
if (h->alloc)
goto out;
+ trace_nvmap_alloc_handle_id(client, id, heap_mask, align, flags);
h->userflags = flags;
nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
h->secure = !!(flags & NVMAP_HANDLE_SECURE);
@@ -758,32 +746,22 @@ int nvmap_alloc_handle_id(struct nvmap_client *client,
h->align = max_t(size_t, align, L1_CACHE_BYTES);
#ifndef CONFIG_TEGRA_IOVMM
+ /* convert iovmm requests to generic carveout. */
if (heap_mask & NVMAP_HEAP_IOVMM) {
- heap_mask &= NVMAP_HEAP_IOVMM;
- heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
+ heap_mask = (heap_mask & ~NVMAP_HEAP_IOVMM) |
+ NVMAP_HEAP_CARVEOUT_GENERIC;
}
#endif
-#ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
/* Allow single pages allocations in system memory to save
* carveout space and avoid extra iovm mappings */
if (nr_page == 1) {
- if (heap_mask & NVMAP_HEAP_IOVMM)
+ if (heap_mask &
+ (NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_GENERIC))
heap_mask |= NVMAP_HEAP_SYSMEM;
- else if (heap_mask & NVMAP_HEAP_CARVEOUT_GENERIC) {
- /* Calculate size of free physical pages
- * managed by kernel */
- unsigned long freeMem =
- (global_page_state(NR_FREE_PAGES) +
- global_page_state(NR_FILE_PAGES) -
- total_swapcache_pages) << PAGE_SHIFT;
-
- if (freeMem > NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD)
- heap_mask |= NVMAP_HEAP_SYSMEM;
- }
}
#endif
-
+#ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
/* This restriction is deprecated as alignments greater than
PAGE_SIZE are now correctly handled, but it is retained for
AP20 compatibility. */
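Worth noting: the rewritten #ifndef CONFIG_TEGRA_IOVMM conversion also fixes a real bug. The old heap_mask &= NVMAP_HEAP_IOVMM kept only the IOVMM bit and discarded every other heap the caller asked for; the new form clears just the IOVMM bit before substituting the generic carveout:

	/* Suppose heap_mask = NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_IRAM.
	 * Old: heap_mask &= NVMAP_HEAP_IOVMM;          IRAM bit lost.
	 * New: heap_mask = (heap_mask & ~NVMAP_HEAP_IOVMM) |
	 *                  NVMAP_HEAP_CARVEOUT_GENERIC; IRAM bit kept.
	 */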
@@ -843,6 +821,7 @@ void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
return;
}
+ trace_nvmap_free_handle_id(client, id);
BUG_ON(!ref->handle);
h = ref->handle;
@@ -943,6 +922,7 @@ struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
ref->handle = h;
atomic_set(&ref->pin, 0);
add_handle_ref(client, ref);
+ trace_nvmap_create_handle(client, h, size, ref);
return ref;
}
@@ -1016,5 +996,6 @@ struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
ref->handle = h;
atomic_set(&ref->pin, 0);
add_handle_ref(client, ref);
+ trace_nvmap_duplicate_handle_id(client, id, ref);
return ref;
}
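The trace_nvmap_* hooks added in this patch imply matching TRACE_EVENT definitions in the new <trace/events/nvmap.h> header, which is outside this diff. A minimal sketch of what one such definition could look like, using the standard TRACE_EVENT macro; the field layout and format string are assumptions, only the tracepoint name and arguments come from the call site above:

	TRACE_EVENT(nvmap_alloc_handle_id,
		TP_PROTO(struct nvmap_client *client, unsigned long id,
			 unsigned int heap_mask, size_t align, unsigned int flags),
		TP_ARGS(client, id, heap_mask, align, flags),
		TP_STRUCT__entry(
			__field(struct nvmap_client *, client)
			__field(unsigned long, id)
			__field(unsigned int, heap_mask)
			__field(size_t, align)
			__field(unsigned int, flags)
		),
		TP_fast_assign(
			__entry->client = client;
			__entry->id = id;
			__entry->heap_mask = heap_mask;
			__entry->align = align;
			__entry->flags = flags;
		),
		TP_printk("client=%p id=0x%lx heap_mask=0x%x align=%zu flags=0x%x",
			__entry->client, __entry->id, __entry->heap_mask,
			__entry->align, __entry->flags)
	);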