Diffstat (limited to 'drivers')
-rw-r--r--  drivers/video/tegra/Kconfig               16
-rw-r--r--  drivers/video/tegra/nvmap/nvmap.h          5
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_dev.c      4
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_handle.c  31
4 files changed, 50 insertions, 6 deletions
diff --git a/drivers/video/tegra/Kconfig b/drivers/video/tegra/Kconfig
index 468a5566b667..59e27788680d 100644
--- a/drivers/video/tegra/Kconfig
+++ b/drivers/video/tegra/Kconfig
@@ -85,6 +85,22 @@ config NVMAP_CARVEOUT_COMPACTOR
heap and retries the failed allocation.
Say Y here to let nvmap keep carveout fragmentation under control.
+config NVMAP_PAGE_POOLS
+ bool "Use page pools to reduce allocation overhead"
+ depends on TEGRA_NVMAP
+ default y
+ help
+	  Say Y here to reduce allocation overhead, which is significant
+	  for uncached, write-combined and inner-cacheable memory because
+	  every allocation must change page attributes and flush the cache
+	  for each page. Allocation time is reduced by allocating pages
+	  ahead of time and keeping them aside. The reserved pages are
+	  released when the system is low on memory and reacquired when
+	  memory is freed.
+
+config NVMAP_PAGE_POOL_SIZE
+ hex
+ default 0x0
config NVMAP_VPR
bool "Enable VPR Heap."
diff --git a/drivers/video/tegra/nvmap/nvmap.h b/drivers/video/tegra/nvmap/nvmap.h
index 44a0d86b6039..87a5a74cf515 100644
--- a/drivers/video/tegra/nvmap/nvmap.h
+++ b/drivers/video/tegra/nvmap/nvmap.h
@@ -86,7 +86,7 @@ struct nvmap_handle {
struct mutex lock;
};
-#define NVMAP_DEFAULT_PAGE_POOL_SIZE 8192
+#ifdef CONFIG_NVMAP_PAGE_POOLS
#define NVMAP_UC_POOL NVMAP_HANDLE_UNCACHEABLE
#define NVMAP_WC_POOL NVMAP_HANDLE_WRITE_COMBINE
#define NVMAP_IWB_POOL NVMAP_HANDLE_INNER_CACHEABLE
@@ -103,11 +103,13 @@ struct nvmap_page_pool {
};
int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags);
+#endif
struct nvmap_share {
struct tegra_iovmm_client *iovmm;
wait_queue_head_t pin_wait;
struct mutex pin_lock;
+#ifdef CONFIG_NVMAP_PAGE_POOLS
union {
struct nvmap_page_pool pools[NVMAP_NUM_POOLS];
struct {
@@ -117,6 +119,7 @@ struct nvmap_share {
struct nvmap_page_pool wb_pool;
};
};
+#endif
#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
struct mutex mru_lock;
struct list_head *mru_lists;
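
The anonymous union added to struct nvmap_share lets the same pools be
reached by index (as the init loop in nvmap_dev.c does) or by name at
individual call sites. A reduced sketch of the pattern; the member list
is inferred from the visible diff context:

/* Reduced sketch of the array/struct aliasing in nvmap_share; the
 * exact set of named members is inferred, not copied from the source. */
struct pool { int npages; };

struct share_sketch {
	union {
		struct pool pools[4];          /* pools[NVMAP_UC_POOL], ... */
		struct {
			struct pool uc_pool;   /* aliases pools[0] */
			struct pool wc_pool;   /* aliases pools[1] */
			struct pool iwb_pool;  /* aliases pools[2] */
			struct pool wb_pool;   /* aliases pools[3] */
		};
	};
};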
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
index f84f38c93aad..27c4f61e8956 100644
--- a/drivers/video/tegra/nvmap/nvmap_dev.c
+++ b/drivers/video/tegra/nvmap/nvmap_dev.c
@@ -1182,8 +1182,10 @@ static int nvmap_probe(struct platform_device *pdev)
init_waitqueue_head(&dev->iovmm_master.pin_wait);
mutex_init(&dev->iovmm_master.pin_lock);
+#ifdef CONFIG_NVMAP_PAGE_POOLS
for (i = 0; i < NVMAP_NUM_POOLS; i++)
nvmap_page_pool_init(&dev->iovmm_master.pools[i], i);
+#endif
dev->iovmm_master.iovmm =
tegra_iovmm_alloc_client(dev_name(&pdev->dev), NULL,
@@ -1311,6 +1313,7 @@ static int nvmap_probe(struct platform_device *pdev)
dev, &debug_iovmm_clients_fops);
debugfs_create_file("allocations", 0664, iovmm_root,
dev, &debug_iovmm_allocations_fops);
+#ifdef CONFIG_NVMAP_PAGE_POOLS
for (i = 0; i < NVMAP_NUM_POOLS; i++) {
char name[40];
char *memtype_string[] = {"uc", "wc",
@@ -1321,6 +1324,7 @@ static int nvmap_probe(struct platform_device *pdev)
iovmm_root,
&dev->iovmm_master.pools[i].npages);
}
+#endif
}
}
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index 539b7ce9801f..0708e7468dad 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -66,6 +66,9 @@
* preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
* the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN (PAGE_SIZE * 2)
+
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+
#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
static bool enable_pp = 1;
static int pool_size[NVMAP_NUM_POOLS];
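
The NVMAP_TEST_PAGE_POOL_SHRINKER define, together with the Kconfig help
text ("released when system is low on memory"), points at the pools being
drained through the kernel shrinker interface. A hedged sketch of that
mechanism in the single-callback style of 3.x kernels; the two helpers
are hypothetical stand-ins, not nvmap functions:

/* Hedged sketch of a page-pool shrinker; not the actual nvmap code.
 * On newer trees struct shrinker/shrink_control live in
 * <linux/shrinker.h>; count_pool_pages()/drain_pool_pages() are
 * hypothetical helpers. */
#include <linux/mm.h>

static int pool_shrink_sketch(struct shrinker *s, struct shrink_control *sc)
{
	if (sc->nr_to_scan)
		drain_pool_pages(sc->nr_to_scan);  /* free back to the VM */

	return count_pool_pages();  /* reclaimable pages still pooled */
}

static struct shrinker pool_shrinker_sketch = {
	.shrink = pool_shrink_sketch,
	.seeks  = DEFAULT_SEEKS,
};

/* register_shrinker(&pool_shrinker_sketch) at init;
 * unregister_shrinker() on teardown. */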
@@ -377,6 +380,7 @@ int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags)
int i;
static int reg = 1;
struct sysinfo info;
+ int highmem_pages = 0;
typedef int (*set_pages_array) (struct page **pages, int addrinarray);
set_pages_array s_cpa[] = {
set_pages_array_uc,
@@ -395,14 +399,16 @@ int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags)
return 0;
si_meminfo(&info);
- if (!pool_size[flags]) {
+ if (!pool_size[flags] && !CONFIG_NVMAP_PAGE_POOL_SIZE)
/* Use 3/8th of total ram for page pools.
* 1/8th for uc, 1/8th for wc and 1/8th for iwb.
*/
pool->max_pages = info.totalram >> 3;
- }
+ else
+ pool->max_pages = CONFIG_NVMAP_PAGE_POOL_SIZE;
+
if (pool->max_pages <= 0 || pool->max_pages >= info.totalram)
- pool->max_pages = NVMAP_DEFAULT_PAGE_POOL_SIZE;
+ goto fail;
pool_size[flags] = pool->max_pages;
pr_info("nvmap %s page pool size=%d pages",
s_memtype_str[flags], pool->max_pages);
@@ -425,7 +431,14 @@ int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags)
__free_page(page);
goto do_cpa;
}
+ if (PageHighMem(page))
+ highmem_pages++;
}
+ si_meminfo(&info);
+ pr_info("nvmap pool = %s, highmem=%d, pool_size=%d,"
+ "totalram=%lu, freeram=%lu, totalhigh=%lu, freehigh=%lu",
+ s_memtype_str[flags], highmem_pages, pool->max_pages,
+ info.totalram, info.freeram, info.totalhigh, info.freehigh);
do_cpa:
(*s_cpa[flags])(pool->page_array, pool->npages);
nvmap_page_pool_unlock(pool);
@@ -436,6 +449,7 @@ fail:
vfree(pool->page_array);
return -ENOMEM;
}
+#endif
static inline void *altalloc(size_t len)
{
@@ -460,7 +474,9 @@ void _nvmap_handle_free(struct nvmap_handle *h)
{
struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
unsigned int i, nr_page, page_index = 0;
+#ifdef CONFIG_NVMAP_PAGE_POOLS
struct nvmap_page_pool *pool = NULL;
+#endif
if (nvmap_handle_remove(h->dev, h) != 0)
return;
@@ -481,6 +497,7 @@ void _nvmap_handle_free(struct nvmap_handle *h)
nvmap_mru_remove(share, h);
+#ifdef CONFIG_NVMAP_PAGE_POOLS
if (h->flags < NVMAP_NUM_POOLS)
pool = &share->pools[h->flags];
@@ -490,6 +507,7 @@ void _nvmap_handle_free(struct nvmap_handle *h)
break;
page_index++;
}
+#endif
if (page_index == nr_page)
goto skip_attr_restore;
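
The free path above, condensed: pages are handed back to the pool for the
handle's memory type until it fills, and only the remainder takes the
attribute-restore path. A reduced sketch reusing the hypothetical
pool_put() from earlier; the pgalloc.pages field name is assumed:

/* Condensed flow of _nvmap_handle_free() above; pool_put() stands in
 * for the real nvmap pool-release helper. */
for (page_index = 0; page_index < nr_page; page_index++) {
	if (!pool || !pool_put(pool, h->pgalloc.pages[page_index]))
		break;	/* pool full or disabled: slow path from here */
}
/* if page_index == nr_page, every page stayed pooled and the
 * attribute-restore step is skipped (goto skip_attr_restore) */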
@@ -538,12 +556,14 @@ static int handle_page_alloc(struct nvmap_client *client,
struct nvmap_handle *h, bool contiguous)
{
size_t size = PAGE_ALIGN(h->size);
- struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
unsigned int nr_page = size >> PAGE_SHIFT;
pgprot_t prot;
unsigned int i = 0, page_index = 0;
struct page **pages;
+#ifdef CONFIG_NVMAP_PAGE_POOLS
struct nvmap_page_pool *pool = NULL;
+ struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
+#endif
pages = altalloc(nr_page * sizeof(*pages));
if (!pages)
@@ -562,6 +582,7 @@ static int handle_page_alloc(struct nvmap_client *client,
pages[i] = nth_page(page, i);
} else {
+#ifdef CONFIG_NVMAP_PAGE_POOLS
if (h->flags < NVMAP_NUM_POOLS)
pool = &share->pools[h->flags];
@@ -572,7 +593,7 @@ static int handle_page_alloc(struct nvmap_client *client,
break;
page_index++;
}
-
+#endif
for (; i < nr_page; i++) {
pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP,
PAGE_SIZE);
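
Symmetrically, handle_page_alloc() above drains the matching pool first
and falls back to nvmap_alloc_pages_exact() for whatever the pool could
not supply; a reduced sketch using the hypothetical pool_take() from
earlier:

/* Condensed flow of the non-contiguous branch above; pool_take() is
 * the hypothetical helper sketched earlier, not the nvmap API. */
while (page_index < nr_page) {
	pages[page_index] = pool ? pool_take(pool) : NULL;
	if (!pages[page_index])
		break;			/* pool empty */
	page_index++;
}
for (i = page_index; i < nr_page; i++)
	pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP, PAGE_SIZE);
/* only pages[page_index..nr_page) need set_pages_array_*() and a
 * cache flush; pooled pages already carry the right attributes */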