author    Hiro Sugawara <hsugawara@nvidia.com>  2011-03-17 13:58:13 -0700
committer Varun Colbert <vcolbert@nvidia.com>   2011-05-13 19:05:53 -0700
commit    b17e8de9d5641edf76e3b6046a273d2a4a261e0a
tree      18e7a9261b8b6eb1692acefb739b385451c6e9f1
parent    30582abff8f85b56bafb86d0bc05db25257bf456
arm: tegra: nvmap: Force conversion of CarveOut requests to IOVM
Add a build-time CONFIG option that forces conversion of non-IRAM
CarveOut memory allocation requests into IOVM requests. The default is
"y" (conversion forced), and each forced conversion is reported to the
console. Allocation alignments larger than page size are now supported
for IOVM. Single-page CarveOut allocations are converted to system
memory instead. The CarveOut memory reservation is removed for the
aruba, cardhu, and enterprise boards.

Change-Id: I3a598431d15b92ce853b3bec97be4b583d021264
Reviewed-on: http://git-master/r/29849
Reviewed-by: Varun Colbert <vcolbert@nvidia.com>
Tested-by: Varun Colbert <vcolbert@nvidia.com>
 arch/arm/mach-tegra/board-aruba.c        |   4
 arch/arm/mach-tegra/board-cardhu.c       |   4
 arch/arm/mach-tegra/board-enterprise.c   |   4
 arch/arm/mach-tegra/include/mach/iovmm.h |   6
 arch/arm/mach-tegra/include/mach/nvmap.h |   2
 arch/arm/mach-tegra/iovmm.c              | 128
 drivers/video/tegra/Kconfig              |   9
 drivers/video/tegra/nvmap/nvmap.h        |   9
 drivers/video/tegra/nvmap/nvmap_dev.c    |  32
 drivers/video/tegra/nvmap/nvmap_handle.c |  72
 drivers/video/tegra/nvmap/nvmap_heap.c   |   6
 drivers/video/tegra/nvmap/nvmap_heap.h   |   3
 drivers/video/tegra/nvmap/nvmap_mru.c    |   9
 13 files changed, 191 insertions(+), 97 deletions(-)
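
With CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM=y, the allocator rewrites
non-IRAM carveout requests before dispatching them. A minimal sketch of
that policy, distilled from the alloc_handle() changes below (not the
literal kernel code):

    /* Conversion policy, CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM=y:
     *  - IRAM carveout requests are retained as carveouts;
     *  - generic carveout requests of at most one page fall back to
     *    system memory (when CONFIG_NVMAP_ALLOW_SYSMEM=y);
     *  - every other generic carveout request is served from IOVMM. */
    if (type & NVMAP_HEAP_CARVEOUT_GENERIC) {
            if (h->size <= PAGE_SIZE)
                    type = NVMAP_HEAP_SYSMEM;
            else
                    type = NVMAP_HEAP_IOVMM;
    }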
diff --git a/arch/arm/mach-tegra/board-aruba.c b/arch/arm/mach-tegra/board-aruba.c
index 81ab9bf2150d..290a4124ac22 100644
--- a/arch/arm/mach-tegra/board-aruba.c
+++ b/arch/arm/mach-tegra/board-aruba.c
@@ -564,7 +564,11 @@ static void __init tegra_aruba_init(void)
static void __init tegra_aruba_reserve(void)
{
+#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM)
+ tegra_reserve(0, SZ_4M, 0);
+#else
tegra_reserve(SZ_32M, SZ_4M, 0);
+#endif
}
MACHINE_START(ARUBA, "aruba")
diff --git a/arch/arm/mach-tegra/board-cardhu.c b/arch/arm/mach-tegra/board-cardhu.c
index 68428916e710..d9aad8b07c86 100644
--- a/arch/arm/mach-tegra/board-cardhu.c
+++ b/arch/arm/mach-tegra/board-cardhu.c
@@ -614,7 +614,11 @@ static void __init tegra_cardhu_init(void)
static void __init tegra_cardhu_reserve(void)
{
+#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM)
+ tegra_reserve(0, SZ_4M, SZ_8M);
+#else
tegra_reserve(SZ_128M, SZ_4M, SZ_8M);
+#endif
}
MACHINE_START(CARDHU, "cardhu")
diff --git a/arch/arm/mach-tegra/board-enterprise.c b/arch/arm/mach-tegra/board-enterprise.c
index 0018fdfda00f..f1165bcea621 100644
--- a/arch/arm/mach-tegra/board-enterprise.c
+++ b/arch/arm/mach-tegra/board-enterprise.c
@@ -514,7 +514,11 @@ static void __init tegra_enterprise_init(void)
static void __init tegra_enterprise_reserve(void)
{
+#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM)
+ tegra_reserve(0, SZ_4M, SZ_8M);
+#else
tegra_reserve(SZ_128M, SZ_4M, SZ_8M);
+#endif
}
MACHINE_START(TEGRA_ENTERPRISE, "tegra_enterprise")
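
All three boards make the same change: the generic carveout reservation
drops to zero while the framebuffer reservations stay. This reading
assumes the usual Tegra reservation helper signature:

    /* arch/arm/mach-tegra/common.c (assumed signature): the first
     * argument is the generic carveout size, the other two are the
     * framebuffer reservations. */
    void __init tegra_reserve(unsigned long carveout_size,
                              unsigned long fb_size,
                              unsigned long fb2_size);

With a zero carveout size, nvmap_probe() now skips creating the heap
entirely (see the !co->size check added in nvmap_dev.c below).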
diff --git a/arch/arm/mach-tegra/include/mach/iovmm.h b/arch/arm/mach-tegra/include/mach/iovmm.h
index b0f348a608a3..1c20514069e4 100644
--- a/arch/arm/mach-tegra/include/mach/iovmm.h
+++ b/arch/arm/mach-tegra/include/mach/iovmm.h
@@ -1,7 +1,7 @@
/*
* arch/arm/mach-tegra/include/mach/iovmm.h
*
- * Copyright (c) 2010, NVIDIA Corporation.
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -85,7 +85,7 @@ struct tegra_iovmm_client {
struct tegra_iovmm_area {
struct tegra_iovmm_domain *domain;
tegra_iovmm_addr_t iovm_start;
- tegra_iovmm_addr_t iovm_length;
+ size_t iovm_length;
pgprot_t pgprot;
struct tegra_iovmm_area_ops *ops;
};
@@ -155,7 +155,7 @@ void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client);
* respectively. VM operations may be called before this call returns */
struct tegra_iovmm_area *tegra_iovmm_create_vm(
struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
- unsigned long size, pgprot_t pgprot);
+ size_t size, size_t align, pgprot_t pgprot);
/* called by clients to "zap" an iovmm_area, and replace all mappings
* in it with invalid ones, without freeing the virtual address range */
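
tegra_iovmm_create_vm() gains an explicit alignment parameter so IOVM
areas can honor alignments larger than a page. A hypothetical caller
(illustrative values; pgprot_kernel stands in for whatever protection
the real caller computes):

    /* Reserve 1 MiB of I/O virtual space aligned to 64 KiB. */
    struct tegra_iovmm_area *area;

    area = tegra_iovmm_create_vm(client, NULL, SZ_1M, SZ_64K,
                                 pgprot_kernel);
    if (!area)
            return -ENOMEM;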
diff --git a/arch/arm/mach-tegra/include/mach/nvmap.h b/arch/arm/mach-tegra/include/mach/nvmap.h
index b19ccf6ae7dd..280dff4ae999 100644
--- a/arch/arm/mach-tegra/include/mach/nvmap.h
+++ b/arch/arm/mach-tegra/include/mach/nvmap.h
@@ -1,5 +1,5 @@
/*
- * include/linux/nvmap.h
+ * arch/arm/mach-tegra/include/mach/nvmap.h
*
* structure declarations for nvmem and nvmap user-space ioctls
*
diff --git a/arch/arm/mach-tegra/iovmm.c b/arch/arm/mach-tegra/iovmm.c
index c127f823286d..803b9130474d 100644
--- a/arch/arm/mach-tegra/iovmm.c
+++ b/arch/arm/mach-tegra/iovmm.c
@@ -3,7 +3,7 @@
*
* Tegra I/O VM manager
*
- * Copyright (c) 2010, NVIDIA Corporation.
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -52,6 +52,8 @@
struct tegra_iovmm_block {
struct tegra_iovmm_area vm_area;
+ tegra_iovmm_addr_t start;
+ size_t length;
atomic_t ref;
unsigned long flags;
unsigned long poison;
@@ -92,17 +94,16 @@ static tegra_iovmm_addr_t iovmm_align_down(struct tegra_iovmm_device *dev,
static void tegra_iovmm_block_stats(struct tegra_iovmm_domain *domain,
unsigned int *num_blocks, unsigned int *num_free,
- tegra_iovmm_addr_t *total, tegra_iovmm_addr_t *total_free,
- tegra_iovmm_addr_t *max_free)
+ tegra_iovmm_addr_t *total, size_t *total_free, size_t *max_free)
{
struct rb_node *n;
struct tegra_iovmm_block *b;
*num_blocks = 0;
*num_free = 0;
- *total = (tegra_iovmm_addr_t)0;
- *total_free = (tegra_iovmm_addr_t)0;
- *max_free = (tegra_iovmm_addr_t)0;
+ *total = 0;
+ *total_free = 0;
+ *max_free = 0;
spin_lock(&domain->block_lock);
n = rb_first(&domain->all_blocks);
@@ -110,12 +111,11 @@ static void tegra_iovmm_block_stats(struct tegra_iovmm_domain *domain,
b = rb_entry(n, struct tegra_iovmm_block, all_node);
n = rb_next(n);
(*num_blocks)++;
- (*total) += iovmm_length(b);
+ *total += b->length;
if (test_bit(BK_free, &b->flags)) {
(*num_free)++;
- (*total_free) += iovmm_length(b);
- (*max_free) = max_t(tegra_iovmm_addr_t,
- (*max_free), iovmm_length(b));
+ *total_free += b->length;
+ *max_free = max_t(size_t, *max_free, b->length);
}
}
spin_unlock(&domain->block_lock);
@@ -125,7 +125,7 @@ static int tegra_iovmm_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
struct iovmm_share_group *grp;
- tegra_iovmm_addr_t max_free, total_free, total;
+ size_t max_free, total_free, total;
unsigned int num, num_free;
int len = 0;
@@ -188,8 +188,8 @@ static void iovmm_free_block(struct tegra_iovmm_domain *domain,
if (succ) succ_free = test_bit(BK_free, &succ->flags);
if (pred_free && succ_free) {
- iovmm_length(pred) += iovmm_length(block);
- iovmm_length(pred) += iovmm_length(succ);
+ pred->length += block->length;
+ pred->length += succ->length;
rb_erase(&block->all_node, &domain->all_blocks);
rb_erase(&succ->all_node, &domain->all_blocks);
rb_erase(&succ->free_node, &domain->free_blocks);
@@ -198,13 +198,13 @@ static void iovmm_free_block(struct tegra_iovmm_domain *domain,
iovmm_block_put(succ);
block = pred;
} else if (pred_free) {
- iovmm_length(pred) += iovmm_length(block);
+ pred->length += block->length;
rb_erase(&block->all_node, &domain->all_blocks);
rb_erase(&pred->free_node, &domain->free_blocks);
iovmm_block_put(block);
block = pred;
} else if (succ_free) {
- iovmm_length(block) += iovmm_length(succ);
+ block->length += succ->length;
rb_erase(&succ->all_node, &domain->all_blocks);
rb_erase(&succ->free_node, &domain->free_blocks);
iovmm_block_put(succ);
@@ -215,7 +215,7 @@ static void iovmm_free_block(struct tegra_iovmm_domain *domain,
struct tegra_iovmm_block *b;
parent = *p;
b = rb_entry(parent, struct tegra_iovmm_block, free_node);
- if (iovmm_length(block) >= iovmm_length(b))
+ if (block->length >= b->length)
p = &parent->rb_right;
else
p = &parent->rb_left;
@@ -230,7 +230,8 @@ static void iovmm_free_block(struct tegra_iovmm_domain *domain,
* block will be created and inserted into the free list in its place.
* since all free blocks are stored in two trees the new block needs to be
* linked into both. */
-static void iovmm_split_free_block(struct tegra_iovmm_domain *domain,
+static struct tegra_iovmm_block *iovmm_split_free_block(
+ struct tegra_iovmm_domain *domain,
struct tegra_iovmm_block *block, unsigned long size)
{
struct rb_node **p;
@@ -239,20 +240,21 @@ static void iovmm_split_free_block(struct tegra_iovmm_domain *domain,
struct tegra_iovmm_block *b;
rem = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);
- if (!rem) return;
+ if (!rem)
+ return NULL;
spin_lock(&domain->block_lock);
p = &domain->free_blocks.rb_node;
- iovmm_start(rem) = iovmm_start(block) + size;
- iovmm_length(rem) = iovmm_length(block) - size;
+ rem->start = block->start + size;
+ rem->length = block->length - size;
atomic_set(&rem->ref, 1);
- iovmm_length(block) = size;
+ block->length = size;
while (*p) {
parent = *p;
b = rb_entry(parent, struct tegra_iovmm_block, free_node);
- if (iovmm_length(rem) >= iovmm_length(b))
+ if (rem->length >= b->length)
p = &parent->rb_right;
else
p = &parent->rb_left;
@@ -266,24 +268,32 @@ static void iovmm_split_free_block(struct tegra_iovmm_domain *domain,
while (*p) {
parent = *p;
b = rb_entry(parent, struct tegra_iovmm_block, all_node);
- if (iovmm_start(rem) >= iovmm_start(b))
+ if (rem->start >= b->start)
p = &parent->rb_right;
else
p = &parent->rb_left;
}
rb_link_node(&rem->all_node, parent, p);
rb_insert_color(&rem->all_node, &domain->all_blocks);
+
+ return rem;
}
static struct tegra_iovmm_block *iovmm_alloc_block(
- struct tegra_iovmm_domain *domain, unsigned long size)
+ struct tegra_iovmm_domain *domain, size_t size, size_t align)
{
+#define SIMALIGN(b, a) (((b)->start%(a)) ? ((a)-((b)->start%(a))) : 0)
+#define NO_SPLIT(m) ((m) < MIN_SPLIT_BYTES(domain))
+#define DO_SPLIT(m) ((m) >= MIN_SPLIT_BYTES(domain))
+
struct rb_node *n;
struct tegra_iovmm_block *b, *best;
+ size_t simalign;
static int splitting = 0;
BUG_ON(!size);
size = iovmm_align_up(domain->dev, size);
+ align = iovmm_align_up(domain->dev, align);
for (;;) {
spin_lock(&domain->block_lock);
if (!splitting)
@@ -294,31 +304,60 @@ static struct tegra_iovmm_block *iovmm_alloc_block(
n = domain->free_blocks.rb_node;
best = NULL;
while (n) {
+ tegra_iovmm_addr_t aligned_start, block_ceil;
+
b = rb_entry(n, struct tegra_iovmm_block, free_node);
- if (iovmm_length(b) < size) {
- n = n->rb_right;
- } else if (iovmm_length(b) == size) {
- best = b;
- break;
- } else {
+ simalign = SIMALIGN(b, align);
+ aligned_start = b->start + simalign;
+ block_ceil = b->start + b->length;
+
+ if (block_ceil >= aligned_start + size) {
+ /* Block is large enough */
best = b;
+ if (NO_SPLIT(simalign) &&
+ NO_SPLIT(block_ceil - (aligned_start + size)))
+ break;
n = n->rb_left;
+ } else {
+ n = n->rb_right;
}
}
if (!best) {
spin_unlock(&domain->block_lock);
return NULL;
}
+
+ simalign = SIMALIGN(best, align);
+ if (DO_SPLIT(simalign)) {
+ splitting = 1;
+ spin_unlock(&domain->block_lock);
+
+ /* Split off misalignment */
+ b = best;
+ best = iovmm_split_free_block(domain, b, simalign);
+ if (best)
+ simalign = 0;
+ else
+ best = b;
+ }
+
+ /* Unfree the designated block */
rb_erase(&best->free_node, &domain->free_blocks);
clear_bit(BK_free, &best->flags);
atomic_inc(&best->ref);
- if (iovmm_length(best) >= size+MIN_SPLIT_BYTES(domain)) {
+
+ iovmm_start(best) = best->start + simalign;
+ iovmm_length(best) = size;
+
+ if (DO_SPLIT((best->start + best->length) - iovmm_end(best))) {
splitting = 1;
spin_unlock(&domain->block_lock);
- iovmm_split_free_block(domain, best, size);
- splitting = 0;
+
+ /* Split off excess */
+ (void)iovmm_split_free_block(domain, best, size + simalign);
}
+ splitting = 0;
spin_unlock(&domain->block_lock);
return best;
@@ -340,8 +379,8 @@ int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
spin_lock_init(&domain->block_lock);
init_rwsem(&domain->map_lock);
init_waitqueue_head(&domain->delay_lock);
- iovmm_start(b) = iovmm_align_up(dev, start);
- iovmm_length(b) = iovmm_align_down(dev, end) - iovmm_start(b);
+ b->start = iovmm_align_up(dev, start);
+ b->length = iovmm_align_down(dev, end) - b->start;
set_bit(BK_free, &b->flags);
rb_link_node(&b->free_node, NULL, &domain->free_blocks.rb_node);
rb_insert_color(&b->free_node, &domain->free_blocks);
@@ -352,7 +391,7 @@ int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
struct tegra_iovmm_area *tegra_iovmm_create_vm(
struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
- unsigned long size, pgprot_t pgprot)
+ size_t size, size_t align, pgprot_t pgprot)
{
struct tegra_iovmm_block *b;
struct tegra_iovmm_domain *domain;
@@ -361,7 +400,7 @@ struct tegra_iovmm_area *tegra_iovmm_create_vm(
domain = client->domain;
- b = iovmm_alloc_block(domain, size);
+ b = iovmm_alloc_block(domain, size, align);
if (!b) return NULL;
b->vm_area.domain = domain;
@@ -477,8 +516,9 @@ struct tegra_iovmm_area *tegra_iovmm_find_area_get(
while (n) {
b = rb_entry(n, struct tegra_iovmm_block, all_node);
- if ((iovmm_start(b) <= addr) && (iovmm_end(b) >= addr)) {
- if (test_bit(BK_free, &b->flags)) b = NULL;
+ if (iovmm_start(b) <= addr && addr <= iovmm_end(b)) {
+ if (test_bit(BK_free, &b->flags))
+ b = NULL;
break;
}
if (addr > iovmm_start(b))
@@ -487,9 +527,11 @@ struct tegra_iovmm_area *tegra_iovmm_find_area_get(
n = n->rb_left;
b = NULL;
}
- if (b) atomic_inc(&b->ref);
+ if (b)
+ atomic_inc(&b->ref);
spin_unlock(&client->domain->block_lock);
- if (!b) return NULL;
+ if (!b)
+ return NULL;
return &b->vm_area;
}
@@ -604,7 +646,7 @@ size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client)
while (n) {
b = rb_entry(n, struct tegra_iovmm_block, all_node);
n = rb_next(n);
- size += iovmm_length(b);
+ size += b->length;
}
spin_unlock(&domain->block_lock);
@@ -727,7 +769,7 @@ int tegra_iovmm_register(struct tegra_iovmm_device *dev)
}
list_add_tail(&dev->list, &iovmm_devices);
mutex_unlock(&iovmm_list_lock);
- printk("%s: added %s\n", __func__, dev->name);
+ pr_info("%s: added %s\n", __func__, dev->name);
return 0;
}
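
A worked example of the new alignment handling in iovmm_alloc_block(),
with illustrative values:

    /* block->start = 0x9000, requested align = 0x4000:
     *   simalign      = 0x4000 - (0x9000 % 0x4000) = 0x3000
     *   aligned_start = 0x9000 + 0x3000 = 0xC000  (0x4000-aligned)
     * If simalign, or the tail beyond aligned_start + size, reaches
     * MIN_SPLIT_BYTES, it is split off and returned to the free tree;
     * otherwise it stays as slack inside the allocated block. This is
     * why the handed-out area (iovmm_start/iovmm_length) is now
     * tracked separately from the underlying block
     * (block->start/block->length). */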
diff --git a/drivers/video/tegra/Kconfig b/drivers/video/tegra/Kconfig
index dd2ac9dc67c0..b6f4a7d2f894 100644
--- a/drivers/video/tegra/Kconfig
+++ b/drivers/video/tegra/Kconfig
@@ -109,5 +109,14 @@ config TEGRA_DSI
default n
help
Say Y here to enable the DSI panel.
+
+config NVMAP_CONVERT_CARVEOUT_TO_IOVMM
+ bool "Convert carveout to IOVMM"
+ depends on TEGRA_NVMAP && TEGRA_IOVMM
+ default y
+ help
+ Say Y here to force conversion of carveout memory requests
+ into I/O virtual memory requests.
+
endif
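
Because the option defaults to y, a board that still needs the generic
carveout must opt out explicitly. A hypothetical defconfig fragment:

    # arch/arm/configs/<board>_defconfig (hypothetical)
    CONFIG_TEGRA_NVMAP=y
    CONFIG_TEGRA_IOVMM=y
    # CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM is not set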
diff --git a/drivers/video/tegra/nvmap/nvmap.h b/drivers/video/tegra/nvmap/nvmap.h
index 923ff8fc8d8a..713e6f4e2926 100644
--- a/drivers/video/tegra/nvmap/nvmap.h
+++ b/drivers/video/tegra/nvmap/nvmap.h
@@ -3,7 +3,7 @@
*
* GPU memory management driver for Tegra
*
- * Copyright (c) 2010, NVIDIA Corporation.
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -71,6 +71,7 @@ struct nvmap_handle {
unsigned long flags;
size_t size; /* padded (as-allocated) size */
size_t orig_size; /* original (as-requested) size */
+ size_t align;
struct nvmap_client *owner;
struct nvmap_device *dev;
union {
@@ -153,10 +154,8 @@ void nvmap_usecount_inc(struct nvmap_handle *h);
void nvmap_usecount_dec(struct nvmap_handle *h);
struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
- size_t len, size_t align,
- unsigned long usage,
- unsigned int prot,
- struct nvmap_handle *handle);
+ struct nvmap_handle *handle,
+ unsigned long type);
unsigned long nvmap_carveout_usage(struct nvmap_client *c,
struct nvmap_heap_block *b);
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
index c15cc1b21901..aeba57c85482 100644
--- a/drivers/video/tegra/nvmap/nvmap_dev.c
+++ b/drivers/video/tegra/nvmap/nvmap_dev.c
@@ -437,11 +437,10 @@ out:
return wait;
}
+static
struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
- size_t len, size_t align,
- unsigned long usage,
- unsigned int prot,
- struct nvmap_handle *handle)
+ struct nvmap_handle *handle,
+ unsigned long type)
{
struct nvmap_carveout_node *co_heap;
struct nvmap_device *dev = client->dev;
@@ -451,14 +450,12 @@ struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
struct nvmap_heap_block *block;
co_heap = &dev->heaps[i];
- if (!(co_heap->heap_bit & usage))
+ if (!(co_heap->heap_bit & type))
continue;
- block = nvmap_heap_alloc(co_heap->carveout, len,
- align, prot, handle);
- if (block) {
+ block = nvmap_heap_alloc(co_heap->carveout, handle);
+ if (block)
return block;
- }
}
return NULL;
}
@@ -470,10 +467,8 @@ static bool nvmap_carveout_freed(int count)
}
struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
- size_t len, size_t align,
- unsigned long usage,
- unsigned int prot,
- struct nvmap_handle *handle)
+ struct nvmap_handle *handle,
+ unsigned long type)
{
struct nvmap_heap_block *block;
struct nvmap_carveout_node *co_heap;
@@ -484,8 +479,7 @@ struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
int count = 0;
do {
- block = do_nvmap_carveout_alloc(client, len, align, usage,
- prot, handle);
+ block = do_nvmap_carveout_alloc(client, handle, type);
if (!carveout_killer)
return block;
@@ -500,11 +494,11 @@ struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
task_comm[0] = 0;
pr_info("%s: failed to allocate %u bytes for "
"process %s, firing carveout "
- "killer!\n", __func__, len, task_comm);
+ "killer!\n", __func__, handle->size, task_comm);
} else {
pr_info("%s: still can't allocate %u bytes, "
- "attempt %d!\n", __func__, len, count);
+ "attempt %d!\n", __func__, handle->size, count);
}
/* shrink carveouts that matter and try again */
@@ -512,7 +506,7 @@ struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
int count;
co_heap = &dev->heaps[i];
- if (!(co_heap->heap_bit & usage))
+ if (!(co_heap->heap_bit & type))
continue;
count = wait_count;
@@ -1177,6 +1171,8 @@ static int nvmap_probe(struct platform_device *pdev)
for (i = 0; i < plat->nr_carveouts; i++) {
struct nvmap_carveout_node *node = &dev->heaps[i];
const struct nvmap_platform_carveout *co = &plat->carveouts[i];
+ if (!co->size)
+ continue;
node->carveout = nvmap_heap_create(dev->dev_user.this_device,
co->name, co->base, co->size,
co->buddy_size, node);
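
The carveout path is also refactored so that size, alignment, and
protection travel inside the nvmap_handle instead of being unpacked at
every call level. The call shape, taken directly from this diff:

    /* before */
    b = nvmap_carveout_alloc(client, h->size, align, type, h->flags, h);
    /* after: len, align, and prot are read from the handle */
    b = nvmap_carveout_alloc(client, h, type);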
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index 6e6b6d47d28c..3b1c2ee06b10 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -1,5 +1,5 @@
/*
- * drivers/video/tegra/nvmap_handle.c
+ * drivers/video/tegra/nvmap/nvmap_handle.c
*
* Handle allocation and freeing routines for nvmap
*
@@ -39,6 +39,13 @@
#include "nvmap_mru.h"
#include "nvmap_common.h"
+#define PRINT_CARVEOUT_CONVERSION 0
+#if PRINT_CARVEOUT_CONVERSION
+#define PR_INFO pr_info
+#else
+#define PR_INFO(...)
+#endif
+
#define NVMAP_SECURE_HEAPS (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM)
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define GFP_NVMAP (__GFP_HIGHMEM | __GFP_NOWARN)
@@ -184,7 +191,7 @@ static int handle_page_alloc(struct nvmap_client *client,
#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
- NULL, size, prot);
+ NULL, size, h->align, prot);
if (!h->pgalloc.area)
goto fail;
@@ -207,18 +214,43 @@ fail:
return -ENOMEM;
}
-static void alloc_handle(struct nvmap_client *client, size_t align,
+static void alloc_handle(struct nvmap_client *client,
struct nvmap_handle *h, unsigned int type)
{
BUG_ON(type & (type - 1));
- if (type & NVMAP_HEAP_CARVEOUT_MASK) {
- struct nvmap_heap_block *b;
+#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
+#define __NVMAP_HEAP_CARVEOUT NVMAP_HEAP_CARVEOUT_IRAM
+#define __NVMAP_HEAP_IOVMM (NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_GENERIC)
+ if (type & NVMAP_HEAP_CARVEOUT_GENERIC) {
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+ if (h->size <= PAGE_SIZE) {
+ PR_INFO("###CARVEOUT CONVERTED TO SYSMEM "
+ "0x%x bytes %s(%d)###\n",
+ h->size, current->comm, current->pid);
+ goto sysheap;
+ }
+#endif
+ PR_INFO("###CARVEOUT CONVERTED TO IOVM "
+ "0x%x bytes %s(%d)###\n",
+ h->size, current->comm, current->pid);
+ }
+#else
+#define __NVMAP_HEAP_CARVEOUT NVMAP_HEAP_CARVEOUT_MASK
+#define __NVMAP_HEAP_IOVMM NVMAP_HEAP_IOVMM
+#endif
+
+ if (type & __NVMAP_HEAP_CARVEOUT) {
+ struct nvmap_heap_block *b;
+#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
+ PR_INFO("###IRAM REQUEST RETAINED "
+ "0x%x bytes %s(%d)###\n",
+ h->size, current->comm, current->pid);
+#endif
/* Protect handle from relocation */
nvmap_usecount_inc(h);
- b = nvmap_carveout_alloc(client, h->size, align,
- type, h->flags, h);
+ b = nvmap_carveout_alloc(client, h, type);
if (b) {
h->heap_pgalloc = false;
h->alloc = true;
@@ -228,13 +260,11 @@ static void alloc_handle(struct nvmap_client *client, size_t align,
}
nvmap_usecount_dec(h);
- } else if (type & NVMAP_HEAP_IOVMM) {
+ } else if (type & __NVMAP_HEAP_IOVMM) {
size_t reserved = PAGE_ALIGN(h->size);
int commit;
int ret;
- BUG_ON(align > PAGE_SIZE);
-
/* increment the committed IOVM space prior to allocation
* to avoid race conditions with other threads simultaneously
* allocating. */
@@ -253,7 +283,10 @@ static void alloc_handle(struct nvmap_client *client, size_t align,
}
} else if (type & NVMAP_HEAP_SYSMEM) {
-
+#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM) && \
+ defined(CONFIG_NVMAP_ALLOW_SYSMEM)
+sysheap:
+#endif
if (handle_page_alloc(client, h, true) == 0) {
BUG_ON(!h->pgalloc.contig);
h->heap_pgalloc = true;
@@ -295,12 +328,6 @@ int nvmap_alloc_handle_id(struct nvmap_client *client,
int nr_page;
int err = -ENOMEM;
- align = max_t(size_t, align, L1_CACHE_BYTES);
-
- /* can't do greater than page size alignment with page alloc */
- if (align > PAGE_SIZE)
- heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
-
h = nvmap_get_handle_id(client, id);
if (!h)
@@ -312,7 +339,14 @@ int nvmap_alloc_handle_id(struct nvmap_client *client,
nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
h->secure = !!(flags & NVMAP_HANDLE_SECURE);
h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
-
+ h->align = max_t(size_t, align, L1_CACHE_BYTES);
+#ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
+ /* This restriction is deprecated as alignments greater than
+ PAGE_SIZE are now correctly handled, but it is retained for
+ AP20 compatibility. */
+ if (h->align > PAGE_SIZE)
+ heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
+#endif
/* secure allocations can only be served from secure heaps */
if (h->secure)
heap_mask &= NVMAP_SECURE_HEAPS;
@@ -341,7 +375,7 @@ int nvmap_alloc_handle_id(struct nvmap_client *client,
/* iterate possible heaps MSB-to-LSB, since higher-
* priority carveouts will have higher usage masks */
heap = 1 << __fls(heap_type);
- alloc_handle(client, align, h, heap);
+ alloc_handle(client, h, heap);
heap_type &= ~heap;
}
}
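
Putting the alloc_handle() pieces together, request routing with the
conversion enabled reduces to roughly the following (a sketch under the
CONFIG assumptions above, not literal code):

    if (type & NVMAP_HEAP_CARVEOUT_IRAM) {
            /* retained: real carveout allocation */
            b = nvmap_carveout_alloc(client, h, type);
    } else if (type & (NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_GENERIC)) {
            /* converted: IOVM-backed, non-contiguous pages */
            ret = handle_page_alloc(client, h, false);
    } else if (type & NVMAP_HEAP_SYSMEM) {
            /* includes single-page conversions: contiguous sysmem */
            ret = handle_page_alloc(client, h, true);
    }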
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.c b/drivers/video/tegra/nvmap/nvmap_heap.c
index b90a87671f98..1378a5cb1f79 100644
--- a/drivers/video/tegra/nvmap/nvmap_heap.c
+++ b/drivers/video/tegra/nvmap/nvmap_heap.c
@@ -844,11 +844,13 @@ void nvmap_usecount_dec(struct nvmap_handle *h)
/* nvmap_heap_alloc: allocates a block of memory of len bytes, aligned to
* align bytes. */
-struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h, size_t len,
- size_t align, unsigned int prot,
+struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h,
struct nvmap_handle *handle)
{
struct nvmap_heap_block *b;
+ size_t len = handle->size;
+ size_t align = handle->align;
+ unsigned int prot = handle->flags;
mutex_lock(&h->lock);
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.h b/drivers/video/tegra/nvmap/nvmap_heap.h
index a8884be218d1..2533c13049c8 100644
--- a/drivers/video/tegra/nvmap/nvmap_heap.h
+++ b/drivers/video/tegra/nvmap/nvmap_heap.h
@@ -45,8 +45,7 @@ void *nvmap_heap_device_to_arg(struct device *dev);
void *nvmap_heap_to_arg(struct nvmap_heap *heap);
-struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *heap, size_t len,
- size_t align, unsigned int prot,
+struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *heap,
struct nvmap_handle *handle);
struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b);
diff --git a/drivers/video/tegra/nvmap/nvmap_mru.c b/drivers/video/tegra/nvmap/nvmap_mru.c
index 252665427568..d525fbed74d2 100644
--- a/drivers/video/tegra/nvmap/nvmap_mru.c
+++ b/drivers/video/tegra/nvmap/nvmap_mru.c
@@ -1,9 +1,9 @@
/*
- * drivers/video/tegra/nvmap_mru.c
+ * drivers/video/tegra/nvmap/nvmap_mru.c
*
* IOVMM virtualization support for nvmap
*
- * Copyright (c) 2009-2010, NVIDIA Corporation.
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -115,7 +115,8 @@ struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
return h->pgalloc.area;
}
- vm = tegra_iovmm_create_vm(c->share->iovmm, NULL, h->size, prot);
+ vm = tegra_iovmm_create_vm(c->share->iovmm, NULL,
+ h->size, h->align, prot);
if (vm) {
INIT_LIST_HEAD(&h->pgalloc.mru_list);
@@ -159,7 +160,7 @@ struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
tegra_iovmm_free_vm(evict->pgalloc.area);
evict->pgalloc.area = NULL;
vm = tegra_iovmm_create_vm(c->share->iovmm,
- NULL, h->size, prot);
+ NULL, h->size, h->align, prot);
nvmap_mru_lock(c->share);
}
}