path: root/drivers/video/tegra/nvmap/nvmap_dev.c
author    Hiro Sugawara <hsugawara@nvidia.com>    2011-03-17 13:58:13 -0700
committer Varun Colbert <vcolbert@nvidia.com>    2011-05-13 19:05:53 -0700
commit    b17e8de9d5641edf76e3b6046a273d2a4a261e0a (patch)
tree      18e7a9261b8b6eb1692acefb739b385451c6e9f1 /drivers/video/tegra/nvmap/nvmap_dev.c
parent    30582abff8f85b56bafb86d0bc05db25257bf456 (diff)
arm: tegra: nvmap: Forcing to convert CarveOut requests to IOVM
Add a build-time CONFIG option that forces non-IRAM CarveOut memory allocation requests to be converted into IOVM requests. The default is "y" (conversion forced), and each forced conversion is reported to the console. Allocation alignments larger than page size are now supported for IOVM. Single-page CarveOut allocations are converted to system memory instead. CarveOut memory reservation has been removed for aruba, cardhu, and enterprise.

Change-Id: I3a598431d15b92ce853b3bec97be4b583d021264
Reviewed-on: http://git-master/r/29849
Reviewed-by: Varun Colbert <vcolbert@nvidia.com>
Tested-by: Varun Colbert <vcolbert@nvidia.com>
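The conversion itself happens in the handle allocation path, which is outside the file shown in this diff. A minimal sketch of the policy the commit message describes, assuming a config symbol named CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM and the heap-mask macros NVMAP_HEAP_CARVEOUT_MASK, NVMAP_HEAP_CARVEOUT_IRAM, NVMAP_HEAP_IOVMM and NVMAP_HEAP_SYSMEM (these names are assumptions; only handle->size is visible in this diff):

/*
 * Sketch only, not the literal code added elsewhere in this series:
 * redirect non-IRAM carveout requests to IOVM, and single-page
 * carveout requests to system memory. Config symbol and heap-mask
 * macro names are assumed.
 */
static unsigned int nvmap_convert_carveout(struct nvmap_handle *h,
					   unsigned int heap_mask)
{
#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
	unsigned int non_iram = heap_mask &
		(NVMAP_HEAP_CARVEOUT_MASK & ~NVMAP_HEAP_CARVEOUT_IRAM);

	if (non_iram) {
		/* drop the non-IRAM carveout bits from the request */
		heap_mask &= ~non_iram;
		if (h->size <= PAGE_SIZE)
			heap_mask |= NVMAP_HEAP_SYSMEM;	/* single page */
		else
			heap_mask |= NVMAP_HEAP_IOVMM;	/* forced to IOVM */
		/* each forced conversion is reported to the console */
		pr_info("nvmap: carveout request (%zu bytes) converted\n",
			h->size);
	}
#endif
	return heap_mask;
}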
Diffstat (limited to 'drivers/video/tegra/nvmap/nvmap_dev.c')
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_dev.c | 32
1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
index c15cc1b21901..aeba57c85482 100644
--- a/drivers/video/tegra/nvmap/nvmap_dev.c
+++ b/drivers/video/tegra/nvmap/nvmap_dev.c
@@ -437,11 +437,10 @@ out:
return wait;
}
+static
struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
- size_t len, size_t align,
- unsigned long usage,
- unsigned int prot,
- struct nvmap_handle *handle)
+ struct nvmap_handle *handle,
+ unsigned long type)
{
struct nvmap_carveout_node *co_heap;
struct nvmap_device *dev = client->dev;
@@ -451,14 +450,12 @@ struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
struct nvmap_heap_block *block;
co_heap = &dev->heaps[i];
- if (!(co_heap->heap_bit & usage))
+ if (!(co_heap->heap_bit & type))
continue;
- block = nvmap_heap_alloc(co_heap->carveout, len,
- align, prot, handle);
- if (block) {
+ block = nvmap_heap_alloc(co_heap->carveout, handle);
+ if (block)
return block;
- }
}
return NULL;
}
@@ -470,10 +467,8 @@ static bool nvmap_carveout_freed(int count)
}
struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
- size_t len, size_t align,
- unsigned long usage,
- unsigned int prot,
- struct nvmap_handle *handle)
+ struct nvmap_handle *handle,
+ unsigned long type)
{
struct nvmap_heap_block *block;
struct nvmap_carveout_node *co_heap;
@@ -484,8 +479,7 @@ struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
int count = 0;
do {
- block = do_nvmap_carveout_alloc(client, len, align, usage,
- prot, handle);
+ block = do_nvmap_carveout_alloc(client, handle, type);
if (!carveout_killer)
return block;
@@ -500,11 +494,11 @@ struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
task_comm[0] = 0;
pr_info("%s: failed to allocate %u bytes for "
"process %s, firing carveout "
- "killer!\n", __func__, len, task_comm);
+ "killer!\n", __func__, handle->size, task_comm);
} else {
pr_info("%s: still can't allocate %u bytes, "
- "attempt %d!\n", __func__, len, count);
+ "attempt %d!\n", __func__, handle->size, count);
}
/* shrink carveouts that matter and try again */
@@ -512,7 +506,7 @@ struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
int count;
co_heap = &dev->heaps[i];
- if (!(co_heap->heap_bit & usage))
+ if (!(co_heap->heap_bit & type))
continue;
count = wait_count;
@@ -1177,6 +1171,8 @@ static int nvmap_probe(struct platform_device *pdev)
for (i = 0; i < plat->nr_carveouts; i++) {
struct nvmap_carveout_node *node = &dev->heaps[i];
const struct nvmap_platform_carveout *co = &plat->carveouts[i];
+ if (!co->size)
+ continue;
node->carveout = nvmap_heap_create(dev->dev_user.this_device,
co->name, co->base, co->size,
co->buddy_size, node);
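For reference, the refactored nvmap_carveout_alloc() drops the explicit len/align/usage/prot arguments; size, alignment and protection attributes now travel with the handle, and only the requested heap bitmask is passed. A hypothetical caller, assuming the heap-bitmask name NVMAP_HEAP_CARVEOUT_GENERIC (the real callers live in other nvmap files, outside this diff):

/*
 * Hypothetical caller, shown only to illustrate the new signature.
 * NVMAP_HEAP_CARVEOUT_GENERIC is an assumed heap-bitmask name.
 */
static struct nvmap_heap_block *
alloc_generic_carveout(struct nvmap_client *client, struct nvmap_handle *h)
{
	/* size, alignment and prot attributes are read from 'h';
	 * only the requested heap type is passed explicitly. */
	return nvmap_carveout_alloc(client, h, NVMAP_HEAP_CARVEOUT_GENERIC);
}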