path: root/drivers
author	Kirill Artamonov <kartamonov@nvidia.com>	2011-02-09 23:53:03 +0200
committer	Varun Colbert <vcolbert@nvidia.com>	2011-02-11 16:52:27 -0800
commit	85d8bb90e69076e6917531ed94e44397e4c4a03a (patch)
tree	f22886cf1f2fd8c45be9350be8d3395b7b742888 /drivers
parent	2379292fb458bed79302dc79faf8175faa249b79 (diff)
nvmap: implementing K36 carveout compactor
bug 762482

Change-Id: Ifadebc1b0c4eb0df89e179091acca0ff6e527e56
Reviewed-on: http://git-master/r/15743
Reviewed-by: Kirill Artamonov <kartamonov@nvidia.com>
Tested-by: Kirill Artamonov <kartamonov@nvidia.com>
Reviewed-by: Varun Colbert <vcolbert@nvidia.com>
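In outline, the compactor relocates carveout blocks that are neither pinned nor mapped toward lower addresses, merging the freed space into larger free blocks. A condensed sketch of the relocation step (paraphrasing do_heap_relocate_listblock from the diff below; locking, the page-by-page copy, and error handling are elided):

	/* Sketch only: relocate one allocated block toward the heap base. */
	static struct nvmap_heap_block *relocate(struct list_block *b, bool fast)
	{
		struct nvmap_handle *h = b->block.handle;
		struct nvmap_heap *heap = b->heap;
		unsigned long src_base = b->block.base;
		size_t size = b->size, align = b->align;
		unsigned int prot = b->mem_prot;
		struct nvmap_heap_block *nb;

		if (atomic_read(&h->pin) || h->usecount)
			return NULL;	/* pinned or mapped: not movable */
		if (fast) {
			/* fast path: allocate a lower copy first, then free */
			nb = do_heap_alloc(heap, size, align, prot, src_base);
			if (!nb)
				return NULL;
			do_heap_free(&b->block);
		} else {
			/* full path: free first, then reallocate (best packing;
			 * cannot fail, since the hole just freed is reusable) */
			do_heap_free(&b->block);
			nb = do_heap_alloc(heap, size, align, prot, src_base);
		}
		h->carveout = nb;
		nb->handle = h;
		/* ...then copy src_base -> nb->base one page at a time... */
		return nb;
	}

When an allocation fails, nvmap_heap_alloc (below) first runs a fast compaction pass that stops as soon as a free block of the requested size appears, and only falls back to the slower full pass if the retry still fails.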
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/video/tegra/Kconfig	|   9
-rw-r--r--	drivers/video/tegra/nvmap/nvmap.c	|  10
-rw-r--r--	drivers/video/tegra/nvmap/nvmap.h	|   7
-rw-r--r--	drivers/video/tegra/nvmap/nvmap_dev.c	|  38
-rw-r--r--	drivers/video/tegra/nvmap/nvmap_handle.c	|  27
-rw-r--r--	drivers/video/tegra/nvmap/nvmap_heap.c	| 357
-rw-r--r--	drivers/video/tegra/nvmap/nvmap_heap.h	|   4
-rwxr-xr-x	drivers/video/tegra/nvmap/nvmap_ioctl.c	|  25
8 files changed, 417 insertions, 60 deletions
diff --git a/drivers/video/tegra/Kconfig b/drivers/video/tegra/Kconfig
index c9e5f21a69c2..ef47db6ba9b9 100644
--- a/drivers/video/tegra/Kconfig
+++ b/drivers/video/tegra/Kconfig
@@ -77,6 +77,15 @@ config NVMAP_CARVEOUT_KILLER
processes. This will kill the largest consumers of lowest priority
first.
+config NVMAP_CARVEOUT_COMPACTOR
+ bool "Compact carveout when it gets fragmented"
+ depends on TEGRA_NVMAP
+ default y
+ help
+ When a carveout allocation attempt fails, the compactor defragments
+ the heap and retries the failed allocation.
+ Say Y here to let nvmap keep carveout fragmentation under control.
+
config NVMAP_SEARCH_GLOBAL_HANDLES
bool "Check global handle list when generating memory IDs"
depends on TEGRA_NVMAP
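With this option in place, a board defconfig that already enables the nvmap driver gets the compactor by default (default y); to set it explicitly:

	CONFIG_TEGRA_NVMAP=y
	CONFIG_NVMAP_CARVEOUT_COMPACTOR=y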
diff --git a/drivers/video/tegra/nvmap/nvmap.c b/drivers/video/tegra/nvmap/nvmap.c
index 262f1b9b357d..db4358f1fe55 100644
--- a/drivers/video/tegra/nvmap/nvmap.c
+++ b/drivers/video/tegra/nvmap/nvmap.c
@@ -578,8 +578,9 @@ unsigned long nvmap_handle_address(struct nvmap_client *c, unsigned long id)
h = nvmap_get_handle_id(c, id);
if (!h)
return -EPERM;
-
+ mutex_lock(&h->lock);
phys = handle_phys(h);
+ mutex_unlock(&h->lock);
nvmap_handle_put(h);
return phys;
@@ -628,12 +629,16 @@ void *nvmap_mmap(struct nvmap_handle_ref *ref)
-1, prot);
/* carveout - explicitly map the pfns into a vmalloc area */
+
+ nvmap_usecount_inc(h);
+
adj_size = h->carveout->base & ~PAGE_MASK;
adj_size += h->size;
adj_size = PAGE_ALIGN(adj_size);
v = alloc_vm_area(adj_size);
if (!v) {
+ nvmap_usecount_dec(h);
nvmap_handle_put(h);
return NULL;
}
@@ -665,6 +670,7 @@ void *nvmap_mmap(struct nvmap_handle_ref *ref)
if (offs != adj_size) {
free_vm_area(v);
+ nvmap_usecount_dec(h);
nvmap_handle_put(h);
return NULL;
}
@@ -691,8 +697,8 @@ void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
addr -= (h->carveout->base & ~PAGE_MASK);
vm = remove_vm_area(addr);
BUG_ON(!vm);
+ nvmap_usecount_dec(h);
}
-
nvmap_handle_put(h);
}
diff --git a/drivers/video/tegra/nvmap/nvmap.h b/drivers/video/tegra/nvmap/nvmap.h
index 9bb7da77a501..54d9fc664591 100644
--- a/drivers/video/tegra/nvmap/nvmap.h
+++ b/drivers/video/tegra/nvmap/nvmap.h
@@ -67,6 +67,7 @@ struct nvmap_handle {
struct rb_node node; /* entry on global handle tree */
atomic_t ref; /* reference count (i.e., # of duplications) */
atomic_t pin; /* pin count */
+ unsigned int usecount; /* how often it is used */
unsigned long flags;
size_t size; /* padded (as-allocated) size */
size_t orig_size; /* original (as-requested) size */
@@ -148,10 +149,14 @@ pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);
void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);
+void nvmap_usecount_inc(struct nvmap_handle *h);
+void nvmap_usecount_dec(struct nvmap_handle *h);
+
struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
size_t len, size_t align,
unsigned long usage,
- unsigned int prot);
+ unsigned int prot,
+ struct nvmap_handle *handle);
unsigned long nvmap_carveout_usage(struct nvmap_client *c,
struct nvmap_heap_block *b);
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
index 2cea073499b7..3d0e69b83b51 100644
--- a/drivers/video/tegra/nvmap/nvmap_dev.c
+++ b/drivers/video/tegra/nvmap/nvmap_dev.c
@@ -402,9 +402,10 @@ out:
}
struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
- size_t len, size_t align,
- unsigned long usage,
- unsigned int prot)
+ size_t len, size_t align,
+ unsigned long usage,
+ unsigned int prot,
+ struct nvmap_handle *handle)
{
struct nvmap_carveout_node *co_heap;
struct nvmap_device *dev = client->dev;
@@ -417,16 +418,17 @@ struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
if (!(co_heap->heap_bit & usage))
continue;
- block = nvmap_heap_alloc(co_heap->carveout, len, align, prot);
+ block = nvmap_heap_alloc(co_heap->carveout, len,
+ align, prot, handle);
if (block) {
/* flush any stale data that may be left in the
* cache at the block's address, since the new
* block may be mapped uncached */
if (nvmap_flush_heap_block(client, block, len)) {
nvmap_heap_free(block);
- return NULL;
- } else
- return block;
+ block = NULL;
+ }
+ return block;
}
}
return NULL;
@@ -441,7 +443,8 @@ static bool nvmap_carveout_freed(int count)
struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
size_t len, size_t align,
unsigned long usage,
- unsigned int prot)
+ unsigned int prot,
+ struct nvmap_handle *handle)
{
struct nvmap_heap_block *block;
struct nvmap_carveout_node *co_heap;
@@ -452,8 +455,8 @@ struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
int count = 0;
do {
- block = do_nvmap_carveout_alloc(client, len, align,
- usage, prot);
+ block = do_nvmap_carveout_alloc(client, len, align, usage,
+ prot, handle);
if (!carveout_killer)
return block;
@@ -859,12 +862,17 @@ static void nvmap_vma_close(struct vm_area_struct *vma)
{
struct nvmap_vma_priv *priv = vma->vm_private_data;
- if (priv && !atomic_dec_return(&priv->count)) {
- if (priv->handle)
- nvmap_handle_put(priv->handle);
- kfree(priv);
+ if (priv) {
+ if (priv->handle) {
+ nvmap_usecount_dec(priv->handle);
+ BUG_ON(priv->handle->usecount < 0);
+ }
+ if (!atomic_dec_return(&priv->count)) {
+ if (priv->handle)
+ nvmap_handle_put(priv->handle);
+ kfree(priv);
+ }
}
-
vma->vm_private_data = NULL;
}
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index 44f55b3f59ba..f01149dfabe4 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -81,6 +81,7 @@ void _nvmap_handle_free(struct nvmap_handle *h)
goto out;
if (!h->heap_pgalloc) {
+ nvmap_usecount_inc(h);
nvmap_heap_free(h->carveout);
goto out;
}
@@ -199,19 +200,23 @@ static void alloc_handle(struct nvmap_client *client, size_t align,
struct nvmap_handle *h, unsigned int type)
{
BUG_ON(type & (type - 1));
-
if (type & NVMAP_HEAP_CARVEOUT_MASK) {
struct nvmap_heap_block *b;
+
+ /* Protect handle from relocation */
+ nvmap_usecount_inc(h);
+
b = nvmap_carveout_alloc(client, h->size, align,
- type, h->flags);
+ type, h->flags, h);
if (b) {
- h->carveout = b;
h->heap_pgalloc = false;
h->alloc = true;
nvmap_carveout_commit_add(client,
nvmap_heap_to_arg(nvmap_block_to_heap(b)),
h->size);
}
+ nvmap_usecount_dec(h);
+
} else if (type & NVMAP_HEAP_IOVMM) {
size_t reserved = PAGE_ALIGN(h->size);
int commit;
@@ -365,10 +370,13 @@ void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
atomic_sub(h->size, &client->iovm_commit);
- if (h->alloc && !h->heap_pgalloc)
+ if (h->alloc && !h->heap_pgalloc) {
+ mutex_lock(&h->lock);
nvmap_carveout_commit_subtract(client,
- nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
- h->size);
+ nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
+ h->size);
+ mutex_unlock(&h->lock);
+ }
nvmap_ref_unlock(client);
@@ -379,8 +387,10 @@ void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
while (pins--)
nvmap_unpin_handles(client, &ref->handle, 1);
+ mutex_lock(&h->lock);
if (h->owner == client)
h->owner = NULL;
+ mutex_unlock(&h->lock);
kfree(ref);
@@ -505,10 +515,13 @@ struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
return ERR_PTR(-ENOMEM);
}
- if (!h->heap_pgalloc)
+ if (!h->heap_pgalloc) {
+ mutex_lock(&h->lock);
nvmap_carveout_commit_add(client,
nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
h->size);
+ mutex_unlock(&h->lock);
+ }
atomic_set(&ref->dupes, 1);
ref->handle = h;
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.c b/drivers/video/tegra/nvmap/nvmap_heap.c
index abc72cc99720..c8355684f1f9 100644
--- a/drivers/video/tegra/nvmap/nvmap_heap.c
+++ b/drivers/video/tegra/nvmap/nvmap_heap.c
@@ -26,11 +26,15 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/err.h>
#include <mach/nvmap.h>
-
+#include "nvmap.h"
#include "nvmap_heap.h"
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
/*
* "carveouts" are platform-defined regions of physically contiguous memory
* which are not managed by the OS. a platform may specify multiple carveouts,
@@ -53,7 +57,7 @@
* TOP_DOWN in the code below). like "normal" allocations, each allocation
* is rounded up to be an integer multiple of the "small" allocation size.
*
- * o "small" allocations are treatedy differently: the heap manager maintains
+ * o "small" allocations are treated differently: the heap manager maintains
* a pool of "small"-sized blocks internally from which allocations less
* than 1/2 of the "small" size are buddy-allocated. if a "small" allocation
* is requested and none of the buddy sub-heaps is able to service it,
@@ -75,6 +79,7 @@ enum direction {
enum block_type {
BLOCK_FIRST_FIT, /* block was allocated directly from the heap */
BLOCK_BUDDY, /* block was allocated from a buddy sub-heap */
+ BLOCK_EMPTY,
};
struct heap_stat {
@@ -84,6 +89,10 @@ struct heap_stat {
size_t total; /* total size */
size_t largest; /* largest unique block */
size_t count; /* total number of blocks */
+ /* fast compaction attempt counter */
+ unsigned int compaction_count_fast;
+ /* full compaction attempt counter */
+ unsigned int compaction_count_full;
};
struct buddy_heap;
@@ -99,6 +108,7 @@ struct list_block {
unsigned int mem_prot;
unsigned long orig_addr;
size_t size;
+ size_t align;
struct nvmap_heap *heap;
struct list_head free_list;
};
@@ -289,7 +299,7 @@ static ssize_t heap_stat_show(struct device *dev,
else
return -EINVAL;
}
-
+#ifndef CONFIG_NVMAP_CARVEOUT_COMPACTOR
static struct nvmap_heap_block *buddy_alloc(struct buddy_heap *heap,
size_t size, size_t align,
unsigned int mem_prot)
@@ -342,6 +352,7 @@ static struct nvmap_heap_block *buddy_alloc(struct buddy_heap *heap,
b->block.type = BLOCK_BUDDY;
return &b->block;
}
+#endif
static struct buddy_heap *do_buddy_free(struct nvmap_heap_block *block)
{
@@ -371,9 +382,15 @@ static struct buddy_heap *do_buddy_free(struct nvmap_heap_block *block)
return NULL;
}
+
+/*
+ * base_max limits the position of the allocated chunk in memory;
+ * if base_max is 0, there is no such limit.
+ */
static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
size_t len, size_t align,
- unsigned int mem_prot)
+ unsigned int mem_prot,
+ unsigned long base_max)
{
struct list_block *b = NULL;
struct list_block *i = NULL;
@@ -392,7 +409,11 @@ static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
len = PAGE_ALIGN(len);
}
+#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+ dir = BOTTOM_UP;
+#else
dir = (len <= heap->small_alloc) ? BOTTOM_UP : TOP_DOWN;
+#endif
if (dir == BOTTOM_UP) {
list_for_each_entry(i, &heap->free_list, free_list) {
@@ -400,6 +421,12 @@ static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
fix_base = ALIGN(i->block.base, align);
fix_size = i->size - (fix_base - i->block.base);
+ /* needed for compaction: a relocated chunk
+ * should never move to a higher address */
+ if (base_max && fix_base > base_max) {
+ break;
+ }
+
if (fix_size >= len) {
b = i;
break;
@@ -421,7 +448,12 @@ static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
if (!b)
return NULL;
+ if (dir == BOTTOM_UP)
+ b->block.type = BLOCK_FIRST_FIT;
+
+ /* split free block */
if (b->block.base != fix_base) {
+ /* insert a new free block before allocated */
rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
if (!rem) {
b->orig_addr = b->block.base;
@@ -430,31 +462,32 @@ static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
goto out;
}
- rem->block.type = BLOCK_FIRST_FIT;
+ rem->block.type = BLOCK_EMPTY;
rem->block.base = b->block.base;
rem->orig_addr = rem->block.base;
rem->size = fix_base - rem->block.base;
b->block.base = fix_base;
b->orig_addr = fix_base;
b->size -= rem->size;
- list_add_tail(&rem->all_list, &heap->all_list);
+ list_add_tail(&rem->all_list, &b->all_list);
list_add_tail(&rem->free_list, &b->free_list);
}
b->orig_addr = b->block.base;
if (b->size > len) {
+ /* insert a new free block after allocated */
rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
if (!rem)
goto out;
- rem->block.type = BLOCK_FIRST_FIT;
+ rem->block.type = BLOCK_EMPTY;
rem->block.base = b->block.base + len;
rem->size = b->size - len;
BUG_ON(rem->size > b->size);
rem->orig_addr = rem->block.base;
b->size = len;
- list_add_tail(&rem->all_list, &heap->all_list);
+ list_add(&rem->all_list, &b->all_list);
list_add(&rem->free_list, &b->free_list);
}
@@ -462,6 +495,7 @@ out:
list_del(&b->free_list);
b->heap = heap;
b->mem_prot = mem_prot;
+ b->align = align;
return &b->block;
}
@@ -485,7 +519,7 @@ static void freelist_debug(struct nvmap_heap *heap, const char *title,
#define freelist_debug(_heap, _title, _token) do { } while (0)
#endif
-static void do_heap_free(struct nvmap_heap_block *block)
+static struct list_block *do_heap_free(struct nvmap_heap_block *block)
{
struct list_block *b = container_of(block, struct list_block, block);
struct list_block *n = NULL;
@@ -497,16 +531,20 @@ static void do_heap_free(struct nvmap_heap_block *block)
freelist_debug(heap, "free list before", b);
+ /* Find the first free block to the right of the freed one */
list_for_each_entry(n, &heap->free_list, free_list) {
if (n->block.base > b->block.base)
break;
}
+ /* Insert the freed block in front of it */
list_add_tail(&b->free_list, &n->free_list);
BUG_ON(list_empty(&b->all_list));
freelist_debug(heap, "free list pre-merge", b);
+ /* merge the freed block with the next one if they are contiguous;
+ * the freed block grows and the next one is destroyed */
if (!list_is_last(&b->free_list, &heap->free_list)) {
n = list_first_entry(&b->free_list, struct list_block, free_list);
if (n->block.base == b->block.base + b->size) {
@@ -518,6 +556,8 @@ static void do_heap_free(struct nvmap_heap_block *block)
}
}
+ /* merge the freed block with the previous one if they are contiguous;
+ * the previous free block grows and the freed one is destroyed */
if (b->free_list.prev != &heap->free_list) {
n = list_entry(b->free_list.prev, struct list_block, free_list);
if (n->block.base + n->size == b->block.base) {
@@ -526,12 +566,17 @@ static void do_heap_free(struct nvmap_heap_block *block)
BUG_ON(n->orig_addr >= b->orig_addr);
n->size += b->size;
kmem_cache_free(block_cache, b);
+ b = n;
}
}
freelist_debug(heap, "free list after", b);
+ b->block.type = BLOCK_EMPTY;
+ return b;
}
+#ifndef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+
static struct nvmap_heap_block *do_buddy_alloc(struct nvmap_heap *h,
size_t len, size_t align,
unsigned int mem_prot)
@@ -551,7 +596,8 @@ static struct nvmap_heap_block *do_buddy_alloc(struct nvmap_heap *h,
if (!bh)
return NULL;
- b = do_heap_alloc(h, h->buddy_heap_size, h->buddy_heap_size, mem_prot);
+ b = do_heap_alloc(h, h->buddy_heap_size,
+ h->buddy_heap_size, mem_prot, 0);
if (!b) {
kmem_cache_free(buddy_heap_cache, bh);
return NULL;
@@ -565,41 +611,300 @@ static struct nvmap_heap_block *do_buddy_alloc(struct nvmap_heap *h,
return buddy_alloc(bh, len, align, mem_prot);
}
+#endif
+
+#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+
+static int do_heap_copy_listblock(struct nvmap_device *dev,
+ unsigned long dst_base, unsigned long src_base, size_t len)
+{
+ pte_t **pte_src = NULL;
+ pte_t **pte_dst = NULL;
+ void *addr_src = NULL;
+ void *addr_dst = NULL;
+ unsigned long kaddr_src;
+ unsigned long kaddr_dst;
+ unsigned long phys_src = src_base;
+ unsigned long phys_dst = dst_base;
+ unsigned long pfn_src;
+ unsigned long pfn_dst;
+ int error = 0;
+
+ pgprot_t prot = pgprot_writecombine(pgprot_kernel);
+
+ int page;
+
+ pte_src = nvmap_alloc_pte(dev, &addr_src);
+ if (IS_ERR(pte_src)) {
+ pr_err("Error when allocating pte_src\n");
+ pte_src = NULL;
+ error = -1;
+ goto fail;
+ }
+
+ pte_dst = nvmap_alloc_pte(dev, &addr_dst);
+ if (IS_ERR(pte_dst)) {
+ pr_err("Error while allocating pte_dst\n");
+ pte_dst = NULL;
+ error = -1;
+ goto fail;
+ }
+
+ kaddr_src = (unsigned long)addr_src;
+ kaddr_dst = (unsigned long)addr_dst;
+
+ BUG_ON(phys_dst > phys_src);
+ BUG_ON((phys_src & PAGE_MASK) != phys_src);
+ BUG_ON((phys_dst & PAGE_MASK) != phys_dst);
+ BUG_ON((len & PAGE_MASK) != len);
+
+ for (page = 0; page < (len >> PAGE_SHIFT) ; page++) {
+
+ pfn_src = __phys_to_pfn(phys_src) + page;
+ pfn_dst = __phys_to_pfn(phys_dst) + page;
+
+ set_pte_at(&init_mm, kaddr_src, *pte_src,
+ pfn_pte(pfn_src, prot));
+ flush_tlb_kernel_page(kaddr_src);
+
+ set_pte_at(&init_mm, kaddr_dst, *pte_dst,
+ pfn_pte(pfn_dst, prot));
+ flush_tlb_kernel_page(kaddr_dst);
+
+ memcpy(addr_dst, addr_src, PAGE_SIZE);
+ }
+
+fail:
+ if (pte_src)
+ nvmap_free_pte(dev, pte_src);
+ if (pte_dst)
+ nvmap_free_pte(dev, pte_dst);
+ return error;
+}
+
+
+static struct nvmap_heap_block *do_heap_relocate_listblock(
+ struct list_block *block, bool fast)
+{
+ struct nvmap_heap_block *heap_block = &block->block;
+ struct nvmap_heap_block *heap_block_new = NULL;
+ struct nvmap_heap *heap = block->heap;
+ struct nvmap_handle *handle = heap_block->handle;
+ unsigned long src_base = heap_block->base;
+ unsigned long dst_base;
+ size_t src_size = block->size;
+ size_t src_align = block->align;
+ unsigned int src_prot = block->mem_prot;
+ int error = 0;
+
+ if (!handle) {
+ pr_err("INVALID HANDLE!\n");
+ return NULL;
+ }
+
+ mutex_lock(&handle->lock);
+
+ if (!handle->owner) {
+ mutex_unlock(&handle->lock);
+ return NULL;
+ }
+
+ /* TODO: It is possible to use only the handle lock and no shared
+ * pin_lock, but then we'll need to lock every handle during
+ * each pinning operation. Need to estimate performance impact
+ * if we decide to simplify locking this way. */
+ mutex_lock(&handle->owner->share->pin_lock);
+
+ /* abort if block is pinned */
+ if (atomic_read(&handle->pin))
+ goto fail;
+ /* abort if block is mapped */
+ if (handle->usecount)
+ goto fail;
+
+ if (fast) {
+ /* Fast compaction path - first allocate, then free. */
+ heap_block_new = do_heap_alloc(heap, src_size, src_align,
+ src_prot, src_base);
+ if (heap_block_new)
+ do_heap_free(heap_block);
+ else
+ goto fail;
+ } else {
+ /* Full compaction path: first free, then allocate.
+ * It is slower but provides the best compaction results */
+ do_heap_free(heap_block);
+ heap_block_new = do_heap_alloc(heap, src_size, src_align,
+ src_prot, src_base);
+ /* Allocation should always succeed */
+ BUG_ON(!heap_block_new);
+ }
+
+ /* update handle */
+ handle->carveout = heap_block_new;
+ heap_block_new->handle = handle;
+
+ /* copy source data to new block location */
+ dst_base = heap_block_new->base;
+
+ /* the new allocation should always go to a lower address */
+ BUG_ON(dst_base >= src_base);
+
+ error = do_heap_copy_listblock(handle->dev,
+ dst_base, src_base, src_size);
+ BUG_ON(error);
+
+fail:
+ mutex_unlock(&handle->owner->share->pin_lock);
+ mutex_unlock(&handle->lock);
+ return heap_block_new;
+}
+
+static void nvmap_heap_compact(struct nvmap_heap *heap,
+ size_t requested_size, bool fast)
+{
+ struct list_block *block_current = NULL;
+ struct list_block *block_prev = NULL;
+ struct list_block *block_next = NULL;
+
+ struct list_head *ptr, *ptr_prev, *ptr_next;
+ int relocation_count = 0;
+
+ ptr = heap->all_list.next;
+
+ /* walk through all blocks */
+ while (ptr != &heap->all_list) {
+ block_current = list_entry(ptr, struct list_block, all_list);
+
+ ptr_prev = ptr->prev;
+ ptr_next = ptr->next;
+
+ if (block_current->block.type != BLOCK_EMPTY) {
+ ptr = ptr_next;
+ continue;
+ }
+
+ if (fast && block_current->size >= requested_size)
+ break;
+
+ /* relocate prev block */
+ if (ptr_prev != &heap->all_list) {
+
+ block_prev = list_entry(ptr_prev,
+ struct list_block, all_list);
+
+ BUG_ON(block_prev->block.type != BLOCK_FIRST_FIT);
+
+ if (do_heap_relocate_listblock(block_prev, true)) {
+
+ /* After relocation the current free block may have been
+ * merged into the previous free block and destroyed; the
+ * pointer to the resulting free block can be recovered
+ * from the next block */
+ relocation_count++;
+ ptr = ptr_next->prev;
+ continue;
+ }
+ }
+
+ if (ptr_next != &heap->all_list) {
+
+ block_next = list_entry(ptr_next,
+ struct list_block, all_list);
+
+ BUG_ON(block_next->block.type != BLOCK_FIRST_FIT);
+
+ if (do_heap_relocate_listblock(block_next, fast)) {
+ ptr = ptr_prev->next;
+ relocation_count++;
+ continue;
+ }
+ }
+ ptr = ptr_next;
+ }
+ pr_err("Relocated %d chunks\n", relocation_count);
+}
+#endif
+
+void nvmap_usecount_inc(struct nvmap_handle *h)
+{
+ if (h->alloc && !h->heap_pgalloc) {
+ mutex_lock(&h->lock);
+ h->usecount++;
+ mutex_unlock(&h->lock);
+ } else {
+ h->usecount++;
+ }
+}
+
+
+void nvmap_usecount_dec(struct nvmap_handle *h)
+{
+ h->usecount--;
+}
+
/* nvmap_heap_alloc: allocates a block of memory of len bytes, aligned to
* align bytes. */
struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h, size_t len,
- size_t align, unsigned int prot)
+ size_t align, unsigned int prot,
+ struct nvmap_handle *handle)
{
struct nvmap_heap_block *b;
mutex_lock(&h->lock);
+
+#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+ /* Align to page size */
+ align = ALIGN(align, PAGE_SIZE);
+ len = ALIGN(len, PAGE_SIZE);
+ b = do_heap_alloc(h, len, align, prot, 0);
+ if (!b) {
+ pr_err("Compaction triggered!\n");
+ nvmap_heap_compact(h, len, true);
+ b = do_heap_alloc(h, len, align, prot, 0);
+ if (!b) {
+ pr_err("Full compaction triggered!\n");
+ nvmap_heap_compact(h, len, false);
+ b = do_heap_alloc(h, len, align, prot, 0);
+ }
+ }
+#else
if (len <= h->buddy_heap_size / 2) {
b = do_buddy_alloc(h, len, align, prot);
} else {
if (h->buddy_heap_size)
len = ALIGN(len, h->buddy_heap_size);
align = max(align, (size_t)L1_CACHE_BYTES);
- b = do_heap_alloc(h, len, align, prot);
+ b = do_heap_alloc(h, len, align, prot, 0);
+ }
+#endif
+
+ if (b) {
+ b->handle = handle;
+ handle->carveout = b;
}
mutex_unlock(&h->lock);
return b;
}
-/* nvmap_heap_free: frees block b*/
-void nvmap_heap_free(struct nvmap_heap_block *b)
+struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b)
{
- struct buddy_heap *bh = NULL;
- struct nvmap_heap *h;
-
if (b->type == BLOCK_BUDDY) {
struct buddy_block *bb;
bb = container_of(b, struct buddy_block, block);
- h = bb->heap->heap_base->heap;
+ return parent_of(bb->heap);
} else {
struct list_block *lb;
lb = container_of(b, struct list_block, block);
- h = lb->heap;
+ return lb->heap;
}
+}
+
+/* nvmap_heap_free: frees block b*/
+void nvmap_heap_free(struct nvmap_heap_block *b)
+{
+ struct buddy_heap *bh = NULL;
+ struct nvmap_heap *h = nvmap_block_to_heap(b);
mutex_lock(&h->lock);
if (b->type == BLOCK_BUDDY)
@@ -616,18 +921,6 @@ void nvmap_heap_free(struct nvmap_heap_block *b)
mutex_unlock(&h->lock);
}
-struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b)
-{
- if (b->type == BLOCK_BUDDY) {
- struct buddy_block *bb;
- bb = container_of(b, struct buddy_block, block);
- return parent_of(bb->heap);
- } else {
- struct list_block *lb;
- lb = container_of(b, struct list_block, block);
- return lb->heap;
- }
-}
static void heap_release(struct device *heap)
{
@@ -712,7 +1005,7 @@ struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
INIT_LIST_HEAD(&h->all_list);
mutex_init(&h->lock);
l->block.base = base;
- l->block.type = BLOCK_FIRST_FIT;
+ l->block.type = BLOCK_EMPTY;
l->size = len;
l->orig_addr = base;
list_add_tail(&l->free_list, &h->free_list);
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.h b/drivers/video/tegra/nvmap/nvmap_heap.h
index 40ee4ba02cb2..39539762175b 100644
--- a/drivers/video/tegra/nvmap/nvmap_heap.h
+++ b/drivers/video/tegra/nvmap/nvmap_heap.h
@@ -30,6 +30,7 @@ struct attribute_group;
struct nvmap_heap_block {
unsigned long base;
unsigned int type;
+ struct nvmap_handle *handle;
};
#define NVMAP_HEAP_MIN_BUDDY_SIZE 8192
@@ -45,7 +46,8 @@ void *nvmap_heap_device_to_arg(struct device *dev);
void *nvmap_heap_to_arg(struct nvmap_heap *heap);
struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *heap, size_t len,
- size_t align, unsigned int prot);
+ size_t align, unsigned int prot,
+ struct nvmap_handle *handle);
struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b);
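The ioctl changes that follow all apply one pattern: take a use count on the handle before dereferencing h->carveout, so the compactor treats the block as mapped and will not relocate it mid-access, then drop the count when done. A minimal sketch of that pattern (hypothetical caller; touch_carveout() is a placeholder, error handling omitted):

	/* Sketch: guard a carveout access against relocation. */
	nvmap_usecount_inc(h);			/* compactor now skips this block */
	touch_carveout(h->carveout->base);	/* hypothetical access */
	nvmap_usecount_dec(h);			/* block is movable again */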
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.c b/drivers/video/tegra/nvmap/nvmap_ioctl.c
index 5e1392e56567..886f0ee252ad 100755
--- a/drivers/video/tegra/nvmap/nvmap_ioctl.c
+++ b/drivers/video/tegra/nvmap/nvmap_ioctl.c
@@ -281,7 +281,10 @@ int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
goto out;
}
+ nvmap_usecount_inc(h);
+
if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
+ nvmap_usecount_dec(h);
err = -EFAULT;
goto out;
}
@@ -293,6 +296,7 @@ int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
out:
up_read(&current->mm->mmap_sem);
+
if (err)
nvmap_handle_put(h);
return err;
@@ -317,6 +321,7 @@ int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
op.result = h->orig_size;
break;
case NVMAP_HANDLE_PARAM_ALIGNMENT:
+ mutex_lock(&h->lock);
if (!h->alloc)
op.result = 0;
else if (h->heap_pgalloc)
@@ -325,12 +330,16 @@ int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
op.result = (h->carveout->base & -h->carveout->base);
else
op.result = SZ_4M;
+ mutex_unlock(&h->lock);
break;
case NVMAP_HANDLE_PARAM_BASE:
if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
op.result = -1ul;
- else if (!h->heap_pgalloc)
+ else if (!h->heap_pgalloc) {
+ mutex_lock(&h->lock);
op.result = h->carveout->base;
+ mutex_unlock(&h->lock);
+ }
else if (h->pgalloc.contig)
op.result = page_to_phys(h->pgalloc.pages[0]);
else if (h->pgalloc.area)
@@ -341,8 +350,11 @@ int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
case NVMAP_HANDLE_PARAM_HEAP:
if (!h->alloc)
op.result = 0;
- else if (!h->heap_pgalloc)
+ else if (!h->heap_pgalloc) {
+ mutex_lock(&h->lock);
op.result = nvmap_carveout_usage(client, h->carveout);
+ mutex_unlock(&h->lock);
+ }
else if (h->pgalloc.contig)
op.result = NVMAP_HEAP_SYSMEM;
else
@@ -379,6 +391,8 @@ int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user* arg)
if (!h)
return -EPERM;
+ nvmap_usecount_inc(h);
+
copied = rw_handle(client, h, is_read, op.offset,
(unsigned long)op.addr, op.hmem_stride,
op.user_stride, op.elem_size, op.count);
@@ -391,6 +405,8 @@ int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user* arg)
__put_user(copied, &uarg->count);
+ nvmap_usecount_dec(h);
+
nvmap_handle_put(h);
return err;
@@ -507,6 +523,9 @@ static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
return -EINVAL;
}
+ /* lock carveout from relocation by mapcount */
+ nvmap_usecount_inc(h);
+
start += h->carveout->base;
end += h->carveout->base;
@@ -531,6 +550,8 @@ static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
else
outer_inv_range(start, end);
}
+ /* unlock carveout */
+ nvmap_usecount_dec(h);
out:
if (pte)