diff options
author | vdumpa <vdumpa@nvidia.com> | 2010-10-29 11:37:42 -0700 |
---|---|---|
committer | Varun Colbert <vcolbert@nvidia.com> | 2010-11-24 15:18:45 -0800 |
commit | 05074ac6ce18a61467ad6d5dd5e53d56725c8d58 (patch) | |
tree | d71845194cb37ea94937ba0855cebbb440f41861 /drivers | |
parent | f2b1e18bc44cd6bf8e76d00a5f0fcb0d2b2e0995 (diff) |
video: tegra: nvmap: Clean whole L1 instead of VA cleaning
For large allocations, cleaning each page of the allocation can
take a significant amount of time. If an allocation that nvmap needs
to clean or invalidate out of the cache is significantly larger than
the cache, just flush the entire cache.
bug 711478 and bug 744221
Revert "video: tegra: nvmap: perform cache maintenance for rw_handle"
This reverts commit d963f09c3ebb2d690d266f8f607b4876acaf2ab1.
Reviewed-on: http://git-master/r/10213
(cherry picked from commit 6469f378a596f0572035cd27a17851ea86b763c9)
Change-Id: I1e514f505db860eb01d575a98f0f80c8794e8463
Reviewed-on: http://git-master/r/10497
Reviewed-by: Eric Werness <ewerness@nvidia.com>
Reviewed-by: Markus Holtmanns <mholtmanns@nvidia.com>
Tested-by: Markus Holtmanns <mholtmanns@nvidia.com>
Reviewed-by: Janne Hellsten <jhellsten@nvidia.com>
Reviewed-by: Varun Colbert <vcolbert@nvidia.com>
Tested-by: Varun Colbert <vcolbert@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/video/tegra/nvmap.c | 26 |
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/drivers/video/tegra/nvmap.c b/drivers/video/tegra/nvmap.c
index f260e1cb9573..ce0da9904afe 100644
--- a/drivers/video/tegra/nvmap.c
+++ b/drivers/video/tegra/nvmap.c
@@ -149,6 +149,9 @@
 static struct rb_root nvmap_handles = RB_ROOT;
 static struct tegra_iovmm_client *nvmap_vm_client = NULL;

+extern void v7_flush_kern_cache_all(void);
+extern void v7_clean_kern_cache_all(void);
+
 /* default heap order policy */
 static unsigned int _nvmap_heap_policy (unsigned int heaps, int numpages)
 {
@@ -2831,6 +2834,16 @@ static int _nvmap_do_cache_maint(struct nvmap_handle *h,
 		outer_maint = NULL;
 	}

+	if (end - start > PAGE_SIZE * 3) {
+		if (op == NVMEM_CACHE_OP_WB) {
+			v7_clean_kern_cache_all();
+			inner_maint = NULL;
+		} else if (op == NVMEM_CACHE_OP_WB_INV) {
+			v7_flush_kern_cache_all();
+			inner_maint = NULL;
+		}
+	}
+
 	prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);

 	if (h->alloc && !h->heap_pgalloc) {
@@ -2839,7 +2852,7 @@ static int _nvmap_do_cache_maint(struct nvmap_handle *h,
 		spin_unlock(&h->carveout.co_heap->lock);
 	}

-	while (start < end) {
+	while (start < end && (inner_maint || outer_maint)) {
 		struct page *page = NULL;
 		unsigned long phys;
 		void *src;
@@ -2868,7 +2881,7 @@ static int _nvmap_do_cache_maint(struct nvmap_handle *h,
 		src = addr + (phys & ~PAGE_MASK);
 		count = min_t(size_t, end-start, PAGE_SIZE-(phys&~PAGE_MASK));

-		inner_maint(src, src+count);
+		if (inner_maint) inner_maint(src, src+count);
 		if (outer_maint) outer_maint(phys, phys+count);
 		start += count;
 		if (page) put_page(page);
@@ -3013,19 +3026,12 @@ static ssize_t _nvmap_do_rw_handle(struct nvmap_handle *h, int is_read,
 	}

 	while (count--) {
-		size_t ret;
-		if (is_read)
-			_nvmap_do_cache_maint(h, h_offs, h_offs + elem_size,
-				NVMEM_CACHE_OP_INV, false);
-		ret = _nvmap_do_one_rw_handle(h, is_read,
+		size_t ret = _nvmap_do_one_rw_handle(h, is_read,
 			is_user, h_offs, sys_addr, elem_size, &addr);
 		if (ret < 0) {
 			if (!bytes_copied)
 				bytes_copied = ret;
 			break;
 		}
-		if (!is_read)
-			_nvmap_do_cache_maint(h, h_offs, h_offs + ret,
-				NVMEM_CACHE_OP_WB, false);
 		bytes_copied += ret;
 		if (ret < elem_size)
 			break;
 		sys_addr += sys_stride;