author     Krishna Reddy <vdumpa@nvidia.com>      2011-07-06 14:57:44 -0700
committer  Varun Colbert <vcolbert@nvidia.com>    2011-07-22 17:42:16 -0700
commit     5e4c3d2eaaa52475c929887c574f950d514bfff1 (patch)
tree       698cf830c683c984a16cdf0feee1b55ce64c5cd9 /drivers
parent     780d0b2662c0f70e358a535ec4fffea4881771c6 (diff)
video: tegra: nvmap: Fix cache flush issue during page alloc.
Bug 39790

Change-Id: I5ce0e35501442ed1a6818aebfeae1670ebb9d08d
Reviewed-on: http://git-master/r/39867
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Tested-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-by: Yu-Huan Hsu <yhsu@nvidia.com>
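In short, the patch moves all cache maintenance out of nvmap_alloc_pages_exact() and into handle_page_alloc(), where it runs once every page of the handle has been allocated. The FLUSH_CLEAN_BY_SET_WAY_THRESHOLD check is therefore made against the full handle, and the per-page inner flush plus the outer flush now cover pages that actually exist at flush time. The sketch below paraphrases the added hunk further down and is illustrative only: flush_alloced_pages() is a made-up wrapper, inner_flush_cache_all() and FLUSH_CLEAN_BY_SET_WAY_THRESHOLD are nvmap/Tegra internals referenced by the patch, and __flush_dcache_page() is the ARM helper the driver pulls in via an extern prototype.

#include <linux/mm.h>           /* struct page, PAGE_SIZE, page_mapping(), page_to_phys() */
#include <asm/outercache.h>     /* outer_flush_range() on ARM */

/* Not exported through a header; nvmap_handle.c declares it like this. */
extern void __flush_dcache_page(struct address_space *, struct page *);

/*
 * Illustrative helper (not in the driver): flush the CPU caches for the
 * pages backing one nvmap handle, after all of them have been allocated.
 * FLUSH_CLEAN_BY_SET_WAY_THRESHOLD and inner_flush_cache_all() are
 * driver-internal and assumed to be visible here.
 */
static void flush_alloced_pages(struct page **pages, unsigned int nr_page,
                                size_t size)
{
        unsigned long base;
        unsigned int i;
        bool flush_inner = true;

        /* For large handles a single set/way flush of the whole inner
         * cache is cheaper than flushing every page individually. */
        if (size >= FLUSH_CLEAN_BY_SET_WAY_THRESHOLD) {
                inner_flush_cache_all();
                flush_inner = false;
        }

        for (i = 0; i < nr_page; i++) {
                if (flush_inner)
                        __flush_dcache_page(page_mapping(pages[i]),
                                            pages[i]);

                /* The outer (L2) cache is maintained by physical
                 * address range, one page at a time. */
                base = page_to_phys(pages[i]);
                outer_flush_range(base, base + PAGE_SIZE);
        }
}

The ordering appears to be the point of the fix: the removed code ran inner_flush_cache_all() before the pages were allocated, so lines for those pages could be dirtied again before the handle was returned, whereas flushing after the allocation loop covers exactly the pages handed back.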
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_handle.c | 35
1 file changed, 16 insertions, 19 deletions
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index 21f50b1c98b0..0e916132e47f 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -118,12 +118,10 @@ out:
extern void __flush_dcache_page(struct address_space *, struct page *);
-static struct page *nvmap_alloc_pages_exact(gfp_t gfp,
- size_t size, bool flush_inner)
+static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
struct page *page, *p, *e;
unsigned int order;
- unsigned long base;
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -133,19 +131,10 @@ static struct page *nvmap_alloc_pages_exact(gfp_t gfp,
return NULL;
split_page(page, order);
-
e = page + (1 << order);
for (p = page + (size >> PAGE_SHIFT); p < e; p++)
__free_page(p);
- e = page + (size >> PAGE_SHIFT);
- if (flush_inner) {
- for (p = page; p < e; p++)
- __flush_dcache_page(page_mapping(p), p);
- }
-
- base = page_to_phys(page);
- outer_flush_range(base, base + size);
return page;
}
@@ -158,6 +147,7 @@ static int handle_page_alloc(struct nvmap_client *client,
unsigned int i = 0;
struct page **pages;
bool flush_inner = true;
+ unsigned long base;
pages = altalloc(nr_page * sizeof(*pages));
if (!pages)
@@ -170,14 +160,10 @@ static int handle_page_alloc(struct nvmap_client *client,
contiguous = true;
#endif
- if (size >= FLUSH_CLEAN_BY_SET_WAY_THRESHOLD) {
- inner_flush_cache_all();
- flush_inner = false;
- }
h->pgalloc.area = NULL;
if (contiguous) {
struct page *page;
- page = nvmap_alloc_pages_exact(GFP_NVMAP, size, flush_inner);
+ page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
if (!page)
goto fail;
@@ -186,8 +172,8 @@ static int handle_page_alloc(struct nvmap_client *client,
} else {
for (i = 0; i < nr_page; i++) {
- pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP, PAGE_SIZE,
- flush_inner);
+ pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP,
+ PAGE_SIZE);
if (!pages[i])
goto fail;
}
@@ -203,6 +189,17 @@ static int handle_page_alloc(struct nvmap_client *client,
#endif
}
+ /* Flush the cache for allocated pages */
+ if (size >= FLUSH_CLEAN_BY_SET_WAY_THRESHOLD) {
+ inner_flush_cache_all();
+ flush_inner = false;
+ }
+ for (i = 0; i < nr_page; i++) {
+ if (flush_inner)
+ __flush_dcache_page(page_mapping(pages[i]), pages[i]);
+ base = page_to_phys(pages[i]);
+ outer_flush_range(base, base + PAGE_SIZE);
+ }
h->size = size;
h->pgalloc.pages = pages;