author		Max Filippov <jcmvbkbc@gmail.com>	2014-07-21 18:54:11 +0400
committer	Max Filippov <jcmvbkbc@gmail.com>	2014-08-14 11:59:20 +0400
commit		a91902db2990909ea5e6b110811b448f2e8f1571 (patch)
tree		133c118a40292c1c4480acaaa1df1add1b70eaf0 /arch/xtensa/mm/cache.c
parent		7128039fe2dd3d59da9e4ffa036f3aaa3ba87b9f (diff)
xtensa: implement clear_user_highpage and copy_user_highpage
Existing clear_user_page and copy_user_page cannot be used with highmem because they calculate the physical page address from its virtual address, and do so incorrectly when a high memory page is mapped with kmap_atomic. kmap is also not needed, as the userspace mapping color will most likely differ from the kmapped color.

Provide clear_user_highpage and copy_user_highpage functions that determine whether a temporary mapping is needed for the pages. Move most of the logic of the former clear_user_page and copy_user_page to xtensa/mm/cache.c, leaving only the temporary mapping setup, invalidation and clearing/copying in xtensa/mm/misc.S. Rename these functions to clear_page_alias and copy_page_alias.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
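
The "color" test the message refers to compares the way-size-aligned low bits of the page's physical address with the same bits of the user virtual address. Below is a minimal, standalone sketch of that check; the constants are illustrative (an 8 KiB cache way and 4 KiB pages are assumed), and the DCACHE_ALIAS_* macro names mirror the xtensa headers rather than reproducing them exactly.

#include <stdio.h>

#define PAGE_SHIFT        12
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define PAGE_MASK         (~(PAGE_SIZE - 1))
#define DCACHE_WAY_SIZE   (2UL * PAGE_SIZE)   /* assumed: way size > page size */
#define DCACHE_ALIAS_MASK (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
#define DCACHE_ALIAS_EQ(a, b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)

int main(void)
{
	unsigned long phys = 0x01234000UL;  /* physical address of the page   */
	unsigned long user = 0x10001000UL;  /* user virtual address ("vaddr") */

	/*
	 * Same color: the kernel can touch the page through its normal
	 * mapping without creating a stale alias.  Different color (as
	 * here): a temporary mapping at the user's color is required.
	 */
	printf("same dcache color: %s\n",
	       DCACHE_ALIAS_EQ(phys, user) ? "yes" : "no");
	return 0;
}

In the patch below the same comparison drives both kmap_invalidate_coherent and coherent_kvaddr: only pages whose kernel alias differs from the user alias need to be invalidated and remapped through TLBTEMP_BASE_1/TLBTEMP_BASE_2.
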
Diffstat (limited to 'arch/xtensa/mm/cache.c')
-rw-r--r--	arch/xtensa/mm/cache.c	63
1 file changed, 63 insertions(+), 0 deletions(-)
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 63cbb867dadd..96aea6624318 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -63,6 +63,69 @@
#error "HIGHMEM is not supported on cores with aliasing cache."
#endif
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+static inline void kmap_invalidate_coherent(struct page *page,
+ unsigned long vaddr)
+{
+ if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
+ unsigned long kvaddr;
+
+ if (!PageHighMem(page)) {
+ kvaddr = (unsigned long)page_to_virt(page);
+
+ __invalidate_dcache_page(kvaddr);
+ } else {
+ kvaddr = TLBTEMP_BASE_1 +
+ (page_to_phys(page) & DCACHE_ALIAS_MASK);
+
+ __invalidate_dcache_page_alias(kvaddr,
+ page_to_phys(page));
+ }
+ }
+}
+
+static inline void *coherent_kvaddr(struct page *page, unsigned long base,
+ unsigned long vaddr, unsigned long *paddr)
+{
+ if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
+ *paddr = page_to_phys(page);
+ return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
+ } else {
+ *paddr = 0;
+ return page_to_virt(page);
+ }
+}
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ unsigned long paddr;
+ void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
+
+ pagefault_disable();
+ kmap_invalidate_coherent(page, vaddr);
+ set_bit(PG_arch_1, &page->flags);
+ clear_page_alias(kvaddr, paddr);
+ pagefault_enable();
+}
+
+void copy_user_highpage(struct page *dst, struct page *src,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ unsigned long dst_paddr, src_paddr;
+ void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
+ &dst_paddr);
+ void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
+ &src_paddr);
+
+ pagefault_disable();
+ kmap_invalidate_coherent(dst, vaddr);
+ set_bit(PG_arch_1, &dst->flags);
+ copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
+ pagefault_enable();
+}
+
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
+
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
/*
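
For contrast, when an architecture does not provide its own clear_user_highpage/copy_user_highpage, the generic helpers in include/linux/highmem.h kmap_atomic the page(s) and call clear_user_page/copy_user_page, which is exactly the path that miscomputes the physical address for highmem pages as described in the commit message. Roughly (paraphrased, not the exact kernel source; guard macros and details vary by version):

static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);

	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}

static inline void copy_user_highpage(struct page *to, struct page *from,
				      unsigned long vaddr,
				      struct vm_area_struct *vma)
{
	char *vfrom = kmap_atomic(from);
	char *vto = kmap_atomic(to);

	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

The arch-specific versions added above avoid that path by mapping the page at a kernel virtual address that shares the user mapping's cache color (TLBTEMP_BASE_1/TLBTEMP_BASE_2 plus the alias offset) whenever the colors differ or the page lives in highmem.
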