Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/include/asm/cacheflush.h |  2 --
-rw-r--r--  arch/parisc/include/asm/page.h       |  3 ++-
-rw-r--r--  arch/parisc/kernel/cache.c           | 14 ++++++++++++++
3 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 2f9b751878ba..de65f66ea64e 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -132,7 +132,6 @@ void mark_rodata_ro(void);
static inline void *kmap(struct page *page)
{
	might_sleep();
-	flush_dcache_page(page);
	return page_address(page);
}
@@ -144,7 +143,6 @@ static inline void kunmap(struct page *page)
static inline void *kmap_atomic(struct page *page)
{
	pagefault_disable();
-	flush_dcache_page(page);
	return page_address(page);
}
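
The two hunks above drop the per-call dcache flush from kmap() and kmap_atomic(); with this change the source page is instead flushed at an alias equivalent to its user mapping inside the new copy_user_page() added below. Reconstructed from the hunks (surrounding context lines are assumed unchanged), the helpers now read:

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);	/* no dcache flush on every kmap() */
}

static inline void *kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);	/* flush of the source page moves into copy_user_page() */
}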
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index c53fc63149e8..637fe031aa84 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -29,7 +29,8 @@ struct page;
void clear_page_asm(void *page);
void copy_page_asm(void *to, void *from);
#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
-#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+		struct page *pg);
/* #define CONFIG_PARISC_TMPALIAS */
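
In page.h, copy_user_page() goes from a macro that aliased copy_page_asm() to an out-of-line function, so the implementation can use vaddr (the user virtual address the page is mapped at) and the struct page. For orientation, here is a sketch of roughly how generic highmem code calls this hook when an architecture does not provide its own copy_user_highpage(); the exact body varies by kernel version and is shown only as an assumed caller, not as part of this patch:

/* Approximate generic caller (include/linux/highmem.h); illustrative only. */
static inline void copy_user_highpage(struct page *to, struct page *from,
				      unsigned long vaddr,
				      struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);	/* resolves to the parisc function below */
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}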
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a72545554a31..ac87a40502e6 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -388,6 +388,20 @@ void flush_kernel_dcache_page_addr(void *addr)
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+		struct page *pg)
+{
+	/* Copy using kernel mapping. No coherency is needed (all in
+	   kunmap) for the `to' page. However, the `from' page needs to
+	   be flushed through a mapping equivalent to the user mapping
+	   before it can be accessed through the kernel mapping. */
+	preempt_disable();
+	flush_dcache_page_asm(__pa(vfrom), vaddr);
+	preempt_enable();
+	copy_page_asm(vto, vfrom);
+}
+EXPORT_SYMBOL(copy_user_page);
+
void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;