Diffstat (limited to 'arch/arc')
-rw-r--r--  arch/arc/include/asm/cacheflush.h   1
-rw-r--r--  arch/arc/mm/cache_arc700.c          9
-rw-r--r--  arch/arc/mm/tlb.c                  18
3 files changed, 21 insertions(+), 7 deletions(-)
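
Taken together, the three hunks below make the D-cache flush lazy: flush_dcache_page() now only records "this page is dirty in the D-cache" via the arch-private PG_arch_1 page flag, and the actual writeback+invalidate is deferred to update_mmu_cache(), i.e. until the page is actually wired into a userspace mapping. A minimal userspace model of that handshake (hypothetical model_* names; a C11 atomic stands in for the kernel's set_bit()/test_and_clear_bit()):

#include <stdatomic.h>
#include <stdio.h>

/* stand-in for the PG_arch_1 bit in page->flags: "dcache not yet flushed" */
static atomic_int pg_dcache_dirty;

static void model_flush_dcache_page(void)
{
        /* cheap path: just note that the kernel dirtied the page */
        atomic_store(&pg_dcache_dirty, 1);
}

static void model_update_mmu_cache(void)
{
        /* expensive cache ops run at fault time, and only if still pending */
        if (atomic_exchange(&pg_dcache_dirty, 0))
                puts("wback+inv dcache page, then inv icache page");
        else
                puts("already coherent, nothing to do");
}

int main(void)
{
        model_flush_dcache_page();      /* e.g. kernel writes to the page */
        model_update_mmu_cache();       /* first user fault: does the flush */
        model_update_mmu_cache();       /* later fault: no-op */
        return 0;
}

The payoff is that a page which is dirtied repeatedly, or never mapped into userspace at all, no longer pays for a flush on every flush_dcache_page() call.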
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index 65ed8d2d4597..ee1f6eae82d2 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -33,6 +33,7 @@ void flush_cache_all(void);
void flush_icache_range(unsigned long start, unsigned long end);
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
+void __flush_dcache_page(unsigned long paddr);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 3a9ef63b1a97..c854cf95f706 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -457,10 +457,10 @@ static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
* Exported APIs
*/
-/* TBD: use pg_arch_1 to optimize this */
void flush_dcache_page(struct page *page)
{
- __dc_line_op((unsigned long)page_address(page), PAGE_SIZE, OP_FLUSH);
+ /* Make a note that dcache is not yet flushed for this page */
+ set_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
@@ -570,6 +570,11 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}
+void __flush_dcache_page(unsigned long paddr)
+{
+ __dc_line_op(paddr, PAGE_SIZE, OP_FLUSH_N_INV);
+}
+
void flush_icache_all(void)
{
unsigned long flags;
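
Two details are worth noting in the hunks above: the old flush_dcache_page() issued OP_FLUSH (writeback only) over the page's kernel mapping, while the new deferred helper __flush_dcache_page() uses OP_FLUSH_N_INV (writeback and invalidate, matching the "wback+inv" wording in the tlb.c comment below). Schematically, a page-wide line op is a loop over cache lines; a hedged sketch of what that amounts to (assumed 64-byte line size, hypothetical model_* names, not the real register-level sequence inside __dc_line_op()):

#include <stdio.h>

enum { MODEL_PAGE_SIZE = 4096, MODEL_LINE_SIZE = 64 /* assumed */ };

/* stub for the per-line "writeback + invalidate" cache operation */
static void model_line_flush_n_inv(unsigned long paddr)
{
        printf("wback+inv line @ %#lx\n", paddr);
}

/* OP_FLUSH_N_INV over one page: one line op per cache line it covers */
static void model_dc_page_flush_n_inv(unsigned long paddr)
{
        unsigned long end = paddr + MODEL_PAGE_SIZE;

        for (; paddr < end; paddr += MODEL_LINE_SIZE)
                model_line_flush_n_inv(paddr);
}

int main(void)
{
        model_dc_page_flush_n_inv(0x80000000UL);
        return 0;
}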
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 086be526072a..003d69ac6ffa 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -418,9 +418,10 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
local_irq_restore(flags);
}
-/* arch hook called by core VM at the end of handle_mm_fault( ),
- * when a new PTE is entered in Page Tables or an existing one
- * is modified. We aggresively pre-install a TLB entry
+/*
+ * Called at the end of pagefault, for a userspace mapped page
+ * -pre-install the corresponding TLB entry into MMU
+ * -Finalize the delayed D-cache flush (wback+inv kernel mapping)
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
pte_t *ptep)
@@ -431,8 +432,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
/* icache doesn't snoop dcache, thus needs to be made coherent here */
if (vma->vm_flags & VM_EXEC) {
- unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
- __inv_icache_page(paddr, vaddr);
+ struct page *page = pfn_to_page(pte_pfn(*ptep));
+
+ /* if page was dcache dirty, flush now */
+ int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
+ if (dirty) {
+ unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
+ __flush_dcache_page(paddr);
+ __inv_icache_page(paddr, vaddr);
+ }
}
}
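
The ordering in the fault path matters: the D-cache writeback+invalidate runs first so the page's bytes reach memory, and only then is the I-cache invalidated so the CPU refetches fresh instructions (the I-cache doesn't snoop the D-cache, per the comment in the hunk). Guarding the whole thing with VM_EXEC fits a non-aliasing VIPT D-cache, where userspace data reads hit the same lines the kernel wrote, so the deferred flush is only needed for I-cache coherence. The choice of test_and_clear_bit() over a separate test and clear is also deliberate: the atomic read-modify-write guarantees that if two CPUs fault on the same executable page concurrently, exactly one performs the flush. A userspace model of that property (pthreads; a C11 atomic exchange stands in for the kernel's test_and_clear_bit()):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pg_arch_1 = 1;        /* page starts dcache-dirty */
static atomic_int flush_count;

/* model of update_mmu_cache()'s dirty check, racing on two "CPUs" */
static void *fault_path(void *unused)
{
        (void)unused;
        if (atomic_exchange(&pg_arch_1, 0))     /* test_and_clear_bit() */
                atomic_fetch_add(&flush_count, 1);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, fault_path, NULL);
        pthread_create(&b, NULL, fault_path, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("flushes performed: %d (always exactly 1)\n",
               atomic_load(&flush_count));
        return 0;
}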