author	Catalin Marinas <catalin.marinas@arm.com>	2015-03-11 12:20:39 +0000
committer	Sasha Levin <sasha.levin@oracle.com>	2015-03-28 09:22:42 -0400
commit	e9ab6db096e94d0e347ba5847022bfa1e80c2dca (patch)
tree	84a01987682c7fef0a31050bd37f8ed880e9a1f0 /arch
parent	bf91097cd544111aa767482cc88bd46b67700046 (diff)
arm64: Invalidate the TLB corresponding to intermediate page table levels
[ Upstream commit 285994a62c80f1d72c6924282bcb59608098d5ec ]

The ARM architecture allows the caching of intermediate page table
levels and page table freeing requires a sequence like:

	pmd_clear()
	TLB invalidation
	pte page freeing

With commit 5e5f6dc10546 (arm64: mm: enable HAVE_RCU_TABLE_FREE logic),
the page table freeing batching was moved from tlb_remove_page() to
tlb_remove_table(). The former takes care of TLB invalidation as this
is also shared with pte clearing and page cache page freeing. The
latter, however, does not invalidate the TLBs for intermediate page
table levels as it probably relies on the architecture code to do it
if required. When mm->mm_users < 2, tlb_remove_table() does not do any
batching and page table pages are freed before tlb_finish_mmu() which
performs the actual TLB invalidation.

This patch introduces __flush_tlb_pgtable() for arm64 and calls it
directly from {pte,pmd,pud}_free_tlb(), without relying on deferred
page table freeing.

Fixes: 5e5f6dc10546 ("arm64: mm: enable HAVE_RCU_TABLE_FREE logic")
Reported-by: Jon Masters <jcm@redhat.com>
Tested-by: Jon Masters <jcm@redhat.com>
Tested-by: Steve Capper <steve.capper@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
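An aside for illustration (not part of the commit message): a minimal,
self-contained C sketch of the ordering constraint described above. All
types and helpers below are hypothetical stand-ins for the real mm/TLB
machinery; only the ordering is the point. The walk-cache entry for the
cleared pmd must be invalidated before the pte page it pointed to is
freed, or the hardware walker may still fetch through a stale
intermediate entry into a reused page.

	#include <stdint.h>

	struct mm;                    /* stand-in for struct mm_struct   */
	typedef uint64_t pmd_t;       /* stand-in page-middle-dir entry  */

	static void pmd_clear_entry(pmd_t *pmdp) { *pmdp = 0; }
	static void invalidate_walk_cache(struct mm *mm, uint64_t addr)
	{ (void)mm; (void)addr; /* tlbi + barriers would go here */ }
	static void free_pte_page(void *pte_page) { (void)pte_page; }

	static void pte_table_teardown(struct mm *mm, pmd_t *pmdp,
	                               uint64_t addr, void *pte_page)
	{
		pmd_clear_entry(pmdp);            /* 1. unhook the pte table   */
		invalidate_walk_cache(mm, addr);  /* 2. TLB invalidation       */
		free_pte_page(pte_page);          /* 3. only now free the page */
	}

The bug being fixed is exactly a violation of this order: with
mm->mm_users < 2 the pte page was freed at step 1, with the invalidation
deferred to tlb_finish_mmu().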
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/include/asm/tlb.h	3
-rw-r--r--	arch/arm64/include/asm/tlbflush.h	13
2 files changed, 16 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index c028fe37456f..53d9c354219f 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	pgtable_page_dtor(pte);
 	tlb_remove_entry(tlb, pte);
 }
@@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
@@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 73f0ce570fb3..8b8d8cb46e01 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -149,6 +149,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 }
 
 /*
+ * Used to invalidate the TLB (walk caches) corresponding to intermediate page
+ * table levels (pgd/pud/pmd).
+ */
+static inline void __flush_tlb_pgtable(struct mm_struct *mm,
+				       unsigned long uaddr)
+{
+	unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
+
+	dsb(ishst);
+	asm("tlbi	vae1is, %0" : : "r" (addr));
+	dsb(ish);
+}
+/*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
 static inline void update_mmu_cache(struct vm_area_struct *vma,
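
A closing note on the operand built in __flush_tlb_pgtable() above (my
reading of the ARMv8 TLBI VAE1IS operand layout, not text from the
commit): bits [43:0] of the register carry VA[55:12], i.e. the 4KB page
number, hence uaddr >> 12, and bits [63:48] carry the ASID. A standalone,
compilable sketch with made-up example values:

	#include <stdint.h>
	#include <stdio.h>

	/* Build the TLBI VAE1IS operand: page number in the low bits,
	 * ASID in bits [63:48]. Mirrors the expression in the patch. */
	static uint64_t tlbi_vae1is_operand(uint64_t uaddr, uint16_t asid)
	{
		return (uaddr >> 12) | ((uint64_t)asid << 48);
	}

	int main(void)
	{
		/* Address and ASID below are arbitrary illustrations. */
		uint64_t op = tlbi_vae1is_operand(0x0000007fb7ff1000ULL, 0x42);
		printf("tlbi vae1is operand: 0x%016llx\n",
		       (unsigned long long)op); /* prints 0x0042000007fb7ff1 */
		return 0;
	}

The dsb(ishst) before the tlbi makes the page table update visible to
the walker before the invalidation, and the dsb(ish) after it makes the
broadcast invalidation complete before the page can be freed.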