summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNitin Garg <nitin.garg@nxp.com>2021-05-06 11:55:22 -0500
committerDenys Drozdov <denys.drozdov@toradex.com>2021-07-15 13:54:56 +0300
commit236620c5b121c73dce83870fabbaa144cb21211d (patch)
tree75333ca6066ac35020defcff2f291fe35da9d36c
parente146ff8ad03421015735f1ea7a48dbd2815b292e (diff)
MLK-23277: 8qm: Fix SW workaround for i.MX8QM TKT340553
The current workaround loops uselessly over the address range issuing a __tlbi(vmalle1is), which is harmful to system performance and buggy: the instruction flushes the entire TLB, so there is no benefit to issuing it more than once. Also fix missing barriers. Signed-off-by: Nitin Garg <nitin.garg@nxp.com> Signed-off-by: Marouen Ghodhbane <marouen.ghodhbane@nxp.com> Reviewed-by: Jason Liu <jason.hui.liu@nxp.com> (cherry picked from commit 5799755f37dd7bc826dfe8a3cac12871a7946a1a)
-rw-r--r--arch/arm64/include/asm/tlbflush.h35
1 file changed, 22 insertions, 13 deletions
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 6042814301ed..e3660cce3e96 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -156,12 +156,15 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
dsb(ishst);
if (TKT340553_SW_WORKAROUND) {
+ /* Flush the entire TLB */
__tlbi(vmalle1is);
+ dsb(ish);
+ isb();
} else {
__tlbi(aside1is, asid);
__tlbi_user(aside1is, asid);
+ dsb(ish);
}
- dsb(ish);
}
static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
@@ -171,7 +174,10 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
dsb(ishst);
if (TKT340553_SW_WORKAROUND) {
+ /* Flush the entire TLB */
__tlbi(vmalle1is);
+ dsb(ish);
+ isb();
} else {
__tlbi(vale1is, addr);
__tlbi_user(vale1is, addr);
@@ -197,7 +203,6 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
{
unsigned long asid = ASID(vma->vm_mm);
unsigned long addr;
- unsigned long mask = (1 << 20) - 1;
start = round_down(start, stride);
end = round_up(end, stride);
@@ -212,13 +217,19 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
start = __TLBI_VADDR(start, asid);
end = __TLBI_VADDR(end, asid);
- mask <<= 24;
dsb(ishst);
+
+ if (TKT340553_SW_WORKAROUND) {
+ /* Flush the entire TLB and exit */
+ __tlbi(vmalle1is);
+ dsb(ish);
+ isb();
+ return;
+ }
+
for (addr = start; addr < end; addr += stride) {
- if (TKT340553_SW_WORKAROUND) {
- __tlbi(vmalle1is);
- } else if (last_level) {
+ if (last_level) {
__tlbi(vale1is, addr);
__tlbi_user(vale1is, addr);
} else {
@@ -244,7 +255,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
{
unsigned long addr;
- if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
+ if (((end - start) > (MAX_TLBI_OPS * PAGE_SIZE))
+ || (TKT340553_SW_WORKAROUND)) {
flush_tlb_all();
return;
}
@@ -253,12 +265,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
end = __TLBI_VADDR(end, 0);
dsb(ishst);
- for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
- if (TKT340553_SW_WORKAROUND)
- __tlbi(vmalle1is);
- else
- __tlbi(vaale1is, addr);
- }
+ for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+ __tlbi(vaale1is, addr);
dsb(ish);
isb();
}
@@ -273,6 +281,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
dsb(ishst);
if (TKT340553_SW_WORKAROUND)
+ /* Flush the entire TLB */
__tlbi(vmalle1is);
else
__tlbi(vaae1is, addr);