author     Nitin Garg <nitin.garg@nxp.com>    2019-12-11 10:47:16 -0600
committer  Nitin Garg <nitin.garg@nxp.com>    2019-12-12 09:52:35 -0600
commit     898814df9b4f63524d7c09ccc218f1dcdb22a106 (patch)
tree       19dc3b12a66abaa14cff95dc2100cbfc489595e8
parent     8742e3e58e3ffbd402eb546d7b77586e1aced922 (diff)
MLK-23112 arm64: Update the SW workaround for i.MX8QM B0 ERR050104
The upper bits, above bit-35, of the ARADDR and ACADDR buses within
the Arm A53 sub-system have been incorrectly connected, so ARADDR and
ACADDR address bits above bit-35 must not be used. Downgrade the
instructions that would encode the higher address bits.

Signed-off-by: Nitin Garg <nitin.garg@nxp.com>
Reviewed-by: Anson Huang <Anson.Huang@nxp.com>
(cherry picked from commit 1894c0ed972d04d790a742c67b330d8a5d57e54c)
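Every hunk below applies the same downgrade: when the erratum flag is
set, any TLB maintenance operation that encodes an address or ASID in
its operand is replaced by the operand-free broadcast invalidate, which
cannot be mis-routed. A minimal sketch of the pattern, using only the
__tlbi()/dsb() helpers from <asm/tlbflush.h> (the function name below is
illustrative, not from this patch):

/* Sketch: on affected silicon the operand's upper bits would travel
 * over the mis-wired ARADDR/ACADDR lines, so fall back to invalidating
 * everything (EL1, inner shareable) instead of a single entry.
 */
static inline void erratum_safe_flush_page(unsigned long uaddr, unsigned long asid)
{
	dsb(ishst);
	if (TKT340553_SW_WORKAROUND)
		__tlbi(vmalle1is);		/* no operand: always safe */
	else
		__tlbi(vale1is, uaddr >> 12 | (asid << 48));
	dsb(ish);
}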
-rw-r--r--  arch/arm64/include/asm/tlbflush.h  10
-rw-r--r--  arch/arm64/kernel/cpu_errata.c      2
-rw-r--r--  arch/arm64/kernel/traps.c          26
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c           10
4 files changed, 39 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 8c72efcb638e..ee0e4a0c3cf7 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -121,7 +121,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
unsigned long asid = ASID(mm) << 48;
dsb(ishst);
- if (TKT340553_SW_WORKAROUND && ASID(mm) >> 11) {
+ if (TKT340553_SW_WORKAROUND) {
__tlbi(vmalle1is);
} else {
__tlbi(aside1is, asid);
@@ -136,7 +136,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
dsb(ishst);
- if (TKT340553_SW_WORKAROUND && (uaddr >> 36 || (ASID(vma->vm_mm) >> 12))) {
+ if (TKT340553_SW_WORKAROUND) {
__tlbi(vmalle1is);
} else {
__tlbi(vale1is, addr);
@@ -171,7 +171,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
- if (TKT340553_SW_WORKAROUND && (addr & mask || (ASID(vma->vm_mm) >> 12))) {
+ if (TKT340553_SW_WORKAROUND) {
__tlbi(vmalle1is);
} else if (last_level) {
__tlbi(vale1is, addr);
@@ -204,7 +204,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
- if (TKT340553_SW_WORKAROUND && addr >> 24)
+ if (TKT340553_SW_WORKAROUND)
__tlbi(vmalle1is);
else
__tlbi(vaae1is, addr);
@@ -222,7 +222,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
{
unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
- if (TKT340553_SW_WORKAROUND && (uaddr >> 36 || (ASID(mm) >> 12))) {
+ if (TKT340553_SW_WORKAROUND) {
__tlbi(vmalle1is);
} else {
__tlbi(vae1is, addr);
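The guards removed above all tested the same thing: whether the operand
would actually reach the broken upper wires, e.g. VA bits above bit-35
(uaddr >> 36) or a wide ASID. This update drops the guards and takes the
full invalidate unconditionally whenever the flag is set. For reference,
a by-VA TLBI operand packs VA[55:12] into operand bits [43:0] and the
ASID into bits [63:48]; an illustrative encoder (the helper name is
ours, not the kernel's):

/* Illustrative: how the by-VA operands above are packed.
 * VA[55:12] -> operand bits [43:0], ASID -> bits [63:48]. */
static inline unsigned long tlbi_va_operand(unsigned long va, unsigned long asid)
{
	return (va >> 12) | (asid << 48);
}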
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 3d6d7fae45de..f130104b45c8 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -430,7 +430,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A53 r0p[012] */
.desc = "ARM errata 826319, 827319, 824069",
.capability = ARM64_WORKAROUND_CLEAN_CACHE,
- MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
+ MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
.enable = cpu_enable_cache_maint_trap,
},
#endif
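Widening the MIDR range to 0x00..0x04 extends the CLEAN_CACHE capability
match through A53 r0p4, the revision used in i.MX8QM B0, and with it the
.enable hook cpu_enable_cache_maint_trap that the traps.c change below
relies on. The TKT340553_SW_WORKAROUND flag itself is only declared
extern in this patch; how it is set is platform code outside this diff.
A hypothetical boot-time sketch, assuming some SoC-revision probe exists:

/* Hypothetical: soc_is_affected_imx8qm_b0() is a stand-in for whatever
 * revision check the i.MX platform code actually performs; it is not a
 * real kernel API. */
bool TKT340553_SW_WORKAROUND;

static int __init imx8qm_tkt340553_init(void)
{
	TKT340553_SW_WORKAROUND = soc_is_affected_imx8qm_b0();
	return 0;
}
early_initcall(imx8qm_tkt340553_init);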
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 4fc0e958770b..abde4cfdb0ed 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -447,6 +447,27 @@ int cpu_enable_cache_maint_trap(void *__unused)
uaccess_ttbr0_disable(); \
}
+#define __user_cache_maint_ivau(insn, address, res) \
+ if (address >= user_addr_max()) { \
+ res = -EFAULT; \
+ } else { \
+ uaccess_ttbr0_enable(); \
+ asm volatile ( \
+ "1: " insn "\n" \
+ " mov %w0, #0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %w0, %w2\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (res) \
+ : "r" (address), "i" (-EFAULT)); \
+ uaccess_ttbr0_disable(); \
+ }
+
+extern bool TKT340553_SW_WORKAROUND;
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
unsigned long address;
@@ -470,7 +491,10 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
__user_cache_maint("dc civac", address, ret);
break;
case ESR_ELx_SYS64_ISS_CRM_IC_IVAU: /* IC IVAU */
- __user_cache_maint("ic ivau", address, ret);
+ if (TKT340553_SW_WORKAROUND)
+ __user_cache_maint_ivau("ic ialluis", address, ret);
+ else
+ __user_cache_maint("ic ivau", address, ret);
break;
default:
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
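This handler runs because cpu_enable_cache_maint_trap() makes EL0 cache
maintenance trap (it clears SCTLR_EL1.UCI), and the kernel then emulates
the instruction; under the erratum the per-line "ic ivau" is widened to
the operand-free "ic ialluis". Any user program that synchronizes the
instruction cache after writing code exercises this path; a minimal
illustrative trigger (not part of the patch):

#include <stddef.h>
#include <string.h>

/* Copy code into an executable buffer, then make the I-cache coherent.
 * On arm64, __builtin___clear_cache expands to (or calls a helper that
 * executes) "dc cvau"/"ic ivau" loops, and the "ic ivau" is exactly the
 * EL0 instruction the handler above traps and emulates. */
void install_code(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);
	__builtin___clear_cache(dst, dst + len);
}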
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 73464a96c365..2e93b4d86826 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -18,6 +18,8 @@
#include <asm/kvm_hyp.h>
#include <asm/tlbflush.h>
+extern bool TKT340553_SW_WORKAROUND;
+
static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
{
u64 val;
@@ -80,8 +82,12 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
- ipa >>= 12;
- __tlbi(ipas2e1is, ipa);
+ if (TKT340553_SW_WORKAROUND) {
+ __tlbi(vmalls12e1is);
+ } else {
+ ipa >>= 12;
+ __tlbi(ipas2e1is, ipa);
+ }
/*
* We have to ensure completion of the invalidation at Stage-2,
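Under the workaround the by-IPA Stage-2 invalidate becomes TLBI
VMALLS12E1IS, which drops every Stage-1 and Stage-2 entry for the
current VMID in one operand-free operation, so the follow-up Stage-1
flush the comment describes is only needed on the non-erratum path. A
condensed sketch of the resulting flow (not verbatim from the file):

/* Sketch: guest TLB flush for one IPA under the erratum. */
static void kvm_flush_vmid_ipa_sketch(phys_addr_t ipa)
{
	dsb(ishst);
	if (TKT340553_SW_WORKAROUND) {
		__tlbi(vmalls12e1is);		/* all Stage-1 + Stage-2, this VMID */
	} else {
		__tlbi(ipas2e1is, ipa >> 12);	/* one Stage-2 entry... */
		dsb(ish);			/* ...complete it, then */
		__tlbi(vmalle1is);		/* drop Stage-1 for the VMID */
	}
	dsb(ish);
	isb();
}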