author    Bo Yan <byan@nvidia.com>                  2013-03-14 19:26:17 -0700
committer Riham Haidar <rhaidar@nvidia.com>         2013-06-04 14:33:33 -0700
commit    484f8e616e9ec97fb3e772ff4b2e2a3df0a61073 (patch)
tree      2f39e98c1aceca484f7b9a7f19e398db454cb857 /arch
parent    438e0d9a9017b3e6d516d7ea14a8cffee85ff8c2 (diff)
Revert "ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on pre-ARMv6 CPUs"
This reverts commit b9d4d42ad901cc848ac87f1cb8923fded3645568.

Change-Id: Icdc220a988b0e6b145466148fc922b5f8e5cdba8
Signed-off-by: Bo Yan <byan@nvidia.com>
Reviewed-on: http://git-master/r/209826
(cherry picked from commit ccf463167767706377a8d7fd0ead3114ef561c02)
Reviewed-on: http://git-master/r/221350
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Tested-by: Sang-Hun Lee <sanlee@nvidia.com>
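Background: defining __ARCH_WANT_INTERRUPTS_ON_CTXSW tells the core scheduler
that this architecture wants the mm switch to run with interrupts enabled. A
minimal sketch of how the scheduler of this era keyed off the macro
(simplified from kernel/sched; not the verbatim code):

static inline void
prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	/* Drop the runqueue lock AND re-enable IRQs: the mm switch that
	 * follows may flush an entire VIVT cache, which is slow. */
	raw_spin_unlock_irq(&rq->lock);
#else
	/* Keep IRQs masked; they come back on in finish_lock_switch(). */
	raw_spin_unlock(&rq->lock);
#endif
}

With the macro defined, check_and_switch_context() below is entered with
interrupts enabled, so the cache flush no longer contributes to interrupt
latency.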
Diffstat (limited to 'arch')
-rw-r--r--   arch/arm/include/asm/mmu.h            9
-rw-r--r--   arch/arm/include/asm/mmu_context.h   31
2 files changed, 14 insertions, 26 deletions
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 14965658a923..20b43d6f23b3 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -34,4 +34,13 @@ typedef struct {
#endif
+/*
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
+ */
+#ifndef CONFIG_CPU_HAS_ASID
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
+#endif
+
#endif
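The #ifndef guard makes this opt-in purely a function of Kconfig. Illustrative
outcomes for typical configurations (assumed examples, not part of this patch):

/*
 * ARM926EJ-S (ARMv5, VIVT caches):    CONFIG_CPU_HAS_ASID is not set,
 *                                     so the macro is defined.
 * Cortex-A9  (ARMv7, hardware ASIDs): CONFIG_CPU_HAS_ASID=y, so the
 *                                     macro stays undefined and the mm
 *                                     switch runs with IRQs off.
 */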
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 0306bc642c0d..8da4b9c042fe 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -105,41 +105,20 @@ static inline void finish_arch_post_lock_switch(void)
#else /* !CONFIG_CPU_HAS_ASID */
-#ifdef CONFIG_MMU
-
static inline void check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk)
{
+#ifdef CONFIG_MMU
if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
__check_kvm_seq(mm);
-
- if (irqs_disabled())
- /*
- * cpu_switch_mm() needs to flush the VIVT caches. To avoid
- * high interrupt latencies, defer the call and continue
- * running with the old mm. Since we only support UP systems
- * on non-ASID CPUs, the old mm will remain valid until the
- * finish_arch_post_lock_switch() call.
- */
- set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
- else
- cpu_switch_mm(mm->pgd, mm);
-}
-
-#define finish_arch_post_lock_switch \
- finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
- if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
- struct mm_struct *mm = current->mm;
- cpu_switch_mm(mm->pgd, mm);
- }
+ cpu_switch_mm(mm->pgd, mm);
+#endif
}
-#endif /* CONFIG_MMU */
-
#define init_new_context(tsk,mm) 0
+#define finish_arch_post_lock_switch() do { } while (0)
+
#endif /* CONFIG_CPU_HAS_ASID */
#define destroy_context(mm) do { } while(0)
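Taken together, the two hunks restore the original non-ASID strategy: instead
of deferring the mm switch via TIF_SWITCH_MM, cpu_switch_mm() runs
immediately, relying on the macro added in mmu.h to keep interrupts enabled.
An illustrative call sequence (names simplified from the scheduler of this
era):

/*
 * context_switch()
 *   prepare_task_switch() -> prepare_lock_switch()
 *     raw_spin_unlock_irq(&rq->lock)     IRQs on (macro defined)
 *   switch_mm()
 *     check_and_switch_context()
 *       cpu_switch_mm(mm->pgd, mm)       may flush VIVT caches, IRQs on
 *   switch_to(), then finish_task_switch() on the new task
 *
 * Nothing needs to be postponed to finish_arch_post_lock_switch(),
 * which is why it collapses to an empty do { } while (0).
 */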