author		Catalin Marinas <catalin.marinas@arm.com>	2011-11-28 21:57:24 +0000
committer	Catalin Marinas <catalin.marinas@arm.com>	2012-04-17 15:29:44 +0100
commit		b9d4d42ad901cc848ac87f1cb8923fded3645568 (patch)
tree		37c2010e12eecb605720b4c5d41780fcba282937 /arch/arm/include/asm/mmu_context.h
parent		e323969ccda2d69f02e047c08b03faa09215c72a (diff)
ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on pre-ARMv6 CPUs
This patch removes the __ARCH_WANT_INTERRUPTS_ON_CTXSW definition for
ARMv5 and earlier processors. On such processors, the context switch
requires a full cache flush. To avoid high interrupt latencies, this
patch defers the mm switching to the post-lock-switch hook if
interrupts are disabled.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Tested-by: Marc Zyngier <Marc.Zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
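For orientation, the hook pair touched by this patch is driven by the
scheduler core on either side of the runqueue lock. The sketch below is
illustrative only: context_switch_sketch() is a made-up name and the
bodies of the real scheduler functions are paraphrased, not copied from
kernel/sched.

/* Illustrative sketch of the scheduler call path (not real kernel code). */
static void context_switch_sketch(struct task_struct *prev,
				  struct task_struct *next)
{
	/*
	 * Runqueue lock held, IRQs disabled: with this patch, the
	 * pre-ARMv6 check_and_switch_context() only sets TIF_SWITCH_MM
	 * here instead of flushing the VIVT caches.
	 */
	switch_mm(prev->active_mm, next->mm, next);

	switch_to(prev, next, prev);	/* swap register state and stack */

	/*
	 * finish_task_switch() drops the runqueue lock and re-enables
	 * IRQs before the arch hook runs, so the deferred
	 * cpu_switch_mm() and its cache flush no longer hold off
	 * interrupts.
	 */
	finish_arch_post_lock_switch();
}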
Diffstat (limited to 'arch/arm/include/asm/mmu_context.h')
-rw-r--r--	arch/arm/include/asm/mmu_context.h	31
1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 8da4b9c042fe..0306bc642c0d 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -105,19 +105,40 @@ static inline void finish_arch_post_lock_switch(void)
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
+#ifdef CONFIG_MMU
+
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-#ifdef CONFIG_MMU
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
-	cpu_switch_mm(mm->pgd, mm);
-#endif
+
+	if (irqs_disabled())
+		/*
+		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+		 * high interrupt latencies, defer the call and continue
+		 * running with the old mm. Since we only support UP systems
+		 * on non-ASID CPUs, the old mm will remain valid until the
+		 * finish_arch_post_lock_switch() call.
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		cpu_switch_mm(mm->pgd, mm);
 }
 
-#define init_new_context(tsk,mm)	0
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+		struct mm_struct *mm = current->mm;
+		cpu_switch_mm(mm->pgd, mm);
+	}
+}
 
-#define finish_arch_post_lock_switch() do { } while (0)
+#endif	/* CONFIG_MMU */
+
+#define init_new_context(tsk,mm)	0
 
 #endif	/* CONFIG_CPU_HAS_ASID */
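A note on the self-referential "#define finish_arch_post_lock_switch
finish_arch_post_lock_switch": this is how the arch header opts in to
the hook. The scheduler core supplies a no-op default only when the
macro is undefined, along the lines of the paraphrased fallback below.

/* Paraphrased from the scheduler core: the arch hook defaults to a
 * no-op unless the architecture defines the macro to its own name. */
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

Deferring is safe here because, as the comment in the patch notes,
non-ASID CPUs are UP-only, so the old mm remains valid until the hook
performs the real cpu_switch_mm().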