author    Markos Chandras <markos.chandras@imgtec.com>    2015-01-26 13:04:33 +0000
committer Ralf Baechle <ralf@linux-mips.org>              2015-02-16 10:55:26 +0100
commit    ed4cbc81addbc076b016c5b979fd1a02f0897f0a
tree      ab240d2c8f5bcd7eacbde7a3f46744ebb99b3995 /arch/mips/include/asm/mmu_context.h
parent    fde3538a8a711aedf1173ecb2d45aed868f51c97
MIPS: HTW: Prevent accidental HTW start due to nested htw_{start,stop}
activate_mm() and switch_mm() call get_new_mmu_context(), which in turn can
enable the HTW before EntryHi is updated with the new ASID, since
get_new_mmu_context() re-enables the HTW in local_flush_tlb_all(). This
leaves a small timing window in which the HTW runs with the new ASID but
with a stale pgd, because TLBMISS_HANDLER_SETUP_PGD has not installed a new
one yet. To prevent that, introduce a simple counter so that nested
htw_{start,stop}() sequences cannot start the HTW accidentally. Moreover,
since various IPI calls can force TLB flushing operations on a different
core, such an operation may interrupt another htw_{stop,start} sequence in
progress, leading to inconsistent updates of the htw_seq variable. To avoid
that, disable interrupts whenever that variable is updated.

Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
Cc: <stable@vger.kernel.org> # 3.17+
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9118/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
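[Editor's note] The htw_{stop,start}() pair itself is defined in
arch/mips/include/asm/pgtable.h, which is outside this diffstat. A minimal
sketch of the counter idea follows; htw_seq is named in the commit message,
but here it is shown as a bare variable for brevity (the kernel keeps it in
per-CPU cpu data), and the PWCtl register accesses are illustrative rather
than the verbatim patch:

    /*
     * Sketch only: real definitions live in arch/mips/include/asm/pgtable.h.
     */
    static unsigned int htw_seq;    /* per-CPU HTW stop nesting depth */

    static inline void htw_stop(void)
    {
            unsigned long flags;

            if (cpu_has_htw) {
                    local_irq_save(flags);
                    /* Only the outermost stop actually disables the walker. */
                    if (!htw_seq++) {
                            write_c0_pwctl(read_c0_pwctl() &
                                           ~(1 << MIPS_PWCTL_PWEN_SHIFT));
                            back_to_back_c0_hazard();
                    }
                    local_irq_restore(flags);
            }
    }

    static inline void htw_start(void)
    {
            unsigned long flags;

            if (cpu_has_htw) {
                    local_irq_save(flags);
                    /* Only the matching outermost start re-enables it. */
                    if (!--htw_seq) {
                            write_c0_pwctl(read_c0_pwctl() |
                                           (1 << MIPS_PWCTL_PWEN_SHIFT));
                            back_to_back_c0_hazard();
                    }
                    local_irq_restore(flags);
            }
    }

Interrupts are disabled across the counter update so an IPI-driven TLB flush
on this CPU cannot interleave its own stop/start pair with a half-finished
read-modify-write of htw_seq.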
Diffstat (limited to 'arch/mips/include/asm/mmu_context.h')
-rw-r--r--    arch/mips/include/asm/mmu_context.h    7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 87f11072f557..45914b59824c 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -25,7 +25,6 @@ do {						\
 	if (cpu_has_htw) {				\
 		write_c0_pwbase(pgd);			\
 		back_to_back_c0_hazard();		\
-		htw_reset();				\
 	}						\
 } while (0)
@@ -144,6 +143,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	unsigned long flags;
 
 	local_irq_save(flags);
+	htw_stop();
 	/* Check if our ASID is of an older version and thus invalid */
 	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
 		get_new_mmu_context(next, cpu);
@@ -156,6 +156,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 */
 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	cpumask_set_cpu(cpu, mm_cpumask(next));
+	htw_start();
 
 	local_irq_restore(flags);
 }
@@ -182,6 +183,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	local_irq_save(flags);
+	htw_stop();
 	/* Unconditionally get a new ASID.  */
 	get_new_mmu_context(next, cpu);
@@ -191,6 +193,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	/* mark mmu ownership change */
 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	cpumask_set_cpu(cpu, mm_cpumask(next));
+	htw_start();
 
 	local_irq_restore(flags);
 }
@@ -205,6 +208,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 	unsigned long flags;
 
 	local_irq_save(flags);
+	htw_stop();
 	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		get_new_mmu_context(mm, cpu);
@@ -213,6 +217,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 		/* will get a new context next time */
 		cpu_context(cpu, mm) = 0;
 	}
+	htw_start();
 	local_irq_restore(flags);
 }
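[Editor's note] Why the counter matters: the failing sequence from the commit
message, simplified (get_new_mmu_context() flushes the TLB only on ASID
generation roll-over):

    switch_mm()
      htw_stop()                     /* htw_seq 0 -> 1, walker disabled */
        get_new_mmu_context()
          local_flush_tlb_all()
            htw_stop()               /* htw_seq 1 -> 2, already off */
            ...
            htw_start()              /* htw_seq 2 -> 1, walker STAYS off */
      write_c0_entryhi(...)          /* new ASID installed */
      TLBMISS_HANDLER_SETUP_PGD(...) /* new pgd installed */
      htw_start()                    /* htw_seq 1 -> 0, safely re-enabled */

Before this change the inner htw_start() re-enabled the walker immediately,
opening the window where it ran with the new ASID but the old pgd.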