Diffstat (limited to 'arch/powerpc/mm/mmu_context.c')
 arch/powerpc/mm/mmu_context.c | 47 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
index 18f20da0d348..b24c19078eb1 100644
--- a/arch/powerpc/mm/mmu_context.c
+++ b/arch/powerpc/mm/mmu_context.c
@@ -18,6 +18,12 @@ static inline void switch_mm_pgdir(struct task_struct *tsk,
 {
 	/* 32-bit keeps track of the current PGDIR in the thread struct */
 	tsk->thread.pgdir = mm->pgd;
+#ifdef CONFIG_PPC_BOOK3S_32
+	tsk->thread.sr0 = mm->context.sr0;
+#endif
+#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
+	tsk->thread.pid = mm->context.id;
+#endif
 }
 #elif defined(CONFIG_PPC_BOOK3E_64)
 static inline void switch_mm_pgdir(struct task_struct *tsk,
@@ -25,6 +31,9 @@ static inline void switch_mm_pgdir(struct task_struct *tsk,
 {
 	/* 64-bit Book3E keeps track of current PGD in the PACA */
 	get_paca()->pgd = mm->pgd;
+#ifdef CONFIG_PPC_KUAP
+	tsk->thread.pid = mm->context.id;
+#endif
 }
 #else
 static inline void switch_mm_pgdir(struct task_struct *tsk,
@@ -34,33 +43,37 @@ static inline void switch_mm_pgdir(struct task_struct *tsk,
 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			struct task_struct *tsk)
 {
+	int cpu = smp_processor_id();
 	bool new_on_cpu = false;
 
 	/* Mark this context has been used on the new CPU */
-	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
-		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+	if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
+		VM_WARN_ON_ONCE(next == &init_mm);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 		inc_mm_active_cpus(next);
 
 		/*
 		 * This full barrier orders the store to the cpumask above vs
-		 * a subsequent operation which allows this CPU to begin loading
-		 * translations for next.
+		 * a subsequent load which allows this CPU/MMU to begin loading
+		 * translations for 'next' from page table PTEs into the TLB.
 		 *
-		 * When using the radix MMU that operation is the load of the
+		 * When using the radix MMU, that operation is the load of the
 		 * MMU context id, which is then moved to SPRN_PID.
 		 *
 		 * For the hash MMU it is either the first load from slb_cache
-		 * in switch_slb(), and/or the store of paca->mm_ctx_id in
-		 * copy_mm_to_paca().
+		 * in switch_slb() to preload the SLBs, or the load of
+		 * get_user_context which loads the context for the VSID hash
+		 * to insert a new SLB, in the SLB fault handler.
 		 *
 		 * On the other side, the barrier is in mm/tlb-radix.c for
-		 * radix which orders earlier stores to clear the PTEs vs
-		 * the load of mm_cpumask. And pte_xchg which does the same
-		 * thing for hash.
+		 * radix which orders earlier stores to clear the PTEs before
+		 * the load of mm_cpumask to check which CPU TLBs should be
+		 * flushed. For hash, pte_xchg to clear the PTE includes the
+		 * barrier.
 		 *
-		 * This full barrier is needed by membarrier when switching
-		 * between processes after store to rq->curr, before user-space
-		 * memory accesses.
+		 * This full barrier is also needed by membarrier when
+		 * switching between processes after store to rq->curr, before
+		 * user-space memory accesses.
 		 */
 		smp_mb();
 
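The pairing described in the comment above is: the switching CPU stores its bit into mm_cpumask(next), issues a full barrier, then loads translations; the unmap/flush side clears PTEs, issues a full barrier (in the radix TLB-flush code, or in pte_xchg() for hash), then loads mm_cpumask to decide which CPUs to flush. Below is a minimal stand-alone model of that pairing, assuming C11 fences in place of smp_mb() and using the invented names cpumask_bit and pte_valid for the two shared locations; it is only an illustration, not the kernel code.

/* Stand-alone model of the mm_cpumask vs. PTE-clear barrier pairing.
 * cpumask_bit and pte_valid are invented stand-ins, not kernel symbols.
 * Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpumask_bit = 0;	/* "this CPU's bit in mm_cpumask(next)" */
static atomic_int pte_valid = 1;	/* "a PTE the unmap path is clearing"    */

/* Switching CPU: publish the cpumask bit, then start loading translations. */
static void *switch_side(void *arg)
{
	(void)arg;
	atomic_store_explicit(&cpumask_bit, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* stands in for the smp_mb() above */
	printf("switch side sees pte_valid=%d\n",
	       atomic_load_explicit(&pte_valid, memory_order_relaxed));
	return NULL;
}

/* Unmap/flush side: clear the PTE, then read the cpumask to pick CPUs to flush. */
static void *flush_side(void *arg)
{
	(void)arg;
	atomic_store_explicit(&pte_valid, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* barrier in the TLB flush / pte_xchg path */
	printf("flush side sees cpumask_bit=%d\n",
	       atomic_load_explicit(&cpumask_bit, memory_order_relaxed));
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, switch_side, NULL);
	pthread_create(&b, NULL, flush_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

With both full fences in place, the one harmful outcome is forbidden: the switching CPU cannot read the stale PTE while the flush side simultaneously reads a cpumask that does not yet contain it, which is the guarantee the kernel comment relies on.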
@@ -79,11 +92,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	 * context
 	 */
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-		asm volatile ("dssall");
+		asm volatile (PPC_DSSALL);
 
-	if (new_on_cpu)
-		radix_kvm_prefetch_workaround(next);
-	else
+	if (!new_on_cpu)
 		membarrier_arch_switch_mm(prev, next, tsk);
 
 	/*
@@ -91,6 +102,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	 * sub architectures. Out of line for now
 	 */
 	switch_mmu_context(prev, next, tsk);
+
+	VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(prev)));
 }
 
 #ifndef CONFIG_PPC_BOOK3S_64
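For context, switch_mm_irqs_off() is expected to run with interrupts already disabled; the arch's switch_mm() entry point is a thin wrapper that provides that guarantee. A simplified sketch of such a wrapper follows; it illustrates the calling convention only and is not a verbatim copy from the tree.

/* Sketch: how a switch_mm() wrapper typically guards switch_mm_irqs_off()
 * by disabling interrupts around the call (illustrative, not verbatim). */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}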