path: root/arch/powerpc/include/asm/mmu_context.h
author    Michael Ellerman <mpe@ellerman.id.au>  2017-08-23 22:20:10 +1000
committer Michael Ellerman <mpe@ellerman.id.au>  2017-08-23 22:20:10 +1000
commit    15c659ff9d5b367c886166a9854a89b72c524a68 (patch)
tree      d33acaf62889bf6304f6109dd69923a17ab938be /arch/powerpc/include/asm/mmu_context.h
parent    516fa8d0e19d854253460ea7692551e836e7e2b5 (diff)
parent    1a92a80ad386a1a6e3b36d576d52a1a456394b70 (diff)
Merge branch 'fixes' into next
There's a non-trivial dependency between some commits we want to put in next and the KVM prefetch workaround that went into fixes. So merge fixes into next.
Diffstat (limited to 'arch/powerpc/include/asm/mmu_context.h')
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 36 ++++++++++++++++++++++++++++++++--
1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index eb749c86dca5..2ab328f0159e 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -45,7 +45,7 @@ extern void set_context(unsigned long id, pgd_t *pgd);
 
 #ifdef CONFIG_PPC_BOOK3S_64
 extern void radix__switch_mmu_context(struct mm_struct *prev,
-                                     struct mm_struct *next);
+                                      struct mm_struct *next);
 static inline void switch_mmu_context(struct mm_struct *prev,
                                       struct mm_struct *next,
                                       struct task_struct *tsk)
@@ -67,6 +67,12 @@ extern void __destroy_context(unsigned long context_id);
 extern void mmu_context_init(void);
 #endif
 
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
+extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
+#else
+static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
+#endif
+
 extern void switch_cop(struct mm_struct *next);
 extern int use_cop(unsigned long acop, struct mm_struct *mm);
 extern void drop_cop(unsigned long acop, struct mm_struct *mm);
@@ -79,10 +85,32 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
                                       struct mm_struct *next,
                                       struct task_struct *tsk)
 {
+        bool new_on_cpu = false;
+
         /* Mark this context has been used on the new CPU */
-        if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
+        if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
                 cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+                /*
+                 * This full barrier orders the store to the cpumask above vs
+                 * a subsequent operation which allows this CPU to begin loading
+                 * translations for next.
+                 *
+                 * When using the radix MMU that operation is the load of the
+                 * MMU context id, which is then moved to SPRN_PID.
+                 *
+                 * For the hash MMU it is either the first load from slb_cache
+                 * in switch_slb(), and/or the store of paca->mm_ctx_id in
+                 * copy_mm_to_paca().
+                 *
+                 * On the read side the barrier is in pte_xchg(), which orders
+                 * the store to the PTE vs the load of mm_cpumask.
+                 */
+                smp_mb();
+
+                new_on_cpu = true;
+        }
+
         /* 32-bit keeps track of the current PGDIR in the thread struct */
 #ifdef CONFIG_PPC32
         tsk->thread.pgdir = next->pgd;
@@ -103,6 +131,10 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
         if (cpu_has_feature(CPU_FTR_ALTIVEC))
                 asm volatile ("dssall");
 #endif /* CONFIG_ALTIVEC */
+
+        if (new_on_cpu)
+                radix_kvm_prefetch_workaround(next);
+
         /*
          * The actual HW switching method differs between the various
          * sub architectures. Out of line for now
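
The barrier pairing described in the new comment can be seen in miniature outside the kernel. Below is a minimal userspace C11 sketch, not kernel code: the names cpu_bitmask, pte and context_id are invented for illustration, and seq_cst fences stand in for the kernel's smp_mb() and the barrier inside pte_xchg(). The switch side publishes its bit in the mask before loading any translation state; the invalidation side publishes the PTE update before reading the mask, so each side is guaranteed to observe the other's store.

/* Minimal userspace sketch of the barrier pairing; illustrative only. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong cpu_bitmask;        /* stands in for mm_cpumask(next) */
static atomic_ulong pte;                /* stands in for a page table entry */
static unsigned long context_id = 42;   /* stands in for the MMU context id */

/* Switch side: set our bit, full barrier, only then load translations. */
static unsigned long switch_side(unsigned long cpu)
{
        atomic_fetch_or_explicit(&cpu_bitmask, 1UL << cpu, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() */
        return context_id;      /* the "load of the MMU context id" */
}

/* Invalidation side: store the PTE, full barrier, only then read the mask. */
static unsigned long invalidate_side(unsigned long new_pte)
{
        atomic_store_explicit(&pte, new_pte, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* barrier in pte_xchg() */
        /* Any CPU missing from the mask here is guaranteed to see new_pte. */
        return atomic_load_explicit(&cpu_bitmask, memory_order_relaxed);
}

int main(void)
{
        printf("ctx=%lu mask=%#lx\n", switch_side(0), invalidate_side(0x1000));
        return 0;
}

Whichever order the two threads run in, at least one observes the other's store (the classic store-buffering guarantee under full barriers), which is what lets the invalidation side safely skip CPUs that will pick up the new PTE on their own.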