author		Michael Ellerman <mpe@ellerman.id.au>	2017-08-17 23:14:17 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2017-08-17 23:14:17 +1000
commit		8434f0892ee85504a230a0e402c569774a8d0c42 (patch)
tree		a8bc1734a06a1c57bd797220a99514e70ec72f93 /arch/powerpc/include/asm
parent		6acdc9a6bad9f23771d24baca763141a24ead6cb (diff)
parent		94171b19c3f1f4d9d4c0e3aaa1aa161def1ec7ea (diff)
Merge branch 'topic/ppc-kvm' into next
Bring in the commit to rename find_linux_pte_or_hugepte() which touches arch and KVM code, and might need to be merged with the kvmppc tree to avoid conflicts.
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--	arch/powerpc/include/asm/pgtable.h	10
-rw-r--r--	arch/powerpc/include/asm/pte-walk.h	35
2 files changed, 36 insertions, 9 deletions
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index ab7f44475b1f..7d0d38f58243 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -66,16 +66,8 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd) 0
#endif
-pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
-				   bool *is_thp, unsigned *shift);
-static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
-					       bool *is_thp, unsigned *shift)
-{
-	VM_WARN(!arch_irqs_disabled(),
-		"%s called with irq enabled\n", __func__);
-	return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
-}
+/* can we use this in kvm */
unsigned long vmalloc_to_phys(void *vmalloc_addr);
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
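
The hunk above drops the irq-checking wrapper from pgtable.h; its replacement lives in the new pte-walk.h header added below. For a caller the change is essentially a rename plus a new include. A rough sketch, with the variables (pgdir, ea, shift) invented for illustration:

	/* Before: wrapper declared in <asm/pgtable.h> */
	ptep = find_linux_pte_or_hugepte(pgdir, ea, NULL, &shift);

	/* After: include the new header and call the renamed walker */
	#include <asm/pte-walk.h>
	ptep = find_linux_pte(pgdir, ea, NULL, &shift);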
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
new file mode 100644
index 000000000000..2d633e9d686c
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -0,0 +1,35 @@
+#ifndef _ASM_POWERPC_PTE_WALK_H
+#define _ASM_POWERPC_PTE_WALK_H
+
+#include <linux/sched.h>
+
+/* Don't use this directly */
+extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
+			       bool *is_thp, unsigned *hshift);
+
+static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
+				    bool *is_thp, unsigned *hshift)
+{
+	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
+	return __find_linux_pte(pgdir, ea, is_thp, hshift);
+}
+
+static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
+{
+	pgd_t *pgdir = init_mm.pgd;
+	return __find_linux_pte(pgdir, ea, NULL, hshift);
+}
+/*
+ * This is what we should always use. Any other lockless page table lookup needs
+ * careful audit against THP split.
+ */
+static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
+					 bool *is_thp, unsigned *hshift)
+{
+	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
+	VM_WARN(pgdir != current->mm->pgd,
+		"%s lock less page table lookup called on wrong mm\n", __func__);
+	return __find_linux_pte(pgdir, ea, is_thp, hshift);
+}
+
+#endif /* _ASM_POWERPC_PTE_WALK_H */
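
As the comment in the new header says, find_current_mm_pte() is the variant most code should use, and its VM_WARN checks spell out the preconditions: interrupts must be disabled (the lockless walk is only safe against concurrent THP split and page table freeing with irqs off) and the pgdir must belong to current->mm. A minimal usage sketch, assuming a caller that only wants to know whether a user address is mapped; the function and variable names here are hypothetical and not part of this commit:

	#include <linux/mm.h>
	#include <linux/irqflags.h>
	#include <asm/pte-walk.h>

	/* Illustrative only: test whether 'ea' is currently mapped in current->mm. */
	static bool example_ea_is_mapped(unsigned long ea)
	{
		unsigned long flags;
		unsigned int shift;
		bool is_thp;
		pte_t *ptep;
		bool mapped = false;

		local_irq_save(flags);		/* the walker requires irqs off */
		ptep = find_current_mm_pte(current->mm->pgd, ea, &is_thp, &shift);
		if (ptep && pte_present(*ptep))
			mapped = true;
		local_irq_restore(flags);	/* ptep must not be used after this */

		return mapped;
	}

The returned pte_t pointer is only stable while interrupts stay disabled, which is why the result is consumed before local_irq_restore().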