Diffstat (limited to 'arch/powerpc/include/asm/pte-walk.h')
 arch/powerpc/include/asm/pte-walk.h | 38 +++++++++++++++++++++-----------------
1 file changed, 21 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
index 33fa5dd8ee6a..73c22c579a79 100644
--- a/arch/powerpc/include/asm/pte-walk.h
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -31,29 +31,33 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
pgd_t *pgdir = init_mm.pgd;
return __find_linux_pte(pgdir, ea, NULL, hshift);
}
+
/*
- * This is what we should always use. Any other lockless page table lookup needs
- * careful audit against THP split.
+ * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
+ * physical address, without taking locks. This can be used in real-mode.
*/
-static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
- bool *is_thp, unsigned *hshift)
+static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
{
- pte_t *pte;
-
- VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
- VM_WARN(pgdir != current->mm->pgd,
- "%s lock less page table lookup called on wrong mm\n", __func__);
- pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
+ pte_t *ptep;
+ phys_addr_t pa;
+ int hugepage_shift;

-#if defined(CONFIG_DEBUG_VM) && \
- !(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
/*
- * We should not find huge page if these configs are not enabled.
+ * init_mm does not free page tables, and does not do THP. It may
+ * have huge pages from huge vmalloc / ioremap etc.
*/
- if (hshift)
- WARN_ON(*hshift);
-#endif
- return pte;
+ ptep = find_init_mm_pte(addr, &hugepage_shift);
+ if (WARN_ON(!ptep))
+ return 0;
+
+ pa = PFN_PHYS(pte_pfn(*ptep));
+
+ if (!hugepage_shift)
+ hugepage_shift = PAGE_SHIFT;
+
+ pa |= addr & ((1ul << hugepage_shift) - 1);
+
+ return pa;
}

#endif /* _ASM_POWERPC_PTE_WALK_H */
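
A brief worked example of the offset handling in ppc_find_vmap_phys():
PFN_PHYS(pte_pfn(*ptep)) only gives the physical base of the (possibly huge)
page, so the low hugepage_shift bits of the virtual address are OR'd back in
to recover the exact byte. For a normal mapping, find_init_mm_pte() reports
no huge page shift, so PAGE_SHIFT is used and the usual page offset survives.
For, say, a 2 MB huge vmalloc mapping (hugepage_shift == 21, the radix PMD
size; the figure is illustrative), an address 0x150000 bytes into the mapping
yields pa = PFN_PHYS(pte_pfn(*ptep)) | 0x150000, which is correct because a
huge mapping is naturally aligned to its size on both the virtual and
physical side.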
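
As a usage sketch, a minimal hypothetical caller is shown below. The function
name and the MMIO logging scenario are assumptions for illustration, not part
of this patch; only ppc_find_vmap_phys() itself comes from the change above.

#include <linux/io.h>
#include <linux/printk.h>
#include <asm/pte-walk.h>

/*
 * Hypothetical caller: resolve the physical address behind an
 * ioremap()'d region, e.g. so it can be inspected from real mode where
 * the vmap translation is not usable. ppc_find_vmap_phys() walks
 * init_mm's page tables without taking locks, which is safe because
 * init_mm never frees its page tables and never uses THP.
 */
static void example_log_vmap_phys(void __iomem *regs)
{
	phys_addr_t pa = ppc_find_vmap_phys((unsigned long)regs);

	if (!pa)
		return;	/* lookup failed; the helper already WARNed */

	pr_info("vmap %px -> phys %pa\n", regs, &pa);
}

Note that the zero return doubles as the error value: a failed lookup trips
the WARN_ON(!ptep) inside the helper, so callers only need to handle the
zero result.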