Diffstat (limited to 'arch/powerpc/include/asm/pgtable.h')
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 189
1 file changed, 152 insertions(+), 37 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index afae9a336136..17fd7ff6e535 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -1,16 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
+#include <asm/tlbflush.h>
struct mm_struct;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
@@ -18,14 +20,60 @@ struct mm_struct;
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */
-#ifndef __ASSEMBLY__
+/* Make modules code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
-#include <asm/tlbflush.h>
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP (PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
+#ifndef __ASSEMBLER__
+
+#define PFN_PTE_SHIFT PTE_RPN_SHIFT
-/* Keep these as a macros to avoid include dependency mess */
+void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ pte_t pte, unsigned int nr);
+#define set_ptes set_ptes
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
+#ifndef MAX_PTRS_PER_PGD
+#define MAX_PTRS_PER_PGD PTRS_PER_PGD
+#endif
+
+/* Keep this as a macro to avoid include dependency mess */
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
+}
+
+/*
+ * Select all bits except the pfn
+ */
+#define pte_pgprot pte_pgprot
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pte_flags;
+
+ pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
+ return __pgprot(pte_flags);
+}
+
+static inline pgprot_t pgprot_nx(pgprot_t prot)
+{
+ return pte_pgprot(pte_exprotect(__pte(pgprot_val(prot))));
+}
+#define pgprot_nx pgprot_nx
+
+#ifndef pmd_page_vaddr
+static inline const void *pmd_page_vaddr(pmd_t pmd)
+{
+ return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
+}
+#define pmd_page_vaddr pmd_page_vaddr
+#endif
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -35,19 +83,44 @@ extern unsigned long empty_zero_page[];
extern pgd_t swapper_pg_dir[];
-void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn);
-int dma_pfn_limit_to_zone(u64 pfn_limit);
extern void paging_init(void);
+void poking_init(void);
-/*
- * kern_addr_valid is intended to indicate whether an address is a valid
- * kernel address. Most 32-bit archs define it as always true (like this)
- * but most 64-bit archs actually perform a test. What should we do here?
- */
-#define kern_addr_valid(addr) (1)
+extern unsigned long ioremap_bot;
+extern const pgprot_t protection_map[16];
-#include <asm-generic/pgtable.h>
+/* can we use this in kvm */
+unsigned long vmalloc_to_phys(void *vmalloc_addr);
+
+void pgtable_cache_add(unsigned int shift);
+
+#ifdef CONFIG_PPC32
+void __init *early_alloc_pgtable(unsigned long size);
+#endif
+pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);
+#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
+void mark_initmem_nx(void);
+#else
+static inline void mark_initmem_nx(void) { }
+#endif
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, pte_t entry, int dirty);
+
+pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
+ pgprot_t vma_prot);
+
+struct file;
+static inline pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
+ return __phys_mem_access_prot(pfn, size, vma_prot);
+}
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
/*
* This gets called at the end of handling a page fault, when
@@ -58,35 +131,77 @@ extern void paging_init(void);
* corresponding HPTE into the hash table ahead of time, instead of
* waiting for the inevitable extra hash-table miss exception.
*/
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, unsigned int nr)
+{
+ if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
+ (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
+ __update_mmu_cache(vma, address, ptep);
+}
-extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
- unsigned long end, int write,
- struct page **pages, int *nr);
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
-#define pmd_large(pmd) 0
-#endif
-pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
- bool *is_thp, unsigned *shift);
-static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
- bool *is_thp, unsigned *shift)
+/*
+ * When used, PTE_FRAG_NR is defined in subarch pgtable.h
+ * so we are sure it is included when arriving here.
+ */
+#ifdef PTE_FRAG_NR
+static inline void *pte_frag_get(mm_context_t *ctx)
{
- VM_WARN(!arch_irqs_disabled(),
- "%s called with irq enabled\n", __func__);
- return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
+ return ctx->pte_frag;
}
-unsigned long vmalloc_to_phys(void *vmalloc_addr);
+static inline void pte_frag_set(mm_context_t *ctx, void *p)
+{
+ ctx->pte_frag = p;
+}
+#else
+#define PTE_FRAG_NR 1
+#define PTE_FRAG_SIZE_SHIFT PAGE_SHIFT
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
+static inline void *pte_frag_get(mm_context_t *ctx)
+{
+ return NULL;
+}
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void mark_initmem_nx(void);
-#else
-static inline void mark_initmem_nx(void) { }
+static inline void pte_frag_set(mm_context_t *ctx, void *p)
+{
+}
#endif
-#endif /* __ASSEMBLY__ */
+#define pmd_pgtable pmd_pgtable
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
+{
+ return (pgtable_t)pmd_page_vaddr(pmd);
+}
+
+#ifdef CONFIG_PPC64
+int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
+bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
+ unsigned long page_size);
+/*
+ * mm/memory_hotplug.c:mhp_supports_memmap_on_memory goes into details
+ * of some of the restrictions. We don't check for PMD_SIZE because our
+ * vmemmap allocation code can fallback correctly. The pageblock
+ * alignment requirement is met using altmap->reserve blocks.
+ */
+#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
+static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
+{
+ if (!radix_enabled())
+ return false;
+ /*
+ * With 4K page size and 2M PMD_SIZE, we can align
+ * things better with memory block size value
+ * starting from 128MB. Hence align things with PMD_SIZE.
+ */
+ if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
+ return IS_ALIGNED(vmemmap_size, PMD_SIZE);
+ return true;
+}
+
+#endif /* CONFIG_PPC64 */
+
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
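
The patch replaces the single-PTE update_mmu_cache() with the batched set_ptes()/update_mmu_cache_range() pair. A minimal caller sketch, assuming physically contiguous pages and a hypothetical map_contig_range() helper that is not part of the commit:

#include <linux/mm.h>

/*
 * Hypothetical illustration only: how a caller is expected to drive the
 * set_ptes()/update_mmu_cache_range() interfaces declared above when
 * mapping nr physically contiguous pages at addr.
 */
static void map_contig_range(struct vm_area_struct *vma, unsigned long addr,
                             unsigned long pfn, pte_t *ptep, unsigned int nr)
{
        pte_t pte = pfn_pte(pfn, vma->vm_page_prot);

        /* Install nr consecutive PTEs; the pfn advances for each entry. */
        set_ptes(vma->vm_mm, addr, ptep, pte, nr);

        /*
         * Give the hash MMU (or e500 hugetlb) a chance to preload the new
         * translations, mirroring what update_mmu_cache() does for one PTE.
         */
        update_mmu_cache_range(NULL, vma, addr, ptep, nr);
}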
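
pte_pgprot() and pgprot_nx() are meant to compose with the existing pfn_pte()/pte_pfn() accessors. A sketch under that assumption; pte_mk_noexec() is a hypothetical name used only for illustration:

#include <linux/pgtable.h>

/*
 * Hypothetical helper (not in the patch): rebuild a PTE with the same pfn
 * but with execute permission removed.
 */
static inline pte_t pte_mk_noexec(pte_t pte)
{
        /* pte_pgprot() keeps every bit of the PTE except the pfn... */
        pgprot_t prot = pte_pgprot(pte);

        /* ...and pgprot_nx() strips the execute permission from it. */
        return pfn_pte(pte_pfn(pte), pgprot_nx(prot));
}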
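
On 64-bit, arch_supports_memmap_on_memory() takes the size of the struct page array that would describe a hot-plugged block. A rough sketch of the kind of check a caller might perform; can_place_memmap_on_block() and block_size are illustrative assumptions, not taken from the commit:

#include <linux/mm.h>

/*
 * Illustrative only: could the memmap (struct page array) for a
 * hot-plugged memory block be placed on the block itself?
 */
static bool can_place_memmap_on_block(unsigned long block_size)
{
        unsigned long nr_pages = block_size >> PAGE_SHIFT;
        unsigned long vmemmap_size = nr_pages * sizeof(struct page);

        /*
         * Radix only; with 4K base pages the vmemmap must additionally be
         * PMD_SIZE (2M) aligned, as the comment in the patch explains.
         */
        return arch_supports_memmap_on_memory(vmemmap_size);
}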