Diffstat (limited to 'arch/powerpc/include/asm/nohash/32/pgtable.h')
-rw-r--r--	arch/powerpc/include/asm/nohash/32/pgtable.h	237
1 file changed, 35 insertions, 202 deletions
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 0d40b33184eb..2d71e4b7cd09 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -4,16 +4,12 @@
 
 #include <asm-generic/pgtable-nopmd.h>
 
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 #include <linux/sched.h>
 #include <linux/threads.h>
 #include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */
 
-#ifdef CONFIG_44x
-extern int icache_44x_need_flush;
-#endif
-
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
 
 #define PTE_INDEX_SIZE	PTE_SHIFT
 #define PMD_INDEX_SIZE	0
@@ -23,14 +19,14 @@ extern int icache_44x_need_flush;
 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
 #define PUD_CACHE_INDEX	PUD_INDEX_SIZE
 
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 #define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
 #define PMD_TABLE_SIZE	0
 #define PUD_TABLE_SIZE	0
 #define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
-#endif	/* __ASSEMBLY__ */
+#endif	/* __ASSEMBLER__ */
 
 #define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
 #define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
@@ -55,26 +51,22 @@ extern int icache_44x_need_flush;
 
 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
 
-#define pte_ERROR(e) \
-	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
-		(unsigned long long)pte_val(e))
 #define pgd_ERROR(e) \
-	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-#ifndef __ASSEMBLY__
-
-int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
-void unmap_kernel_page(unsigned long va);
-
-#endif /* !__ASSEMBLY__ */
-
+	pr_err("%s:%d: bad pgd %08llx.\n", __FILE__, __LINE__, (unsigned long long)pgd_val(e))
 
 /*
  * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
  * value (for now) on others, from where we can start layout kernel
  * virtual space that goes below PKMAP and FIXMAP
  */
-#include <asm/fixmap.h>
+
+#define FIXADDR_SIZE	0
+#ifdef CONFIG_KASAN
+#include <asm/kasan.h>
+#define FIXADDR_TOP	(KASAN_SHADOW_START - PAGE_SIZE)
+#else
+#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
+#endif
 
 /*
  * ioremap_bot starts at that address. Early ioremaps move down from there,
@@ -126,9 +118,7 @@ void unmap_kernel_page(unsigned long va);
  * (hardware-defined) PowerPC PTE as closely as possible.
  */
 
-#if defined(CONFIG_40x)
-#include <asm/nohash/32/pte-40x.h>
-#elif defined(CONFIG_44x)
+#if defined(CONFIG_44x)
 #include <asm/nohash/32/pte-44x.h>
 #elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT)
 #include <asm/nohash/pte-e500.h>
@@ -151,7 +141,7 @@ void unmap_kernel_page(unsigned long va);
  * The mask covered by the RPN must be a ULL on 32-bit platforms with
  * 64-bit PTEs.
  */
-#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#ifdef CONFIG_PTE_64BIT
 #define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
 #define MAX_POSSIBLE_PHYSMEM_BITS 36
 #else
@@ -159,47 +149,7 @@ void unmap_kernel_page(unsigned long va);
 #define MAX_POSSIBLE_PHYSMEM_BITS 32
 #endif
 
-/*
- * _PAGE_CHG_MASK masks of bits that are to be preserved across
- * pgprot changes.
- */
-#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
-
-#ifndef __ASSEMBLY__
-
-#define pte_clear(mm, addr, ptep) \
-	do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
-
-#ifndef pte_mkwrite
-static inline pte_t pte_mkwrite(pte_t pte)
-{
-	return __pte(pte_val(pte) | _PAGE_RW);
-}
-#endif
-
-static inline pte_t pte_mkdirty(pte_t pte)
-{
-	return __pte(pte_val(pte) | _PAGE_DIRTY);
-}
-
-static inline pte_t pte_mkyoung(pte_t pte)
-{
-	return __pte(pte_val(pte) | _PAGE_ACCESSED);
-}
-
-#ifndef pte_wrprotect
-static inline pte_t pte_wrprotect(pte_t pte)
-{
-	return __pte(pte_val(pte) & ~_PAGE_RW);
-}
-#endif
-
-#ifndef pte_mkexec
-static inline pte_t pte_mkexec(pte_t pte)
-{
-	return __pte(pte_val(pte) | _PAGE_EXEC);
-}
-#endif
+#ifndef __ASSEMBLER__
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
@@ -210,135 +160,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 }
 
 /*
- * PTE updates. This function is called whenever an existing
- * valid PTE is updated. This does -not- include set_pte_at()
- * which nowadays only sets a new PTE.
- *
- * Depending on the type of MMU, we may need to use atomic updates
- * and the PTE may be either 32 or 64 bit wide. In the later case,
- * when using atomic updates, only the low part of the PTE is
- * accessed atomically.
- *
- * In addition, on 44x, we also maintain a global flag indicating
- * that an executable user mapping was modified, which is needed
- * to properly flush the virtually tagged instruction cache of
- * those implementations.
- *
- * On the 8xx, the page tables are a bit special. For 16k pages, we have
- * 4 identical entries. For 512k pages, we have 128 entries as if it was
- * 4k pages, but they are flagged as 512k pages for the hardware.
- * For other page sizes, we have a single entry in the table.
- */
-#ifdef CONFIG_PPC_8xx
-static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
-static int hugepd_ok(hugepd_t hpd);
-
-static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
-{
-	if (!huge)
-		return PAGE_SIZE / SZ_4K;
-	else if (hugepd_ok(*((hugepd_t *)pmd)))
-		return 1;
-	else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
-		return SZ_16K / SZ_4K;
-	else
-		return SZ_512K / SZ_4K;
-}
-
-static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
-				     unsigned long clr, unsigned long set, int huge)
-{
-	pte_basic_t *entry = (pte_basic_t *)p;
-	pte_basic_t old = pte_val(*p);
-	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
-	int num, i;
-	pmd_t *pmd = pmd_off(mm, addr);
-
-	num = number_of_cells_per_pte(pmd, new, huge);
-
-	for (i = 0; i < num; i++, entry++, new += SZ_4K)
-		*entry = new;
-
-	return old;
-}
-
-#ifdef CONFIG_PPC_16K_PAGES
-#define __HAVE_ARCH_PTEP_GET
-static inline pte_t ptep_get(pte_t *ptep)
-{
-	pte_basic_t val = READ_ONCE(ptep->pte);
-	pte_t pte = {val, val, val, val};
-
-	return pte;
-}
-#endif /* CONFIG_PPC_16K_PAGES */
-
-#else
-static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
-				     unsigned long clr, unsigned long set, int huge)
-{
-	pte_basic_t old = pte_val(*p);
-	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
-
-	*p = __pte(new);
-
-#ifdef CONFIG_44x
-	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
-		icache_44x_need_flush = 1;
-#endif
-	return old;
-}
-#endif
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
-					      unsigned long addr, pte_t *ptep)
-{
-	unsigned long old;
-	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
-	return (old & _PAGE_ACCESSED) != 0;
-}
-#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-				       pte_t *ptep)
-{
-	return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
-}
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#ifndef ptep_set_wrprotect
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
-				      pte_t *ptep)
-{
-	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
-}
-#endif
-
-#ifndef __ptep_set_access_flags
-static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
-					   pte_t *ptep, pte_t entry,
-					   unsigned long address,
-					   int psize)
-{
-	unsigned long set = pte_val(entry) &
-			    (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-	int huge = psize > mmu_virtual_psize ? 1 : 0;
-
-	pte_update(vma->vm_mm, address, ptep, 0, set, huge);
-
-	flush_tlb_page(vma, address);
-}
-#endif
-
-static inline int pte_young(pte_t pte)
-{
-	return pte_val(pte) & _PAGE_ACCESSED;
-}
-
-/*
  * Note that on Book E processors, the pmd contains the kernel virtual
  * (lowmem) address of the pte page. The physical address is less useful
  * because everything runs with translation enabled (even the TLB miss
@@ -349,23 +170,35 @@ static inline int pte_young(pte_t pte)
 #define pmd_pfn(pmd)		(pmd_val(pmd) >> PAGE_SHIFT)
 #else
 #define pmd_page_vaddr(pmd)	\
-	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
+	((const void *)((unsigned long)pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 #define pmd_pfn(pmd)		(__pa(pmd_val(pmd)) >> PAGE_SHIFT)
 #endif
 
 #define pmd_page(pmd)		pfn_to_page(pmd_pfn(pmd))
+
 /*
- * Encode and decode a swap entry.
- * Note that the bits we use in a PTE for representing a swap entry
- * must not include the _PAGE_PRESENT bit.
- *   -- paulus
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs (32bit PTEs):
+ *
+ *                         1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
+ *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ *   <------------------ offset -------------------> < type -> E 0 0
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ *
+ * For 64bit PTEs, the offset is extended by 32bit.
  */
 #define __swp_type(entry)		((entry).val & 0x1f)
 #define __swp_offset(entry)		((entry).val >> 5)
-#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
+#define __swp_entry(type, offset)	((swp_entry_t) { ((type) & 0x1f) | ((offset) << 5) })
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
 
-#endif /* !__ASSEMBLY__ */
+/* We borrow LSB 2 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE	0x000004
+
+#endif /* !__ASSEMBLER__ */
 
 #endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */
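Note: this header now pins the fixmap region itself instead of including <asm/fixmap.h>: FIXADDR_SIZE is 0, and FIXADDR_TOP sits one page below the top of the address space, or one page below the KASAN shadow when KASAN is enabled. As a worked value, with 4K pages and no KASAN, (unsigned long)(-PAGE_SIZE) evaluates to 0xFFFFF000 on a 32-bit build.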
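Note: most of the deleted helpers funnel through pte_update(), whose contract is a read-modify-write on the PTE, new = (old & ~clr) | set, returning the old value so callers such as __ptep_test_and_clear_young() can test the bits they just cleared. (The helpers appear to be consolidated elsewhere in this series; the destination file is not visible in this diff.) Below is a minimal user-space sketch of those semantics; the _PAGE_* bit values are made up for illustration, not the real platform-specific definitions:

/* User-space model of the removed pte_update() clear/set semantics. */
#include <assert.h>
#include <stdint.h>

typedef uint32_t pte_basic_t;

#define _PAGE_ACCESSED	0x020	/* hypothetical bit positions */
#define _PAGE_RW	0x400

static pte_basic_t pte_update(pte_basic_t *p, pte_basic_t clr, pte_basic_t set)
{
	pte_basic_t old = *p;

	*p = (old & ~clr) | set;	/* clear, then set, in one step */
	return old;			/* callers inspect the old bits */
}

int main(void)
{
	pte_basic_t pte = _PAGE_ACCESSED | _PAGE_RW;

	/* ptep_test_and_clear_young(): clear ACCESSED, report if it was set. */
	int young = (pte_update(&pte, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
	assert(young && !(pte & _PAGE_ACCESSED));

	/* ptep_set_wrprotect(): clear the write-permission bit. */
	pte_update(&pte, _PAGE_RW, 0);
	assert(!(pte & _PAGE_RW));
	return 0;
}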
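Note: the new swap-PTE layout can be sanity-checked outside the kernel. In the sketch below, a user-space model with pte_t/swp_entry_t stubbed as plain integers and macro names that only mirror the header, the << 3 in __swp_entry_to_pte() keeps the three low PTE bits clear, matching the "E 0 0" tail of the format diagram, with LSB 2 reserved for _PAGE_SWP_EXCLUSIVE; the >> 3 in __pte_to_swp_entry() shifts the marker back out, so type and offset round-trip whether or not the marker is set:

/* User-space model of the 32-bit swap PTE encoding above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t pte_basic_t;

#define SWP_TYPE(val)		((val) & 0x1f)
#define SWP_OFFSET(val)		((val) >> 5)
#define SWP_ENTRY(type, off)	(((type) & 0x1f) | ((off) << 5))
#define PTE_TO_SWP(pte)		((pte) >> 3)
#define SWP_TO_PTE(val)		((val) << 3)
#define PAGE_SWP_EXCLUSIVE	0x000004	/* LSB 2, as in the hunk above */

int main(void)
{
	unsigned int type = 0x11;	/* any 5-bit swap type */
	pte_basic_t offset = 0x12345;	/* fits the 24 offset bits of a 32-bit PTE */

	pte_basic_t pte = SWP_TO_PTE(SWP_ENTRY(type, offset));

	assert((pte & 0x7) == 0);	/* LSBs 0-2 clear, per the diagram */
	pte |= PAGE_SWP_EXCLUSIVE;	/* mark the swap PTE exclusive */

	pte_basic_t val = PTE_TO_SWP(pte);	/* >> 3 drops E and the zero LSBs */
	assert(SWP_TYPE(val) == type && SWP_OFFSET(val) == offset);

	printf("pte=%#010x type=%#x offset=%#x\n", pte, type, offset);
	return 0;
}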
