Diffstat (limited to 'arch/powerpc/include/asm/nohash/32')
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h | 46
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/kup-8xx.h | 86
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/mmu-40x.h | 2
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/mmu-44x.h | 2
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 166
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/pgtable.h | 269
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/pte-40x.h | 44
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/pte-44x.h | 38
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/pte-85xx.h (renamed from arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h) | 30
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/pte-8xx.h | 141
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/slice.h | 20
11 files changed, 344 insertions, 500 deletions
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
index a46616937d20..92df40c6cc6b 100644
--- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
@@ -13,13 +13,13 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
 
 static inline unsigned int hugepd_shift(hugepd_t hpd)
 {
-        return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
+        return PAGE_SHIFT_8M;
 }
 
 static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
                                     unsigned int pdshift)
 {
-        unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
+        unsigned long idx = (addr & (SZ_4M - 1)) >> PAGE_SHIFT;
 
         return hugepd_page(hpd) + idx;
 }
@@ -32,8 +32,12 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
 
 static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
 {
-        *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT |
-                         (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : _PMD_PAGE_512K));
+        *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M);
+}
+
+static inline void hugepd_populate_kernel(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+        *hpdp = __hugepd(__pa(new) | _PMD_PRESENT | _PMD_PAGE_8M);
 }
 
 static inline int check_and_get_huge_psize(int shift)
@@ -41,4 +45,38 @@ static inline int check_and_get_huge_psize(int shift)
         return shift_to_mmu_psize(shift);
 }
 
+#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+                     pte_t pte, unsigned long sz);
+
+#define __HAVE_ARCH_HUGE_PTE_CLEAR
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *ptep, unsigned long sz)
+{
+        pte_update(mm, addr, ptep, ~0UL, 0, 1);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                           unsigned long addr, pte_t *ptep)
+{
+        unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
+        unsigned long set = pte_val(pte_wrprotect(__pte(0)));
+
+        pte_update(mm, addr, ptep, clr, set, 1);
+}
+
+#ifdef CONFIG_PPC_4K_PAGES
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
+{
+        size_t size = 1UL << shift;
+
+        if (size == SZ_16K)
+                return __pte(pte_val(entry) | _PAGE_SPS);
+        else
+                return __pte(pte_val(entry) | _PAGE_SPS | _PAGE_HUGE);
+}
+#define arch_make_huge_pte arch_make_huge_pte
+#endif
+
 #endif /* _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H */
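Note: huge_ptep_set_wrprotect() above derives its clr/set masks by probing pte_wrprotect() with an all-ones and an all-zeroes PTE, so it stays correct whatever combination of bits a platform's pte_wrprotect() clears or sets. A minimal standalone sketch of that derivation, using made-up bit values rather than any real platform's:

/*
 * Standalone sketch of the clr/set derivation used by
 * huge_ptep_set_wrprotect() above. The bit values are hypothetical,
 * chosen only to show the pattern.
 */
#define _PAGE_WRITE 0x002   /* hypothetical write-permission bit */
#define _PAGE_RO    0x600   /* hypothetical read-only PP bits */

typedef unsigned long pte_basic_t;

/* Example transform: clear the write bit, set the RO bits. */
static pte_basic_t pte_wrprotect_val(pte_basic_t v)
{
        return (v & ~_PAGE_WRITE) | _PAGE_RO;
}

int main(void)
{
        /* Bits the transform clears: the ones an all-ones value loses. */
        pte_basic_t clr = ~pte_wrprotect_val(~0UL);
        /* Bits the transform sets: the ones an all-zeroes value gains. */
        pte_basic_t set = pte_wrprotect_val(0);

        /* clr == _PAGE_WRITE and set == _PAGE_RO, whatever the transform does. */
        return !(clr == _PAGE_WRITE && set == _PAGE_RO);
}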
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 1006a427e99c..46bc5925e5fd 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -7,49 +7,79 @@
 
 #ifdef CONFIG_PPC_KUAP
 
-#ifdef __ASSEMBLY__
+#ifndef __ASSEMBLY__
 
-.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
-        lis     \gpr2, MD_APG_KUAP@h    /* only APG0 and APG1 are used */
-        mfspr   \gpr1, SPRN_MD_AP
-        mtspr   SPRN_MD_AP, \gpr2
-        stw     \gpr1, STACK_REGS_KUAP(\sp)
-.endm
+#include <asm/reg.h>
+
+static __always_inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
+        regs->kuap = mfspr(SPRN_MD_AP);
+        mtspr(SPRN_MD_AP, MD_APG_KUAP);
+}
+#define __kuap_save_and_lock __kuap_save_and_lock
+
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
+{
+}
 
-.macro kuap_restore sp, current, gpr1, gpr2, gpr3
-        lwz     \gpr1, STACK_REGS_KUAP(\sp)
-        mtspr   SPRN_MD_AP, \gpr1
-.endm
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+{
+        mtspr(SPRN_MD_AP, regs->kuap);
+}
 
-.macro kuap_check current, gpr
 #ifdef CONFIG_PPC_KUAP_DEBUG
-        mfspr   \gpr, SPRN_MD_AP
-        rlwinm  \gpr, \gpr, 16, 0xffff
-999:    twnei   \gpr, MD_APG_KUAP@h
-        EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
+{
+        WARN_ON_ONCE(mfspr(SPRN_MD_AP) >> 16 != MD_APG_KUAP >> 16);
+
+        return 0;
+}
+#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
 #endif
-.endm
 
-#else /* !__ASSEMBLY__ */
+static __always_inline void uaccess_begin_8xx(unsigned long val)
+{
+        asm(ASM_MMU_FTR_IFSET("mtspr %0, %1", "", %2) : :
+            "i"(SPRN_MD_AP), "r"(val), "i"(MMU_FTR_KUAP) : "memory");
+}
+
+static __always_inline void uaccess_end_8xx(void)
+{
+        asm(ASM_MMU_FTR_IFSET("mtspr %0, %1", "", %2) : :
+            "i"(SPRN_MD_AP), "r"(MD_APG_KUAP), "i"(MMU_FTR_KUAP) : "memory");
+}
 
-#include <asm/reg.h>
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+                                              unsigned long size, unsigned long dir)
+{
+        uaccess_begin_8xx(MD_APG_INIT);
+}
 
-static inline void allow_user_access(void __user *to, const void __user *from,
-                                     unsigned long size)
+static __always_inline void prevent_user_access(unsigned long dir)
 {
-        mtspr(SPRN_MD_AP, MD_APG_INIT);
+        uaccess_end_8xx();
 }
 
-static inline void prevent_user_access(void __user *to, const void __user *from,
-                                       unsigned long size)
+static __always_inline unsigned long prevent_user_access_return(void)
 {
-        mtspr(SPRN_MD_AP, MD_APG_KUAP);
+        unsigned long flags;
+
+        flags = mfspr(SPRN_MD_AP);
+
+        uaccess_end_8xx();
+
+        return flags;
+}
+
+static __always_inline void restore_user_access(unsigned long flags)
+{
+        uaccess_begin_8xx(flags);
 }
 
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static __always_inline bool
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
-        return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000),
-                    "Bug: fault blocked by AP register !");
+        return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
 }
 
 #endif /* !__ASSEMBLY__ */
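Note: the KUAP helpers above are easier to follow as a tiny state machine on the MD_AP register. A standalone model of the open/close/save/restore pattern — the *_model names are stand-ins, not kernel API, and the SPR is faked with a plain variable:

/*
 * Illustrative model of the 8xx KUAP window. Only the pairing of the
 * calls matters; the real code writes SPRN_MD_AP instead.
 */
#include <assert.h>

#define MD_APG_INIT 0xdc000000UL   /* user access allowed */
#define MD_APG_KUAP 0xde000000UL   /* user access blocked */

static unsigned long md_ap_model = MD_APG_KUAP;  /* fake SPRN_MD_AP */

static void allow_user_access_model(void)   { md_ap_model = MD_APG_INIT; }
static void prevent_user_access_model(void) { md_ap_model = MD_APG_KUAP; }

static unsigned long prevent_user_access_return_model(void)
{
        unsigned long flags = md_ap_model;  /* remember the current window */

        prevent_user_access_model();
        return flags;
}

static void restore_user_access_model(unsigned long flags)
{
        md_ap_model = flags;
}

int main(void)
{
        unsigned long flags;

        allow_user_access_model();                   /* open the user window */
        flags = prevent_user_access_return_model();  /* close it around a nested op */
        assert(md_ap_model == MD_APG_KUAP);
        restore_user_access_model(flags);            /* reopen to the saved state */
        assert(md_ap_model == MD_APG_INIT);
        prevent_user_access_model();                 /* and close when done */
        return 0;
}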
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
index 74f4edb5916e..8a8f13a22cf4 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
@@ -57,7 +57,7 @@
 typedef struct {
         unsigned int id;
         unsigned int active;
-        unsigned long vdso_base;
+        void __user *vdso;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
index 28aa3b339c5e..2d92a39d8f2e 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
@@ -108,7 +108,7 @@ extern unsigned int tlb_44x_index;
 typedef struct {
         unsigned int id;
         unsigned int active;
-        unsigned long vdso_base;
+        void __user *vdso;
 } mm_context_t;
 
 /* patch sites */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 76af5b0cb16e..141d82e249a8 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -19,7 +19,6 @@
 #define MI_RSV4I 0x08000000 /* Reserve 4 TLB entries */
 #define MI_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
 #define MI_IDXMASK 0x00001f00 /* TLB index to be loaded */
-#define MI_RESETVAL 0x00000000 /* Value of register at reset */
 
 /* These are the Ks and Kp from the PowerPC books. For proper operation,
  * Ks = 0, Kp = 1.
@@ -34,19 +33,16 @@
  * respectively NA for All or X for Supervisor and no access for User.
  * Then we use the APG to say whether accesses are according to Page rules or
  * "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
+ * _PAGE_ACCESSED is also managed via APG. When _PAGE_ACCESSED is not set, say
+ * "all User" rules, that will lead to NA for all.
+ * Therefore, we define 4 APG groups. lsb is _PAGE_ACCESSED
+ * 0 => Kernel => 11 (all accesses performed according as user iaw page definition)
+ * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
+ * 2 => User => 11 (all accesses performed according as user iaw page definition)
+ * 3 => User+Accessed => 10 (all accesses performed according to swaped page definition) for KUEP
+ * 4-15 => Not Used
  */
-#define MI_APG_INIT 0x4fffffff
-
-/*
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 10 (all accesses performed according to swaped page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
- */
-#define MI_APG_KUEP 0x6fffffff
+#define MI_APG_INIT 0xde000000
 
 /* The effective page number register. When read, contains the information
  * about the last instruction TLB miss. When MI_RPN is written, bits in
@@ -95,7 +91,6 @@
 #define MD_TWAM 0x04000000 /* Use 4K page hardware assist */
 #define MD_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
 #define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */
-#define MD_RESETVAL 0x04000000 /* Value of register at reset */
 
 #define SPRN_M_CASID 793 /* Address space ID (context) to match */
 #define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */
@@ -108,25 +103,9 @@
 #define MD_Ks 0x80000000 /* Should not be set */
 #define MD_Kp 0x40000000 /* Should always be set */
 
-/*
- * All pages' PP data bits are set to either 000 or 011 or 001, which means
- * respectively RW for Supervisor and no access for User, or RO for
- * Supervisor and no access for user and NA for ALL.
- * Then we use the APG to say whether accesses are according to Page rules or
- * "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
- */
-#define MD_APG_INIT 0x4fffffff
-
-/*
- * 0 => No user => 01 (all accesses performed according to page definition)
- * 1 => User => 10 (all accesses performed according to swaped page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
- */
-#define MD_APG_KUAP 0x6fffffff
+/* See explanation above at the definition of MI_APG_INIT */
+#define MD_APG_INIT 0xdc000000
+#define MD_APG_KUAP 0xde000000
 
 /* The effective page number register. When read, contains the information
  * about the last instruction TLB miss. When MD_RPN is written, bits in
@@ -178,12 +157,6 @@
  */
 #define SPRN_M_TW 799
 
-#ifdef CONFIG_PPC_MM_SLICES
-#include <asm/nohash/32/slice.h>
-#define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1))
-#define LOW_SLICE_ARRAY_SZ SLICE_ARRAY_SIZE
-#endif
-
 #if defined(CONFIG_PPC_4K_PAGES)
 #define mmu_virtual_psize MMU_PAGE_4K
 #elif defined(CONFIG_PPC_16K_PAGES)
@@ -197,77 +170,24 @@
 
 #define mmu_linear_psize MMU_PAGE_8M
 
+#define MODULES_VADDR (PAGE_OFFSET - SZ_256M)
+#define MODULES_END PAGE_OFFSET
+
 #ifndef __ASSEMBLY__
 
 #include <linux/mmdebug.h>
+#include <linux/sizes.h>
 
-struct slice_mask {
-        u64 low_slices;
-        DECLARE_BITMAP(high_slices, 0);
-};
+void mmu_pin_tlb(unsigned long top, bool readonly);
 
 typedef struct {
         unsigned int id;
         unsigned int active;
-        unsigned long vdso_base;
-#ifdef CONFIG_PPC_MM_SLICES
-        u16 user_psize;         /* page size index */
-        unsigned char low_slices_psize[SLICE_ARRAY_SIZE];
-        unsigned char high_slices_psize[0];
-        unsigned long slb_addr_limit;
-        struct slice_mask mask_base_psize; /* 4k or 16k */
-        struct slice_mask mask_512k;
-        struct slice_mask mask_8m;
-#endif
+        void __user *vdso;
         void *pte_frag;
 } mm_context_t;
 
-#ifdef CONFIG_PPC_MM_SLICES
-static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
-{
-        return ctx->user_psize;
-}
-
-static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
-{
-        ctx->user_psize = user_psize;
-}
-
-static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
-{
-        return ctx->low_slices_psize;
-}
-
-static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
-{
-        return ctx->high_slices_psize;
-}
-
-static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
-{
-        return ctx->slb_addr_limit;
-}
-
-static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
-{
-        ctx->slb_addr_limit = limit;
-}
-
-static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
-{
-        if (psize == MMU_PAGE_512K)
-                return &ctx->mask_512k;
-        if (psize == MMU_PAGE_8M)
-                return &ctx->mask_8m;
-
-        BUG_ON(psize != mmu_virtual_psize);
-
-        return &ctx->mask_base_psize;
-}
-#endif /* CONFIG_PPC_MM_SLICE */
-
 #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
-#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
 
 /* Page size definitions, common between 32 and 64-bit
  *
@@ -303,14 +223,50 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
         BUG();
 }
 
-/* patch sites */
-extern s32 patch__itlbmiss_linmem_top, patch__itlbmiss_linmem_top8;
-extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp;
-extern s32 patch__fixupdar_linmem_top;
-extern s32 patch__dtlbmiss_romem_top, patch__dtlbmiss_romem_top8;
+static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn,
+                                      unsigned int max_page_shift, unsigned long size)
+{
+        if (end - addr < size)
+                return false;
+
+        if ((1UL << max_page_shift) < size)
+                return false;
+
+        if (!IS_ALIGNED(addr, size))
+                return false;
+
+        if (!IS_ALIGNED(PFN_PHYS(pfn), size))
+                return false;
 
-extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2;
-extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3;
+        return true;
+}
+
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+                                                         u64 pfn, unsigned int max_page_shift)
+{
+        if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K))
+                return SZ_512K;
+        if (PAGE_SIZE == SZ_16K)
+                return SZ_16K;
+        if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K))
+                return SZ_16K;
+        return PAGE_SIZE;
+}
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+        if (size >= SZ_512K)
+                return 19;
+        else if (size >= SZ_16K)
+                return 14;
+        else
+                return PAGE_SHIFT;
+}
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+
+/* patch sites */
+extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
 extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 552b96eef0c8..9164a9e41b02 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -2,18 +2,12 @@
 #ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
 #define _ASM_POWERPC_NOHASH_32_PGTABLE_H
 
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/threads.h>
 #include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */
-#include <asm/asm-405.h>
-
-#ifdef CONFIG_44x
-extern int icache_44x_need_flush;
-#endif
 
 #endif /* __ASSEMBLY__ */
 
@@ -30,6 +24,8 @@ extern int icache_44x_need_flush;
 #define PMD_TABLE_SIZE 0
 #define PUD_TABLE_SIZE 0
 #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
 #endif /* __ASSEMBLY__ */
 
 #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
@@ -54,27 +50,23 @@ extern int icache_44x_need_flush;
 #define PGD_MASKED_BITS 0
 
 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
 
-#define pte_ERROR(e) \
-        pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
-                (unsigned long long)pte_val(e))
 #define pgd_ERROR(e) \
         pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
-#ifndef __ASSEMBLY__
-
-int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
-
-#endif /* !__ASSEMBLY__ */
-
-
 /*
  * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
  * value (for now) on others, from where we can start layout kernel
  * virtual space that goes below PKMAP and FIXMAP
  */
-#include <asm/fixmap.h>
+
+#define FIXADDR_SIZE 0
+#ifdef CONFIG_KASAN
+#include <asm/kasan.h>
+#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
+#else
+#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
+#endif
 
 /*
  * ioremap_bot starts at that address. Early ioremaps move down from there,
@@ -110,11 +102,16 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
  */
 #define VMALLOC_OFFSET (0x1000000) /* 16M */
 #ifdef PPC_PIN_SIZE
-#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
 #else
 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
 #endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
 #define VMALLOC_END ioremap_bot
+#endif
 
 /*
  * Bits in a linux-style PTE. These match the bits in the
@@ -125,10 +122,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 #include <asm/nohash/32/pte-40x.h>
 #elif defined(CONFIG_44x)
 #include <asm/nohash/32/pte-44x.h>
-#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
-#include <asm/nohash/pte-book3e.h>
-#elif defined(CONFIG_FSL_BOOKE)
-#include <asm/nohash/32/pte-fsl-booke.h>
+#elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT)
+#include <asm/nohash/pte-e500.h>
+#elif defined(CONFIG_PPC_85xx)
+#include <asm/nohash/32/pte-85xx.h>
 #elif defined(CONFIG_PPC_8xx)
 #include <asm/nohash/32/pte-8xx.h>
 #endif
@@ -146,52 +143,16 @@
  * The mask covered by the RPN must be a ULL on 32-bit platforms with
  * 64-bit PTEs.
  */
-#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#ifdef CONFIG_PTE_64BIT
 #define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
 #else
 #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
 #endif
 
-/*
- * _PAGE_CHG_MASK masks of bits that are to be preserved across
- * pgprot changes.
- */
-#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
-
 #ifndef __ASSEMBLY__
 
-#define pte_clear(mm, addr, ptep) \
-        do { pte_update(ptep, ~0, 0); } while (0)
-
-#ifndef pte_mkwrite
-static inline pte_t pte_mkwrite(pte_t pte)
-{
-        return __pte(pte_val(pte) | _PAGE_RW);
-}
-#endif
-
-static inline pte_t pte_mkdirty(pte_t pte)
-{
-        return __pte(pte_val(pte) | _PAGE_DIRTY);
-}
-
-static inline pte_t pte_mkyoung(pte_t pte)
-{
-        return __pte(pte_val(pte) | _PAGE_ACCESSED);
-}
-
-#ifndef pte_wrprotect
-static inline pte_t pte_wrprotect(pte_t pte)
-{
-        return __pte(pte_val(pte) & ~_PAGE_RW);
-}
-#endif
-
-static inline pte_t pte_mkexec(pte_t pte)
-{
-        return __pte(pte_val(pte) | _PAGE_EXEC);
-}
-
 #define pmd_none(pmd) (!pmd_val(pmd))
 #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
 #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
@@ -200,141 +161,6 @@ static inline void pmd_clear(pmd_t *pmdp)
         *pmdp = __pmd(0);
 }
 
-
-
-/*
- * PTE updates. This function is called whenever an existing
- * valid PTE is updated. This does -not- include set_pte_at()
- * which nowadays only sets a new PTE.
- *
- * Depending on the type of MMU, we may need to use atomic updates
- * and the PTE may be either 32 or 64 bit wide. In the later case,
- * when using atomic updates, only the low part of the PTE is
- * accessed atomically.
- *
- * In addition, on 44x, we also maintain a global flag indicating
- * that an executable user mapping was modified, which is needed
- * to properly flush the virtually tagged instruction cache of
- * those implementations.
- */
-#ifndef CONFIG_PTE_64BIT
-static inline unsigned long pte_update(pte_t *p,
-                                       unsigned long clr,
-                                       unsigned long set)
-{
-#ifdef PTE_ATOMIC_UPDATES
-        unsigned long old, tmp;
-
-        __asm__ __volatile__("\
-1:      lwarx   %0,0,%3\n\
-        andc    %1,%0,%4\n\
-        or      %1,%1,%5\n"
-        PPC405_ERR77(0,%3)
-"       stwcx.  %1,0,%3\n\
-        bne-    1b"
-        : "=&r" (old), "=&r" (tmp), "=m" (*p)
-        : "r" (p), "r" (clr), "r" (set), "m" (*p)
-        : "cc" );
-#else /* PTE_ATOMIC_UPDATES */
-        unsigned long old = pte_val(*p);
-        unsigned long new = (old & ~clr) | set;
-
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
-        p->pte = p->pte1 = p->pte2 = p->pte3 = new;
-#else
-        *p = __pte(new);
-#endif
-#endif /* !PTE_ATOMIC_UPDATES */
-
-#ifdef CONFIG_44x
-        if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
-                icache_44x_need_flush = 1;
-#endif
-        return old;
-}
-#else /* CONFIG_PTE_64BIT */
-static inline unsigned long long pte_update(pte_t *p,
-                                            unsigned long clr,
-                                            unsigned long set)
-{
-#ifdef PTE_ATOMIC_UPDATES
-        unsigned long long old;
-        unsigned long tmp;
-
-        __asm__ __volatile__("\
-1:      lwarx   %L0,0,%4\n\
-        lwzx    %0,0,%3\n\
-        andc    %1,%L0,%5\n\
-        or      %1,%1,%6\n"
-        PPC405_ERR77(0,%3)
-"       stwcx.  %1,0,%4\n\
-        bne-    1b"
-        : "=&r" (old), "=&r" (tmp), "=m" (*p)
-        : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
-        : "cc" );
-#else /* PTE_ATOMIC_UPDATES */
-        unsigned long long old = pte_val(*p);
-        *p = __pte((old & ~(unsigned long long)clr) | set);
-#endif /* !PTE_ATOMIC_UPDATES */
-
-#ifdef CONFIG_44x
-        if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
-                icache_44x_need_flush = 1;
-#endif
-        return old;
-}
-#endif /* CONFIG_PTE_64BIT */
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
-{
-        unsigned long old;
-        old = pte_update(ptep, _PAGE_ACCESSED, 0);
-        return (old & _PAGE_ACCESSED) != 0;
-}
-#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-        __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                                       pte_t *ptep)
-{
-        return __pte(pte_update(ptep, ~0, 0));
-}
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
-                                      pte_t *ptep)
-{
-        unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
-        unsigned long set = pte_val(pte_wrprotect(__pte(0)));
-
-        pte_update(ptep, clr, set);
-}
-
-static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
-                                           pte_t *ptep, pte_t entry,
-                                           unsigned long address,
-                                           int psize)
-{
-        pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
-        pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
-        unsigned long set = pte_val(entry) & pte_val(pte_set);
-        unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
-
-        pte_update(ptep, clr, set);
-
-        flush_tlb_page(vma, address);
-}
-
-static inline int pte_young(pte_t pte)
-{
-        return pte_val(pte) & _PAGE_ACCESSED;
-}
-
-#define __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B) ((pte_val(A) ^ pte_val(B)) == 0)
-
 /*
  * Note that on Book E processors, the pmd contains the kernel virtual
  * (lowmem) address of the pte page. The physical address is less useful
  * because everything runs with translation enabled (even the TLB miss
  * handler). On everything else the pmd contains the physical address
  * of the pte page. -- paulus
  */
 #ifndef CONFIG_BOOKE
-#define pmd_page_vaddr(pmd) \
-        ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
-#define pmd_page(pmd) \
-        pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
 #else
 #define pmd_page_vaddr(pmd) \
-        ((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
-#define pmd_page(pmd) \
-        pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+        ((const void *)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
+#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
 #endif
 
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
-        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
-        (pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
-         pte_index(addr))
-#define pte_offset_map(dir, addr) \
-        ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
-         (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
-#define pte_unmap(pte) kunmap_atomic(pte)
+#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
 
 /*
- * Encode and decode a swap entry.
- * Note that the bits we use in a PTE for representing a swap entry
- * must not include the _PAGE_PRESENT bit.
- *   -- paulus
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs (32bit PTEs):
+ *
+ *                         1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
+ *     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ *     <------------------ offset -------------------> < type -> E 0 0
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ *
+ * For 64bit PTEs, the offset is extended by 32bit.
 */
 #define __swp_type(entry) ((entry).val & 0x1f)
 #define __swp_offset(entry) ((entry).val >> 5)
-#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) & 0x1f) | ((offset) << 5) })
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
 
+/* We borrow LSB 2 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE 0x000004
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */
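Note: the swap-PTE layout documented above shifts the whole swp_entry value left by 3 so that PTE bits 0-1 (where _PAGE_PRESENT lives) stay clear and PTE bit 2 remains free for the exclusive marker. A worked model of the 32-bit packing arithmetic, independent of the kernel swp_entry_t type:

/*
 * Model of the 32-bit swap-PTE packing above:
 *   pte = ((offset << 5) | type) << 3, exclusive marker in PTE bit 2.
 */
#include <assert.h>

#define SWP_TYPE_MASK      0x1fUL
#define PTE_SWP_SHIFT      3
#define PAGE_SWP_EXCLUSIVE 0x4UL   /* LSB 2 of the PTE */

static unsigned long swp_pte(unsigned long type, unsigned long offset)
{
        return ((type & SWP_TYPE_MASK) | (offset << 5)) << PTE_SWP_SHIFT;
}

int main(void)
{
        unsigned long pte = swp_pte(7, 0x1234) | PAGE_SWP_EXCLUSIVE;
        unsigned long val = pte >> PTE_SWP_SHIFT;  /* __pte_to_swp_entry */

        assert((val & SWP_TYPE_MASK) == 7);        /* __swp_type */
        assert((val >> 5) == 0x1234);              /* __swp_offset */
        assert(pte & PAGE_SWP_EXCLUSIVE);          /* marker lives below the entry */
        assert((pte & 3) == 0);                    /* bits 0-1 stay clear */
        return 0;
}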
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
index 12c6811e344b..d759cfd74754 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -27,9 +27,9 @@
  *   of the 16 available.  Bit 24-26 of the TLB are cleared in the TLB
  *   miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
  *   zone.
- *   - PRESENT *must* be in the bottom two bits because swap cache
- *     entries use the top 30 bits.  Because 40x doesn't support SMP
- *     anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
+ *   - PRESENT *must* be in the bottom two bits because swap PTEs
+ *     use the top 30 bits.  Because 40x doesn't support SMP anyway, M is
+ *     irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
  *     is cleared in the TLB miss handler before the TLB entry is loaded.
  *   - All other bits of the PTE are loaded into TLBLO without
  *     modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
@@ -42,11 +42,10 @@
 #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
 #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
 #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
-#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
+#define _PAGE_READ 0x010 /* software: read permission */
 #define _PAGE_SPECIAL 0x020 /* software: Special page */
-#define _PAGE_RW 0x040 /* software: Writes permitted */
 #define _PAGE_DIRTY 0x080 /* software: dirty page */
-#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
+#define _PAGE_WRITE 0x100 /* hardware: WR, anded with dirty in exception */
 #define _PAGE_EXEC 0x200 /* hardware: EX permission */
 #define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
 
@@ -56,11 +55,6 @@
 /* cache related flags non existing on 40x */
 #define _PAGE_COHERENT 0
 
-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
-
 #define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
 #define _PMD_PRESENT_MASK _PMD_PRESENT
 #define _PMD_BAD 0x802
@@ -70,36 +64,10 @@
 
 #define _PTE_NONE_MASK 0
 
-/* Until my rework is finished, 40x still needs atomic PTE updates */
-#define PTE_ATOMIC_UPDATES 1
-
 #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
 #define _PAGE_BASE (_PAGE_BASE_NC)
 
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-
-#ifndef __ASSEMBLY__
-static inline pte_t pte_wrprotect(pte_t pte)
-{
-        return __pte(pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE));
-}
-
-#define pte_wrprotect pte_wrprotect
-
-static inline pte_t pte_mkclean(pte_t pte)
-{
-        return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
-}
-
-#define pte_mkclean pte_mkclean
-#endif
+#include <asm/pgtable-masks.h>
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h
index 78bc304f750e..851813725237 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h
@@ -56,29 +56,19 @@
  * above bits.  Note that the bit values are CPU specific, not architecture
  * specific.
  *
- * The kernel PTE entry holds an arch-dependent swp_entry structure under
- * certain situations. In other words, in such situations some portion of
- * the PTE bits are used as a swp_entry. In the PPC implementation, the
- * 3-24th LSB are shared with swp_entry, however the 0-2nd three LSB still
- * hold protection values. That means the three protection bits are
- * reserved for both PTE and SWAP entry at the most significant three
- * LSBs.
- *
- * There are three protection bits available for SWAP entry:
- *      _PAGE_PRESENT
- *      _PAGE_HASHPTE (if HW has)
- *
- * So those three bits have to be inside of 0-2nd LSB of PTE.
- *
+ * The kernel PTE entry can be an ordinary PTE mapping a page or a special swap
+ * PTE. In case of a swap PTE, LSB 2-24 are used to store information regarding
+ * the swap entry. However LSB 0-1 still hold protection values, for example,
+ * to distinguish swap PTEs from ordinary PTEs, and must be used with care.
  */
 #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
-#define _PAGE_RW 0x00000002 /* S: Write permission */
+#define _PAGE_WRITE 0x00000002 /* S: Write permission */
 #define _PAGE_EXEC 0x00000004 /* H: Execute permission */
-#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
+#define _PAGE_READ 0x00000008 /* S: Read permission */
 #define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
 #define _PAGE_SPECIAL 0x00000020 /* S: Special page */
-#define _PAGE_USER 0x00000040 /* S: User page */
+#define _PAGE_ACCESSED 0x00000040 /* S: Page referenced */
 #define _PAGE_ENDIAN 0x00000080 /* H: E bit */
 #define _PAGE_GUARDED 0x00000100 /* H: G bit */
 #define _PAGE_COHERENT 0x00000200 /* H: M bit */
@@ -88,11 +78,6 @@
 /* No page size encoding in the linux PTE */
 #define _PAGE_PSIZE 0
 
-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
-
 /* TODO: Add large page lowmem mapping support */
 #define _PMD_PRESENT 0
 #define _PMD_PRESENT_MASK (PAGE_MASK)
@@ -115,14 +100,7 @@
 #define _PAGE_BASE (_PAGE_BASE_NC)
 #endif
 
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_NOHASH_32_PTE_44x_H */
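Note: 40x, 44x and (below) 85xx now pull their protection masks from the shared <asm/pgtable-masks.h> instead of per-platform __P/__S tables. The following is a hedged sketch of the derivation pattern such a header plausibly uses — the real header may differ in detail; it only illustrates how the PAGE_* masks can be built from _PAGE_READ/_PAGE_WRITE/_PAGE_EXEC unless a platform (like 8xx) supplies combined _PAGE_NA/_PAGE_RO/_PAGE_RW values of its own. All values here are hypothetical:

/* Hedged sketch of the <asm/pgtable-masks.h> idea; bit values made up. */
typedef unsigned long pgprot_t;
#define __pgprot(x) (x)

#define _PAGE_READ  0x010   /* hypothetical per-platform bits */
#define _PAGE_WRITE 0x100
#define _PAGE_EXEC  0x200
#define _PAGE_BASE  0x001   /* hypothetical PRESENT | ACCESSED */

/* Derive combined masks unless the platform already defines them. */
#ifndef _PAGE_RO
#define _PAGE_NA 0
#define _PAGE_RO _PAGE_READ
#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
#endif
#define _PAGE_ROX (_PAGE_RO | _PAGE_EXEC)
#define _PAGE_RWX (_PAGE_RW | _PAGE_EXEC)

/* Protection masks formerly open-coded in each pte-*.h: */
static const pgprot_t PAGE_NONE       = __pgprot(_PAGE_BASE | _PAGE_NA);
static const pgprot_t PAGE_READONLY   = __pgprot(_PAGE_BASE | _PAGE_RO);
static const pgprot_t PAGE_READONLY_X = __pgprot(_PAGE_BASE | _PAGE_ROX);
static const pgprot_t PAGE_SHARED     = __pgprot(_PAGE_BASE | _PAGE_RW);
static const pgprot_t PAGE_SHARED_X   = __pgprot(_PAGE_BASE | _PAGE_RWX);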
diff --git a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
index 0fc1bd42bb3e..653a342d3b25 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H
-#define _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_85xx_H
+#define _ASM_POWERPC_NOHASH_32_PTE_85xx_H
 #ifdef __KERNEL__
 
 /* PTE bit definitions for Freescale BookE SW loaded TLB MMU based
@@ -11,15 +11,15 @@
    32 33 34 35 36  ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
    RPN......................  0  0 U0 U1 U2 U3 UX SX UW SW UR SR
 
-   - PRESENT *must* be in the bottom three bits because swap cache
-     entries use the top 29 bits.
+   - PRESENT *must* be in the bottom two bits because swap PTEs use
+     the top 30 bits.
 
 */
 
 /* Definitions for FSL Book-E Cores */
-#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
-#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
-#define _PAGE_RW 0x00004 /* S: Write permission (SW) */
+#define _PAGE_READ 0x00001 /* H: Read permission (SR) */
+#define _PAGE_PRESENT 0x00002 /* S: PTE contains a translation */
+#define _PAGE_WRITE 0x00004 /* S: Write permission (SW) */
 #define _PAGE_DIRTY 0x00008 /* S: Page dirty */
 #define _PAGE_EXEC 0x00010 /* H: SX permission */
 #define _PAGE_ACCESSED 0x00020 /* S: Page referenced */
@@ -31,11 +31,6 @@
 #define _PAGE_WRITETHRU 0x00400 /* H: W bit */
 #define _PAGE_SPECIAL 0x00800 /* S: Special page */
 
-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
-
 /* No page size encoding in the linux PTE */
 #define _PAGE_PSIZE 0
 
@@ -61,14 +56,7 @@
 #define _PAGE_BASE (_PAGE_BASE_NC)
 #endif
 
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
 
 #endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */
+#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_85xx_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index c9e4b2d90f65..137dc3c84e45 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -39,13 +39,20 @@
  * into the TLB.
  */
 #define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */
-#define _PAGE_SPECIAL 0x0020 /* SW entry */
+#define _PAGE_ACCESSED 0x0020 /* Copied to L1 APG 1 entry in I/DTLB */
 #define _PAGE_EXEC 0x0040 /* Copied to PP (bit 21) in ITLB */
-#define _PAGE_ACCESSED 0x0080 /* software: page referenced */
+#define _PAGE_SPECIAL 0x0080 /* SW entry */
 
 #define _PAGE_NA 0x0200 /* Supervisor NA, User no access */
 #define _PAGE_RO 0x0600 /* Supervisor RO, User no access */
+#define _PAGE_HUGE 0x0800 /* Copied to L1 PS bit 29 */
+
+#define _PAGE_NAX (_PAGE_NA | _PAGE_EXEC)
+#define _PAGE_ROX (_PAGE_RO | _PAGE_EXEC)
+#define _PAGE_RW 0
+#define _PAGE_RWX _PAGE_EXEC
+
 /* cache related flags non existing on 8xx */
 #define _PAGE_COHERENT 0
 #define _PAGE_WRITETHRU 0
@@ -57,11 +64,12 @@
 
 #define _PMD_PRESENT 0x0001
 #define _PMD_PRESENT_MASK _PMD_PRESENT
-#define _PMD_BAD 0x0fd0
+#define _PMD_BAD 0x0f90
 #define _PMD_PAGE_MASK 0x000c
 #define _PMD_PAGE_8M 0x000c
 #define _PMD_PAGE_512K 0x0004
-#define _PMD_USER 0x0020 /* APG 1 */
+#define _PMD_ACCESSED 0x0020 /* APG 1 */
+#define _PMD_USER 0x0040 /* APG 2 */
 
 #define _PTE_NONE_MASK 0
 
@@ -74,14 +82,7 @@
 #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
 #define _PAGE_BASE (_PAGE_BASE_NC)
 
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA)
-#define PAGE_SHARED __pgprot(_PAGE_BASE)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_RO)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_RO)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
 
 #ifndef __ASSEMBLY__
 static inline pte_t pte_wrprotect(pte_t pte)
@@ -91,6 +92,13 @@ static inline pte_t pte_wrprotect(pte_t pte)
 
 #define pte_wrprotect pte_wrprotect
 
+static inline int pte_read(pte_t pte)
+{
+        return (pte_val(pte) & _PAGE_RO) != _PAGE_NA;
+}
+
+#define pte_read pte_read
+
 static inline int pte_write(pte_t pte)
 {
         return !(pte_val(pte) & _PAGE_RO);
@@ -98,40 +106,121 @@ static inline int pte_write(pte_t pte)
 
 #define pte_write pte_write
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
         return __pte(pte_val(pte) & ~_PAGE_RO);
 }
 
-#define pte_mkwrite pte_mkwrite
+#define pte_mkwrite_novma pte_mkwrite_novma
 
-static inline bool pte_user(pte_t pte)
+static inline pte_t pte_mkhuge(pte_t pte)
 {
-        return !(pte_val(pte) & _PAGE_SH);
+        return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
 }
 
-#define pte_user pte_user
+#define pte_mkhuge pte_mkhuge
+
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+                                     unsigned long clr, unsigned long set, int huge);
 
-static inline pte_t pte_mkprivileged(pte_t pte)
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-        return __pte(pte_val(pte) | _PAGE_SH);
+        pte_update(mm, addr, ptep, 0, _PAGE_RO, 0);
 }
+#define ptep_set_wrprotect ptep_set_wrprotect
+
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+                                           pte_t entry, unsigned long address, int psize)
+{
+        unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_EXEC);
+        unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+        int huge = psize > mmu_virtual_psize ? 1 : 0;
+
+        pte_update(vma->vm_mm, address, ptep, clr, set, huge);
 
-#define pte_mkprivileged pte_mkprivileged
+        flush_tlb_page(vma, address);
+}
+#define __ptep_set_access_flags __ptep_set_access_flags
 
-static inline pte_t pte_mkuser(pte_t pte)
+static inline unsigned long pgd_leaf_size(pgd_t pgd)
 {
-        return __pte(pte_val(pte) & ~_PAGE_SH);
+        if (pgd_val(pgd) & _PMD_PAGE_8M)
+                return SZ_8M;
+        return SZ_4M;
 }
 
-#define pte_mkuser pte_mkuser
+#define pgd_leaf_size pgd_leaf_size
 
-static inline pte_t pte_mkhuge(pte_t pte)
+static inline unsigned long pte_leaf_size(pte_t pte)
 {
-        return __pte(pte_val(pte) | _PAGE_SPS);
+        pte_basic_t val = pte_val(pte);
+
+        if (val & _PAGE_HUGE)
+                return SZ_512K;
+        if (val & _PAGE_SPS)
+                return SZ_16K;
+        return SZ_4K;
 }
 
-#define pte_mkhuge pte_mkhuge
+#define pte_leaf_size pte_leaf_size
+
+/*
+ * On the 8xx, the page tables are a bit special. For 16k pages, we have
+ * 4 identical entries. For 512k pages, we have 128 entries as if it was
+ * 4k pages, but they are flagged as 512k pages for the hardware.
+ * For other page sizes, we have a single entry in the table.
+ */
+static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
+static int hugepd_ok(hugepd_t hpd);
+
+static inline int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
+{
+        if (!huge)
+                return PAGE_SIZE / SZ_4K;
+        else if (hugepd_ok(*((hugepd_t *)pmd)))
+                return 1;
+        else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
+                return SZ_16K / SZ_4K;
+        else
+                return SZ_512K / SZ_4K;
+}
+
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+                                     unsigned long clr, unsigned long set, int huge)
+{
+        pte_basic_t *entry = (pte_basic_t *)p;
+        pte_basic_t old = pte_val(*p);
+        pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+        int num, i;
+        pmd_t *pmd = pmd_off(mm, addr);
+
+        num = number_of_cells_per_pte(pmd, new, huge);
+
+        for (i = 0; i < num; i += PAGE_SIZE / SZ_4K, new += PAGE_SIZE) {
+                *entry++ = new;
+                if (IS_ENABLED(CONFIG_PPC_16K_PAGES) && num != 1) {
+                        *entry++ = new;
+                        *entry++ = new;
+                        *entry++ = new;
+                }
+        }
+
+        return old;
+}
+
+#define pte_update pte_update
+
+#ifdef CONFIG_PPC_16K_PAGES
+#define ptep_get ptep_get
+static inline pte_t ptep_get(pte_t *ptep)
+{
+        pte_basic_t val = READ_ONCE(ptep->pte);
+        pte_t pte = {val, val, val, val};
+
+        return pte;
+}
+#endif /* CONFIG_PPC_16K_PAGES */
+
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h
deleted file mode 100644
index 39eb0154ae2d..000000000000
--- a/arch/powerpc/include/asm/nohash/32/slice.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H
-#define _ASM_POWERPC_NOHASH_32_SLICE_H
-
-#ifdef CONFIG_PPC_MM_SLICES
-
-#define SLICE_LOW_SHIFT 26 /* 64 slices */
-#define SLICE_LOW_TOP (0x100000000ull)
-#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
-#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
-
-#define SLICE_HIGH_SHIFT 0
-#define SLICE_NUM_HIGH 0ul
-#define GET_HIGH_SLICE_INDEX(addr) (addr & 0)
-
-#define SLB_ADDR_LIMIT_DEFAULT DEFAULT_MAP_WINDOW
-
-#endif /* CONFIG_PPC_MM_SLICES */
-
-#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */
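Note: the 8xx pte_update() above fans one logical PTE out over several 32-bit cells — four identical cells per 16k page, 128 cells for a 512k mapping. A standalone model of that fill loop, with sizes as plain numbers and no kernel types (it deliberately ignores the hugepd single-cell case):

/*
 * Model of the 8xx pte_update() fan-out: one value per 4k cell,
 * replicated across all cells that back the logical page.
 */
#include <assert.h>

#define SZ_4K 0x1000UL

static void fill_cells(unsigned long *entry, unsigned long new,
                       unsigned long page_size, int num)
{
        int i, j;

        /* Mirrors the loop in pte_update(): advance by one page per step. */
        for (i = 0; i < num; i += page_size / SZ_4K, new += page_size)
                for (j = 0; j < (int)(page_size / SZ_4K); j++)
                        *entry++ = new;
}

int main(void)
{
        unsigned long cells[4];

        /* 16k page: num = PAGE_SIZE / SZ_4K = 4, all four cells identical. */
        fill_cells(cells, 0x1000, 4 * SZ_4K, 4);
        assert(cells[0] == cells[1] && cells[1] == cells[2] && cells[2] == cells[3]);
        return 0;
}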