Diffstat (limited to 'arch/powerpc/include/asm/nohash/32')
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h | 38
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/mmu-40x.h     | 68
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/mmu-8xx.h     | 12
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/pgtable.h     |  8
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/pte-40x.h     | 73
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/pte-44x.h     |  3
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/pte-85xx.h    |  3
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/pte-8xx.h     | 58
8 files changed, 51 insertions(+), 212 deletions(-)
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
index 92df40c6cc6b..014799557f60 100644
--- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
@@ -4,42 +4,12 @@
 
 #define PAGE_SHIFT_8M		23
 
-static inline pte_t *hugepd_page(hugepd_t hpd)
-{
-	BUG_ON(!hugepd_ok(hpd));
-
-	return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
-}
-
-static inline unsigned int hugepd_shift(hugepd_t hpd)
-{
-	return PAGE_SHIFT_8M;
-}
-
-static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
-				    unsigned int pdshift)
-{
-	unsigned long idx = (addr & (SZ_4M - 1)) >> PAGE_SHIFT;
-
-	return hugepd_page(hpd) + idx;
-}
-
 static inline void flush_hugetlb_page(struct vm_area_struct *vma,
 				      unsigned long vmaddr)
 {
 	flush_tlb_page(vma, vmaddr);
 }
 
-static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
-{
-	*hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M);
-}
-
-static inline void hugepd_populate_kernel(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
-{
-	*hpdp = __hugepd(__pa(new) | _PMD_PRESENT | _PMD_PAGE_8M);
-}
-
 static inline int check_and_get_huge_psize(int shift)
 {
 	return shift_to_mmu_psize(shift);
@@ -49,6 +19,14 @@ static inline int check_and_get_huge_psize(int shift)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 		     pte_t pte, unsigned long sz);
 
+#define __HAVE_ARCH_HUGE_PTEP_GET
+static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	if (ptep_is_8m_pmdp(mm, addr, ptep))
+		ptep = pte_offset_kernel((pmd_t *)ptep, ALIGN_DOWN(addr, SZ_8M));
+	return ptep_get(ptep);
+}
+
 #define __HAVE_ARCH_HUGE_PTE_CLEAR
 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 				  pte_t *ptep, unsigned long sz)
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
deleted file mode 100644
index 8a8f13a22cf4..000000000000
--- a/arch/powerpc/include/asm/nohash/32/mmu-40x.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_MMU_40X_H_
-#define _ASM_POWERPC_MMU_40X_H_
-
-/*
- * PPC40x support
- */
-
-#define PPC40X_TLB_SIZE 64
-
-/*
- * TLB entries are defined by a "high" tag portion and a "low" data
- * portion. On all architectures, the data portion is 32-bits.
- *
- * TLB entries are managed entirely under software control by reading,
- * writing, and searchoing using the 4xx-specific tlbre, tlbwr, and tlbsx
- * instructions.
- */
-
-#define TLB_LO		1
-#define TLB_HI		0
-
-#define TLB_DATA	TLB_LO
-#define TLB_TAG		TLB_HI
-
-/* Tag portion */
-
-#define TLB_EPN_MASK	0xFFFFFC00	/* Effective Page Number */
-#define TLB_PAGESZ_MASK	0x00000380
-#define TLB_PAGESZ(x)	(((x) & 0x7) << 7)
-#define PAGESZ_1K	0
-#define PAGESZ_4K	1
-#define PAGESZ_16K	2
-#define PAGESZ_64K	3
-#define PAGESZ_256K	4
-#define PAGESZ_1M	5
-#define PAGESZ_4M	6
-#define PAGESZ_16M	7
-#define TLB_VALID	0x00000040	/* Entry is valid */
-
-/* Data portion */
-
-#define TLB_RPN_MASK	0xFFFFFC00	/* Real Page Number */
-#define TLB_PERM_MASK	0x00000300
-#define TLB_EX		0x00000200	/* Instruction execution allowed */
-#define TLB_WR		0x00000100	/* Writes permitted */
-#define TLB_ZSEL_MASK	0x000000F0
-#define TLB_ZSEL(x)	(((x) & 0xF) << 4)
-#define TLB_ATTR_MASK	0x0000000F
-#define TLB_W		0x00000008	/* Caching is write-through */
-#define TLB_I		0x00000004	/* Caching is inhibited */
-#define TLB_M		0x00000002	/* Memory is coherent */
-#define TLB_G		0x00000001	/* Memory is guarded from prefetch */
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
-	unsigned int	id;
-	unsigned int	active;
-	void __user	*vdso;
-} mm_context_t;
-
-#endif /* !__ASSEMBLY__ */
-
-#define mmu_virtual_psize	MMU_PAGE_4K
-#define mmu_linear_psize	MMU_PAGE_256M
-
-#endif /* _ASM_POWERPC_MMU_40X_H_ */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 141d82e249a8..2986f9ba40b8 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -170,8 +170,9 @@
 
 #define mmu_linear_psize	MMU_PAGE_8M
 
-#define MODULES_VADDR	(PAGE_OFFSET - SZ_256M)
 #define MODULES_END	PAGE_OFFSET
+#define MODULES_SIZE	(CONFIG_MODULES_SIZE * SZ_1M)
+#define MODULES_VADDR	(MODULES_END - MODULES_SIZE)
 
 #ifndef __ASSEMBLY__
 
@@ -189,19 +190,14 @@ typedef struct {
 
 #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
 
-/* Page size definitions, common between 32 and 64-bit
+/*
+ * Page size definitions for 8xx
  *
  * shift : is the "PAGE_SHIFT" value for that page size
- * penc  : is the pte encoding mask
  *
  */
 struct mmu_psize_def {
 	unsigned int	shift;	/* number of bits */
-	unsigned int	enc;	/* PTE encoding */
-	unsigned int	ind;	/* Corresponding indirect page size shift */
-	unsigned int	flags;
-#define MMU_PAGE_SIZE_DIRECT	0x1	/* Supported as a direct size */
-#define MMU_PAGE_SIZE_INDIRECT	0x2	/* Supported as an indirect size */
 };
 
 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 9164a9e41b02..b481738c4bb5 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -52,7 +52,7 @@
 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
 
 #define pgd_ERROR(e) \
-	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+	pr_err("%s:%d: bad pgd %08llx.\n", __FILE__, __LINE__, (unsigned long long)pgd_val(e))
 
 /*
  * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
@@ -118,9 +118,7 @@
  * (hardware-defined) PowerPC PTE as closely as possible.
  */
 
-#if defined(CONFIG_40x)
-#include <asm/nohash/32/pte-40x.h>
-#elif defined(CONFIG_44x)
+#if defined(CONFIG_44x)
 #include <asm/nohash/32/pte-44x.h>
 #elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT)
 #include <asm/nohash/pte-e500.h>
@@ -172,7 +170,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 #define pmd_pfn(pmd)		(pmd_val(pmd) >> PAGE_SHIFT)
 #else
 #define pmd_page_vaddr(pmd)	\
-	((const void *)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
+	((const void *)((unsigned long)pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 #define pmd_pfn(pmd)		(__pa(pmd_val(pmd)) >> PAGE_SHIFT)
 #endif
 
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
deleted file mode 100644
index d759cfd74754..000000000000
--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H
-#define _ASM_POWERPC_NOHASH_32_PTE_40x_H
-#ifdef __KERNEL__
-
-/*
- * At present, all PowerPC 400-class processors share a similar TLB
- * architecture. The instruction and data sides share a unified,
- * 64-entry, fully-associative TLB which is maintained totally under
- * software control. In addition, the instruction side has a
- * hardware-managed, 4-entry, fully-associative TLB which serves as a
- * first level to the shared TLB. These two TLBs are known as the UTLB
- * and ITLB, respectively (see "mmu.h" for definitions).
- *
- * There are several potential gotchas here. The 40x hardware TLBLO
- * field looks like this:
- *
- * 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
- * RPN..................... 0 0 EX WR ZSEL....... W I M G
- *
- * Where possible we make the Linux PTE bits match up with this
- *
- * - bits 20 and 21 must be cleared, because we use 4k pages (40x can
- *   support down to 1k pages), this is done in the TLBMiss exception
- *   handler.
- * - We use only zones 0 (for kernel pages) and 1 (for user pages)
- *   of the 16 available. Bit 24-26 of the TLB are cleared in the TLB
- *   miss handler. Bit 27 is PAGE_USER, thus selecting the correct
- *   zone.
- * - PRESENT *must* be in the bottom two bits because swap PTEs
- *   use the top 30 bits. Because 40x doesn't support SMP anyway, M is
- *   irrelevant so we borrow it for PAGE_PRESENT. Bit 30
- *   is cleared in the TLB miss handler before the TLB entry is loaded.
- * - All other bits of the PTE are loaded into TLBLO without
- *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
- *   software PTE bits. We actually use bits 21, 24, 25, and
- *   30 respectively for the software bits: ACCESSED, DIRTY, RW, and
- *   PRESENT.
- */
-
-#define	_PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
-#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
-#define	_PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
-#define	_PAGE_WRITETHRU	0x008	/* W: caching is write-through */
-#define	_PAGE_READ	0x010	/* software: read permission */
-#define _PAGE_SPECIAL	0x020	/* software: Special page */
-#define	_PAGE_DIRTY	0x080	/* software: dirty page */
-#define _PAGE_WRITE	0x100	/* hardware: WR, anded with dirty in exception */
-#define	_PAGE_EXEC	0x200	/* hardware: EX permission */
-#define	_PAGE_ACCESSED	0x400	/* software: R: page referenced */
-
-/* No page size encoding in the linux PTE */
-#define _PAGE_PSIZE		0
-
-/* cache related flags non existing on 40x */
-#define _PAGE_COHERENT	0
-
-#define _PMD_PRESENT	0x400	/* PMD points to page of PTEs */
-#define _PMD_PRESENT_MASK	_PMD_PRESENT
-#define _PMD_BAD	0x802
-#define _PMD_SIZE_4M	0x0c0
-#define _PMD_SIZE_16M	0x0e0
-#define _PMD_USER	0
-
-#define _PTE_NONE_MASK	0
-
-#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
-#define _PAGE_BASE	(_PAGE_BASE_NC)
-
-#include <asm/pgtable-masks.h>
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h
index 851813725237..da0469928273 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h
@@ -75,9 +75,6 @@
 #define _PAGE_NO_CACHE	0x00000400	/* H: I bit */
 #define _PAGE_WRITETHRU	0x00000800	/* H: W bit */
 
-/* No page size encoding in the linux PTE */
-#define _PAGE_PSIZE	0
-
 /* TODO: Add large page lowmem mapping support */
 #define _PMD_PRESENT	0
 #define _PMD_PRESENT_MASK (PAGE_MASK)
diff --git a/arch/powerpc/include/asm/nohash/32/pte-85xx.h b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
index 653a342d3b25..14d64b4f3f14 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-85xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
@@ -31,9 +31,6 @@
 #define _PAGE_WRITETHRU	0x00400	/* H: W bit */
 #define _PAGE_SPECIAL	0x00800	/* S: Special page */
 
-/* No page size encoding in the linux PTE */
-#define _PAGE_PSIZE	0
-
 #define _PMD_PRESENT	0
 #define _PMD_PRESENT_MASK (PAGE_MASK)
 #define _PMD_BAD	(~PAGE_MASK)
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 137dc3c84e45..54ebb91dbdcf 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -74,12 +74,11 @@
 #define _PTE_NONE_MASK	0
 
 #ifdef CONFIG_PPC_16K_PAGES
-#define _PAGE_PSIZE	_PAGE_SPS
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SPS)
 #else
-#define _PAGE_PSIZE	0
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
 #endif
 
-#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
 #define _PAGE_BASE	(_PAGE_BASE_NC)
 
 #include <asm/pgtable-masks.h>
@@ -120,7 +119,7 @@ static inline pte_t pte_mkhuge(pte_t pte)
 
 #define pte_mkhuge pte_mkhuge
 
-static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 				     unsigned long clr, unsigned long set, int huge);
 
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -142,19 +141,12 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *pt
 }
 #define __ptep_set_access_flags __ptep_set_access_flags
 
-static inline unsigned long pgd_leaf_size(pgd_t pgd)
-{
-	if (pgd_val(pgd) & _PMD_PAGE_8M)
-		return SZ_8M;
-	return SZ_4M;
-}
-
-#define pgd_leaf_size pgd_leaf_size
-
-static inline unsigned long pte_leaf_size(pte_t pte)
+static inline unsigned long __pte_leaf_size(pmd_t pmd, pte_t pte)
 {
 	pte_basic_t val = pte_val(pte);
 
+	if (pmd_val(pmd) & _PMD_PAGE_8M)
+		return SZ_8M;
 	if (val & _PAGE_HUGE)
 		return SZ_512K;
 	if (val & _PAGE_SPS)
@@ -162,31 +154,38 @@ static inline unsigned long pte_leaf_size(pte_t pte)
 	return SZ_4K;
 }
 
-#define pte_leaf_size pte_leaf_size
+#define __pte_leaf_size __pte_leaf_size
 
 /*
  * On the 8xx, the page tables are a bit special. For 16k pages, we have
  * 4 identical entries. For 512k pages, we have 128 entries as if it was
  * 4k pages, but they are flagged as 512k pages for the hardware.
- * For other page sizes, we have a single entry in the table.
+ * For 8M pages, we have 1024 entries as if it was 4M pages (PMD_SIZE)
+ * but they are flagged as 8M pages for the hardware.
+ * For 4k pages, we have a single entry in the table.
  */
 static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
-static int hugepd_ok(hugepd_t hpd);
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address);
+
+static inline bool ptep_is_8m_pmdp(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	return (pmd_t *)ptep == pmd_off(mm, ALIGN_DOWN(addr, SZ_8M));
+}
 
 static inline int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
 {
 	if (!huge)
 		return PAGE_SIZE / SZ_4K;
-	else if (hugepd_ok(*((hugepd_t *)pmd)))
-		return 1;
+	else if ((pmd_val(*pmd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M)
+		return SZ_4M / SZ_4K;
 	else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
 		return SZ_16K / SZ_4K;
 	else
 		return SZ_512K / SZ_4K;
 }
 
-static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
-				     unsigned long clr, unsigned long set, int huge)
+static inline pte_basic_t __pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+				       unsigned long clr, unsigned long set, int huge)
 {
 	pte_basic_t *entry = (pte_basic_t *)p;
 	pte_basic_t old = pte_val(*p);
@@ -198,7 +197,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
 
 	for (i = 0; i < num; i += PAGE_SIZE / SZ_4K, new += PAGE_SIZE) {
 		*entry++ = new;
-		if (IS_ENABLED(CONFIG_PPC_16K_PAGES) && num != 1) {
+		if (IS_ENABLED(CONFIG_PPC_16K_PAGES)) {
 			*entry++ = new;
 			*entry++ = new;
 			*entry++ = new;
@@ -208,6 +207,21 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
 
 	return old;
 }
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+				     unsigned long clr, unsigned long set, int huge)
+{
+	pte_basic_t old;
+
+	if (huge && ptep_is_8m_pmdp(mm, addr, ptep)) {
+		pmd_t *pmdp = (pmd_t *)ptep;
+
+		old = __pte_update(mm, addr, pte_offset_kernel(pmdp, 0), clr, set, huge);
+		__pte_update(mm, addr, pte_offset_kernel(pmdp + 1, 0), clr, set, huge);
+	} else {
+		old = __pte_update(mm, addr, ptep, clr, set, huge);
+	}
+	return old;
+}
 #define pte_update pte_update
 
 #ifdef CONFIG_PPC_16K_PAGES
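
The change that ties the pte-8xx.h hunks together is that a huge mapping is no longer described through a separate hugepd directory but by ordinary PTE cells that all carry the same value and size flag: 4 cells for a 16k page, 128 for a 512k page, and two PMD entries of 1024 cells each for an 8M page, which is why the new pte_update() wrapper walks both pmdp and pmdp + 1. The stand-alone sketch below only illustrates that replication idea; its types, constants and helper names (cells_for_size, fill_cells, pte_table) are made-up stand-ins, not the kernel's API.

/* Stand-alone model of the "replicated PTE cells" scheme used by the patch.
 * Everything here is a simplified illustration, not kernel code.
 */
#include <stdio.h>
#include <stddef.h>

#define SZ_4K   0x1000UL
#define SZ_16K  0x4000UL
#define SZ_512K 0x80000UL
#define SZ_4M   0x400000UL
#define SZ_8M   0x800000UL

/* Counterpart of number_of_cells_per_pte(): how many consecutive 4k PTE
 * cells back one mapping of size 'sz'. An 8M mapping is split by the
 * kernel into two PMD entries of SZ_4M / SZ_4K cells each.
 */
static size_t cells_for_size(unsigned long sz)
{
	return sz / SZ_4K;
}

/* Counterpart of __pte_update(): write the same flagged value into every
 * cell backing the mapping, so whichever cell the TLB miss handler reads
 * already carries the page-size information.
 */
static void fill_cells(unsigned long *table, size_t idx, unsigned long sz,
		       unsigned long val)
{
	size_t i, n = cells_for_size(sz);

	for (i = 0; i < n; i++)
		table[idx + i] = val;
}

int main(void)
{
	/* 2048 cells of 4k = 8M, i.e. two 1024-entry PTE tables. */
	static unsigned long pte_table[2048];

	fill_cells(pte_table, 0, SZ_512K, 0x123 /* pretend PTE value + 512k flag */);

	printf("16k page  -> %zu cells\n", cells_for_size(SZ_16K));
	printf("512k page -> %zu cells\n", cells_for_size(SZ_512K));
	printf("8M page   -> %zu cells (split across two PMD entries)\n",
	       cells_for_size(SZ_8M));
	return 0;
}

Because every cell already holds the complete value, huge_ptep_get() and pte_update() only need to locate the first cell (redirecting through pte_offset_kernel() when the pointer they are handed is really an 8M PMD entry), which is what allows the hugepd_page()/hugepd_populate() helpers in hugetlb-8xx.h to be deleted.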