author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>    2016-04-29 23:25:38 +1000
committer Michael Ellerman <mpe@ellerman.id.au>                 2016-05-01 18:32:33 +1000
commit    30bda41aba4efb2370c97e2cbe7385de93ccc372 (patch)
tree      3584b20bb4e895469fb2dc0ca6309402928d1d63 /arch/powerpc/mm
parent    72176dd0ad36c6d8e13515d085f7a229a55a2985 (diff)
powerpc/mm: Drop WIMG in favour of new constants
PowerISA 3.0 introduces two pte bits with the below meaning for radix:

  00 -> Normal Memory
  01 -> Strong Access Order (SAO)
  10 -> Non idempotent I/O (Cache inhibited and guarded)
  11 -> Tolerant I/O (Cache inhibited)

We drop the existing WIMG bits in the Linux page table in favour of the above constants. We lose _PAGE_WRITETHRU with this conversion; the only writethrough user is pgprot_cached_wthru(), which is used by fbdev/controlfb.c (the Apple control display driver) and also by PPC32.

With respect to _PAGE_COHERENT, we have been marking hptes as always coherent for some time now: htab_convert_pte_flags() always added HPTE_R_M.

NOTE: KVM changes need closer review.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
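The conversion to hashed-page-table attribute bits (the "WIG" bits) is shown in the hash_utils_64.c hunk below: each non-normal encoding selects a fixed HPTE_R_* combination. A minimal standalone sketch of that mapping follows, using illustrative stand-in values for the macros (the real definitions live in the book3s/64 pgtable and mmu-hash headers, which are outside this diff):

#include <stdio.h>

/* Stand-ins for the two-bit pte cache-control field (low two bits here) */
#define PAGE_SAO                0x1UL   /* 01: strong access order */
#define PAGE_NON_IDEMPOTENT     0x2UL   /* 10: non idempotent I/O */
#define PAGE_TOLERANT           0x3UL   /* 11: tolerant I/O */
#define PAGE_CACHE_CTL          0x3UL   /* mask covering both bits */

/* Stand-ins for the HPTE attribute bits */
#define HPTE_R_W                0x4UL   /* writethrough */
#define HPTE_R_I                0x2UL   /* cache inhibited */
#define HPTE_R_G                0x1UL   /* guarded */

/* Mirrors the new WIG-bit logic in htab_convert_pte_flags() */
static unsigned long convert_cache_ctl(unsigned long pteflags)
{
        unsigned long rflags = 0;

        if ((pteflags & PAGE_CACHE_CTL) == PAGE_TOLERANT)
                rflags |= HPTE_R_I;
        if ((pteflags & PAGE_CACHE_CTL) == PAGE_NON_IDEMPOTENT)
                rflags |= (HPTE_R_I | HPTE_R_G);
        if ((pteflags & PAGE_CACHE_CTL) == PAGE_SAO)
                rflags |= (HPTE_R_I | HPTE_R_W);
        return rflags;  /* 00 (normal memory) sets no WIG bits */
}

int main(void)
{
        for (unsigned long enc = 0; enc < 4; enc++)
                printf("cache ctl %lu%lu -> rflags 0x%lx\n",
                       (enc >> 1) & 1, enc & 1, convert_cache_ctl(enc));
        return 0;
}

Note that both I/O encodings set HPTE_R_I; that single bit is what the new pte_ci() helper keys on when deciding whether a mapping is cache inhibited.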
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash64_64k.c     |  2
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  | 18
-rw-r--r--  arch/powerpc/mm/pgtable.c        |  8
-rw-r--r--  arch/powerpc/mm/pgtable_64.c     |  4
4 files changed, 14 insertions, 18 deletions
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index e7782862362b..675331083728 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -244,7 +244,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
         * If so, bail out and refault as a 4k page
         */
        if (!mmu_has_feature(MMU_FTR_CI_LARGE_PAGE) &&
-           unlikely(old_pte & _PAGE_NO_CACHE))
+           unlikely(pte_ci(pte)))
                return 0;
        /*
         * Try to lock the PTE, add ACCESSED and DIRTY if it was
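The pte_ci() test used above replaces the open-coded _PAGE_NO_CACHE check. Its definition is not part of this diff; as a reference, a minimal sketch of its likely shape, assuming the helper lands in the book3s/64 pgtable header alongside the new constants:

static inline bool pte_ci(pte_t pte)
{
        unsigned long pte_v = pte_val(pte);

        /* Both I/O encodings (10 and 11) are cache inhibited */
        if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) ||
            ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT))
                return true;
        return false;
}

Normal memory (00) and SAO (01) are not cache inhibited, so they do not trigger the 64K-to-4K refault path.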
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 38ed869c119e..eb928d86ce01 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -192,12 +192,13 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
        /*
         * Add in WIG bits
         */
-       if (pteflags & _PAGE_WRITETHRU)
-               rflags |= HPTE_R_W;
-       if (pteflags & _PAGE_NO_CACHE)
+
+       if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
                rflags |= HPTE_R_I;
-       if (pteflags & _PAGE_GUARDED)
-               rflags |= HPTE_R_G;
+       if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
+               rflags |= (HPTE_R_I | HPTE_R_G);
+       if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+               rflags |= (HPTE_R_I | HPTE_R_W);
        return rflags;
 }
@@ -1142,8 +1143,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
        /* If this PTE is non-cacheable and we have restrictions on
         * using non cacheable large pages, then we switch to 4k
         */
-       if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
-           (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+       if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) {
                if (user_region) {
                        demote_segment_4k(mm, ea);
                        psize = MMU_PAGE_4K;
@@ -1297,13 +1297,13 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
        WARN_ON(hugepage_shift);
 #ifdef CONFIG_PPC_64K_PAGES
-       /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
+       /* If either _PAGE_4K_PFN or cache inhibited is set (and we are on
         * a 64K kernel), then we don't preload, hash_page() will take
         * care of it once we actually try to access the page.
         * That way we don't have to duplicate all of the logic for segment
         * page size demotion here
         */
-       if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
+       if ((pte_val(*ptep) & _PAGE_4K_PFN) || pte_ci(*ptep))
                goto out_exit;
 #endif /* CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 125fb4b54445..db277b6d8e8b 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -38,16 +38,16 @@ static inline int is_exec_fault(void)
 /* We only try to do i/d cache coherency on stuff that looks like
  * reasonably "normal" PTEs. We currently require a PTE to be present
- * and we avoid _PAGE_SPECIAL and _PAGE_NO_CACHE. We also only do that
+ * and we avoid _PAGE_SPECIAL and cache inhibited pte. We also only do that
  * on userspace PTEs
  */
 static inline int pte_looks_normal(pte_t pte)
 {
 #if defined(CONFIG_PPC_BOOK3S_64)
-       if ((pte_val(pte) &
-            (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE)) ==
-           _PAGE_PRESENT) {
+       if ((pte_val(pte) & (_PAGE_PRESENT | _PAGE_SPECIAL)) == _PAGE_PRESENT) {
+               if (pte_ci(pte))
+                       return 0;
                if (pte_user(pte))
                        return 1;
        }
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ea60a24c17a6..b3040b4535da 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -167,10 +167,6 @@ void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
        if ((flags & _PAGE_PRESENT) == 0)
                flags |= pgprot_val(PAGE_KERNEL);
-       /* Non-cacheable page cannot be coherent */
-       if (flags & _PAGE_NO_CACHE)
-               flags &= ~_PAGE_COHERENT;
-
        /* We don't support the 4K PFN hack with ioremap */
        if (flags & _PAGE_4K_PFN)
                return NULL;