From a43c0eb8364c022725df586e91dd753633374d66 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Tue, 1 Dec 2015 09:06:49 +0530
Subject: powerpc/mm: Convert 4k insert from asm to C

This is similar to the 64K insert. Maybe we want to consolidate the two
at some point.

Acked-by: Scott Wood
Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/hash64_4k.c | 139 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 139 insertions(+)
 create mode 100644 arch/powerpc/mm/hash64_4k.c

diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
new file mode 100644
index 000000000000..3b49c6f18741
--- /dev/null
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright IBM Corporation, 2015
+ * Author Aneesh Kumar K.V
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/mm.h>
+#include <asm/machdep.h>
+#include <asm/mmu.h>
+
+int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
+		   pte_t *ptep, unsigned long trap, unsigned long flags,
+		   int ssize, int subpg_prot)
+{
+	unsigned long hpte_group;
+	unsigned long rflags, pa;
+	unsigned long old_pte, new_pte;
+	unsigned long vpn, hash, slot;
+	unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;
+
+	/*
+	 * atomically mark the linux PTE busy and dirty
+	 */
+	do {
+		pte_t pte = READ_ONCE(*ptep);
+
+		old_pte = pte_val(pte);
+		/* If PTE busy, retry the access */
+		if (unlikely(old_pte & _PAGE_BUSY))
+			return 0;
+		/* If PTE permissions don't match, take page fault */
+		if (unlikely(access & ~old_pte))
+			return 1;
+		/*
+		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
+		 * a write access. Unlike the 4K-subpage insert of a 64K
+		 * page, there is no need to set _PAGE_COMBO here.
+		 */
+		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE;
+		if (access & _PAGE_RW)
+			new_pte |= _PAGE_DIRTY;
+	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
+					  old_pte, new_pte));
+	/*
+	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
+	 * need to add in 0x1 if it's a read-only user page
+	 */
+	rflags = new_pte & _PAGE_USER;
+	if ((new_pte & _PAGE_USER) && !((new_pte & _PAGE_RW) &&
+					(new_pte & _PAGE_DIRTY)))
+		rflags |= 0x1;
+	/*
+	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
+	 */
+	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
+	/*
+	 * Always add C and Memory coherence bit
+	 */
+	rflags |= HPTE_R_C | HPTE_R_M;
+	/*
+	 * Add in WIMG bits
+	 */
+	rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
+			      _PAGE_COHERENT | _PAGE_GUARDED));
+
+	if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
+
+	vpn = hpt_vpn(ea, vsid, ssize);
+	if (unlikely(old_pte & _PAGE_HASHPTE)) {
+		/*
+		 * There MIGHT be an HPTE for this pte
+		 */
+		hash = hpt_hash(vpn, shift, ssize);
+		if (old_pte & _PAGE_F_SECOND)
+			hash = ~hash;
+		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+		slot += (old_pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT;
+
+		if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_4K,
+					 MMU_PAGE_4K, ssize, flags) == -1)
+			old_pte &= ~_PAGE_HPTEFLAGS;
+	}
+
+	if (likely(!(old_pte & _PAGE_HASHPTE))) {
+
+		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
+		hash = hpt_hash(vpn, shift, ssize);
+
+repeat:
+		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+		/* Insert into the hash table, primary slot */
+		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+					  MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+		/*
+		 * Primary is full, try the secondary
+		 */
+		if (unlikely(slot == -1)) {
+			hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+			slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
+						  rflags, HPTE_V_SECONDARY,
+						  MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+			if (slot == -1) {
+				if (mftb() & 0x1)
+					hpte_group = ((hash & htab_hash_mask) *
+						      HPTES_PER_GROUP) & ~0x7UL;
+				ppc_md.hpte_remove(hpte_group);
+				/*
+				 * FIXME!! Should we try the group from which we removed?
+				 */
+				goto repeat;
+			}
+		}
+		/*
+		 * Hypervisor failure. Restore old pte and return -1
+		 * similar to __hash_page_*
+		 */
+		if (unlikely(slot == -2)) {
+			*ptep = __pte(old_pte);
+			hash_failure_debug(ea, access, vsid, trap, ssize,
+					   MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
+			return -1;
+		}
+		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+		new_pte |= (slot << _PAGE_F_GIX_SHIFT) & (_PAGE_F_SECOND | _PAGE_F_GIX);
+	}
+	*ptep = __pte(new_pte & ~_PAGE_BUSY);
+	return 0;
+}
-- cgit
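A note on the locking scheme in the new C code: the Linux PTE is taken "busy"
with a compare-and-swap retry loop rather than a lock, and the busy bit is
dropped again by the plain store at the end of the function. Below is a
minimal user-space sketch of that pattern using C11 atomics; the PTE_* values
and the lock_pte() helper are made-up illustrations, not the kernel's
definitions. The weak compare-exchange may fail spuriously, which the retry
loop absorbs, just as the kernel loop absorbs a racing update of the PTE.

	/*
	 * Stand-alone sketch of the _PAGE_BUSY locking pattern used by
	 * __hash_page_4K() above, ported to C11 atomics so it compiles
	 * and runs in user space. Illustrative values only.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	#define PTE_BUSY	0x1UL
	#define PTE_ACCESSED	0x2UL
	#define PTE_DIRTY	0x4UL
	#define PTE_RW		0x8UL

	static _Atomic unsigned long pte = PTE_RW;	/* a fake writable PTE */

	/*
	 * Returns 0 if the PTE is already busy (the kernel bails out and
	 * lets the access be retried), 1 once we own the PTE with
	 * ACCESSED, and DIRTY for a write, folded in.
	 */
	static int lock_pte(unsigned long access)
	{
		unsigned long old, new;

		do {
			old = atomic_load(&pte);
			if (old & PTE_BUSY)
				return 0;	/* busy: retry the access */
			new = old | PTE_BUSY | PTE_ACCESSED;
			if (access & PTE_RW)
				new |= PTE_DIRTY;
			/* loop if the PTE changed under us, like __cmpxchg_u64 */
		} while (!atomic_compare_exchange_weak(&pte, &old, new));
		return 1;
	}

	int main(void)
	{
		if (lock_pte(PTE_RW))
			printf("locked pte: %#lx\n", atomic_load(&pte));
		/* clearing BUSY on the way out mirrors the function's tail */
		atomic_fetch_and(&pte, ~PTE_BUSY);
		return 0;
	}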
From c6a3c495f05a070d4c4016d4a51c384cba723971 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Tue, 1 Dec 2015 09:06:50 +0530
Subject: powerpc/mm: Add helper for converting pte bit to hpte bits

Instead of open coding it in multiple code paths, export the helper
and add more documentation. Also make sure we don't make assumptions
about pte bit positions.

Acked-by: Scott Wood
Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/hash64_4k.c | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)

diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 3b49c6f18741..ee863137035a 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -53,18 +53,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
 	 * need to add in 0x1 if it's a read-only user page
 	 */
-	rflags = new_pte & _PAGE_USER;
-	if ((new_pte & _PAGE_USER) && !((new_pte & _PAGE_RW) &&
-					(new_pte & _PAGE_DIRTY)))
-		rflags |= 0x1;
-	/*
-	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
-	 */
-	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
-	/*
-	 * Always add C and Memory coherence bit
-	 */
-	rflags |= HPTE_R_C | HPTE_R_M;
+	rflags = htab_convert_pte_flags(new_pte);
 	/*
 	 * Add in WIMG bits
 	 */
-- cgit
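htab_convert_pte_flags() itself is defined elsewhere in the tree, so the diff
only shows its call site. As a rough guide to what the helper has to compute,
the sketch below reproduces the logic of the twelve deleted lines; the flag
values and the convert_pte_flags() name are placeholders for illustration,
not the kernel's real definitions.

	/*
	 * Sketch of the conversion that htab_convert_pte_flags()
	 * centralizes, reconstructed from the lines this patch deletes.
	 * Placeholder values; the real ones live in powerpc asm headers.
	 */
	#include <stdio.h>

	#define PAGE_USER	0x002UL	/* doubles as HPTE PP bit 0x2 here */
	#define PAGE_RW		0x004UL
	#define PAGE_DIRTY	0x008UL
	#define PAGE_EXEC	0x010UL
	#define HPTE_R_N	0x004UL	/* no-execute (set means NX) */
	#define HPTE_R_C	0x080UL	/* changed */
	#define HPTE_R_M	0x100UL	/* memory coherence */

	static unsigned long convert_pte_flags(unsigned long pte)
	{
		unsigned long rflags;

		/* PP bits: USER is already 0x2; add 0x1 for read-only user */
		rflags = pte & PAGE_USER;
		if ((pte & PAGE_USER) && !((pte & PAGE_RW) && (pte & PAGE_DIRTY)))
			rflags |= 0x1;
		/* EXEC -> HPTE_R_N is inverted: N set means no-execute */
		rflags |= (pte & PAGE_EXEC) ? 0 : HPTE_R_N;
		/* always set the Change and Memory-coherence bits */
		return rflags | HPTE_R_C | HPTE_R_M;
	}

	int main(void)
	{
		/* clean (not yet dirty) user RW mapping: expect PP = 0x3 */
		printf("rflags = %#lx\n", convert_pte_flags(PAGE_USER | PAGE_RW));
		return 0;
	}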
From 40e8550afc19dfc588171c089cb3f31f7b9e16f7 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Tue, 1 Dec 2015 09:06:51 +0530
Subject: powerpc/mm: Move WIMG update to helper

The only difference here is that we apply the WIMG mapping early, so
the rflags passed to updatepp will also include it.

Acked-by: Scott Wood
Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/hash64_4k.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index ee863137035a..e7c04542ba62 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -54,11 +54,6 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * need to add in 0x1 if it's a read-only user page
 	 */
 	rflags = htab_convert_pte_flags(new_pte);
-	/*
-	 * Add in WIMG bits
-	 */
-	rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
-			      _PAGE_COHERENT | _PAGE_GUARDED));
 
 	if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
 	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-- cgit
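For context on the five deleted lines: ORing the Linux _PAGE_WRITETHRU,
_PAGE_NO_CACHE, _PAGE_COHERENT and _PAGE_GUARDED bits straight into rflags
works only if those PTE bits happen to sit at the HPTE WIMG bit positions,
which is exactly the kind of bit-position assumption the new helper is meant
to encapsulate. A minimal sketch of the moved logic, with placeholder values:

	/*
	 * Sketch of the WIMG handling this patch folds into
	 * htab_convert_pte_flags(). Illustrative values only.
	 */
	#include <stdio.h>

	#define PAGE_WRITETHRU	0x10UL	/* W: write-through caching */
	#define PAGE_NO_CACHE	0x20UL	/* I: cache-inhibited */
	#define PAGE_COHERENT	0x40UL	/* M: enforce memory coherence */
	#define PAGE_GUARDED	0x80UL	/* G: no speculative access */

	/* after this patch, hpte_updatepp() sees WIMG in rflags as well */
	static unsigned long add_wimg(unsigned long rflags, unsigned long pte)
	{
		return rflags | (pte & (PAGE_WRITETHRU | PAGE_NO_CACHE |
					PAGE_COHERENT | PAGE_GUARDED));
	}

	int main(void)
	{
		/* a cache-inhibited, guarded (I/O style) mapping */
		printf("rflags = %#lx\n", add_wimg(0, PAGE_NO_CACHE | PAGE_GUARDED));
		return 0;
	}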