From bf680d51605662aae5482d87e0e0a54ba6db056b Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Date: Tue, 1 Dec 2015 09:06:45 +0530
Subject: powerpc/mm: Don't track subpage valid bit in pte_t

This frees up 11 bits in pte_t. In a later patch we also change the
pte_t format so that we can start supporting a migration pte at the
pmd level. We now track the 4k subpage valid bits as below.

If we have _PAGE_COMBO set, we override the _PAGE_F_GIX and
_PAGE_F_SECOND bits. Together that gives us 4 bits, each of which
indicates whether any of the four 4k subpages in that group is
valid. ie,

[ group 1 bit ]   [ group 2 bit ]  ..... [ group 4 bit ]
[ subpage 1 - 4]  [ subpage 5 - 8]  ..... [ subpage 13 - 16]

We still track each 4k subpage's slot number and secondary hash
information in the second half of pgtable_t. Removing that subpage
tracking would have a significant overhead on the aim9 and ebizzy
benchmarks, and to support THP with 4K subpages we do need a
pgtable_t of 4096 bytes.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/mm/hash64_64k.c | 34 ++++++++++++++++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/mm/hash64_64k.c')

diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 9ffeae2cbb57..f1b86ba63430 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -15,6 +15,35 @@
 #include <linux/mm.h>
 #include <asm/machdep.h>
 #include <asm/mmu.h>
+/*
+ * index from 0 - 15
+ */
+bool __rpte_sub_valid(real_pte_t rpte, unsigned long index)
+{
+	unsigned long g_idx;
+	unsigned long ptev = pte_val(rpte.pte);
+
+	g_idx = (ptev & _PAGE_COMBO_VALID) >> _PAGE_F_GIX_SHIFT;
+	index = index >> 2;
+	if (g_idx & (0x1 << index))
+		return true;
+	else
+		return false;
+}
+/*
+ * index from 0 - 15
+ */
+static unsigned long mark_subptegroup_valid(unsigned long ptev, unsigned long index)
+{
+	unsigned long g_idx;
+
+	if (!(ptev & _PAGE_COMBO))
+		return ptev;
+	index = index >> 2;
+	g_idx = 0x1 << index;
+
+	return ptev | (g_idx << _PAGE_F_GIX_SHIFT);
+}
 
 int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 		   pte_t *ptep, unsigned long trap, unsigned long flags,
@@ -102,7 +131,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 */
 	if (!(old_pte & _PAGE_COMBO)) {
 		flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
-		old_pte &= ~_PAGE_HPTE_SUB;
+		old_pte &= ~_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND;
 		goto htab_insert_hpte;
 	}
 	/*
@@ -192,7 +221,8 @@ repeat:
 	/* __real_pte use pte_val() any idea why ? FIXME!! */
 	rpte.hidx &= ~(0xfUL << (subpg_index << 2));
 	*hidxp = rpte.hidx | (slot << (subpg_index << 2));
-	new_pte |= (_PAGE_HPTE_SUB0 >> subpg_index);
+	new_pte = mark_subptegroup_valid(new_pte, subpg_index);
+	new_pte |= _PAGE_HASHPTE;
 	/*
 	 * check __real_pte for details on matching smp_rmb()
 	 */
-- cgit
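
For readers unfamiliar with the encoding, the standalone C sketch below
illustrates the group valid-bit scheme the commit message describes: a 4k
subpage index (0-15) collapses to one of four groups, and one bit per group,
kept in the _PAGE_F_SECOND/_PAGE_F_GIX field whenever _PAGE_COMBO is set,
records whether any subpage in that group has a hash PTE. The macro names and
bit positions here are illustrative placeholders, not the kernel's actual
definitions (those live in the powerpc book3s64 hash page-table headers), and
the helpers only mirror, rather than reproduce, __rpte_sub_valid() and
mark_subptegroup_valid() from the patch.

/*
 * Standalone sketch (not kernel code) of the subpage-group valid-bit
 * scheme.  Bit positions below are placeholders chosen for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_F_GIX_SHIFT  12                              /* placeholder shift */
#define PAGE_F_GIX        (0x7UL << PAGE_F_GIX_SHIFT)     /* 3 of the 4 group bits */
#define PAGE_F_SECOND     (0x8UL << PAGE_F_GIX_SHIFT)     /* 4th group bit */
#define PAGE_COMBO_VALID  (PAGE_F_GIX | PAGE_F_SECOND)
#define PAGE_COMBO        (1UL << 16)                     /* placeholder "combo 4k page" bit */

/* Map a subpage index (0-15) to its group (0-3) and mark that group valid. */
static uint64_t mark_subptegroup_valid(uint64_t ptev, unsigned long index)
{
	unsigned long g_idx = index >> 2;	/* 4 subpages per group */

	if (!(ptev & PAGE_COMBO))
		return ptev;
	return ptev | (1UL << (PAGE_F_GIX_SHIFT + g_idx));
}

/* Check whether the group containing this subpage has any valid HPTE. */
static bool rpte_sub_valid(uint64_t ptev, unsigned long index)
{
	unsigned long g_idx = (ptev & PAGE_COMBO_VALID) >> PAGE_F_GIX_SHIFT;

	return g_idx & (1UL << (index >> 2));
}

int main(void)
{
	uint64_t pte = PAGE_COMBO;

	pte = mark_subptegroup_valid(pte, 6);	/* subpage 6 -> group 1 */
	printf("subpage 5 group valid:  %d\n", rpte_sub_valid(pte, 5));	/* 1 */
	printf("subpage 12 group valid: %d\n", rpte_sub_valid(pte, 12));	/* 0 */
	return 0;
}

Using one valid bit per group of four subpages is what lets the patch drop the
sixteen per-subpage _PAGE_HPTE_SUB bits from pte_t, while the per-subpage slot
number and secondary-hash bits remain in the second half of pgtable_t.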