author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2018-04-16 16:57:22 +0530
committer Michael Ellerman <mpe@ellerman.id.au>               2018-05-15 22:29:12 +1000
commit    8a6c697b993974eeb05f540fb0ff2515346dbefd (patch)
tree      9f8c94b01635fa078410af3994c03855f9cb62a9 /arch/powerpc/mm/pgtable-book3s64.c
parent    0c4d268029bf3ca78a57d934bdeb43bfdfd4790e (diff)
powerpc/mm: Implement helpers for pagetable fragment support at PMD level
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/mm/pgtable-book3s64.c')
-rw-r--r--  arch/powerpc/mm/pgtable-book3s64.c  84
1 file changed, 84 insertions, 0 deletions
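
The helpers added below carve one backing page into PMD_FRAG_NR fragments of PMD_FRAG_SIZE bytes, keep a per-mm cursor (mm->context.pmd_frag) to the next unused fragment, and reuse the page's reference count so the page is returned to the allocator only when its last fragment is freed. The standalone userspace sketch below models that bookkeeping under simplifying assumptions: it manages a single backing page per mm and omits the page_table_lock and the allocation race that __alloc_for_pmdcache() resolves; PAGE_SIZE_SIM, FRAG_NR and the sim_*/frag_* names are illustrative stand-ins, not kernel identifiers.

/* Userspace sketch of the PMD-fragment bookkeeping; names and values are illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE_SIM	65536UL				/* stand-in for one backing page */
#define FRAG_NR		4				/* stand-in for PMD_FRAG_NR */
#define FRAG_SIZE	(PAGE_SIZE_SIM / FRAG_NR)	/* stand-in for PMD_FRAG_SIZE */

struct sim_page {
	void *mem;		/* like page_address() of the backing page */
	int refcount;		/* like set_page_count()/put_page_testzero() */
};

struct sim_mm {
	void *frag;		/* like mm->context.pmd_frag: next free fragment or NULL */
	struct sim_page page;	/* the single backing page this sketch manages */
};

/* Like get_pmd_from_cache(): pop the cached fragment and advance the cursor. */
static void *frag_get_from_cache(struct sim_mm *mm)
{
	void *ret = mm->frag;

	if (ret) {
		void *next = (char *)ret + FRAG_SIZE;

		/* Once the cursor wraps to a page boundary, the cache is empty. */
		if (((unsigned long)next & (PAGE_SIZE_SIM - 1)) == 0)
			next = NULL;
		mm->frag = next;
	}
	return ret;
}

/* Like __alloc_for_pmdcache(): allocate a page and publish the remainder. */
static void *frag_alloc_page(struct sim_mm *mm)
{
	/* aligned_alloc() keeps the wrap check valid, as page alignment does in the kernel */
	void *mem = aligned_alloc(PAGE_SIZE_SIM, PAGE_SIZE_SIM);

	if (!mem)
		return NULL;
	memset(mem, 0, PAGE_SIZE_SIM);		/* __GFP_ZERO stand-in */

	mm->page.mem = mem;
	mm->page.refcount = FRAG_NR;		/* one reference per fragment */
	/* With a single fragment per page there is nothing left to cache. */
	mm->frag = (FRAG_NR > 1) ? (char *)mem + FRAG_SIZE : NULL;
	return mem;
}

/* Like pmd_fragment_alloc(): try the cache first, then allocate a new page. */
static void *frag_alloc(struct sim_mm *mm)
{
	void *ret = frag_get_from_cache(mm);

	return ret ? ret : frag_alloc_page(mm);
}

/* Like pmd_fragment_free(): the page is freed only with its last fragment. */
static void frag_free(struct sim_mm *mm, void *frag)
{
	(void)frag;	/* the kernel maps the fragment back to its page with virt_to_page() */
	if (--mm->page.refcount == 0) {
		free(mm->page.mem);
		mm->page.mem = NULL;
		mm->frag = NULL;
	}
}

int main(void)
{
	struct sim_mm mm = { 0 };
	void *frags[FRAG_NR];

	for (int i = 0; i < FRAG_NR; i++) {
		frags[i] = frag_alloc(&mm);
		if (!frags[i])
			return 1;
		printf("fragment %d at page offset %lu\n", i,
		       (unsigned long)((char *)frags[i] - (char *)mm.page.mem));
	}
	for (int i = 0; i < FRAG_NR; i++)
		frag_free(&mm, frags[i]);
	return 0;
}

The point mirrored here is what the patch is after: on configurations where a PMD table is smaller than the base page size, several PMD tables can share one page, with put_page_testzero()-style refcounting deciding when the backing page finally goes back to the allocator.
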
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 0a05e99b54a1..47323ed8d7b5 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -20,6 +20,11 @@
#include "mmu_decl.h"
#include <trace/events/thp.h>
+unsigned long __pmd_frag_nr;
+EXPORT_SYMBOL(__pmd_frag_nr);
+unsigned long __pmd_frag_size_shift;
+EXPORT_SYMBOL(__pmd_frag_size_shift);
+
int (*register_process_table)(unsigned long base, unsigned long page_size,
unsigned long tbl_size);
@@ -226,6 +231,85 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
+static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
+{
+ void *pmd_frag, *ret;
+
+ spin_lock(&mm->page_table_lock);
+ ret = mm->context.pmd_frag;
+ if (ret) {
+ pmd_frag = ret + PMD_FRAG_SIZE;
+ /*
+ * If we have taken up all the fragments, mark the PMD page NULL.
+ */
+ if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
+ pmd_frag = NULL;
+ mm->context.pmd_frag = pmd_frag;
+ }
+ spin_unlock(&mm->page_table_lock);
+ return (pmd_t *)ret;
+}
+
+static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
+{
+ void *ret = NULL;
+ struct page *page;
+ gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
+
+ if (mm == &init_mm)
+ gfp &= ~__GFP_ACCOUNT;
+ page = alloc_page(gfp);
+ if (!page)
+ return NULL;
+ if (!pgtable_pmd_page_ctor(page)) {
+ __free_pages(page, 0);
+ return NULL;
+ }
+
+ ret = page_address(page);
+ /*
+ * If we support only one fragment, just return the
+ * allocated page.
+ */
+ if (PMD_FRAG_NR == 1)
+ return ret;
+
+ spin_lock(&mm->page_table_lock);
+ /*
+ * If we find pmd_frag already set, we return
+ * the allocated page with a single fragment
+ * count.
+ */
+ if (likely(!mm->context.pmd_frag)) {
+ set_page_count(page, PMD_FRAG_NR);
+ mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
+ }
+ spin_unlock(&mm->page_table_lock);
+
+ return (pmd_t *)ret;
+}
+
+pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
+{
+ pmd_t *pmd;
+
+ pmd = get_pmd_from_cache(mm);
+ if (pmd)
+ return pmd;
+
+ return __alloc_for_pmdcache(mm);
+}
+
+void pmd_fragment_free(unsigned long *pmd)
+{
+ struct page *page = virt_to_page(pmd);
+
+ if (put_page_testzero(page)) {
+ pgtable_pmd_page_dtor(page);
+ free_unref_page(page);
+ }
+}
+
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
void *pte_frag, *ret;