Diffstat (limited to 'arch/powerpc/mm/book3s64/radix_pgtable.c')
-rw-r--r--   arch/powerpc/mm/book3s64/radix_pgtable.c   36
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 376dba5992f2..be523e5fe9c5 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1122,18 +1122,25 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 	pte_t *pte;
 
 	/*
-	 * Make sure we align the start vmemmap addr so that we calculate
-	 * the correct start_pfn in altmap boundary check to decided whether
-	 * we should use altmap or RAM based backing memory allocation. Also
-	 * the address need to be aligned for set_pte operation.
-
-	 * If the start addr is already PMD_SIZE aligned we will try to use
-	 * a pmd mapping. We don't want to be too aggressive here beacause
-	 * that will cause more allocations in RAM. So only if the namespace
-	 * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
+	 * If altmap is present, Make sure we align the start vmemmap addr
+	 * to PAGE_SIZE so that we calculate the correct start_pfn in
+	 * altmap boundary check to decide whether we should use altmap or
+	 * RAM based backing memory allocation. Also the address need to be
+	 * aligned for set_pte operation. If the start addr is already
+	 * PMD_SIZE aligned and with in the altmap boundary then we will
+	 * try to use a pmd size altmap mapping else we go for page size
+	 * mapping.
+	 *
+	 * If altmap is not present, align the vmemmap addr to PMD_SIZE and
+	 * always allocate a PMD size page for vmemmap backing.
+	 *
 	 */
-	start = ALIGN_DOWN(start, PAGE_SIZE);
+	if (altmap)
+		start = ALIGN_DOWN(start, PAGE_SIZE);
+	else
+		start = ALIGN_DOWN(start, PMD_SIZE);
+
 	for (addr = start; addr < end; addr = next) {
 		next = pmd_addr_end(addr, end);
@@ -1159,7 +1166,7 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 			 * in altmap block allocation failures, in which case
 			 * we fallback to RAM for vmemmap allocation.
 			 */
-			if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
+			if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
 			    altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
 				/*
 				 * make sure we don't create altmap mappings
@@ -1173,7 +1180,7 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 				vmemmap_set_pmd(pmd, p, node, addr, next);
 				pr_debug("PMD_SIZE vmemmap mapping\n");
 				continue;
-			} else if (altmap) {
+			} else {
 				/*
 				 * A vmemmap block allocation can fail due to
 				 * alignment requirements and we trying to align
@@ -1426,7 +1433,7 @@ unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long add
 	unsigned long old;
 
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+	WARN_ON(!radix__pmd_trans_huge(*pmdp));
 	assert_spin_locked(pmd_lockptr(mm, pmdp));
 #endif
 
@@ -1443,7 +1450,7 @@ unsigned long radix__pud_hugepage_update(struct mm_struct *mm, unsigned long add
 	unsigned long old;
 
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(!pud_devmap(*pudp));
+	WARN_ON(!pud_trans_huge(*pudp));
 	assert_spin_locked(pud_lockptr(mm, pudp));
 #endif
 
@@ -1461,7 +1468,6 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
-	VM_BUG_ON(pmd_devmap(*pmdp));
 	/*
 	 * khugepaged calls this for normal pmd
 	 */
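
To make the vmemmap hunks easier to follow, here is a minimal userspace sketch of the decision they introduce. It is a model, not kernel code: PAGE_SIZE and PMD_SIZE values, struct demo_altmap, crosses_altmap(), and the addresses in main() are illustrative assumptions (radix PMD mappings are 2M, while the base page size depends on kernel config), and ALIGN_DOWN/IS_ALIGNED mirror the kernel macros for power-of-two sizes only.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative constants: radix uses 2M PMD mappings; the base page
 * size depends on kernel config (4K or 64K here assumed 64K).
 */
#define PAGE_SIZE	(64UL << 10)
#define PMD_SIZE	(2UL << 20)
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* Hypothetical stand-in for struct vmem_altmap: one device-backed range. */
struct demo_altmap {
	unsigned long base;	/* first byte backed by device memory */
	unsigned long size;	/* bytes of device memory available */
};

/* The new start alignment: PAGE_SIZE with an altmap, PMD_SIZE without. */
static unsigned long align_vmemmap_start(unsigned long start,
					 const struct demo_altmap *altmap)
{
	if (altmap)
		return ALIGN_DOWN(start, PAGE_SIZE);
	return ALIGN_DOWN(start, PMD_SIZE);
}

/*
 * Rough analogue of altmap_cross_boundary(): would a PMD-size block at
 * addr spill outside the device-backed range?
 */
static bool crosses_altmap(const struct demo_altmap *altmap, unsigned long addr)
{
	return addr < altmap->base ||
	       addr + PMD_SIZE > altmap->base + altmap->size;
}

/*
 * The reordered guard from the second hunk: alignment and boundary
 * problems only steer the allocation away from the altmap when an
 * altmap is actually in use; the RAM-backed path is no longer
 * affected by this test at all.
 */
static bool avoid_altmap_block(unsigned long addr,
			       const struct demo_altmap *altmap)
{
	return altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
			  crosses_altmap(altmap, addr));
}

int main(void)
{
	struct demo_altmap am = { .base = 0x40000000UL, .size = 4UL << 20 };
	unsigned long start = 0x402f1000UL;	/* deliberately unaligned */

	printf("altmap start:      %#lx\n", align_vmemmap_start(start, &am));
	printf("no-altmap start:   %#lx\n", align_vmemmap_start(start, NULL));
	printf("avoid altmap?      %d\n", avoid_altmap_block(start, &am));
	printf("avoid (no altmap)? %d\n", avoid_altmap_block(start, NULL));
	return 0;
}

As I read the hunks together, gating the test on altmap keeps the non-altmap path uniform: it always attempts a PMD-size RAM block, and with the "} else if (altmap)" to "} else {" change it now also falls back to base-page mapping on allocation failure whether or not an altmap was involved.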