Diffstat (limited to 'arch/powerpc/mm/book3s64')
-rw-r--r--	arch/powerpc/mm/book3s64/hash_hugepage.c |  2 +-
-rw-r--r--	arch/powerpc/mm/book3s64/hash_pgtable.c  |  3 +--
-rw-r--r--	arch/powerpc/mm/book3s64/hugetlbpage.c   |  2 +-
-rw-r--r--	arch/powerpc/mm/book3s64/pgtable.c       | 12 +++++-------
-rw-r--r--	arch/powerpc/mm/book3s64/radix_pgtable.c | 36 +++++++++++++++++++++---------------
5 files changed, 29 insertions(+), 26 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/hash_hugepage.c b/arch/powerpc/mm/book3s64/hash_hugepage.c
index 15d6f3ea7178..cdfd4fe75edb 100644
--- a/arch/powerpc/mm/book3s64/hash_hugepage.c
+++ b/arch/powerpc/mm/book3s64/hash_hugepage.c
@@ -54,7 +54,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 	/*
 	 * Make sure this is thp or devmap entry
 	 */
-	if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)))
+	if (!(old_pmd & H_PAGE_THP_HUGE))
 		return 0;
 
 	rflags = htab_convert_pte_flags(new_pmd, flags);
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 988948d69bc1..82d31177630b 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -195,7 +195,7 @@ unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr
 	unsigned long old;
 
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+	WARN_ON(!hash__pmd_trans_huge(*pmdp));
 	assert_spin_locked(pmd_lockptr(mm, pmdp));
 #endif
 
@@ -227,7 +227,6 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	VM_BUG_ON(pmd_trans_huge(*pmdp));
-	VM_BUG_ON(pmd_devmap(*pmdp));
 
 	pmd = *pmdp;
 	pmd_clear(pmdp);
diff --git a/arch/powerpc/mm/book3s64/hugetlbpage.c b/arch/powerpc/mm/book3s64/hugetlbpage.c
index 83c3361b358b..2bcbbf9d85ac 100644
--- a/arch/powerpc/mm/book3s64/hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/hugetlbpage.c
@@ -74,7 +74,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
 	/* Make sure this is a hugetlb entry */
-	if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
+	if (old_pte & H_PAGE_THP_HUGE)
 		return 0;
 
 	rflags = htab_convert_pte_flags(new_pte, flags);
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 0db01e10a3f8..c9431ae7f78a 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -62,7 +62,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 {
 	int changed;
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+	WARN_ON(!pmd_trans_huge(*pmdp));
 	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
 #endif
 	changed = !pmd_same(*(pmdp), entry);
@@ -82,7 +82,6 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 {
 	int changed;
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(!pud_devmap(*pudp));
 	assert_spin_locked(pud_lockptr(vma->vm_mm, pudp));
 #endif
 	changed = !pud_same(*(pudp), entry);
@@ -204,8 +203,8 @@ pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
 {
 	pmd_t pmd;
 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
-	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
-		   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
+	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp)) ||
+		  !pmd_present(*pmdp));
 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
 	/*
 	 * if it not a fullmm flush, then we can possibly end up converting
@@ -223,8 +222,7 @@ pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
 	pud_t pud;
 
 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
-	VM_BUG_ON((pud_present(*pudp) && !pud_devmap(*pudp)) ||
-		  !pud_present(*pudp));
+	VM_BUG_ON(!pud_present(*pudp));
 	pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);
 	/*
 	 * if it not a fullmm flush, then we can possibly end up converting
@@ -644,7 +642,7 @@ unsigned long memremap_compat_align(void)
 EXPORT_SYMBOL_GPL(memremap_compat_align);
 #endif
 
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
 {
 	unsigned long prot;
 
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 376dba5992f2..be523e5fe9c5 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1122,18 +1122,25 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 	pmd_t *pmd;
 	pte_t *pte;
 
 	/*
-	 * Make sure we align the start vmemmap addr so that we calculate
-	 * the correct start_pfn in altmap boundary check to decided whether
-	 * we should use altmap or RAM based backing memory allocation. Also
-	 * the address need to be aligned for set_pte operation.
-
-	 * If the start addr is already PMD_SIZE aligned we will try to use
-	 * a pmd mapping. We don't want to be too aggressive here beacause
-	 * that will cause more allocations in RAM. So only if the namespace
-	 * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
+	 * If altmap is present, Make sure we align the start vmemmap addr
+	 * to PAGE_SIZE so that we calculate the correct start_pfn in
+	 * altmap boundary check to decide whether we should use altmap or
+	 * RAM based backing memory allocation. Also the address need to be
+	 * aligned for set_pte operation. If the start addr is already
+	 * PMD_SIZE aligned and with in the altmap boundary then we will
+	 * try to use a pmd size altmap mapping else we go for page size
+	 * mapping.
+	 *
+	 * If altmap is not present, align the vmemmap addr to PMD_SIZE and
+	 * always allocate a PMD size page for vmemmap backing.
+	 *
 	 */
-	start = ALIGN_DOWN(start, PAGE_SIZE);
+	if (altmap)
+		start = ALIGN_DOWN(start, PAGE_SIZE);
+	else
+		start = ALIGN_DOWN(start, PMD_SIZE);
+
 	for (addr = start; addr < end; addr = next) {
 		next = pmd_addr_end(addr, end);
 
@@ -1159,7 +1166,7 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 			 * in altmap block allocation failures, in which case
 			 * we fallback to RAM for vmemmap allocation.
 			 */
-			if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
+			if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
 			    altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
 				/*
 				 * make sure we don't create altmap mappings
@@ -1173,7 +1180,7 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 				vmemmap_set_pmd(pmd, p, node, addr, next);
 				pr_debug("PMD_SIZE vmemmap mapping\n");
 				continue;
-			} else if (altmap) {
+			} else {
 				/*
 				 * A vmemmap block allocation can fail due to
 				 * alignment requirements and we trying to align
@@ -1426,7 +1433,7 @@ unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long add
 	unsigned long old;
 
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+	WARN_ON(!radix__pmd_trans_huge(*pmdp));
 	assert_spin_locked(pmd_lockptr(mm, pmdp));
 #endif
 
@@ -1443,7 +1450,7 @@ unsigned long radix__pud_hugepage_update(struct mm_struct *mm, unsigned long add
 	unsigned long old;
 
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(!pud_devmap(*pudp));
+	WARN_ON(!pud_trans_huge(*pudp));
 	assert_spin_locked(pud_lockptr(mm, pudp));
 #endif
 
@@ -1461,7 +1468,6 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
-	VM_BUG_ON(pmd_devmap(*pmdp));
 	/*
 	 * khugepaged calls this for normal pmd
 	 */
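The new start-alignment rule and the reworked PTE-fallback condition in the radix__vmemmap_populate() hunks above boil down to two small decisions. The following is a minimal standalone C sketch of just that logic, assuming 64K base pages and 2M PMD mappings (both are config-dependent in the real kernel); vmemmap_align_start() and want_pte_fallback() are illustrative helper names, not kernel functions, and the altmap presence/boundary state is passed in as plain booleans rather than a struct vmem_altmap.

/*
 * Userspace sketch of the vmemmap decisions changed above.
 * Illustrative only: sizes and helper names are assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	0x10000UL	/* assumption: 64K base pages */
#define PMD_SIZE	0x200000UL	/* assumption: 2M PMD mappings */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/*
 * New rule: with an altmap, only PAGE_SIZE alignment is forced, so the
 * start_pfn used by the altmap boundary check stays correct; without an
 * altmap, round down to PMD_SIZE so every step of the populate loop can
 * be backed by a PMD-size page.
 */
static unsigned long vmemmap_align_start(unsigned long start, bool has_altmap)
{
	return ALIGN_DOWN(start, has_altmap ? PAGE_SIZE : PMD_SIZE);
}

/*
 * New fallback condition: only the altmap path can drop to PTE-size
 * mappings (unaligned addr, or the allocation would cross the altmap
 * boundary). The RAM-backed path always attempts the PMD mapping,
 * since start was pre-aligned to PMD_SIZE above.
 */
static bool want_pte_fallback(unsigned long addr, bool has_altmap,
			      bool crosses_altmap_boundary)
{
	return has_altmap &&
	       (!IS_ALIGNED(addr, PMD_SIZE) || crosses_altmap_boundary);
}

int main(void)
{
	/* PAGE_SIZE-aligned but not PMD_SIZE-aligned start address */
	unsigned long start = 0x1234567890000UL;

	printf("altmap:    start -> %#lx\n", vmemmap_align_start(start, true));
	printf("no altmap: start -> %#lx\n", vmemmap_align_start(start, false));
	printf("pte fallback (altmap, unaligned):    %d\n",
	       want_pte_fallback(start, true, false));
	printf("pte fallback (no altmap, unaligned): %d\n",
	       want_pte_fallback(start, false, false));
	return 0;
}

Run against a start address that is 64K-aligned but not 2M-aligned, the sketch keeps the address as-is in the altmap case (and reports a PTE fallback for the unaligned step), while the RAM-backed case rounds down to the 2M boundary so the PMD path applies throughout, matching the behaviour the rewritten comment in the hunk describes.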