Diffstat (limited to 'arch/arm64/mm/hugetlbpage.c')
-rw-r--r--   arch/arm64/mm/hugetlbpage.c   614
1 file changed, 435 insertions, 179 deletions
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 656e0ece2289..1d90a7e75333 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * arch/arm64/mm/hugetlbpage.c
  *
  * Copyright (C) 2013 Linaro Ltd.
  *
  * Based on arch/x86/mm/hugetlbpage.c.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */

 #include <linux/init.h>
@@ -25,262 +17,526 @@
 #include <asm/mman.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
-#include <asm/pgalloc.h>

-int pmd_huge(pmd_t pmd)
+/*
+ * HugeTLB Support Matrix
+ *
+ * ---------------------------------------------------
+ * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
+ * ---------------------------------------------------
+ * |     4K    |    64K   |   2M  |    32M   |   1G  |
+ * |    16K    |     2M   |  32M  |     1G   |       |
+ * |    64K    |     2M   | 512M  |    16G   |       |
+ * ---------------------------------------------------
+ */
+
+/*
+ * Reserve CMA areas for the largest supported gigantic
+ * huge page when requested. Any other smaller gigantic
+ * huge pages could still be served from those areas.
+ */
+#ifdef CONFIG_CMA
+void __init arm64_hugetlb_cma_reserve(void)
 {
-        return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+        int order;
+
+        if (pud_sect_supported())
+                order = PUD_SHIFT - PAGE_SHIFT;
+        else
+                order = CONT_PMD_SHIFT - PAGE_SHIFT;
+
+        hugetlb_cma_reserve(order);
 }
+#endif /* CONFIG_CMA */

-int pud_huge(pud_t pud)
+static bool __hugetlb_valid_size(unsigned long size)
 {
+        switch (size) {
 #ifndef __PAGETABLE_PMD_FOLDED
-        return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
-#else
-        return 0;
+        case PUD_SIZE:
+                return pud_sect_supported();
 #endif
+        case CONT_PMD_SIZE:
+        case PMD_SIZE:
+        case CONT_PTE_SIZE:
+                return true;
+        }
+
+        return false;
+}
+
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+bool arch_hugetlb_migration_supported(struct hstate *h)
+{
+        size_t pagesize = huge_page_size(h);
+
+        if (!__hugetlb_valid_size(pagesize)) {
+                pr_warn("%s: unrecognized huge page size 0x%lx\n",
+                        __func__, pagesize);
+                return false;
+        }
+        return true;
 }
+#endif

 static int find_num_contig(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, size_t *pgsize)
 {
-        pgd_t *pgd = pgd_offset(mm, addr);
-        pud_t *pud;
-        pmd_t *pmd;
+        pgd_t *pgdp = pgd_offset(mm, addr);
+        p4d_t *p4dp;
+        pud_t *pudp;
+        pmd_t *pmdp;

         *pgsize = PAGE_SIZE;
-        pud = pud_offset(pgd, addr);
-        pmd = pmd_offset(pud, addr);
-        if ((pte_t *)pmd == ptep) {
+        p4dp = p4d_offset(pgdp, addr);
+        pudp = pud_offset(p4dp, addr);
+        pmdp = pmd_offset(pudp, addr);
+        if ((pte_t *)pmdp == ptep) {
                 *pgsize = PMD_SIZE;
                 return CONT_PMDS;
         }
         return CONT_PTES;
 }

+static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
+{
+        int contig_ptes = 1;
+
+        *pgsize = size;
+
+        switch (size) {
+        case CONT_PMD_SIZE:
+                *pgsize = PMD_SIZE;
+                contig_ptes = CONT_PMDS;
+                break;
+        case CONT_PTE_SIZE:
+                *pgsize = PAGE_SIZE;
+                contig_ptes = CONT_PTES;
+                break;
+        default:
+                WARN_ON(!__hugetlb_valid_size(size));
+        }
+
+        return contig_ptes;
+}
+
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+        int ncontig, i;
+        size_t pgsize;
+        pte_t orig_pte = __ptep_get(ptep);
+
+        if (!pte_present(orig_pte) || !pte_cont(orig_pte))
+                return orig_pte;
+
+        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
+        for (i = 0; i < ncontig; i++, ptep++) {
+                pte_t pte = __ptep_get(ptep);
+
+                if (pte_dirty(pte))
+                        orig_pte = pte_mkdirty(orig_pte);
+
+                if (pte_young(pte))
+                        orig_pte = pte_mkyoung(orig_pte);
+        }
+        return orig_pte;
+}
+
+/*
+ * Changing some bits of contiguous entries requires us to follow a
+ * Break-Before-Make approach, breaking the whole contiguous set
+ * before we can change any entries. See ARM DDI 0487A.k_iss10775,
+ * "Misprogramming of the Contiguous bit", page D4-1762.
+ *
+ * This helper performs the break step.
+ */
+static pte_t get_clear_contig(struct mm_struct *mm,
+                              unsigned long addr,
+                              pte_t *ptep,
+                              unsigned long pgsize,
+                              unsigned long ncontig)
+{
+        pte_t pte, tmp_pte;
+        bool present;
+
+        pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize);
+        present = pte_present(pte);
+        while (--ncontig) {
+                ptep++;
+                tmp_pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize);
+                if (present) {
+                        if (pte_dirty(tmp_pte))
+                                pte = pte_mkdirty(pte);
+                        if (pte_young(tmp_pte))
+                                pte = pte_mkyoung(pte);
+                }
+        }
+        return pte;
+}
+
+static pte_t get_clear_contig_flush(struct mm_struct *mm,
+                                    unsigned long addr,
+                                    pte_t *ptep,
+                                    unsigned long pgsize,
+                                    unsigned long ncontig)
+{
+        pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
+        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+        unsigned long end = addr + (pgsize * ncontig);
+
+        __flush_hugetlb_tlb_range(&vma, addr, end, pgsize, true);
+        return orig_pte;
+}
+
+/*
+ * Changing some bits of contiguous entries requires us to follow a
+ * Break-Before-Make approach, breaking the whole contiguous set
+ * before we can change any entries. See ARM DDI 0487A.k_iss10775,
+ * "Misprogramming of the Contiguous bit", page D4-1762.
+ *
+ * This helper performs the break step for use cases where the
+ * original pte is not needed.
+ */
+static void clear_flush(struct mm_struct *mm,
+                        unsigned long addr,
+                        pte_t *ptep,
+                        unsigned long pgsize,
+                        unsigned long ncontig)
+{
+        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+        unsigned long i, saddr = addr;
+
+        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+                __ptep_get_and_clear_anysz(mm, ptep, pgsize);
+
+        if (mm == &init_mm)
+                flush_tlb_kernel_range(saddr, addr);
+        else
+                __flush_hugetlb_tlb_range(&vma, saddr, addr, pgsize, true);
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                     pte_t *ptep, pte_t pte)
+                     pte_t *ptep, pte_t pte, unsigned long sz)
 {
         size_t pgsize;
         int i;
         int ncontig;
-        unsigned long pfn;
-        pgprot_t hugeprot;

-        if (!pte_cont(pte)) {
-                set_pte_at(mm, addr, ptep, pte);
+        ncontig = num_contig_ptes(sz, &pgsize);
+
+        if (!pte_present(pte)) {
+                for (i = 0; i < ncontig; i++, ptep++)
+                        __set_ptes_anysz(mm, ptep, pte, 1, pgsize);
                 return;
         }

-        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
-        pfn = pte_pfn(pte);
-        hugeprot = __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
-        for (i = 0; i < ncontig; i++) {
-                pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
-                         pte_val(pfn_pte(pfn, hugeprot)));
-                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
-                ptep++;
-                pfn += pgsize >> PAGE_SHIFT;
-                addr += pgsize;
-        }
+        /* Only need to "break" if transitioning valid -> valid. */
+        if (pte_cont(pte) && pte_valid(__ptep_get(ptep)))
+                clear_flush(mm, addr, ptep, pgsize, ncontig);
+
+        __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
 }

-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                       unsigned long addr, unsigned long sz)
 {
-        pgd_t *pgd;
-        pud_t *pud;
-        pte_t *pte = NULL;
-
-        pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz);
-        pgd = pgd_offset(mm, addr);
-        pud = pud_alloc(mm, pgd, addr);
-        if (!pud)
+        pgd_t *pgdp;
+        p4d_t *p4dp;
+        pud_t *pudp;
+        pmd_t *pmdp;
+        pte_t *ptep = NULL;
+
+        pgdp = pgd_offset(mm, addr);
+        p4dp = p4d_alloc(mm, pgdp, addr);
+        if (!p4dp)
+                return NULL;
+
+        pudp = pud_alloc(mm, p4dp, addr);
+        if (!pudp)
                 return NULL;

         if (sz == PUD_SIZE) {
-                pte = (pte_t *)pud;
-        } else if (sz == (PAGE_SIZE * CONT_PTES)) {
-                pmd_t *pmd = pmd_alloc(mm, pud, addr);
+                ptep = (pte_t *)pudp;
+        } else if (sz == (CONT_PTE_SIZE)) {
+                pmdp = pmd_alloc(mm, pudp, addr);
+                if (!pmdp)
+                        return NULL;

                 WARN_ON(addr & (sz - 1));
-                /*
-                 * Note that if this code were ever ported to the
-                 * 32-bit arm platform then it will cause trouble in
-                 * the case where CONFIG_HIGHPTE is set, since there
-                 * will be no pte_unmap() to correspond with this
-                 * pte_alloc_map().
-                 */
-                pte = pte_alloc_map(mm, pmd, addr);
+                ptep = pte_alloc_huge(mm, pmdp, addr);
         } else if (sz == PMD_SIZE) {
-                if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
-                    pud_none(*pud))
-                        pte = huge_pmd_share(mm, addr, pud);
+                if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
+                        ptep = huge_pmd_share(mm, vma, addr, pudp);
                 else
-                        pte = (pte_t *)pmd_alloc(mm, pud, addr);
-        } else if (sz == (PMD_SIZE * CONT_PMDS)) {
-                pmd_t *pmd;
-
-                pmd = pmd_alloc(mm, pud, addr);
+                        ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
+        } else if (sz == (CONT_PMD_SIZE)) {
+                pmdp = pmd_alloc(mm, pudp, addr);
                 WARN_ON(addr & (sz - 1));
-                return (pte_t *)pmd;
+                return (pte_t *)pmdp;
         }

-        pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr,
-                 sz, pte, pte_val(*pte));
-        return pte;
+        return ptep;
 }

 pte_t *huge_pte_offset(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
 {
-        pgd_t *pgd;
-        pud_t *pud;
-        pmd_t *pmd;
+        pgd_t *pgdp;
+        p4d_t *p4dp;
+        pud_t *pudp, pud;
+        pmd_t *pmdp, pmd;
+
+        pgdp = pgd_offset(mm, addr);
+        if (!pgd_present(READ_ONCE(*pgdp)))
+                return NULL;

-        pgd = pgd_offset(mm, addr);
-        pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
-        if (!pgd_present(*pgd))
+        p4dp = p4d_offset(pgdp, addr);
+        if (!p4d_present(READ_ONCE(*p4dp)))
                 return NULL;

-        pud = pud_offset(pgd, addr);
-        if (pud_none(*pud))
+        pudp = pud_offset(p4dp, addr);
+        pud = READ_ONCE(*pudp);
+        if (sz != PUD_SIZE && pud_none(pud))
                 return NULL;
-        /* swap or huge page */
-        if (!pud_present(*pud) || pud_huge(*pud))
-                return (pte_t *)pud;
+        /* hugepage or swap? */
+        if (pud_leaf(pud) || !pud_present(pud))
+                return (pte_t *)pudp;
         /* table; check the next level */

-        pmd = pmd_offset(pud, addr);
-        if (pmd_none(*pmd))
+        if (sz == CONT_PMD_SIZE)
+                addr &= CONT_PMD_MASK;
+
+        pmdp = pmd_offset(pudp, addr);
+        pmd = READ_ONCE(*pmdp);
+        if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
+            pmd_none(pmd))
                 return NULL;
-        if (!pmd_present(*pmd) || pmd_huge(*pmd))
-                return (pte_t *)pmd;
+        if (pmd_leaf(pmd) || !pmd_present(pmd))
+                return (pte_t *)pmdp;
+
+        if (sz == CONT_PTE_SIZE)
+                return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));

         return NULL;
 }

-pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
-                         struct page *page, int writable)
+unsigned long hugetlb_mask_last_page(struct hstate *h)
 {
-        size_t pagesize = huge_page_size(hstate_vma(vma));
+        unsigned long hp_size = huge_page_size(h);

-        if (pagesize == CONT_PTE_SIZE) {
-                entry = pte_mkcont(entry);
-        } else if (pagesize == CONT_PMD_SIZE) {
-                entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
-        } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
-                pr_warn("%s: unrecognized huge page size 0x%lx\n",
-                        __func__, pagesize);
+        switch (hp_size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+        case PUD_SIZE:
+                if (pud_sect_supported())
+                        return PGDIR_SIZE - PUD_SIZE;
+                break;
+#endif
+        case CONT_PMD_SIZE:
+                return PUD_SIZE - CONT_PMD_SIZE;
+        case PMD_SIZE:
+                return PUD_SIZE - PMD_SIZE;
+        case CONT_PTE_SIZE:
+                return PMD_SIZE - CONT_PTE_SIZE;
+        default:
+                break;
         }
+
+        return 0UL;
+}
+
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
+{
+        size_t pagesize = 1UL << shift;
+
+        switch (pagesize) {
+#ifndef __PAGETABLE_PMD_FOLDED
+        case PUD_SIZE:
+                if (pud_sect_supported())
+                        return pud_pte(pud_mkhuge(pte_pud(entry)));
+                break;
+#endif
+        case CONT_PMD_SIZE:
+                return pmd_pte(pmd_mkhuge(pmd_mkcont(pte_pmd(entry))));
+        case PMD_SIZE:
+                return pmd_pte(pmd_mkhuge(pte_pmd(entry)));
+        case CONT_PTE_SIZE:
+                return pte_mkcont(entry);
+        default:
+                break;
+        }
+        pr_warn("%s: unrecognized huge page size 0x%lx\n",
+                __func__, pagesize);
         return entry;
 }

-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-                              unsigned long addr, pte_t *ptep)
+void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+                    pte_t *ptep, unsigned long sz)
 {
-        pte_t pte;
+        int i, ncontig;
+        size_t pgsize;

-        if (pte_cont(*ptep)) {
-                int ncontig, i;
-                size_t pgsize;
-                bool is_dirty = false;
-
-                ncontig = find_num_contig(mm, addr, ptep, &pgsize);
-                /* save the 1st pte to return */
-                pte = ptep_get_and_clear(mm, addr, ptep);
-                for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
-                        /*
-                         * If HW_AFDBM is enabled, then the HW could
-                         * turn on the dirty bit for any of the page
-                         * in the set, so check them all.
-                         */
-                        ++ptep;
-                        if (pte_dirty(ptep_get_and_clear(mm, addr, ptep)))
-                                is_dirty = true;
-                }
-                if (is_dirty)
-                        return pte_mkdirty(pte);
-                else
-                        return pte;
-        } else {
-                return ptep_get_and_clear(mm, addr, ptep);
+        ncontig = num_contig_ptes(sz, &pgsize);
+
+        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+                __pte_clear(mm, addr, ptep);
+}
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                              pte_t *ptep, unsigned long sz)
+{
+        int ncontig;
+        size_t pgsize;
+
+        ncontig = num_contig_ptes(sz, &pgsize);
+        return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
+}
+
+/*
+ * huge_ptep_set_access_flags will update access flags (dirty, accesssed)
+ * and write permission.
+ *
+ * For a contiguous huge pte range we need to check whether or not write
+ * permission has to change only on the first pte in the set. Then for
+ * all the contiguous ptes we need to check whether or not there is a
+ * discrepancy between dirty or young.
+ */
+static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
+{
+        int i;
+
+        if (pte_write(pte) != pte_write(__ptep_get(ptep)))
+                return 1;
+
+        for (i = 0; i < ncontig; i++) {
+                pte_t orig_pte = __ptep_get(ptep + i);
+
+                if (pte_dirty(pte) != pte_dirty(orig_pte))
+                        return 1;
+
+                if (pte_young(pte) != pte_young(orig_pte))
+                        return 1;
         }
+
+        return 0;
 }

 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                unsigned long addr, pte_t *ptep,
                                pte_t pte, int dirty)
 {
-        if (pte_cont(pte)) {
-                int ncontig, i, changed = 0;
-                size_t pgsize = 0;
-                unsigned long pfn = pte_pfn(pte);
-                /* Select all bits except the pfn */
-                pgprot_t hugeprot =
-                        __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^
-                                 pte_val(pte));
-
-                pfn = pte_pfn(pte);
-                ncontig = find_num_contig(vma->vm_mm, addr, ptep,
-                                          &pgsize);
-                for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize) {
-                        changed |= ptep_set_access_flags(vma, addr, ptep,
-                                                        pfn_pte(pfn,
-                                                                hugeprot),
-                                                        dirty);
-                        pfn += pgsize >> PAGE_SHIFT;
-                }
-                return changed;
-        } else {
-                return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-        }
+        int ncontig;
+        size_t pgsize = 0;
+        struct mm_struct *mm = vma->vm_mm;
+        pte_t orig_pte;
+
+        VM_WARN_ON(!pte_present(pte));
+
+        if (!pte_cont(pte))
+                return __ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+
+        ncontig = num_contig_ptes(huge_page_size(hstate_vma(vma)), &pgsize);
+
+        if (!__cont_access_flags_changed(ptep, pte, ncontig))
+                return 0;
+
+        orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
+        VM_WARN_ON(!pte_present(orig_pte));
+
+        /* Make sure we don't lose the dirty or young state */
+        if (pte_dirty(orig_pte))
+                pte = pte_mkdirty(pte);
+
+        if (pte_young(orig_pte))
+                pte = pte_mkyoung(pte);
+
+        __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
+        return 1;
 }

 void huge_ptep_set_wrprotect(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep)
 {
-        if (pte_cont(*ptep)) {
-                int ncontig, i;
-                size_t pgsize = 0;
-
-                ncontig = find_num_contig(mm, addr, ptep, &pgsize);
-                for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize)
-                        ptep_set_wrprotect(mm, addr, ptep);
-        } else {
-                ptep_set_wrprotect(mm, addr, ptep);
+        int ncontig;
+        size_t pgsize;
+        pte_t pte;
+
+        pte = __ptep_get(ptep);
+        VM_WARN_ON(!pte_present(pte));
+
+        if (!pte_cont(pte)) {
+                __ptep_set_wrprotect(mm, addr, ptep);
+                return;
         }
+
+        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
+
+        pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
+        pte = pte_wrprotect(pte);
+
+        __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
 }

-void huge_ptep_clear_flush(struct vm_area_struct *vma,
-                           unsigned long addr, pte_t *ptep)
+pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+                            unsigned long addr, pte_t *ptep)
 {
-        if (pte_cont(*ptep)) {
-                int ncontig, i;
-                size_t pgsize = 0;
-
-                ncontig = find_num_contig(vma->vm_mm, addr, ptep,
-                                          &pgsize);
-                for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize)
-                        ptep_clear_flush(vma, addr, ptep);
-        } else {
-                ptep_clear_flush(vma, addr, ptep);
-        }
+        struct mm_struct *mm = vma->vm_mm;
+        size_t pgsize;
+        int ncontig;
+
+        ncontig = num_contig_ptes(huge_page_size(hstate_vma(vma)), &pgsize);
+        return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
 }

-static __init int setup_hugepagesz(char *opt)
+static int __init hugetlbpage_init(void)
 {
-        unsigned long ps = memparse(opt, &opt);
-
-        if (ps == PMD_SIZE) {
-                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
-        } else if (ps == PUD_SIZE) {
+        /*
+         * HugeTLB pages are supported on maximum four page table
+         * levels (PUD, CONT PMD, PMD, CONT PTE) for a given base
+         * page size, corresponding to hugetlb_add_hstate() calls
+         * here.
+         *
+         * HUGE_MAX_HSTATE should at least match maximum supported
+         * HugeTLB page sizes on the platform. Any new addition to
+         * supported HugeTLB page sizes will also require changing
+         * HUGE_MAX_HSTATE as well.
+         */
+        BUILD_BUG_ON(HUGE_MAX_HSTATE < 4);
+        if (pud_sect_supported())
                 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-        } else {
-                hugetlb_bad_size();
-                pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
-                return 0;
+
+        hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
+        hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
+        hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);
+
+        return 0;
+}
+arch_initcall(hugetlbpage_init);
+
+bool __init arch_hugetlb_valid_size(unsigned long size)
+{
+        return __hugetlb_valid_size(size);
+}
+
+pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+        unsigned long psize = huge_page_size(hstate_vma(vma));
+
+        if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
+                /*
+                 * Break-before-make (BBM) is required for all user space mappings
+                 * when the permission changes from executable to non-executable
+                 * in cases where cpu is affected with errata #2645198.
+                 */
+                if (pte_user_exec(__ptep_get(ptep)))
+                        return huge_ptep_clear_flush(vma, addr, ptep);
         }
-        return 1;
+        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
+}
+
+void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+                                  pte_t old_pte, pte_t pte)
+{
+        unsigned long psize = huge_page_size(hstate_vma(vma));
+
+        set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
 }
-__setup("hugepagesz=", setup_hugepagesz);
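
As a quick cross-check of the support matrix in the new header comment, the standalone C sketch below recomputes each huge page size from the base page size and the contiguous-entry shifts. It is illustrative only and not part of the patch; the CONT PTE / CONT PMD shift values (4/4 for 4K, 7/5 for 16K, 5/5 for 64K pages) are assumptions based on a reading of asm/pgtable-hwdef.h rather than anything stated in this diff.

/* matrix_check.c - recompute the arm64 HugeTLB support matrix (illustrative only) */
#include <stdio.h>

static void row(const char *name, unsigned int page_shift,
                unsigned int cont_pte_shift, unsigned int cont_pmd_shift,
                int has_pud)
{
        /* Each translation level resolves (page_shift - 3) bits: 8-byte entries per page. */
        unsigned int pmd_shift = page_shift + (page_shift - 3);
        unsigned int pud_shift = pmd_shift + (page_shift - 3);

        unsigned long long cont_pte = 1ULL << (page_shift + cont_pte_shift);
        unsigned long long pmd      = 1ULL << pmd_shift;
        unsigned long long cont_pmd = 1ULL << (pmd_shift + cont_pmd_shift);

        printf("%-4s  CONT PTE=%7lluK  PMD=%7lluK  CONT PMD=%9lluK",
               name, cont_pte >> 10, pmd >> 10, cont_pmd >> 10);
        if (has_pud)
                printf("  PUD=%lluK", (1ULL << pud_shift) >> 10);
        printf("\n");
}

int main(void)
{
        /* Assumed contiguous shifts: 4K -> 4/4, 16K -> 7/5, 64K -> 5/5. */
        row("4K", 12, 4, 4, 1);
        row("16K", 14, 7, 5, 0);
        row("64K", 16, 5, 5, 0);
        return 0;
}

Compiled and run, this prints 64K/2M/32M/1G for 4K pages, 2M/32M/1G for 16K pages and 2M/512M/16G for 64K pages, matching the matrix in the new header comment row by row.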
