Diffstat (limited to 'mm/mprotect.c')
| -rw-r--r-- | mm/mprotect.c | 986 |
1 file changed, 787 insertions, 199 deletions
diff --git a/mm/mprotect.c b/mm/mprotect.c index 94722a4d6b43..283889e4f1ce 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * mm/mprotect.c * @@ -8,7 +9,7 @@ * (C) Copyright 2002 Red Hat Inc, All Rights Reserved */ -#include <linux/mm.h> +#include <linux/pagewalk.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/mman.h> @@ -23,227 +24,687 @@ #include <linux/mmu_notifier.h> #include <linux/migrate.h> #include <linux/perf_event.h> -#include <asm/uaccess.h> -#include <asm/pgtable.h> +#include <linux/pkeys.h> +#include <linux/ksm.h> +#include <linux/uaccess.h> +#include <linux/mm_inline.h> +#include <linux/pgtable.h> +#include <linux/userfaultfd_k.h> +#include <uapi/linux/mman.h> #include <asm/cacheflush.h> +#include <asm/mmu_context.h> #include <asm/tlbflush.h> +#include <asm/tlb.h> -#ifndef pgprot_modify -static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +#include "internal.h" + +static bool maybe_change_pte_writable(struct vm_area_struct *vma, pte_t pte) { - return newprot; + if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) + return false; + + /* Don't touch entries that are not even readable. */ + if (pte_protnone(pte)) + return false; + + /* Do we need write faults for softdirty tracking? */ + if (pte_needs_soft_dirty_wp(vma, pte)) + return false; + + /* Do we need write faults for uffd-wp tracking? */ + if (userfaultfd_pte_wp(vma, pte)) + return false; + + return true; } -#endif -static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, - unsigned long addr, unsigned long end, pgprot_t newprot, - int dirty_accountable, int prot_numa, bool *ret_all_same_node) +static bool can_change_private_pte_writable(struct vm_area_struct *vma, + unsigned long addr, pte_t pte) +{ + struct page *page; + + if (!maybe_change_pte_writable(vma, pte)) + return false; + + /* + * Writable MAP_PRIVATE mapping: We can only special-case on + * exclusive anonymous pages, because we know that our + * write-fault handler similarly would map them writable without + * any additional checks while holding the PT lock. + */ + page = vm_normal_page(vma, addr, pte); + return page && PageAnon(page) && PageAnonExclusive(page); +} + +static bool can_change_shared_pte_writable(struct vm_area_struct *vma, + pte_t pte) +{ + if (!maybe_change_pte_writable(vma, pte)) + return false; + + VM_WARN_ON_ONCE(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte)); + + /* + * Writable MAP_SHARED mapping: "clean" might indicate that the FS still + * needs a real write-fault for writenotify + * (see vma_wants_writenotify()). If "dirty", the assumption is that the + * FS was already notified and we can simply mark the PTE writable + * just like the write-fault handler would do. 
+ */ + return pte_dirty(pte); +} + +bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr, + pte_t pte) +{ + if (!(vma->vm_flags & VM_SHARED)) + return can_change_private_pte_writable(vma, addr, pte); + + return can_change_shared_pte_writable(vma, pte); +} + +static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep, + pte_t pte, int max_nr_ptes, fpb_t flags) +{ + /* No underlying folio, so cannot batch */ + if (!folio) + return 1; + + if (!folio_test_large(folio)) + return 1; + + return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags); +} + +/* Set nr_ptes number of ptes, starting from idx */ +static void prot_commit_flush_ptes(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, pte_t oldpte, pte_t ptent, int nr_ptes, + int idx, bool set_write, struct mmu_gather *tlb) +{ + /* + * Advance the position in the batch by idx; note that if idx > 0, + * then the nr_ptes passed here is <= batch size - idx. + */ + addr += idx * PAGE_SIZE; + ptep += idx; + oldpte = pte_advance_pfn(oldpte, idx); + ptent = pte_advance_pfn(ptent, idx); + + if (set_write) + ptent = pte_mkwrite(ptent, vma); + + modify_prot_commit_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes); + if (pte_needs_flush(oldpte, ptent)) + tlb_flush_pte_range(tlb, addr, nr_ptes * PAGE_SIZE); +} + +/* + * Get max length of consecutive ptes pointing to PageAnonExclusive() pages or + * !PageAnonExclusive() pages, starting from start_idx. Caller must enforce + * that the ptes point to consecutive pages of the same anon large folio. + */ +static int page_anon_exclusive_sub_batch(int start_idx, int max_len, + struct page *first_page, bool expected_anon_exclusive) +{ + int idx; + + for (idx = start_idx + 1; idx < start_idx + max_len; ++idx) { + if (expected_anon_exclusive != PageAnonExclusive(first_page + idx)) + break; + } + return idx - start_idx; +} + +/* + * This function is a result of trying our very best to retain the + * "avoid the write-fault handler" optimization. In can_change_pte_writable(), + * if the vma is a private vma, and we cannot determine whether to change + * the pte to writable just from the vma and the pte, we then need to look + * at the actual page pointed to by the pte. Unfortunately, if we have a + * batch of ptes pointing to consecutive pages of the same anon large folio, + * the anon-exclusivity (or the negation) of the first page does not guarantee + * the anon-exclusivity (or the negation) of the other pages corresponding to + * the pte batch; hence in this case it is incorrect to decide to change or + * not change the ptes to writable just by using information from the first + * pte of the batch. Therefore, we must individually check all pages and + * retrieve sub-batches. 
+ */ +static void commit_anon_folio_batch(struct vm_area_struct *vma, + struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep, + pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb) +{ + bool expected_anon_exclusive; + int sub_batch_idx = 0; + int len; + + while (nr_ptes) { + expected_anon_exclusive = PageAnonExclusive(first_page + sub_batch_idx); + len = page_anon_exclusive_sub_batch(sub_batch_idx, nr_ptes, + first_page, expected_anon_exclusive); + prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, len, + sub_batch_idx, expected_anon_exclusive, tlb); + sub_batch_idx += len; + nr_ptes -= len; + } +} + +static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma, + struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep, + pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb) +{ + bool set_write; + + if (vma->vm_flags & VM_SHARED) { + set_write = can_change_shared_pte_writable(vma, ptent); + prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes, + /* idx = */ 0, set_write, tlb); + return; + } + + set_write = maybe_change_pte_writable(vma, ptent) && + (folio && folio_test_anon(folio)); + if (!set_write) { + prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes, + /* idx = */ 0, set_write, tlb); + return; + } + commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb); +} + +static long change_pte_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, + unsigned long end, pgprot_t newprot, unsigned long cp_flags) { - struct mm_struct *mm = vma->vm_mm; pte_t *pte, oldpte; spinlock_t *ptl; - unsigned long pages = 0; - bool all_same_node = true; - int last_nid = -1; - - pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + long pages = 0; + bool is_private_single_threaded; + bool prot_numa = cp_flags & MM_CP_PROT_NUMA; + bool uffd_wp = cp_flags & MM_CP_UFFD_WP; + bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; + int nr_ptes; + + tlb_change_page_size(tlb, PAGE_SIZE); + pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!pte) + return -EAGAIN; + + if (prot_numa) + is_private_single_threaded = vma_is_single_threaded_private(vma); + + flush_tlb_batched_pending(vma->vm_mm); arch_enter_lazy_mmu_mode(); do { - oldpte = *pte; + nr_ptes = 1; + oldpte = ptep_get(pte); if (pte_present(oldpte)) { + const fpb_t flags = FPB_RESPECT_SOFT_DIRTY | FPB_RESPECT_WRITE; + int max_nr_ptes = (end - addr) >> PAGE_SHIFT; + struct folio *folio = NULL; + struct page *page; pte_t ptent; - bool updated = false; - ptent = ptep_modify_prot_start(mm, addr, pte); - if (!prot_numa) { - ptent = pte_modify(ptent, newprot); - updated = true; - } else { - struct page *page; - - page = vm_normal_page(vma, addr, oldpte); - if (page) { - int this_nid = page_to_nid(page); - if (last_nid == -1) - last_nid = this_nid; - if (last_nid != this_nid) - all_same_node = false; - - /* only check non-shared pages */ - if (!pte_numa(oldpte) && - page_mapcount(page) == 1) { - ptent = pte_mknuma(ptent); - updated = true; - } - } - } + /* Already in the desired state. */ + if (prot_numa && pte_protnone(oldpte)) + continue; + + page = vm_normal_page(vma, addr, oldpte); + if (page) + folio = page_folio(page); /* - * Avoid taking write faults for pages we know to be - * dirty. + * Avoid trapping faults against the zero or KSM + * pages. See similar comment in change_huge_pmd. 
*/ - if (dirty_accountable && pte_dirty(ptent)) { - ptent = pte_mkwrite(ptent); - updated = true; + if (prot_numa && + !folio_can_map_prot_numa(folio, vma, + is_private_single_threaded)) { + + /* determine batch to skip */ + nr_ptes = mprotect_folio_pte_batch(folio, + pte, oldpte, max_nr_ptes, /* flags = */ 0); + continue; } - if (updated) + nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags); + + oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes); + ptent = pte_modify(oldpte, newprot); + + if (uffd_wp) + ptent = pte_mkuffd_wp(ptent); + else if (uffd_wp_resolve) + ptent = pte_clear_uffd_wp(ptent); + + /* + * In some writable, shared mappings, we might want + * to catch actual write access -- see + * vma_wants_writenotify(). + * + * In all writable, private mappings, we have to + * properly handle COW. + * + * In both cases, we can sometimes still change PTEs + * writable and avoid the write-fault handler, for + * example, if a PTE is already dirty and no other + * COW or special handling is required. + */ + if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && + !pte_write(ptent)) + set_write_prot_commit_flush_ptes(vma, folio, page, + addr, pte, oldpte, ptent, nr_ptes, tlb); + else + prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent, + nr_ptes, /* idx = */ 0, /* set_write = */ false, tlb); + pages += nr_ptes; + } else if (pte_none(oldpte)) { + /* + * Nobody plays with any none ptes besides + * userfaultfd when applying the protections. + */ + if (likely(!uffd_wp)) + continue; + + if (userfaultfd_wp_use_markers(vma)) { + /* + * For file-backed mem, we need to be able to + * wr-protect a none pte, because even if the + * pte is none, the page/swap cache could + * exist. Doing that by install a marker. + */ + set_pte_at(vma->vm_mm, addr, pte, + make_pte_marker(PTE_MARKER_UFFD_WP)); pages++; - ptep_modify_prot_commit(mm, addr, pte, ptent); - } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { - swp_entry_t entry = pte_to_swp_entry(oldpte); + } + } else { + softleaf_t entry = softleaf_from_pte(oldpte); + pte_t newpte; + + if (softleaf_is_migration_write(entry)) { + const struct folio *folio = softleaf_to_folio(entry); - if (is_write_migration_entry(entry)) { /* * A protection check is difficult so * just be safe and disable write */ - make_migration_entry_read(&entry); - set_pte_at(mm, addr, pte, - swp_entry_to_pte(entry)); + if (folio_test_anon(folio)) + entry = make_readable_exclusive_migration_entry( + swp_offset(entry)); + else + entry = make_readable_migration_entry(swp_offset(entry)); + newpte = swp_entry_to_pte(entry); + if (pte_swp_soft_dirty(oldpte)) + newpte = pte_swp_mksoft_dirty(newpte); + } else if (softleaf_is_device_private_write(entry)) { + /* + * We do not preserve soft-dirtiness. See + * copy_nonpresent_pte() for explanation. + */ + entry = make_readable_device_private_entry( + swp_offset(entry)); + newpte = swp_entry_to_pte(entry); + if (pte_swp_uffd_wp(oldpte)) + newpte = pte_swp_mkuffd_wp(newpte); + } else if (softleaf_is_marker(entry)) { + /* + * Ignore error swap entries unconditionally, + * because any access should sigbus/sigsegv + * anyway. + */ + if (softleaf_is_poison_marker(entry) || + softleaf_is_guard_marker(entry)) + continue; + /* + * If this is uffd-wp pte marker and we'd like + * to unprotect it, drop it; the next page + * fault will trigger without uffd trapping. 
+ */ + if (uffd_wp_resolve) { + pte_clear(vma->vm_mm, addr, pte); + pages++; + } + continue; + } else { + newpte = oldpte; + } + + if (uffd_wp) + newpte = pte_swp_mkuffd_wp(newpte); + else if (uffd_wp_resolve) + newpte = pte_swp_clear_uffd_wp(newpte); + + if (!pte_same(oldpte, newpte)) { + set_pte_at(vma->vm_mm, addr, pte, newpte); + pages++; } - pages++; } - } while (pte++, addr += PAGE_SIZE, addr != end); + } while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(pte - 1, ptl); - *ret_all_same_node = all_same_node; return pages; } -#ifdef CONFIG_NUMA_BALANCING -static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr, - pmd_t *pmd) +/* + * Return true if we want to split THPs into PTE mappings in change + * protection procedure, false otherwise. + */ +static inline bool +pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags) { - spin_lock(&mm->page_table_lock); - set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd)); - spin_unlock(&mm->page_table_lock); + /* + * pte markers only resides in pte level, if we need pte markers, + * we need to split. For example, we cannot wr-protect a file thp + * (e.g. 2M shmem) because file thp is handled differently when + * split by erasing the pmd so far. + */ + return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma); } -#else -static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr, - pmd_t *pmd) + +/* + * Return true if we want to populate pgtables in change protection + * procedure, false otherwise + */ +static inline bool +pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags) { - BUG(); + /* If not within ioctl(UFFDIO_WRITEPROTECT), then don't bother */ + if (!(cp_flags & MM_CP_UFFD_WP)) + return false; + + /* Populate if the userfaultfd mode requires pte markers */ + return userfaultfd_wp_use_markers(vma); } -#endif /* CONFIG_NUMA_BALANCING */ -static inline unsigned long change_pmd_range(struct vm_area_struct *vma, - pud_t *pud, unsigned long addr, unsigned long end, - pgprot_t newprot, int dirty_accountable, int prot_numa) +/* + * Populate the pgtable underneath for whatever reason if requested. + * When {pte|pmd|...}_alloc() failed we treat it the same way as pgtable + * allocation failures during page faults by kicking OOM and returning + * error. + */ +#define change_pmd_prepare(vma, pmd, cp_flags) \ + ({ \ + long err = 0; \ + if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \ + if (pte_alloc(vma->vm_mm, pmd)) \ + err = -ENOMEM; \ + } \ + err; \ + }) + +/* + * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need to + * have separate change_pmd_prepare() because pte_alloc() returns 0 on success, + * while {pmd|pud|p4d}_alloc() returns the valid pointer on success. 
+ */ +#define change_prepare(vma, high, low, addr, cp_flags) \ + ({ \ + long err = 0; \ + if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \ + low##_t *p = low##_alloc(vma->vm_mm, high, addr); \ + if (p == NULL) \ + err = -ENOMEM; \ + } \ + err; \ + }) + +static inline long change_pmd_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, pud_t *pud, unsigned long addr, + unsigned long end, pgprot_t newprot, unsigned long cp_flags) { pmd_t *pmd; unsigned long next; - unsigned long pages = 0; - bool all_same_node; + long pages = 0; + unsigned long nr_huge_updates = 0; pmd = pmd_offset(pud, addr); do { + long ret; + pmd_t _pmd; +again: next = pmd_addr_end(addr, end); - if (pmd_trans_huge(*pmd)) { - if (next - addr != HPAGE_PMD_SIZE) - split_huge_page_pmd(vma, addr, pmd); - else if (change_huge_pmd(vma, pmd, addr, newprot, - prot_numa)) { - pages += HPAGE_PMD_NR; - continue; + + ret = change_pmd_prepare(vma, pmd, cp_flags); + if (ret) { + pages = ret; + break; + } + + if (pmd_none(*pmd)) + goto next; + + _pmd = pmdp_get_lockless(pmd); + if (pmd_is_huge(_pmd)) { + if ((next - addr != HPAGE_PMD_SIZE) || + pgtable_split_needed(vma, cp_flags)) { + __split_huge_pmd(vma, pmd, addr, false); + /* + * For file-backed, the pmd could have been + * cleared; make sure pmd populated if + * necessary, then fall-through to pte level. + */ + ret = change_pmd_prepare(vma, pmd, cp_flags); + if (ret) { + pages = ret; + break; + } + } else { + ret = change_huge_pmd(tlb, vma, pmd, + addr, newprot, cp_flags); + if (ret) { + if (ret == HPAGE_PMD_NR) { + pages += HPAGE_PMD_NR; + nr_huge_updates++; + } + + /* huge pmd was handled */ + goto next; + } } - /* fall through */ + /* fall through, the trans huge pmd just split */ } - if (pmd_none_or_clear_bad(pmd)) - continue; - pages += change_pte_range(vma, pmd, addr, next, newprot, - dirty_accountable, prot_numa, &all_same_node); - /* - * If we are changing protections for NUMA hinting faults then - * set pmd_numa if the examined pages were all on the same - * node. 
This allows a regular PMD to be handled as one fault - * and effectively batches the taking of the PTL - */ - if (prot_numa && all_same_node) - change_pmd_protnuma(vma->vm_mm, addr, pmd); + ret = change_pte_range(tlb, vma, pmd, addr, next, newprot, + cp_flags); + if (ret < 0) + goto again; + pages += ret; +next: + cond_resched(); } while (pmd++, addr = next, addr != end); + if (nr_huge_updates) + count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates); return pages; } -static inline unsigned long change_pud_range(struct vm_area_struct *vma, - pgd_t *pgd, unsigned long addr, unsigned long end, - pgprot_t newprot, int dirty_accountable, int prot_numa) +static inline long change_pud_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, + unsigned long end, pgprot_t newprot, unsigned long cp_flags) { - pud_t *pud; + struct mmu_notifier_range range; + pud_t *pudp, pud; unsigned long next; - unsigned long pages = 0; + long pages = 0, ret; + + range.start = 0; - pud = pud_offset(pgd, addr); + pudp = pud_offset(p4d, addr); do { +again: next = pud_addr_end(addr, end); - if (pud_none_or_clear_bad(pud)) + ret = change_prepare(vma, pudp, pmd, addr, cp_flags); + if (ret) { + pages = ret; + break; + } + + pud = pudp_get(pudp); + if (pud_none(pud)) + continue; + + if (!range.start) { + mmu_notifier_range_init(&range, + MMU_NOTIFY_PROTECTION_VMA, 0, + vma->vm_mm, addr, end); + mmu_notifier_invalidate_range_start(&range); + } + + if (pud_leaf(pud)) { + if ((next - addr != PUD_SIZE) || + pgtable_split_needed(vma, cp_flags)) { + __split_huge_pud(vma, pudp, addr); + goto again; + } else { + ret = change_huge_pud(tlb, vma, pudp, + addr, newprot, cp_flags); + if (ret == 0) + goto again; + /* huge pud was handled */ + if (ret == HPAGE_PUD_NR) + pages += HPAGE_PUD_NR; + continue; + } + } + + pages += change_pmd_range(tlb, vma, pudp, addr, next, newprot, + cp_flags); + } while (pudp++, addr = next, addr != end); + + if (range.start) + mmu_notifier_invalidate_range_end(&range); + + return pages; +} + +static inline long change_p4d_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, + unsigned long end, pgprot_t newprot, unsigned long cp_flags) +{ + p4d_t *p4d; + unsigned long next; + long pages = 0, ret; + + p4d = p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + ret = change_prepare(vma, p4d, pud, addr, cp_flags); + if (ret) + return ret; + if (p4d_none_or_clear_bad(p4d)) continue; - pages += change_pmd_range(vma, pud, addr, next, newprot, - dirty_accountable, prot_numa); - } while (pud++, addr = next, addr != end); + pages += change_pud_range(tlb, vma, p4d, addr, next, newprot, + cp_flags); + } while (p4d++, addr = next, addr != end); return pages; } -static unsigned long change_protection_range(struct vm_area_struct *vma, - unsigned long addr, unsigned long end, pgprot_t newprot, - int dirty_accountable, int prot_numa) +static long change_protection_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, unsigned long addr, + unsigned long end, pgprot_t newprot, unsigned long cp_flags) { struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; unsigned long next; - unsigned long start = addr; - unsigned long pages = 0; + long pages = 0, ret; BUG_ON(addr >= end); pgd = pgd_offset(mm, addr); - flush_cache_range(vma, addr, end); + tlb_start_vma(tlb, vma); do { next = pgd_addr_end(addr, end); + ret = change_prepare(vma, pgd, p4d, addr, cp_flags); + if (ret) { + pages = ret; + break; + } if (pgd_none_or_clear_bad(pgd)) 
continue; - pages += change_pud_range(vma, pgd, addr, next, newprot, - dirty_accountable, prot_numa); + pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot, + cp_flags); } while (pgd++, addr = next, addr != end); - /* Only flush the TLB if we actually modified any entries: */ - if (pages) - flush_tlb_range(vma, start, end); + tlb_end_vma(tlb, vma); return pages; } -unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, - unsigned long end, pgprot_t newprot, - int dirty_accountable, int prot_numa) +long change_protection(struct mmu_gather *tlb, + struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long cp_flags) { - struct mm_struct *mm = vma->vm_mm; - unsigned long pages; + pgprot_t newprot = vma->vm_page_prot; + long pages; + + BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL); + +#ifdef CONFIG_NUMA_BALANCING + /* + * Ordinary protection updates (mprotect, uffd-wp, softdirty tracking) + * are expected to reflect their requirements via VMA flags such that + * vma_set_page_prot() will adjust vma->vm_page_prot accordingly. + */ + if (cp_flags & MM_CP_PROT_NUMA) + newprot = PAGE_NONE; +#else + WARN_ON_ONCE(cp_flags & MM_CP_PROT_NUMA); +#endif - mmu_notifier_invalidate_range_start(mm, start, end); if (is_vm_hugetlb_page(vma)) - pages = hugetlb_change_protection(vma, start, end, newprot); + pages = hugetlb_change_protection(vma, start, end, newprot, + cp_flags); else - pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa); - mmu_notifier_invalidate_range_end(mm, start, end); + pages = change_protection_range(tlb, vma, start, end, newprot, + cp_flags); return pages; } +static int prot_none_pte_entry(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + return pfn_modify_allowed(pte_pfn(ptep_get(pte)), + *(pgprot_t *)(walk->private)) ? + 0 : -EACCES; +} + +static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask, + unsigned long addr, unsigned long next, + struct mm_walk *walk) +{ + return pfn_modify_allowed(pte_pfn(ptep_get(pte)), + *(pgprot_t *)(walk->private)) ? + 0 : -EACCES; +} + +static int prot_none_test(unsigned long addr, unsigned long next, + struct mm_walk *walk) +{ + return 0; +} + +static const struct mm_walk_ops prot_none_walk_ops = { + .pte_entry = prot_none_pte_entry, + .hugetlb_entry = prot_none_hugetlb_entry, + .test_walk = prot_none_test, + .walk_lock = PGWALK_WRLOCK, +}; + int -mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, - unsigned long start, unsigned long end, unsigned long newflags) +mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, + struct vm_area_struct *vma, struct vm_area_struct **pprev, + unsigned long start, unsigned long end, vm_flags_t newflags) { struct mm_struct *mm = vma->vm_mm; - unsigned long oldflags = vma->vm_flags; + vm_flags_t oldflags = READ_ONCE(vma->vm_flags); long nrpages = (end - start) >> PAGE_SHIFT; + unsigned int mm_cp_flags = 0; unsigned long charged = 0; - pgoff_t pgoff; int error; - int dirty_accountable = 0; + + if (vma_is_sealed(vma)) + return -EPERM; if (newflags == oldflags) { *pprev = vma; @@ -251,12 +712,35 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, } /* + * Do PROT_NONE PFN permission checks here when we can still + * bail out without undoing a lot of state. This is a rather + * uncommon case, so doesn't need to be very optimized. 
+ */ + if (arch_has_pfn_modify_check() && + (oldflags & (VM_PFNMAP|VM_MIXEDMAP)) && + (newflags & VM_ACCESS_FLAGS) == 0) { + pgprot_t new_pgprot = vm_get_page_prot(newflags); + + error = walk_page_range(current->mm, start, end, + &prot_none_walk_ops, &new_pgprot); + if (error) + return error; + } + + /* * If we make a private mapping writable we increase our commit; * but (without finer accounting) cannot reduce our commit if we - * make it unwritable again. hugetlb mapping were accounted for - * even if read-only so there is no need to account for them here + * make it unwritable again except in the anonymous case where no + * anon_vma has yet to be assigned. + * + * hugetlb mapping were accounted for even if read-only so there is + * no need to account for them here. */ if (newflags & VM_WRITE) { + /* Check space limits when area turns into data. */ + if (!may_expand_vm(mm, newflags, nrpages) && + may_expand_vm(mm, oldflags, nrpages)) + return -ENOMEM; if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB| VM_SHARED|VM_NORESERVE))) { charged = nrpages; @@ -264,52 +748,45 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, return -ENOMEM; newflags |= VM_ACCOUNT; } + } else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) && + !vma->anon_vma) { + newflags &= ~VM_ACCOUNT; } - /* - * First try to merge with previous and/or next vma. - */ - pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); - *pprev = vma_merge(mm, *pprev, start, end, newflags, - vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); - if (*pprev) { - vma = *pprev; - goto success; + vma = vma_modify_flags(vmi, *pprev, vma, start, end, &newflags); + if (IS_ERR(vma)) { + error = PTR_ERR(vma); + goto fail; } *pprev = vma; - if (start != vma->vm_start) { - error = split_vma(mm, vma, start, 1); - if (error) - goto fail; - } - - if (end != vma->vm_end) { - error = split_vma(mm, vma, end, 0); - if (error) - goto fail; - } - -success: /* - * vm_flags and vm_page_prot are protected by the mmap_sem + * vm_flags and vm_page_prot are protected by the mmap_lock * held in write mode. */ - vma->vm_flags = newflags; - vma->vm_page_prot = pgprot_modify(vma->vm_page_prot, - vm_get_page_prot(newflags)); + vma_start_write(vma); + vm_flags_reset_once(vma, newflags); + if (vma_wants_manual_pte_write_upgrade(vma)) + mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE; + vma_set_page_prot(vma); - if (vma_wants_writenotify(vma)) { - vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED); - dirty_accountable = 1; - } + change_protection(tlb, vma, start, end, mm_cp_flags); - change_protection(vma, start, end, vma->vm_page_prot, - dirty_accountable, 0); + if ((oldflags & VM_ACCOUNT) && !(newflags & VM_ACCOUNT)) + vm_unacct_memory(nrpages); - vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); - vm_stat_account(mm, newflags, vma->vm_file, nrpages); + /* + * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major + * fault on access. 
+ */ + if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED && + (newflags & VM_WRITE)) { + populate_vma_page_range(vma, start, end, NULL); + } + + vm_stat_account(mm, oldflags, -nrpages); + vm_stat_account(mm, newflags, nrpages); perf_event_mmap(vma); return 0; @@ -318,13 +795,23 @@ fail: return error; } -SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, - unsigned long, prot) +/* + * pkey==-1 when doing a legacy mprotect() + */ +static int do_mprotect_pkey(unsigned long start, size_t len, + unsigned long prot, int pkey) { - unsigned long vm_flags, nstart, end, tmp, reqprot; + unsigned long nstart, end, tmp, reqprot; struct vm_area_struct *vma, *prev; - int error = -EINVAL; + int error; const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP); + const bool rier = (current->personality & READ_IMPLIES_EXEC) && + (prot & PROT_READ); + struct mmu_gather tlb; + struct vma_iterator vmi; + + start = untagged_addr(start); + prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP); if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */ return -EINVAL; @@ -337,25 +824,28 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, end = start + len; if (end <= start) return -ENOMEM; - if (!arch_validate_prot(prot)) + if (!arch_validate_prot(prot, start)) return -EINVAL; reqprot = prot; - /* - * Does the application expect PROT_READ to imply PROT_EXEC: - */ - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) - prot |= PROT_EXEC; - vm_flags = calc_vm_prot_bits(prot); + if (mmap_write_lock_killable(current->mm)) + return -EINTR; - down_write(¤t->mm->mmap_sem); + /* + * If userspace did not allocate the pkey, do not let + * them use it here. + */ + error = -EINVAL; + if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey)) + goto out; - vma = find_vma(current->mm, start); + vma_iter_init(&vmi, current->mm, start); + vma = vma_find(&vmi, end); error = -ENOMEM; if (!vma) goto out; - prev = vma->vm_prev; + if (unlikely(grows & PROT_GROWSDOWN)) { if (vma->vm_start >= end) goto out; @@ -373,47 +863,145 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, goto out; } } + + prev = vma_prev(&vmi); if (start > vma->vm_start) prev = vma; - for (nstart = start ; ; ) { - unsigned long newflags; + tlb_gather_mmu(&tlb, current->mm); + nstart = start; + tmp = vma->vm_start; + for_each_vma_range(vmi, vma, end) { + vm_flags_t mask_off_old_flags; + vm_flags_t newflags; + int new_vma_pkey; - /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ + if (vma->vm_start != tmp) { + error = -ENOMEM; + break; + } + + /* Does the application expect PROT_READ to imply PROT_EXEC */ + if (rier && (vma->vm_flags & VM_MAYEXEC)) + prot |= PROT_EXEC; + + /* + * Each mprotect() call explicitly passes r/w/x permissions. + * If a permission is not passed to mprotect(), it must be + * cleared from the VMA. 
+ */ + mask_off_old_flags = VM_ACCESS_FLAGS | VM_FLAGS_CLEAR; - newflags = vm_flags; - newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC)); + new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey); + newflags = calc_vm_prot_bits(prot, new_vma_pkey); + newflags |= (vma->vm_flags & ~mask_off_old_flags); /* newflags >> 4 shift VM_MAY% in place of VM_% */ - if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) { + if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) { error = -EACCES; - goto out; + break; + } + + if (map_deny_write_exec(vma->vm_flags, newflags)) { + error = -EACCES; + break; + } + + /* Allow architectures to sanity-check the new flags */ + if (!arch_validate_flags(newflags)) { + error = -EINVAL; + break; } error = security_file_mprotect(vma, reqprot, prot); if (error) - goto out; + break; tmp = vma->vm_end; if (tmp > end) tmp = end; - error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); + + if (vma->vm_ops && vma->vm_ops->mprotect) { + error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags); + if (error) + break; + } + + error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags); if (error) - goto out; + break; + + tmp = vma_iter_end(&vmi); nstart = tmp; + prot = reqprot; + } + tlb_finish_mmu(&tlb); - if (nstart < prev->vm_end) - nstart = prev->vm_end; - if (nstart >= end) - goto out; + if (!error && tmp < end) + error = -ENOMEM; - vma = prev->vm_next; - if (!vma || vma->vm_start != nstart) { - error = -ENOMEM; - goto out; - } - } out: - up_write(¤t->mm->mmap_sem); + mmap_write_unlock(current->mm); return error; } + +SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + unsigned long, prot) +{ + return do_mprotect_pkey(start, len, prot, -1); +} + +#ifdef CONFIG_ARCH_HAS_PKEYS + +SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len, + unsigned long, prot, int, pkey) +{ + return do_mprotect_pkey(start, len, prot, pkey); +} + +SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val) +{ + int pkey; + int ret; + + /* No flags supported yet. */ + if (flags) + return -EINVAL; + /* check for unsupported init values */ + if (init_val & ~PKEY_ACCESS_MASK) + return -EINVAL; + + mmap_write_lock(current->mm); + pkey = mm_pkey_alloc(current->mm); + + ret = -ENOSPC; + if (pkey == -1) + goto out; + + ret = arch_set_user_pkey_access(current, pkey, init_val); + if (ret) { + mm_pkey_free(current->mm, pkey); + goto out; + } + ret = pkey; +out: + mmap_write_unlock(current->mm); + return ret; +} + +SYSCALL_DEFINE1(pkey_free, int, pkey) +{ + int ret; + + mmap_write_lock(current->mm); + ret = mm_pkey_free(current->mm, pkey); + mmap_write_unlock(current->mm); + + /* + * We could provide warnings or errors if any VMA still + * has the pkey set here. + */ + return ret; +} + +#endif /* CONFIG_ARCH_HAS_PKEYS */ |
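
The sketches below show the patched machinery from userspace; they are hedged illustrations added for context, not part of the patch.

change_pte_range() works hard to keep the "avoid the write-fault handler" optimization: under MM_CP_TRY_CHANGE_WRITABLE, a PTE whose page is dirty and anon-exclusive is made writable during mprotect() itself, so the next store skips the fault path. A minimal C demo, assuming a kernel with this optimization; the getrusage() fault counting is only approximate:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

static long minor_faults(void)
{
	struct rusage ru;

	getrusage(RUSAGE_SELF, &ru);
	return ru.ru_minflt;
}

int main(void)
{
	size_t len = 64 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	memset(p, 1, len);	/* fault in and dirty all 64 pages */

	/* Drop and re-add write: the second call may upgrade the PTEs
	 * directly because they are dirty and anon-exclusive. */
	if (mprotect(p, len, PROT_READ) ||
	    mprotect(p, len, PROT_READ | PROT_WRITE)) {
		perror("mprotect");
		return 1;
	}

	long before = minor_faults();
	memset(p, 2, len);	/* ideally faults nowhere */
	printf("minor faults during rewrite: %ld (expect ~0)\n",
	       minor_faults() - before);

	munmap(p, len);
	return 0;
}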
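The pte-marker handling and the MM_CP_UFFD_WP / MM_CP_UFFD_WP_RESOLVE branches above back the UFFDIO_WRITEPROTECT ioctl. A minimal sketch that arms and then disarms write protection on anonymous memory; since nothing writes while the range is armed, no fault-reading thread is needed. Assumes a kernel with userfaultfd write-protect support and permission to call userfaultfd() (see vm.unprivileged_userfaultfd):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
	size_t len = 4096;
	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC);
	if (uffd < 0) { perror("userfaultfd"); return 1; }

	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
	if (ioctl(uffd, UFFDIO_API, &api)) { perror("UFFDIO_API"); return 1; }

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }
	memset(p, 0, len);		/* populate the PTEs first */

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)p, .len = len },
		.mode = UFFDIO_REGISTER_MODE_WP,
	};
	if (ioctl(uffd, UFFDIO_REGISTER, &reg)) { perror("UFFDIO_REGISTER"); return 1; }

	/* Arm: kernel-side this reaches change_protection() with MM_CP_UFFD_WP. */
	struct uffdio_writeprotect wp = {
		.range = { .start = (unsigned long)p, .len = len },
		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
	};
	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp)) { perror("arm"); return 1; }

	/* Disarm: mode 0 maps to MM_CP_UFFD_WP_RESOLVE. */
	wp.mode = 0;
	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp)) { perror("disarm"); return 1; }

	close(uffd);
	munmap(p, len);
	return 0;
}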
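In do_mprotect_pkey(), the "(newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS" test rejects permissions a mapping may never gain: VM_MAYREAD/VM_MAYWRITE/VM_MAYEXEC sit exactly four bits above VM_READ/VM_WRITE/VM_EXEC, so the shift lines them up. This is observable from userspace -- a MAP_SHARED mapping of a file opened O_RDONLY has no VM_MAYWRITE, so adding PROT_WRITE must fail with EACCES (the /etc/hostname path is only an example):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* A read-only descriptor: the shared mapping gets no VM_MAYWRITE. */
	int fd = open("/etc/hostname", O_RDONLY);
	if (fd < 0) { perror("open"); return 1; }

	char *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	/* Hits the VM_ACCESS_FLAGS check above and must fail. */
	if (mprotect(p, 4096, PROT_READ | PROT_WRITE))
		printf("mprotect: %s (expected EACCES)\n", strerror(errno));

	munmap(p, 4096);
	close(fd);
	return 0;
}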
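The pkey_mprotect()/pkey_alloc()/pkey_free() definitions at the end route through the same do_mprotect_pkey() as plain mprotect(), only with pkey != -1. A hedged lifecycle sketch using the glibc >= 2.27 wrappers; it assumes x86 hardware with protection keys and exits quietly where pkeys are unavailable:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	/* Lands in SYSCALL_DEFINE2(pkey_alloc, ...) above; fails with
	 * ENOSPC/EINVAL on hardware without protection keys. */
	int pkey = pkey_alloc(0, 0);
	if (pkey < 0) { perror("pkey_alloc"); return 0; }

	/* Tag the mapping: kernel-side, do_mprotect_pkey(start, len, prot, pkey). */
	if (pkey_mprotect(p, len, PROT_READ | PROT_WRITE, pkey))
		perror("pkey_mprotect");

	p[0] = 'x';		/* fine: access rights still allow writes */

	/* Revoking write is a PKRU register update, no mprotect() needed;
	 * after this, stores to p would SIGSEGV, so we stop writing. */
	if (pkey_set(pkey, PKEY_DISABLE_WRITE))
		perror("pkey_set");

	munmap(p, len);
	pkey_free(pkey);
	return 0;
}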