Diffstat (limited to 'mm/mmap.c')
| -rw-r--r-- | mm/mmap.c | 3650 |
1 file changed, 1146 insertions, 2504 deletions
diff --git a/mm/mmap.c b/mm/mmap.c index fbad7b091090..4bdb9ffa9e25 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * mm/mmap.c * @@ -6,10 +7,13 @@ * Address space accounting code <alan@lxorguk.ukuu.org.uk> */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/slab.h> #include <linux/backing-dev.h> #include <linux/mm.h> +#include <linux/mm_inline.h> #include <linux/shm.h> #include <linux/mman.h> #include <linux/pagemap.h> @@ -22,264 +26,116 @@ #include <linux/personality.h> #include <linux/security.h> #include <linux/hugetlb.h> +#include <linux/shmem_fs.h> #include <linux/profile.h> #include <linux/export.h> #include <linux/mount.h> #include <linux/mempolicy.h> #include <linux/rmap.h> #include <linux/mmu_notifier.h> +#include <linux/mmdebug.h> #include <linux/perf_event.h> #include <linux/audit.h> #include <linux/khugepaged.h> #include <linux/uprobes.h> -#include <linux/rbtree_augmented.h> -#include <linux/sched/sysctl.h> #include <linux/notifier.h> #include <linux/memory.h> - -#include <asm/uaccess.h> +#include <linux/printk.h> +#include <linux/userfaultfd_k.h> +#include <linux/moduleparam.h> +#include <linux/pkeys.h> +#include <linux/oom.h> +#include <linux/sched/mm.h> +#include <linux/ksm.h> +#include <linux/memfd.h> + +#include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/tlb.h> #include <asm/mmu_context.h> +#define CREATE_TRACE_POINTS +#include <trace/events/mmap.h> + #include "internal.h" #ifndef arch_mmap_check #define arch_mmap_check(addr, len, flags) (0) #endif -#ifndef arch_rebalance_pgtables -#define arch_rebalance_pgtables(addr, len) (addr) +#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS +const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN; +int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX; +int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS; +#endif +#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS +const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN; +const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX; +int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; #endif -static void unmap_region(struct mm_struct *mm, - struct vm_area_struct *vma, struct vm_area_struct *prev, - unsigned long start, unsigned long end); - -/* description of effects of mapping type and prot in current implementation. - * this is due to the limited x86 page protection hardware. The expected - * behavior is in parens: - * - * map_type prot - * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC - * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes - * w: (no) no w: (no) no w: (yes) yes w: (no) no - * x: (no) no x: (no) yes x: (no) yes x: (yes) yes - * - * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes - * w: (no) no w: (no) no w: (copy) copy w: (no) no - * x: (no) no x: (no) yes x: (no) yes x: (yes) yes - * - */ -pgprot_t protection_map[16] = { - __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, - __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 -}; +static bool ignore_rlimit_data; +core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); -pgprot_t vm_get_page_prot(unsigned long vm_flags) +/* Update vma->vm_page_prot to reflect vma->vm_flags. 
*/ +void vma_set_page_prot(struct vm_area_struct *vma) { - return __pgprot(pgprot_val(protection_map[vm_flags & - (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | - pgprot_val(arch_vm_get_page_prot(vm_flags))); -} -EXPORT_SYMBOL(vm_get_page_prot); - -int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */ -int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */ -int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; -unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ -unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ -/* - * Make sure vm_committed_as in one cacheline and not cacheline shared with - * other variables. It can be updated by several CPUs frequently. - */ -struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp; + vm_flags_t vm_flags = vma->vm_flags; + pgprot_t vm_page_prot; -/* - * The global memory commitment made in the system can be a metric - * that can be used to drive ballooning decisions when Linux is hosted - * as a guest. On Hyper-V, the host implements a policy engine for dynamically - * balancing memory across competing virtual machines that are hosted. - * Several metrics drive this policy engine including the guest reported - * memory commitment. - */ -unsigned long vm_memory_committed(void) -{ - return percpu_counter_read_positive(&vm_committed_as); + vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); + if (vma_wants_writenotify(vma, vm_page_prot)) { + vm_flags &= ~VM_SHARED; + vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); + } + /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */ + WRITE_ONCE(vma->vm_page_prot, vm_page_prot); } -EXPORT_SYMBOL_GPL(vm_memory_committed); /* - * Check that a process has enough memory to allocate a new virtual - * mapping. 0 means there is enough memory for the allocation to - * succeed and -ENOMEM implies there is not. - * - * We currently support three overcommit policies, which are set via the - * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting - * - * Strict overcommit modes added 2002 Feb 26 by Alan Cox. - * Additional code 2002 Jul 20 by Robert Love. + * check_brk_limits() - Use platform specific check of range & verify mlock + * limits. + * @addr: The address to check + * @len: The size of increase. * - * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise. - * - * Note this is a helper function intended to be used by LSMs which - * wish to use this logic. + * Return: 0 on success. */ -int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) +static int check_brk_limits(unsigned long addr, unsigned long len) { - unsigned long free, allowed, reserve; - - vm_acct_memory(pages); - - /* - * Sometimes we want to use more memory than we have - */ - if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) - return 0; - - if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { - free = global_page_state(NR_FREE_PAGES); - free += global_page_state(NR_FILE_PAGES); - - /* - * shmem pages shouldn't be counted as free in this - * case, they can't be purged, only swapped out, and - * that won't affect the overall amount of available - * memory in the system. - */ - free -= global_page_state(NR_SHMEM); - - free += get_nr_swap_pages(); - - /* - * Any slabs which are created with the - * SLAB_RECLAIM_ACCOUNT flag claim to have contents - * which are reclaimable, under pressure. 
The dentry - * cache and most inode caches should fall into this - */ - free += global_page_state(NR_SLAB_RECLAIMABLE); - - /* - * Leave reserved pages. The pages are not for anonymous pages. - */ - if (free <= totalreserve_pages) - goto error; - else - free -= totalreserve_pages; - - /* - * Reserve some for root - */ - if (!cap_sys_admin) - free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); - - if (free > pages) - return 0; - - goto error; - } - - allowed = (totalram_pages - hugetlb_total_pages()) - * sysctl_overcommit_ratio / 100; - /* - * Reserve some for root - */ - if (!cap_sys_admin) - allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); - allowed += total_swap_pages; - - /* - * Don't let a single process grow so big a user can't recover - */ - if (mm) { - reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); - allowed -= min(mm->total_vm / 32, reserve); - } - - if (percpu_counter_read_positive(&vm_committed_as) < allowed) - return 0; -error: - vm_unacct_memory(pages); + unsigned long mapped_addr; - return -ENOMEM; -} + mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); + if (IS_ERR_VALUE(mapped_addr)) + return mapped_addr; -/* - * Requires inode->i_mapping->i_mmap_mutex - */ -static void __remove_shared_vm_struct(struct vm_area_struct *vma, - struct file *file, struct address_space *mapping) -{ - if (vma->vm_flags & VM_DENYWRITE) - atomic_inc(&file_inode(file)->i_writecount); - if (vma->vm_flags & VM_SHARED) - mapping->i_mmap_writable--; - - flush_dcache_mmap_lock(mapping); - if (unlikely(vma->vm_flags & VM_NONLINEAR)) - list_del_init(&vma->shared.nonlinear); - else - vma_interval_tree_remove(vma, &mapping->i_mmap); - flush_dcache_mmap_unlock(mapping); + return mlock_future_ok(current->mm, current->mm->def_flags, len) + ? 0 : -EAGAIN; } -/* - * Unlink a file-based vm structure from its interval tree, to hide - * vma from rmap and vmtruncate before freeing its page tables. - */ -void unlink_file_vma(struct vm_area_struct *vma) -{ - struct file *file = vma->vm_file; - - if (file) { - struct address_space *mapping = file->f_mapping; - mutex_lock(&mapping->i_mmap_mutex); - __remove_shared_vm_struct(vma, file, mapping); - mutex_unlock(&mapping->i_mmap_mutex); - } -} - -/* - * Close a vm structure and free it, returning the next. 
- */ -static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) -{ - struct vm_area_struct *next = vma->vm_next; - - might_sleep(); - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); - if (vma->vm_file) - fput(vma->vm_file); - mpol_put(vma_policy(vma)); - kmem_cache_free(vm_area_cachep, vma); - return next; -} - -static unsigned long do_brk(unsigned long addr, unsigned long len); - SYSCALL_DEFINE1(brk, unsigned long, brk) { - unsigned long rlim, retval; - unsigned long newbrk, oldbrk; + unsigned long newbrk, oldbrk, origbrk; struct mm_struct *mm = current->mm; + struct vm_area_struct *brkvma, *next = NULL; unsigned long min_brk; - bool populate; + bool populate = false; + LIST_HEAD(uf); + struct vma_iterator vmi; - down_write(&mm->mmap_sem); + if (mmap_write_lock_killable(mm)) + return -EINTR; + origbrk = mm->brk; + + min_brk = mm->start_brk; #ifdef CONFIG_COMPAT_BRK /* * CONFIG_COMPAT_BRK can still be overridden by setting * randomize_va_space to 2, which will still cause mm->start_brk * to be arbitrarily shifted */ - if (current->brk_randomized) - min_brk = mm->start_brk; - else + if (!current->brk_randomized) min_brk = mm->end_data; -#else - min_brk = mm->start_brk; #endif if (brk < min_brk) goto out; @@ -290,935 +146,220 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) * segment grow beyond its set limit the in case where the limit is * not page aligned -Ram Gupta */ - rlim = rlimit(RLIMIT_DATA); - if (rlim < RLIM_INFINITY && (brk - mm->start_brk) + - (mm->end_data - mm->start_data) > rlim) + if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, + mm->end_data, mm->start_data)) goto out; newbrk = PAGE_ALIGN(brk); oldbrk = PAGE_ALIGN(mm->brk); - if (oldbrk == newbrk) - goto set_brk; + if (oldbrk == newbrk) { + mm->brk = brk; + goto success; + } /* Always allow shrinking brk. */ if (brk <= mm->brk) { - if (!do_munmap(mm, newbrk, oldbrk-newbrk)) - goto set_brk; - goto out; + /* Search one past newbrk */ + vma_iter_init(&vmi, mm, newbrk); + brkvma = vma_find(&vmi, oldbrk); + if (!brkvma || brkvma->vm_start >= oldbrk) + goto out; /* mapping intersects with an existing non-brk vma. */ + /* + * mm->brk must be protected by write mmap_lock. + * do_vmi_align_munmap() will drop the lock on success, so + * update it before calling do_vma_munmap(). + */ + mm->brk = brk; + if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf, + /* unlock = */ true)) + goto out; + + goto success_unlocked; } - /* Check against existing mmap mappings. */ - if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) + if (check_brk_limits(oldbrk, newbrk - oldbrk)) + goto out; + + /* + * Only check if the next VMA is within the stack_guard_gap of the + * expansion area + */ + vma_iter_init(&vmi, mm, oldbrk); + next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap); + if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; + brkvma = vma_prev_limit(&vmi, mm->start_brk); /* Ok, looks good - let it rip. 
*/ - if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) + if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0) goto out; -set_brk: mm->brk = brk; - populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; - up_write(&mm->mmap_sem); + if (mm->def_flags & VM_LOCKED) + populate = true; + +success: + mmap_write_unlock(mm); +success_unlocked: + userfaultfd_unmap_complete(mm, &uf); if (populate) mm_populate(oldbrk, newbrk - oldbrk); return brk; out: - retval = mm->brk; - up_write(&mm->mmap_sem); - return retval; -} - -static long vma_compute_subtree_gap(struct vm_area_struct *vma) -{ - unsigned long max, subtree_gap; - max = vma->vm_start; - if (vma->vm_prev) - max -= vma->vm_prev->vm_end; - if (vma->vm_rb.rb_left) { - subtree_gap = rb_entry(vma->vm_rb.rb_left, - struct vm_area_struct, vm_rb)->rb_subtree_gap; - if (subtree_gap > max) - max = subtree_gap; - } - if (vma->vm_rb.rb_right) { - subtree_gap = rb_entry(vma->vm_rb.rb_right, - struct vm_area_struct, vm_rb)->rb_subtree_gap; - if (subtree_gap > max) - max = subtree_gap; - } - return max; -} - -#ifdef CONFIG_DEBUG_VM_RB -static int browse_rb(struct rb_root *root) -{ - int i = 0, j, bug = 0; - struct rb_node *nd, *pn = NULL; - unsigned long prev = 0, pend = 0; - - for (nd = rb_first(root); nd; nd = rb_next(nd)) { - struct vm_area_struct *vma; - vma = rb_entry(nd, struct vm_area_struct, vm_rb); - if (vma->vm_start < prev) { - printk("vm_start %lx prev %lx\n", vma->vm_start, prev); - bug = 1; - } - if (vma->vm_start < pend) { - printk("vm_start %lx pend %lx\n", vma->vm_start, pend); - bug = 1; - } - if (vma->vm_start > vma->vm_end) { - printk("vm_end %lx < vm_start %lx\n", - vma->vm_end, vma->vm_start); - bug = 1; - } - if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { - printk("free gap %lx, correct %lx\n", - vma->rb_subtree_gap, - vma_compute_subtree_gap(vma)); - bug = 1; - } - i++; - pn = nd; - prev = vma->vm_start; - pend = vma->vm_end; - } - j = 0; - for (nd = pn; nd; nd = rb_prev(nd)) - j++; - if (i != j) { - printk("backwards %d, forwards %d\n", j, i); - bug = 1; - } - return bug ? 
-1 : i; -} - -static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore) -{ - struct rb_node *nd; - - for (nd = rb_first(root); nd; nd = rb_next(nd)) { - struct vm_area_struct *vma; - vma = rb_entry(nd, struct vm_area_struct, vm_rb); - BUG_ON(vma != ignore && - vma->rb_subtree_gap != vma_compute_subtree_gap(vma)); - } -} - -void validate_mm(struct mm_struct *mm) -{ - int bug = 0; - int i = 0; - unsigned long highest_address = 0; - struct vm_area_struct *vma = mm->mmap; - while (vma) { - struct anon_vma_chain *avc; - vma_lock_anon_vma(vma); - list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) - anon_vma_interval_tree_verify(avc); - vma_unlock_anon_vma(vma); - highest_address = vma->vm_end; - vma = vma->vm_next; - i++; - } - if (i != mm->map_count) { - printk("map_count %d vm_next %d\n", mm->map_count, i); - bug = 1; - } - if (highest_address != mm->highest_vm_end) { - printk("mm->highest_vm_end %lx, found %lx\n", - mm->highest_vm_end, highest_address); - bug = 1; - } - i = browse_rb(&mm->mm_rb); - if (i != mm->map_count) { - printk("map_count %d rb %d\n", mm->map_count, i); - bug = 1; - } - BUG_ON(bug); -} -#else -#define validate_mm_rb(root, ignore) do { } while (0) -#define validate_mm(mm) do { } while (0) -#endif - -RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb, - unsigned long, rb_subtree_gap, vma_compute_subtree_gap) - -/* - * Update augmented rbtree rb_subtree_gap values after vma->vm_start or - * vma->vm_prev->vm_end values changed, without modifying the vma's position - * in the rbtree. - */ -static void vma_gap_update(struct vm_area_struct *vma) -{ - /* - * As it turns out, RB_DECLARE_CALLBACKS() already created a callback - * function that does exacltly what we want. - */ - vma_gap_callbacks_propagate(&vma->vm_rb, NULL); -} - -static inline void vma_rb_insert(struct vm_area_struct *vma, - struct rb_root *root) -{ - /* All rb_subtree_gap values must be consistent prior to insertion */ - validate_mm_rb(root, NULL); - - rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); -} - -static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) -{ - /* - * All rb_subtree_gap values must be consistent prior to erase, - * with the possible exception of the vma being erased. - */ - validate_mm_rb(root, vma); - - /* - * Note rb_erase_augmented is a fairly large inline function, - * so make sure we instantiate it only once with our desired - * augmented rbtree callbacks. - */ - rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); + mm->brk = origbrk; + mmap_write_unlock(mm); + return origbrk; } /* - * vma has some anon_vma assigned, and is already inserted on that - * anon_vma's interval trees. - * - * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the - * vma must be removed from the anon_vma's interval trees using - * anon_vma_interval_tree_pre_update_vma(). - * - * After the update, the vma will be reinserted using - * anon_vma_interval_tree_post_update_vma(). - * - * The entire update must be protected by exclusive mmap_sem and by - * the root anon_vma's mutex. 
+ * If a hint addr is less than mmap_min_addr change hint to be as + * low as possible but still greater than mmap_min_addr */ -static inline void -anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) -{ - struct anon_vma_chain *avc; - - list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) - anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); -} - -static inline void -anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) -{ - struct anon_vma_chain *avc; - - list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) - anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); -} - -static int find_vma_links(struct mm_struct *mm, unsigned long addr, - unsigned long end, struct vm_area_struct **pprev, - struct rb_node ***rb_link, struct rb_node **rb_parent) -{ - struct rb_node **__rb_link, *__rb_parent, *rb_prev; - - __rb_link = &mm->mm_rb.rb_node; - rb_prev = __rb_parent = NULL; - - while (*__rb_link) { - struct vm_area_struct *vma_tmp; - - __rb_parent = *__rb_link; - vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb); - - if (vma_tmp->vm_end > addr) { - /* Fail if an existing vma overlaps the area */ - if (vma_tmp->vm_start < end) - return -ENOMEM; - __rb_link = &__rb_parent->rb_left; - } else { - rb_prev = __rb_parent; - __rb_link = &__rb_parent->rb_right; - } - } - - *pprev = NULL; - if (rb_prev) - *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); - *rb_link = __rb_link; - *rb_parent = __rb_parent; - return 0; -} - -static unsigned long count_vma_pages_range(struct mm_struct *mm, - unsigned long addr, unsigned long end) -{ - unsigned long nr_pages = 0; - struct vm_area_struct *vma; - - /* Find first overlaping mapping */ - vma = find_vma_intersection(mm, addr, end); - if (!vma) - return 0; - - nr_pages = (min(end, vma->vm_end) - - max(addr, vma->vm_start)) >> PAGE_SHIFT; - - /* Iterate over the rest of the overlaps */ - for (vma = vma->vm_next; vma; vma = vma->vm_next) { - unsigned long overlap_len; - - if (vma->vm_start > end) - break; - - overlap_len = min(end, vma->vm_end) - vma->vm_start; - nr_pages += overlap_len >> PAGE_SHIFT; - } - - return nr_pages; -} - -void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, - struct rb_node **rb_link, struct rb_node *rb_parent) -{ - /* Update tracking information for the gap following the new vma. */ - if (vma->vm_next) - vma_gap_update(vma->vm_next); - else - mm->highest_vm_end = vma->vm_end; - - /* - * vma->vm_prev wasn't known when we followed the rbtree to find the - * correct insertion point for that vma. As a result, we could not - * update the vma vm_rb parents rb_subtree_gap values on the way down. - * So, we first insert the vma with a zero rb_subtree_gap value - * (to be consistent with what we did on the way down), and then - * immediately update the gap to the correct value. Finally we - * rebalance the rbtree after all augmented values have been set. 
- */ - rb_link_node(&vma->vm_rb, rb_parent, rb_link); - vma->rb_subtree_gap = 0; - vma_gap_update(vma); - vma_rb_insert(vma, &mm->mm_rb); -} - -static void __vma_link_file(struct vm_area_struct *vma) -{ - struct file *file; - - file = vma->vm_file; - if (file) { - struct address_space *mapping = file->f_mapping; - - if (vma->vm_flags & VM_DENYWRITE) - atomic_dec(&file_inode(file)->i_writecount); - if (vma->vm_flags & VM_SHARED) - mapping->i_mmap_writable++; - - flush_dcache_mmap_lock(mapping); - if (unlikely(vma->vm_flags & VM_NONLINEAR)) - vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear); - else - vma_interval_tree_insert(vma, &mapping->i_mmap); - flush_dcache_mmap_unlock(mapping); - } -} - -static void -__vma_link(struct mm_struct *mm, struct vm_area_struct *vma, - struct vm_area_struct *prev, struct rb_node **rb_link, - struct rb_node *rb_parent) +static inline unsigned long round_hint_to_min(unsigned long hint) { - __vma_link_list(mm, vma, prev, rb_parent); - __vma_link_rb(mm, vma, rb_link, rb_parent); + hint &= PAGE_MASK; + if (((void *)hint != NULL) && + (hint < mmap_min_addr)) + return PAGE_ALIGN(mmap_min_addr); + return hint; } -static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, - struct vm_area_struct *prev, struct rb_node **rb_link, - struct rb_node *rb_parent) +bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags, + unsigned long bytes) { - struct address_space *mapping = NULL; + unsigned long locked_pages, limit_pages; - if (vma->vm_file) - mapping = vma->vm_file->f_mapping; + if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) + return true; - if (mapping) - mutex_lock(&mapping->i_mmap_mutex); - - __vma_link(mm, vma, prev, rb_link, rb_parent); - __vma_link_file(vma); - - if (mapping) - mutex_unlock(&mapping->i_mmap_mutex); - - mm->map_count++; - validate_mm(mm); -} + locked_pages = bytes >> PAGE_SHIFT; + locked_pages += mm->locked_vm; -/* - * Helper for vma_adjust() in the split_vma insert case: insert a vma into the - * mm's list and rbtree. It has already been inserted into the interval tree. - */ -static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) -{ - struct vm_area_struct *prev; - struct rb_node **rb_link, *rb_parent; - - if (find_vma_links(mm, vma->vm_start, vma->vm_end, - &prev, &rb_link, &rb_parent)) - BUG(); - __vma_link(mm, vma, prev, rb_link, rb_parent); - mm->map_count++; -} + limit_pages = rlimit(RLIMIT_MEMLOCK); + limit_pages >>= PAGE_SHIFT; -static inline void -__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, - struct vm_area_struct *prev) -{ - struct vm_area_struct *next; - - vma_rb_erase(vma, &mm->mm_rb); - prev->vm_next = next = vma->vm_next; - if (next) - next->vm_prev = prev; - if (mm->mmap_cache == vma) - mm->mmap_cache = prev; + return locked_pages <= limit_pages; } -/* - * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that - * is already present in an i_mmap tree without adjusting the tree. - * The following helper function should be used when such adjustments - * are necessary. The "insert" vma (if any) is to be inserted - * before we drop the necessary locks. 
- */ -int vma_adjust(struct vm_area_struct *vma, unsigned long start, - unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert) +static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) { - struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *next = vma->vm_next; - struct vm_area_struct *importer = NULL; - struct address_space *mapping = NULL; - struct rb_root *root = NULL; - struct anon_vma *anon_vma = NULL; - struct file *file = vma->vm_file; - bool start_changed = false, end_changed = false; - long adjust_next = 0; - int remove_next = 0; - - if (next && !insert) { - struct vm_area_struct *exporter = NULL; - - if (end >= next->vm_end) { - /* - * vma expands, overlapping all the next, and - * perhaps the one after too (mprotect case 6). - */ -again: remove_next = 1 + (end > next->vm_end); - end = next->vm_end; - exporter = next; - importer = vma; - } else if (end > next->vm_start) { - /* - * vma expands, overlapping part of the next: - * mprotect case 5 shifting the boundary up. - */ - adjust_next = (end - next->vm_start) >> PAGE_SHIFT; - exporter = next; - importer = vma; - } else if (end < vma->vm_end) { - /* - * vma shrinks, and !insert tells it's not - * split_vma inserting another: so it must be - * mprotect case 4 shifting the boundary down. - */ - adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT); - exporter = vma; - importer = next; - } - - /* - * Easily overlooked: when mprotect shifts the boundary, - * make sure the expanding vma has anon_vma set if the - * shrinking vma had, to cover any anon pages imported. - */ - if (exporter && exporter->anon_vma && !importer->anon_vma) { - if (anon_vma_clone(importer, exporter)) - return -ENOMEM; - importer->anon_vma = exporter->anon_vma; - } - } - - if (file) { - mapping = file->f_mapping; - if (!(vma->vm_flags & VM_NONLINEAR)) { - root = &mapping->i_mmap; - uprobe_munmap(vma, vma->vm_start, vma->vm_end); - - if (adjust_next) - uprobe_munmap(next, next->vm_start, - next->vm_end); - } - - mutex_lock(&mapping->i_mmap_mutex); - if (insert) { - /* - * Put into interval tree now, so instantiated pages - * are visible to arm/parisc __flush_dcache_page - * throughout; but we cannot insert into address - * space until vma start or end is updated. - */ - __vma_link_file(insert); - } - } - - vma_adjust_trans_huge(vma, start, end, adjust_next); - - anon_vma = vma->anon_vma; - if (!anon_vma && adjust_next) - anon_vma = next->anon_vma; - if (anon_vma) { - VM_BUG_ON(adjust_next && next->anon_vma && - anon_vma != next->anon_vma); - anon_vma_lock_write(anon_vma); - anon_vma_interval_tree_pre_update_vma(vma); - if (adjust_next) - anon_vma_interval_tree_pre_update_vma(next); - } - - if (root) { - flush_dcache_mmap_lock(mapping); - vma_interval_tree_remove(vma, root); - if (adjust_next) - vma_interval_tree_remove(next, root); - } - - if (start != vma->vm_start) { - vma->vm_start = start; - start_changed = true; - } - if (end != vma->vm_end) { - vma->vm_end = end; - end_changed = true; - } - vma->vm_pgoff = pgoff; - if (adjust_next) { - next->vm_start += adjust_next << PAGE_SHIFT; - next->vm_pgoff += adjust_next; - } - - if (root) { - if (adjust_next) - vma_interval_tree_insert(next, root); - vma_interval_tree_insert(vma, root); - flush_dcache_mmap_unlock(mapping); - } - - if (remove_next) { - /* - * vma_merge has merged next into vma, and needs - * us to remove next before dropping the locks. 
- */ - __vma_unlink(mm, next, vma); - if (file) - __remove_shared_vm_struct(next, file, mapping); - } else if (insert) { - /* - * split_vma has split insert from vma, and needs - * us to insert it before dropping the locks - * (it may either follow vma or precede it). - */ - __insert_vm_struct(mm, insert); - } else { - if (start_changed) - vma_gap_update(vma); - if (end_changed) { - if (!next) - mm->highest_vm_end = end; - else if (!adjust_next) - vma_gap_update(next); - } - } - - if (anon_vma) { - anon_vma_interval_tree_post_update_vma(vma); - if (adjust_next) - anon_vma_interval_tree_post_update_vma(next); - anon_vma_unlock_write(anon_vma); - } - if (mapping) - mutex_unlock(&mapping->i_mmap_mutex); - - if (root) { - uprobe_mmap(vma); - - if (adjust_next) - uprobe_mmap(next); - } - - if (remove_next) { - if (file) { - uprobe_munmap(next, next->vm_start, next->vm_end); - fput(file); - } - if (next->anon_vma) - anon_vma_merge(vma, next); - mm->map_count--; - vma_set_policy(vma, vma_policy(next)); - kmem_cache_free(vm_area_cachep, next); - /* - * In mprotect's case 6 (see comments on vma_merge), - * we must remove another next too. It would clutter - * up the code too much to do both in one go. - */ - next = vma->vm_next; - if (remove_next == 2) - goto again; - else if (next) - vma_gap_update(next); - else - mm->highest_vm_end = end; - } - if (insert && file) - uprobe_mmap(insert); + if (S_ISREG(inode->i_mode)) + return MAX_LFS_FILESIZE; - validate_mm(mm); + if (S_ISBLK(inode->i_mode)) + return MAX_LFS_FILESIZE; - return 0; -} + if (S_ISSOCK(inode->i_mode)) + return MAX_LFS_FILESIZE; -/* - * If the vma has a ->close operation then the driver probably needs to release - * per-vma resources, so we don't attempt to merge those. - */ -static inline int is_mergeable_vma(struct vm_area_struct *vma, - struct file *file, unsigned long vm_flags) -{ - if (vma->vm_flags ^ vm_flags) - return 0; - if (vma->vm_file != file) + /* Special "we do even unsigned file positions" case */ + if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET) return 0; - if (vma->vm_ops && vma->vm_ops->close) - return 0; - return 1; -} -static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1, - struct anon_vma *anon_vma2, - struct vm_area_struct *vma) -{ - /* - * The list_is_singular() test is to avoid merging VMA cloned from - * parents. This can improve scalability caused by anon_vma lock. - */ - if ((!anon_vma1 || !anon_vma2) && (!vma || - list_is_singular(&vma->anon_vma_chain))) - return 1; - return anon_vma1 == anon_vma2; + /* Yes, random drivers might want more. But I'm tired of buggy drivers */ + return ULONG_MAX; } -/* - * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) - * in front of (at a lower virtual address and file offset than) the vma. - * - * We cannot merge two vmas if they have differently assigned (non-NULL) - * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. - * - * We don't check here for the merged mmap wrapping around the end of pagecache - * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which - * wrap, nor mmaps which cover the final page at index -1UL. 
- */ -static int -can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, - struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) +static inline bool file_mmap_ok(struct file *file, struct inode *inode, + unsigned long pgoff, unsigned long len) { - if (is_mergeable_vma(vma, file, vm_flags) && - is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { - if (vma->vm_pgoff == vm_pgoff) - return 1; - } - return 0; -} + u64 maxsize = file_mmap_size_max(file, inode); -/* - * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) - * beyond (at a higher virtual address and file offset than) the vma. - * - * We cannot merge two vmas if they have differently assigned (non-NULL) - * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. - */ -static int -can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, - struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) -{ - if (is_mergeable_vma(vma, file, vm_flags) && - is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { - pgoff_t vm_pglen; - vm_pglen = vma_pages(vma); - if (vma->vm_pgoff + vm_pglen == vm_pgoff) - return 1; - } - return 0; + if (maxsize && len > maxsize) + return false; + maxsize -= len; + if (pgoff > maxsize >> PAGE_SHIFT) + return false; + return true; } -/* - * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out - * whether that can be merged with its predecessor or its successor. - * Or both (it neatly fills a hole). +/** + * do_mmap() - Perform a userland memory mapping into the current process + * address space of length @len with protection bits @prot, mmap flags @flags + * (from which VMA flags will be inferred), and any additional VMA flags to + * apply @vm_flags. If this is a file-backed mapping then the file is specified + * in @file and page offset into the file via @pgoff. * - * In most cases - when called for mmap, brk or mremap - [addr,end) is - * certain not to be mapped by the time vma_merge is called; but when - * called for mprotect, it is certain to be already mapped (either at - * an offset within prev, or at the start of next), and the flags of - * this area are about to be changed to vm_flags - and the no-change - * case has already been eliminated. + * This function does not perform security checks on the file and assumes, if + * @uf is non-NULL, the caller has provided a list head to track unmap events + * for userfaultfd @uf. * - * The following mprotect cases have to be considered, where AAAA is - * the area passed down from mprotect_fixup, never extending beyond one - * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after: + * It also simply indicates whether memory population is required by setting + * @populate, which must be non-NULL, expecting the caller to actually perform + * this task itself if appropriate. * - * AAAA AAAA AAAA AAAA - * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPNNNNNN PPPPNNNNXXXX - * cannot merge might become might become might become - * PPNNNNNNNNNN PPPPPPPPPPNN PPPPPPPPPPPP 6 or - * mmap, brk or case 4 below case 5 below PPPPPPPPXXXX 7 or - * mremap move: PPPPNNNNNNNN 8 - * AAAA - * PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN - * might become case 1 below case 2 below case 3 below + * This function will invoke architecture-specific (and if provided and + * relevant, file system-specific) logic to determine the most appropriate + * unmapped area in which to place the mapping if not MAP_FIXED. * - * Odd one out? 
Case 8, because it extends NNNN but needs flags of XXXX: - * mprotect_fixup updates vm_flags & vm_page_prot on successful return. - */ -struct vm_area_struct *vma_merge(struct mm_struct *mm, - struct vm_area_struct *prev, unsigned long addr, - unsigned long end, unsigned long vm_flags, - struct anon_vma *anon_vma, struct file *file, - pgoff_t pgoff, struct mempolicy *policy) -{ - pgoff_t pglen = (end - addr) >> PAGE_SHIFT; - struct vm_area_struct *area, *next; - int err; - - /* - * We later require that vma->vm_flags == vm_flags, - * so this tests vma->vm_flags & VM_SPECIAL, too. - */ - if (vm_flags & VM_SPECIAL) - return NULL; - - if (prev) - next = prev->vm_next; - else - next = mm->mmap; - area = next; - if (next && next->vm_end == end) /* cases 6, 7, 8 */ - next = next->vm_next; - - /* - * Can it merge with the predecessor? - */ - if (prev && prev->vm_end == addr && - mpol_equal(vma_policy(prev), policy) && - can_vma_merge_after(prev, vm_flags, - anon_vma, file, pgoff)) { - /* - * OK, it can. Can we now merge in the successor as well? - */ - if (next && end == next->vm_start && - mpol_equal(policy, vma_policy(next)) && - can_vma_merge_before(next, vm_flags, - anon_vma, file, pgoff+pglen) && - is_mergeable_anon_vma(prev->anon_vma, - next->anon_vma, NULL)) { - /* cases 1, 6 */ - err = vma_adjust(prev, prev->vm_start, - next->vm_end, prev->vm_pgoff, NULL); - } else /* cases 2, 5, 7 */ - err = vma_adjust(prev, prev->vm_start, - end, prev->vm_pgoff, NULL); - if (err) - return NULL; - khugepaged_enter_vma_merge(prev); - return prev; - } - - /* - * Can this new request be merged in front of next? - */ - if (next && end == next->vm_start && - mpol_equal(policy, vma_policy(next)) && - can_vma_merge_before(next, vm_flags, - anon_vma, file, pgoff+pglen)) { - if (prev && addr < prev->vm_end) /* case 4 */ - err = vma_adjust(prev, prev->vm_start, - addr, prev->vm_pgoff, NULL); - else /* cases 3, 8 */ - err = vma_adjust(area, addr, next->vm_end, - next->vm_pgoff - pglen, NULL); - if (err) - return NULL; - khugepaged_enter_vma_merge(area); - return area; - } - - return NULL; -} - -/* - * Rough compatbility check to quickly see if it's even worth looking - * at sharing an anon_vma. + * Callers which require userland mmap() behaviour should invoke vm_mmap(), + * which is also exported for module use. * - * They need to have the same vm_file, and the flags can only differ - * in things that mprotect may change. + * Those which require this behaviour less security checks, userfaultfd and + * populate behaviour, and who handle the mmap write lock themselves, should + * call this function. * - * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that - * we can merge the two vma's. For example, we refuse to merge a vma if - * there is a vm_ops->close() function, because that indicates that the - * driver is doing some kind of reference counting. But that doesn't - * really matter for the anon_vma sharing case. - */ -static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) -{ - return a->vm_end == b->vm_start && - mpol_equal(vma_policy(a), vma_policy(b)) && - a->vm_file == b->vm_file && - !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) && - b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); -} - -/* - * Do some basic sanity checking to see if we can re-use the anon_vma - * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be - * the same as 'old', the other will be the new one that is trying - * to share the anon_vma. 
+ * Note that the returned address may reside within a merged VMA if an + * appropriate merge were to take place, so it doesn't necessarily specify the + * start of a VMA, rather only the start of a valid mapped range of length + * @len bytes, rounded down to the nearest page size. * - * NOTE! This runs with mm_sem held for reading, so it is possible that - * the anon_vma of 'old' is concurrently in the process of being set up - * by another page fault trying to merge _that_. But that's ok: if it - * is being set up, that automatically means that it will be a singleton - * acceptable for merging, so we can do all of this optimistically. But - * we do that ACCESS_ONCE() to make sure that we never re-load the pointer. + * The caller must write-lock current->mm->mmap_lock. * - * IOW: that the "list_is_singular()" test on the anon_vma_chain only - * matters for the 'stable anon_vma' case (ie the thing we want to avoid - * is to return an anon_vma that is "complex" due to having gone through - * a fork). + * @file: An optional struct file pointer describing the file which is to be + * mapped, if a file-backed mapping. + * @addr: If non-zero, hints at (or if @flags has MAP_FIXED set, specifies) the + * address at which to perform this mapping. See mmap (2) for details. Must be + * page-aligned. + * @len: The length of the mapping. Will be page-aligned and must be at least 1 + * page in size. + * @prot: Protection bits describing access required to the mapping. See mmap + * (2) for details. + * @flags: Flags specifying how the mapping should be performed, see mmap (2) + * for details. + * @vm_flags: VMA flags which should be set by default, or 0 otherwise. + * @pgoff: Page offset into the @file if file-backed, should be 0 otherwise. + * @populate: A pointer to a value which will be set to 0 if no population of + * the range is required, or the number of bytes to populate if it is. Must be + * non-NULL. See mmap (2) for details as to under what circumstances population + * of the range occurs. + * @uf: An optional pointer to a list head to track userfaultfd unmap events + * should unmapping events arise. If provided, it is up to the caller to manage + * this. * - * We also make sure that the two vma's are compatible (adjacent, - * and with the same memory policies). That's all stable, even with just - * a read lock on the mm_sem. + * Returns: Either an error, or the address at which the requested mapping has + * been performed. */ -static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) -{ - if (anon_vma_compatible(a, b)) { - struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma); - - if (anon_vma && list_is_singular(&old->anon_vma_chain)) - return anon_vma; - } - return NULL; -} - -/* - * find_mergeable_anon_vma is used by anon_vma_prepare, to check - * neighbouring vmas for a suitable anon_vma, before it goes off - * to allocate a new anon_vma. It checks because a repetitive - * sequence of mprotects and faults may otherwise lead to distinct - * anon_vmas being allocated, preventing vma merge in subsequent - * mprotect. 
- */ -struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) -{ - struct anon_vma *anon_vma; - struct vm_area_struct *near; - - near = vma->vm_next; - if (!near) - goto try_prev; - - anon_vma = reusable_anon_vma(near, vma, near); - if (anon_vma) - return anon_vma; -try_prev: - near = vma->vm_prev; - if (!near) - goto none; - - anon_vma = reusable_anon_vma(near, near, vma); - if (anon_vma) - return anon_vma; -none: - /* - * There's no absolute need to look only at touching neighbours: - * we could search further afield for "compatible" anon_vmas. - * But it would probably just be a waste of time searching, - * or lead to too many vmas hanging off the same anon_vma. - * We're trying to allow mprotect remerging later on, - * not trying to minimize memory used for anon_vmas. - */ - return NULL; -} - -#ifdef CONFIG_PROC_FS -void vm_stat_account(struct mm_struct *mm, unsigned long flags, - struct file *file, long pages) -{ - const unsigned long stack_flags - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN); - - mm->total_vm += pages; - - if (file) { - mm->shared_vm += pages; - if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) - mm->exec_vm += pages; - } else if (flags & stack_flags) - mm->stack_vm += pages; -} -#endif /* CONFIG_PROC_FS */ - -/* - * If a hint addr is less than mmap_min_addr change hint to be as - * low as possible but still greater than mmap_min_addr - */ -static inline unsigned long round_hint_to_min(unsigned long hint) -{ - hint &= PAGE_MASK; - if (((void *)hint != NULL) && - (hint < mmap_min_addr)) - return PAGE_ALIGN(mmap_min_addr); - return hint; -} - -/* - * The caller must hold down_write(¤t->mm->mmap_sem). - */ - -unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, +unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, - unsigned long flags, unsigned long pgoff, - unsigned long *populate) + unsigned long flags, vm_flags_t vm_flags, + unsigned long pgoff, unsigned long *populate, + struct list_head *uf) { - struct mm_struct * mm = current->mm; - struct inode *inode; - vm_flags_t vm_flags; + struct mm_struct *mm = current->mm; + int pkey = 0; *populate = 0; + mmap_assert_write_locked(mm); + + if (!len) + return -EINVAL; + /* * Does the application expect PROT_READ to imply PROT_EXEC? * * (the exception is when the underlying filesystem is noexec - * mounted, in which case we dont add PROT_EXEC.) + * mounted, in which case we don't add PROT_EXEC.) */ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) - if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC))) + if (!(file && path_noexec(&file->f_path))) prot |= PROT_EXEC; - if (!len) - return -EINVAL; + /* force arch specific MAP_FIXED handling in get_unmapped_area */ + if (flags & MAP_FIXED_NOREPLACE) + flags |= MAP_FIXED; if (!(flags & MAP_FIXED)) addr = round_hint_to_min(addr); @@ -1230,48 +371,86 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, /* offset overflow? */ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) - return -EOVERFLOW; + return -EOVERFLOW; /* Too many mappings? */ if (mm->map_count > sysctl_max_map_count) return -ENOMEM; - /* Obtain the address to map to. we verify (or select) it and ensure - * that it represents a valid section of the address space. + /* + * addr is returned from get_unmapped_area, + * There are two cases: + * 1> MAP_FIXED == false + * unallocated memory, no need to check sealing. + * 1> MAP_FIXED == true + * sealing is checked inside mmap_region when + * do_vmi_munmap is called. 
*/ - addr = get_unmapped_area(file, addr, len, pgoff, flags); - if (addr & ~PAGE_MASK) - return addr; + + if (prot == PROT_EXEC) { + pkey = execute_only_pkey(mm); + if (pkey < 0) + pkey = 0; + } /* Do simple checking here so the lower-level routines won't have * to. we assume access permissions have been handled by the open * of the memory object, so we don't do any here. */ - vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | + vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + /* Obtain the address to map to. we verify (or select) it and ensure + * that it represents a valid section of the address space. + */ + addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags); + if (IS_ERR_VALUE(addr)) + return addr; + + if (flags & MAP_FIXED_NOREPLACE) { + if (find_vma_intersection(mm, addr, addr + len)) + return -EEXIST; + } + if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; - /* mlock MCL_FUTURE? */ - if (vm_flags & VM_LOCKED) { - unsigned long locked, lock_limit; - locked = len >> PAGE_SHIFT; - locked += mm->locked_vm; - lock_limit = rlimit(RLIMIT_MEMLOCK); - lock_limit >>= PAGE_SHIFT; - if (locked > lock_limit && !capable(CAP_IPC_LOCK)) - return -EAGAIN; - } - - inode = file ? file_inode(file) : NULL; + if (!mlock_future_ok(mm, vm_flags, len)) + return -EAGAIN; if (file) { + struct inode *inode = file_inode(file); + unsigned long flags_mask; + int err; + + if (!file_mmap_ok(file, inode, pgoff, len)) + return -EOVERFLOW; + + flags_mask = LEGACY_MAP_MASK; + if (file->f_op->fop_flags & FOP_MMAP_SYNC) + flags_mask |= MAP_SYNC; + switch (flags & MAP_TYPE) { case MAP_SHARED: - if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE)) - return -EACCES; + /* + * Force use of MAP_SHARED_VALIDATE with non-legacy + * flags. E.g. MAP_SYNC is dangerous to use with + * MAP_SHARED as you don't know which consistency model + * you will get. We silently ignore unsupported flags + * with MAP_SHARED to preserve backward compatibility. + */ + flags &= LEGACY_MAP_MASK; + fallthrough; + case MAP_SHARED_VALIDATE: + if (flags & ~flags_mask) + return -EOPNOTSUPP; + if (prot & PROT_WRITE) { + if (!(file->f_mode & FMODE_WRITE)) + return -EACCES; + if (IS_SWAPFILE(file->f_mapping->host)) + return -ETXTBSY; + } /* * Make sure we don't allow writing to an append-only @@ -1280,42 +459,77 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) return -EACCES; - /* - * Make sure there are no mandatory locks on the file. - */ - if (locks_verify_locked(inode)) - return -EAGAIN; - vm_flags |= VM_SHARED | VM_MAYSHARE; if (!(file->f_mode & FMODE_WRITE)) vm_flags &= ~(VM_MAYWRITE | VM_SHARED); - - /* fall through */ + fallthrough; case MAP_PRIVATE: if (!(file->f_mode & FMODE_READ)) return -EACCES; - if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) { + if (path_noexec(&file->f_path)) { if (vm_flags & VM_EXEC) return -EPERM; vm_flags &= ~VM_MAYEXEC; } - if (!file->f_op || !file->f_op->mmap) + if (!can_mmap_file(file)) return -ENODEV; + if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) + return -EINVAL; break; default: return -EINVAL; } + + /* + * Check to see if we are violating any seals and update VMA + * flags if necessary to avoid future seal violations. 
+ */ + err = memfd_check_seals_mmap(file, &vm_flags); + if (err) + return (unsigned long)err; } else { switch (flags & MAP_TYPE) { case MAP_SHARED: + if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) + return -EINVAL; /* * Ignore pgoff. */ pgoff = 0; vm_flags |= VM_SHARED | VM_MAYSHARE; break; + case MAP_DROPPABLE: + if (VM_DROPPABLE == VM_NONE) + return -ENOTSUPP; + /* + * A locked or stack area makes no sense to be droppable. + * + * Also, since droppable pages can just go away at any time + * it makes no sense to copy them on fork or dump them. + * + * And don't attempt to combine with hugetlb for now. + */ + if (flags & (MAP_LOCKED | MAP_HUGETLB)) + return -EINVAL; + if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) + return -EINVAL; + + vm_flags |= VM_DROPPABLE; + + /* + * If the pages can be dropped, then it doesn't make + * sense to reserve them. + */ + vm_flags |= VM_NORESERVE; + + /* + * Likewise, they're volatile enough that they + * shouldn't survive forks or coredumps. + */ + vm_flags |= VM_WIPEONFORK | VM_DONTDUMP; + fallthrough; case MAP_PRIVATE: /* * Set pgoff according to addr for anon_vma. @@ -1341,7 +555,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, vm_flags |= VM_NORESERVE; } - addr = mmap_region(file, addr, len, vm_flags, pgoff); + addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); if (!IS_ERR_VALUE(addr) && ((vm_flags & VM_LOCKED) || (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) @@ -1349,28 +563,28 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, return addr; } -SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, - unsigned long, prot, unsigned long, flags, - unsigned long, fd, unsigned long, pgoff) +unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff) { struct file *file = NULL; - unsigned long retval = -EBADF; + unsigned long retval; if (!(flags & MAP_ANONYMOUS)) { audit_mmap_fd(fd, flags); file = fget(fd); if (!file) - goto out; - if (is_file_hugepages(file)) + return -EBADF; + if (is_file_hugepages(file)) { len = ALIGN(len, huge_page_size(hstate_file(file))); - retval = -EINVAL; - if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file))) + } else if (unlikely(flags & MAP_HUGETLB)) { + retval = -EINVAL; goto out_fput; + } } else if (flags & MAP_HUGETLB) { - struct user_struct *user = NULL; struct hstate *hs; - hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & SHM_HUGE_MASK); + hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (!hs) return -EINVAL; @@ -1378,27 +592,29 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, /* * VM_NORESERVE is used because the reservations will be * taken when vm_ops->mmap() is called - * A dummy user value is used because we are not locking - * memory so no accounting is necessary */ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE, - &user, HUGETLB_ANONHUGE_INODE, + HUGETLB_ANONHUGE_INODE, (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (IS_ERR(file)) return PTR_ERR(file); } - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); out_fput: if (file) fput(file); -out: return retval; } +SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, + unsigned long, fd, unsigned long, pgoff) +{ + return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); +} + #ifdef __ARCH_WANT_SYS_OLD_MMAP struct mmap_arg_struct { 
unsigned long addr; @@ -1415,424 +631,46 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; - if (a.offset & ~PAGE_MASK) + if (offset_in_page(a.offset)) return -EINVAL; - return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, - a.offset >> PAGE_SHIFT); + return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, + a.offset >> PAGE_SHIFT); } #endif /* __ARCH_WANT_SYS_OLD_MMAP */ /* - * Some shared mappigns will want the pages marked read-only - * to track write events. If so, we'll downgrade vm_page_prot - * to the private version (using protection_map[] without the - * VM_SHARED bit). + * Determine if the allocation needs to ensure that there is no + * existing mapping within it's guard gaps, for use as start_gap. */ -int vma_wants_writenotify(struct vm_area_struct *vma) +static inline unsigned long stack_guard_placement(vm_flags_t vm_flags) { - vm_flags_t vm_flags = vma->vm_flags; - - /* If it was private or non-writable, the write bit is already clear */ - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) - return 0; + if (vm_flags & VM_SHADOW_STACK) + return PAGE_SIZE; - /* The backer wishes to know when pages are first written to? */ - if (vma->vm_ops && vma->vm_ops->page_mkwrite) - return 1; - - /* The open routine did something to the protections already? */ - if (pgprot_val(vma->vm_page_prot) != - pgprot_val(vm_get_page_prot(vm_flags))) - return 0; - - /* Specialty mapping? */ - if (vm_flags & VM_PFNMAP) - return 0; - - /* Can the mapping track the dirty pages? */ - return vma->vm_file && vma->vm_file->f_mapping && - mapping_cap_account_dirty(vma->vm_file->f_mapping); + return 0; } /* - * We account for memory if it's a private writeable mapping, - * not hugepages and VM_NORESERVE wasn't set. + * Search for an unmapped address range. + * + * We are looking for a range that: + * - does not intersect with any VMA; + * - is contained within the [low_limit, high_limit) interval; + * - is at least the desired size. + * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) */ -static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) -{ - /* - * hugetlb has its own accounting separate from the core VM - * VM_HUGETLB may not be set yet so we cannot check for that flag. - */ - if (file && is_file_hugepages(file)) - return 0; - - return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; -} - -unsigned long mmap_region(struct file *file, unsigned long addr, - unsigned long len, vm_flags_t vm_flags, unsigned long pgoff) +unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) { - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma, *prev; - int correct_wcount = 0; - int error; - struct rb_node **rb_link, *rb_parent; - unsigned long charged = 0; - struct inode *inode = file ? file_inode(file) : NULL; - - /* Check against address space limit. */ - if (!may_expand_vm(mm, len >> PAGE_SHIFT)) { - unsigned long nr_pages; - - /* - * MAP_FIXED may remove pages of mappings that intersects with - * requested mapping. Account for the pages it would unmap. 
- */ - if (!(vm_flags & MAP_FIXED)) - return -ENOMEM; - - nr_pages = count_vma_pages_range(mm, addr, addr + len); - - if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages)) - return -ENOMEM; - } - - /* Clear old maps */ - error = -ENOMEM; -munmap_back: - if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { - if (do_munmap(mm, addr, len)) - return -ENOMEM; - goto munmap_back; - } - - /* - * Private writable mapping: check memory availability - */ - if (accountable_mapping(file, vm_flags)) { - charged = len >> PAGE_SHIFT; - if (security_vm_enough_memory_mm(mm, charged)) - return -ENOMEM; - vm_flags |= VM_ACCOUNT; - } - - /* - * Can we just expand an old mapping? - */ - vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL); - if (vma) - goto out; - - /* - * Determine the object being mapped and call the appropriate - * specific mapper. the address has already been validated, but - * not unmapped, but the maps are removed from the list. - */ - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); - if (!vma) { - error = -ENOMEM; - goto unacct_error; - } - - vma->vm_mm = mm; - vma->vm_start = addr; - vma->vm_end = addr + len; - vma->vm_flags = vm_flags; - vma->vm_page_prot = vm_get_page_prot(vm_flags); - vma->vm_pgoff = pgoff; - INIT_LIST_HEAD(&vma->anon_vma_chain); - - error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */ - - if (file) { - if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) - goto free_vma; - if (vm_flags & VM_DENYWRITE) { - error = deny_write_access(file); - if (error) - goto free_vma; - correct_wcount = 1; - } - vma->vm_file = get_file(file); - error = file->f_op->mmap(file, vma); - if (error) - goto unmap_and_free_vma; - - /* Can addr have changed?? - * - * Answer: Yes, several device drivers can do it in their - * f_op->mmap method. -DaveM - * Bug: If addr is changed, prev, rb_link, rb_parent should - * be updated for vma_link() - */ - WARN_ON_ONCE(addr != vma->vm_start); - - addr = vma->vm_start; - pgoff = vma->vm_pgoff; - vm_flags = vma->vm_flags; - } else if (vm_flags & VM_SHARED) { - if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP))) - goto free_vma; - error = shmem_zero_setup(vma); - if (error) - goto free_vma; - } - - if (vma_wants_writenotify(vma)) { - pgprot_t pprot = vma->vm_page_prot; - - /* Can vma->vm_page_prot have changed?? - * - * Answer: Yes, drivers may have changed it in their - * f_op->mmap method. - * - * Ensures that vmas marked as uncached stay that way. 
- */ - vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED); - if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot))) - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - } - - vma_link(mm, vma, prev, rb_link, rb_parent); - file = vma->vm_file; - - /* Once vma denies write, undo our temporary denial count */ - if (correct_wcount) - atomic_inc(&inode->i_writecount); -out: - perf_event_mmap(vma); - - vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); - if (vm_flags & VM_LOCKED) { - if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || - vma == get_gate_vma(current->mm))) - mm->locked_vm += (len >> PAGE_SHIFT); - else - vma->vm_flags &= ~VM_LOCKED; - } + unsigned long addr; - if (file) - uprobe_mmap(vma); + if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) + addr = unmapped_area_topdown(info); + else + addr = unmapped_area(info); + trace_vm_unmapped_area(addr, info); return addr; - -unmap_and_free_vma: - if (correct_wcount) - atomic_inc(&inode->i_writecount); - vma->vm_file = NULL; - fput(file); - - /* Undo any partial mapping done by a device driver. */ - unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); - charged = 0; -free_vma: - kmem_cache_free(vm_area_cachep, vma); -unacct_error: - if (charged) - vm_unacct_memory(charged); - return error; -} - -unsigned long unmapped_area(struct vm_unmapped_area_info *info) -{ - /* - * We implement the search by looking for an rbtree node that - * immediately follows a suitable gap. That is, - * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length; - * - gap_end = vma->vm_start >= info->low_limit + length; - * - gap_end - gap_start >= length - */ - - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long length, low_limit, high_limit, gap_start, gap_end; - - /* Adjust search length to account for worst case alignment overhead */ - length = info->length + info->align_mask; - if (length < info->length) - return -ENOMEM; - - /* Adjust search limits by the desired length */ - if (info->high_limit < length) - return -ENOMEM; - high_limit = info->high_limit - length; - - if (info->low_limit > high_limit) - return -ENOMEM; - low_limit = info->low_limit + length; - - /* Check if rbtree root looks promising */ - if (RB_EMPTY_ROOT(&mm->mm_rb)) - goto check_highest; - vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); - if (vma->rb_subtree_gap < length) - goto check_highest; - - while (true) { - /* Visit left subtree if it looks promising */ - gap_end = vma->vm_start; - if (gap_end >= low_limit && vma->vm_rb.rb_left) { - struct vm_area_struct *left = - rb_entry(vma->vm_rb.rb_left, - struct vm_area_struct, vm_rb); - if (left->rb_subtree_gap >= length) { - vma = left; - continue; - } - } - - gap_start = vma->vm_prev ? 
vma->vm_prev->vm_end : 0; -check_current: - /* Check if current node has a suitable gap */ - if (gap_start > high_limit) - return -ENOMEM; - if (gap_end >= low_limit && gap_end - gap_start >= length) - goto found; - - /* Visit right subtree if it looks promising */ - if (vma->vm_rb.rb_right) { - struct vm_area_struct *right = - rb_entry(vma->vm_rb.rb_right, - struct vm_area_struct, vm_rb); - if (right->rb_subtree_gap >= length) { - vma = right; - continue; - } - } - - /* Go back up the rbtree to find next candidate node */ - while (true) { - struct rb_node *prev = &vma->vm_rb; - if (!rb_parent(prev)) - goto check_highest; - vma = rb_entry(rb_parent(prev), - struct vm_area_struct, vm_rb); - if (prev == vma->vm_rb.rb_left) { - gap_start = vma->vm_prev->vm_end; - gap_end = vma->vm_start; - goto check_current; - } - } - } - -check_highest: - /* Check highest gap, which does not precede any rbtree node */ - gap_start = mm->highest_vm_end; - gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */ - if (gap_start > high_limit) - return -ENOMEM; - -found: - /* We found a suitable gap. Clip it with the original low_limit. */ - if (gap_start < info->low_limit) - gap_start = info->low_limit; - - /* Adjust gap address to the desired alignment */ - gap_start += (info->align_offset - gap_start) & info->align_mask; - - VM_BUG_ON(gap_start + info->length > info->high_limit); - VM_BUG_ON(gap_start + info->length > gap_end); - return gap_start; -} - -unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) -{ - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long length, low_limit, high_limit, gap_start, gap_end; - - /* Adjust search length to account for worst case alignment overhead */ - length = info->length + info->align_mask; - if (length < info->length) - return -ENOMEM; - - /* - * Adjust search limits by the desired length. - * See implementation comment at top of unmapped_area(). - */ - gap_end = info->high_limit; - if (gap_end < length) - return -ENOMEM; - high_limit = gap_end - length; - - if (info->low_limit > high_limit) - return -ENOMEM; - low_limit = info->low_limit + length; - - /* Check highest gap, which does not precede any rbtree node */ - gap_start = mm->highest_vm_end; - if (gap_start <= high_limit) - goto found_highest; - - /* Check if rbtree root looks promising */ - if (RB_EMPTY_ROOT(&mm->mm_rb)) - return -ENOMEM; - vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); - if (vma->rb_subtree_gap < length) - return -ENOMEM; - - while (true) { - /* Visit right subtree if it looks promising */ - gap_start = vma->vm_prev ? 
vma->vm_prev->vm_end : 0; - if (gap_start <= high_limit && vma->vm_rb.rb_right) { - struct vm_area_struct *right = - rb_entry(vma->vm_rb.rb_right, - struct vm_area_struct, vm_rb); - if (right->rb_subtree_gap >= length) { - vma = right; - continue; - } - } - -check_current: - /* Check if current node has a suitable gap */ - gap_end = vma->vm_start; - if (gap_end < low_limit) - return -ENOMEM; - if (gap_start <= high_limit && gap_end - gap_start >= length) - goto found; - - /* Visit left subtree if it looks promising */ - if (vma->vm_rb.rb_left) { - struct vm_area_struct *left = - rb_entry(vma->vm_rb.rb_left, - struct vm_area_struct, vm_rb); - if (left->rb_subtree_gap >= length) { - vma = left; - continue; - } - } - - /* Go back up the rbtree to find next candidate node */ - while (true) { - struct rb_node *prev = &vma->vm_rb; - if (!rb_parent(prev)) - return -ENOMEM; - vma = rb_entry(rb_parent(prev), - struct vm_area_struct, vm_rb); - if (prev == vma->vm_rb.rb_right) { - gap_start = vma->vm_prev ? - vma->vm_prev->vm_end : 0; - goto check_current; - } - } - } - -found: - /* We found a suitable gap. Clip it with the original high_limit. */ - if (gap_end > info->high_limit) - gap_end = info->high_limit; - -found_highest: - /* Compute highest gap address at the desired alignment */ - gap_end -= info->length; - gap_end -= (gap_end - info->align_offset) & info->align_mask; - - VM_BUG_ON(gap_end < info->low_limit); - VM_BUG_ON(gap_end < gap_start); - return gap_end; } /* Get an address range which is currently unmapped. @@ -1846,16 +684,17 @@ found_highest: * * This function "knows" that -ENOMEM has the bits set. */ -#ifndef HAVE_ARCH_UNMAPPED_AREA unsigned long -arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) +generic_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - struct vm_unmapped_area_info info; + struct vm_area_struct *vma, *prev; + struct vm_unmapped_area_info info = {}; + const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); - if (len > TASK_SIZE) + if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) @@ -1863,38 +702,49 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + vma = find_vma_prev(mm, addr, &prev); + if (mmap_end - len >= addr && addr >= mmap_min_addr && + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } - info.flags = 0; info.length = len; - info.low_limit = TASK_UNMAPPED_BASE; - info.high_limit = TASK_SIZE; - info.align_mask = 0; + info.low_limit = mm->mmap_base; + info.high_limit = mmap_end; + info.start_gap = stack_guard_placement(vm_flags); + if (filp && is_file_hugepages(filp)) + info.align_mask = huge_page_mask_align(filp); return vm_unmapped_area(&info); } -#endif + +#ifndef HAVE_ARCH_UNMAPPED_AREA +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) +{ + return generic_get_unmapped_area(filp, addr, len, pgoff, flags, + vm_flags); +} +#endif /* * This mmap-allocator allocates new areas top-down from below the * stack's low limit (the base): */ -#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 
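/*
 * Hedged sketch, not part of the patch: how a caller is expected to fill
 * struct vm_unmapped_area_info before calling vm_unmapped_area(), mirroring
 * generic_get_unmapped_area() above. The function name and the 2 MiB
 * alignment are made up purely for illustration.
 */
static unsigned long example_get_area(unsigned long len)
{
        struct vm_unmapped_area_info info = {};

        info.flags = 0;                         /* bottom-up search */
        info.length = len;
        info.low_limit = current->mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & (SZ_2M - 1);  /* 2 MiB-align the result */
        info.align_offset = 0;

        return vm_unmapped_area(&info);         /* gap start, or -ENOMEM */
}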
unsigned long -arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - const unsigned long len, const unsigned long pgoff, - const unsigned long flags) +generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) { - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; - unsigned long addr = addr0; - struct vm_unmapped_area_info info; + struct vm_unmapped_area_info info = {}; + const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); /* requested length too big for entire address space */ - if (len > TASK_SIZE) + if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) @@ -1903,17 +753,20 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + vma = find_vma_prev(mm, addr, &prev); + if (mmap_end - len >= addr && addr >= mmap_min_addr && + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = PAGE_SIZE; - info.high_limit = mm->mmap_base; - info.align_mask = 0; + info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); + info.start_gap = stack_guard_placement(vm_flags); + if (filp && is_file_hugepages(filp)) + info.align_mask = huge_page_mask_align(filp); addr = vm_unmapped_area(&info); /* @@ -1922,24 +775,45 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, * can happen with large stack limits and large mmap() * allocations. 
*/ - if (addr & ~PAGE_MASK) { + if (offset_in_page(addr)) { VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; - info.high_limit = TASK_SIZE; + info.high_limit = mmap_end; addr = vm_unmapped_area(&info); } return addr; } + +#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN +unsigned long +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) +{ + return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags, + vm_flags); +} #endif +unsigned long mm_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) +{ + if (mm_flags_test(MMF_TOPDOWN, current->mm)) + return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, + flags, vm_flags); + return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags); +} + unsigned long -get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, - unsigned long pgoff, unsigned long flags) +__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { unsigned long (*get_area)(struct file *, unsigned long, - unsigned long, unsigned long, unsigned long); + unsigned long, unsigned long, unsigned long) + = NULL; unsigned long error = arch_mmap_check(addr, len, flags); if (error) @@ -1949,306 +823,140 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, if (len > TASK_SIZE) return -ENOMEM; - get_area = current->mm->get_unmapped_area; - if (file && file->f_op && file->f_op->get_unmapped_area) - get_area = file->f_op->get_unmapped_area; - addr = get_area(file, addr, len, pgoff, flags); + if (file) { + if (file->f_op->get_unmapped_area) + get_area = file->f_op->get_unmapped_area; + } else if (flags & MAP_SHARED) { + /* + * mmap_region() will call shmem_zero_setup() to create a file, + * so use shmem's get_unmapped_area in case it can be huge. + */ + get_area = shmem_get_unmapped_area; + } + + /* Always treat pgoff as zero for anonymous memory. */ + if (!file) + pgoff = 0; + + if (get_area) { + addr = get_area(file, addr, len, pgoff, flags); + } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file + && !addr /* no hint */ + && IS_ALIGNED(len, PMD_SIZE)) { + /* Ensures that larger anonymous mappings are THP aligned. */ + addr = thp_get_unmapped_area_vmflags(file, addr, len, + pgoff, flags, vm_flags); + } else { + addr = mm_get_unmapped_area_vmflags(file, addr, len, + pgoff, flags, vm_flags); + } if (IS_ERR_VALUE(addr)) return addr; if (addr > TASK_SIZE - len) return -ENOMEM; - if (addr & ~PAGE_MASK) + if (offset_in_page(addr)) return -EINVAL; - addr = arch_rebalance_pgtables(addr, len); error = security_mmap_addr(addr); return error ? error : addr; } -EXPORT_SYMBOL(get_unmapped_area); - -/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ -struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) +unsigned long +mm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) { - struct vm_area_struct *vma = NULL; - - /* Check the cache first. */ - /* (Cache hit rate is typically around 35%.) 
*/ - vma = ACCESS_ONCE(mm->mmap_cache); - if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { - struct rb_node *rb_node; + return mm_get_unmapped_area_vmflags(file, addr, len, pgoff, flags, 0); +} +EXPORT_SYMBOL(mm_get_unmapped_area); - rb_node = mm->mm_rb.rb_node; - vma = NULL; +/** + * find_vma_intersection() - Look up the first VMA which intersects the interval + * @mm: The process address space. + * @start_addr: The inclusive start user address. + * @end_addr: The exclusive end user address. + * + * Returns: The first VMA within the provided range, %NULL otherwise. Assumes + * start_addr < end_addr. + */ +struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, + unsigned long start_addr, + unsigned long end_addr) +{ + unsigned long index = start_addr; - while (rb_node) { - struct vm_area_struct *vma_tmp; + mmap_assert_locked(mm); + return mt_find(&mm->mm_mt, &index, end_addr - 1); +} +EXPORT_SYMBOL(find_vma_intersection); - vma_tmp = rb_entry(rb_node, - struct vm_area_struct, vm_rb); +/** + * find_vma() - Find the VMA for a given address, or the next VMA. + * @mm: The mm_struct to check + * @addr: The address + * + * Returns: The VMA associated with addr, or the next VMA. + * May return %NULL in the case of no VMA at addr or above. + */ +struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) +{ + unsigned long index = addr; - if (vma_tmp->vm_end > addr) { - vma = vma_tmp; - if (vma_tmp->vm_start <= addr) - break; - rb_node = rb_node->rb_left; - } else - rb_node = rb_node->rb_right; - } - if (vma) - mm->mmap_cache = vma; - } - return vma; + mmap_assert_locked(mm); + return mt_find(&mm->mm_mt, &index, ULONG_MAX); } - EXPORT_SYMBOL(find_vma); -/* - * Same as find_vma, but also return a pointer to the previous VMA in *pprev. +/** + * find_vma_prev() - Find the VMA for a given address, or the next vma and + * set %pprev to the previous VMA, if any. + * @mm: The mm_struct to check + * @addr: The address + * @pprev: The pointer to set to the previous VMA + * + * Note that RCU lock is missing here since the external mmap_lock() is used + * instead. + * + * Returns: The VMA associated with @addr, or the next vma. + * May return %NULL in the case of no vma at addr or above. */ struct vm_area_struct * find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev) { struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, addr); - vma = find_vma(mm, addr); - if (vma) { - *pprev = vma->vm_prev; - } else { - struct rb_node *rb_node = mm->mm_rb.rb_node; - *pprev = NULL; - while (rb_node) { - *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb); - rb_node = rb_node->rb_right; - } - } + vma = vma_iter_load(&vmi); + *pprev = vma_prev(&vmi); + if (!vma) + vma = vma_next(&vmi); return vma; } -/* - * Verify that the stack growth is acceptable and - * update accounting. This is shared with both the - * grow-up and grow-down cases. 
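/*
 * Hedged usage sketch, not part of the patch: the maple-tree lookups above
 * assume the caller already holds mmap_lock. The function name is made up
 * for illustration.
 */
static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;
        bool mapped;

        mmap_read_lock(mm);
        vma = find_vma(mm, addr);       /* first VMA with vm_end > addr, or NULL */
        mapped = vma && vma->vm_start <= addr;
        mmap_read_unlock(mm);

        return mapped;
}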
- */ -static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) -{ - struct mm_struct *mm = vma->vm_mm; - struct rlimit *rlim = current->signal->rlim; - unsigned long new_start; - - /* address space limit tests */ - if (!may_expand_vm(mm, grow)) - return -ENOMEM; - - /* Stack limit test */ - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) - return -ENOMEM; - - /* mlock limit tests */ - if (vma->vm_flags & VM_LOCKED) { - unsigned long locked; - unsigned long limit; - locked = mm->locked_vm + grow; - limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur); - limit >>= PAGE_SHIFT; - if (locked > limit && !capable(CAP_IPC_LOCK)) - return -ENOMEM; - } - - /* Check to ensure the stack will not grow into a hugetlb-only region */ - new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : - vma->vm_end - size; - if (is_hugepage_only_range(vma->vm_mm, new_start, size)) - return -EFAULT; - - /* - * Overcommit.. This must be the final test, as it will - * update security statistics. - */ - if (security_vm_enough_memory_mm(mm, grow)) - return -ENOMEM; +/* enforced gap between the expanding stack and other mappings. */ +unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; - /* Ok, everything looks good - let it rip */ - if (vma->vm_flags & VM_LOCKED) - mm->locked_vm += grow; - vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); - return 0; -} - -#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) -/* - * PA-RISC uses this for its stack; IA64 for its Register Backing Store. - * vma is the last one with address > vma->vm_end. Have to extend vma. - */ -int expand_upwards(struct vm_area_struct *vma, unsigned long address) +static int __init cmdline_parse_stack_guard_gap(char *p) { - int error; - - if (!(vma->vm_flags & VM_GROWSUP)) - return -EFAULT; - - /* - * We must make sure the anon_vma is allocated - * so that the anon_vma locking is not a noop. - */ - if (unlikely(anon_vma_prepare(vma))) - return -ENOMEM; - vma_lock_anon_vma(vma); - - /* - * vma->vm_start/vm_end cannot change under us because the caller - * is required to hold the mmap_sem in read mode. We need the - * anon_vma lock to serialize against concurrent expand_stacks. - * Also guard against wrapping around to address 0. - */ - if (address < PAGE_ALIGN(address+4)) - address = PAGE_ALIGN(address+4); - else { - vma_unlock_anon_vma(vma); - return -ENOMEM; - } - error = 0; - - /* Somebody else might have raced and expanded it already */ - if (address > vma->vm_end) { - unsigned long size, grow; - - size = address - vma->vm_start; - grow = (address - vma->vm_end) >> PAGE_SHIFT; - - error = -ENOMEM; - if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { - error = acct_stack_growth(vma, size, grow); - if (!error) { - /* - * vma_gap_update() doesn't support concurrent - * updates, but we only hold a shared mmap_sem - * lock here, so we need to protect against - * concurrent vma expansions. - * vma_lock_anon_vma() doesn't help here, as - * we don't guarantee that all growable vmas - * in a mm share the same root anon vma. - * So, we reuse mm->page_table_lock to guard - * against concurrent vma expansions. 
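/*
 * Hedged note, not part of the patch: the __setup() hook above takes the
 * guard gap in pages, so booting with "stack_guard_gap=1024" gives a 4 MiB
 * gap on a 4 KiB-page kernel; the default of 256 pages is 1 MiB there.
 */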
- */ - spin_lock(&vma->vm_mm->page_table_lock); - anon_vma_interval_tree_pre_update_vma(vma); - vma->vm_end = address; - anon_vma_interval_tree_post_update_vma(vma); - if (vma->vm_next) - vma_gap_update(vma->vm_next); - else - vma->vm_mm->highest_vm_end = address; - spin_unlock(&vma->vm_mm->page_table_lock); - - perf_event_mmap(vma); - } - } - } - vma_unlock_anon_vma(vma); - khugepaged_enter_vma_merge(vma); - validate_mm(vma->vm_mm); - return error; -} -#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ - -/* - * vma is the first one with address < vma->vm_start. Have to extend vma. - */ -int expand_downwards(struct vm_area_struct *vma, - unsigned long address) -{ - int error; - - /* - * We must make sure the anon_vma is allocated - * so that the anon_vma locking is not a noop. - */ - if (unlikely(anon_vma_prepare(vma))) - return -ENOMEM; - - address &= PAGE_MASK; - error = security_mmap_addr(address); - if (error) - return error; - - vma_lock_anon_vma(vma); + unsigned long val; + char *endptr; - /* - * vma->vm_start/vm_end cannot change under us because the caller - * is required to hold the mmap_sem in read mode. We need the - * anon_vma lock to serialize against concurrent expand_stacks. - */ + val = simple_strtoul(p, &endptr, 10); + if (!*endptr) + stack_guard_gap = val << PAGE_SHIFT; - /* Somebody else might have raced and expanded it already */ - if (address < vma->vm_start) { - unsigned long size, grow; - - size = vma->vm_end - address; - grow = (vma->vm_start - address) >> PAGE_SHIFT; - - error = -ENOMEM; - if (grow <= vma->vm_pgoff) { - error = acct_stack_growth(vma, size, grow); - if (!error) { - /* - * vma_gap_update() doesn't support concurrent - * updates, but we only hold a shared mmap_sem - * lock here, so we need to protect against - * concurrent vma expansions. - * vma_lock_anon_vma() doesn't help here, as - * we don't guarantee that all growable vmas - * in a mm share the same root anon vma. - * So, we reuse mm->page_table_lock to guard - * against concurrent vma expansions. - */ - spin_lock(&vma->vm_mm->page_table_lock); - anon_vma_interval_tree_pre_update_vma(vma); - vma->vm_start = address; - vma->vm_pgoff -= grow; - anon_vma_interval_tree_post_update_vma(vma); - vma_gap_update(vma); - spin_unlock(&vma->vm_mm->page_table_lock); - - perf_event_mmap(vma); - } - } - } - vma_unlock_anon_vma(vma); - khugepaged_enter_vma_merge(vma); - validate_mm(vma->vm_mm); - return error; + return 1; } +__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); -/* - * Note how expand_stack() refuses to expand the stack all the way to - * abut the next virtual mapping, *unless* that mapping itself is also - * a stack mapping. We want to leave room for a guard page, after all - * (the guard page itself is not added here, that is done by the - * actual page faulting logic) - * - * This matches the behavior of the guard page logic (see mm/memory.c: - * check_stack_guard_page()), which only allows the guard page to be - * removed under these circumstances. 
- */ #ifdef CONFIG_STACK_GROWSUP -int expand_stack(struct vm_area_struct *vma, unsigned long address) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) { - struct vm_area_struct *next; - - address &= PAGE_MASK; - next = vma->vm_next; - if (next && next->vm_start == address + PAGE_SIZE) { - if (!(next->vm_flags & VM_GROWSUP)) - return -ENOMEM; - } return expand_upwards(vma, address); } -struct vm_area_struct * -find_extend_vma(struct mm_struct *mm, unsigned long addr) +struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma, *prev; @@ -2256,431 +964,288 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) vma = find_vma_prev(mm, addr, &prev); if (vma && (vma->vm_start <= addr)) return vma; - if (!prev || expand_stack(prev, addr)) + if (!prev) + return NULL; + if (expand_stack_locked(prev, addr)) return NULL; if (prev->vm_flags & VM_LOCKED) - __mlock_vma_pages_range(prev, addr, prev->vm_end, NULL); + populate_vma_page_range(prev, addr, prev->vm_end, NULL); return prev; } #else -int expand_stack(struct vm_area_struct *vma, unsigned long address) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) { - struct vm_area_struct *prev; - - address &= PAGE_MASK; - prev = vma->vm_prev; - if (prev && prev->vm_end == address) { - if (!(prev->vm_flags & VM_GROWSDOWN)) - return -ENOMEM; - } return expand_downwards(vma, address); } -struct vm_area_struct * -find_extend_vma(struct mm_struct * mm, unsigned long addr) +struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) { - struct vm_area_struct * vma; + struct vm_area_struct *vma; unsigned long start; addr &= PAGE_MASK; - vma = find_vma(mm,addr); + vma = find_vma(mm, addr); if (!vma) return NULL; if (vma->vm_start <= addr) return vma; - if (!(vma->vm_flags & VM_GROWSDOWN)) - return NULL; start = vma->vm_start; - if (expand_stack(vma, addr)) + if (expand_stack_locked(vma, addr)) return NULL; if (vma->vm_flags & VM_LOCKED) - __mlock_vma_pages_range(vma, addr, start, NULL); + populate_vma_page_range(vma, addr, start, NULL); return vma; } #endif -/* - * Ok - we have the memory areas we should free on the vma list, - * so release them, and do the vma updates. - * - * Called with the mm semaphore held. - */ -static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) -{ - unsigned long nr_accounted = 0; - - /* Update high watermark before we lower total_vm */ - update_hiwater_vm(mm); - do { - long nrpages = vma_pages(vma); - - if (vma->vm_flags & VM_ACCOUNT) - nr_accounted += nrpages; - vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); - vma = remove_vma(vma); - } while (vma); - vm_unacct_memory(nr_accounted); - validate_mm(mm); -} +#if defined(CONFIG_STACK_GROWSUP) -/* - * Get rid of page table information in the indicated region. - * - * Called with the mm semaphore held. - */ -static void unmap_region(struct mm_struct *mm, - struct vm_area_struct *vma, struct vm_area_struct *prev, - unsigned long start, unsigned long end) -{ - struct vm_area_struct *next = prev? prev->vm_next: mm->mmap; - struct mmu_gather tlb; +#define vma_expand_up(vma,addr) expand_upwards(vma, addr) +#define vma_expand_down(vma, addr) (-EFAULT) - lru_add_drain(); - tlb_gather_mmu(&tlb, mm, 0); - update_hiwater_rss(mm); - unmap_vmas(&tlb, vma, start, end); - free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, - next ? 
next->vm_start : USER_PGTABLES_CEILING); - tlb_finish_mmu(&tlb, start, end); -} +#else -/* - * Create a list of vma's touched by the unmap, removing them from the mm's - * vma list as we go.. - */ -static void -detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, - struct vm_area_struct *prev, unsigned long end) -{ - struct vm_area_struct **insertion_point; - struct vm_area_struct *tail_vma = NULL; +#define vma_expand_up(vma,addr) (-EFAULT) +#define vma_expand_down(vma, addr) expand_downwards(vma, addr) - insertion_point = (prev ? &prev->vm_next : &mm->mmap); - vma->vm_prev = NULL; - do { - vma_rb_erase(vma, &mm->mm_rb); - mm->map_count--; - tail_vma = vma; - vma = vma->vm_next; - } while (vma && vma->vm_start < end); - *insertion_point = vma; - if (vma) { - vma->vm_prev = prev; - vma_gap_update(vma); - } else - mm->highest_vm_end = prev ? prev->vm_end : 0; - tail_vma->vm_next = NULL; - mm->mmap_cache = NULL; /* Kill the cache. */ -} +#endif /* - * __split_vma() bypasses sysctl_max_map_count checking. We use this on the - * munmap path where it doesn't make sense to fail. + * expand_stack(): legacy interface for page faulting. Don't use unless + * you have to. + * + * This is called with the mm locked for reading, drops the lock, takes + * the lock for writing, tries to look up a vma again, expands it if + * necessary, and downgrades the lock to reading again. + * + * If no vma is found or it can't be expanded, it returns NULL and has + * dropped the lock. */ -static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, - unsigned long addr, int new_below) +struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) { - struct mempolicy *pol; - struct vm_area_struct *new; - int err = -ENOMEM; - - if (is_vm_hugetlb_page(vma) && (addr & - ~(huge_page_mask(hstate_vma(vma))))) - return -EINVAL; - - new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); - if (!new) - goto out_err; - - /* most fields are the same, copy all, and then fixup */ - *new = *vma; + struct vm_area_struct *vma, *prev; - INIT_LIST_HEAD(&new->anon_vma_chain); + mmap_read_unlock(mm); + if (mmap_write_lock_killable(mm)) + return NULL; - if (new_below) - new->vm_end = addr; - else { - new->vm_start = addr; - new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); - } + vma = find_vma_prev(mm, addr, &prev); + if (vma && vma->vm_start <= addr) + goto success; - pol = mpol_dup(vma_policy(vma)); - if (IS_ERR(pol)) { - err = PTR_ERR(pol); - goto out_free_vma; + if (prev && !vma_expand_up(prev, addr)) { + vma = prev; + goto success; } - vma_set_policy(new, pol); - - if (anon_vma_clone(new, vma)) - goto out_free_mpol; - if (new->vm_file) - get_file(new->vm_file); + if (vma && !vma_expand_down(vma, addr)) + goto success; - if (new->vm_ops && new->vm_ops->open) - new->vm_ops->open(new); - - if (new_below) - err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + - ((addr - new->vm_start) >> PAGE_SHIFT), new); - else - err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); - - /* Success. */ - if (!err) - return 0; + mmap_write_unlock(mm); + return NULL; - /* Clean everything up if vma_adjust failed. 
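/*
 * Hedged caller sketch, not part of the patch: expand_stack() above is
 * entered with mmap_lock held for reading; on success it returns the
 * (possibly expanded) VMA with the read lock held again, on failure it
 * returns NULL with the lock already dropped. The function name is made up.
 */
static bool touch_stack_addr(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;

        mmap_read_lock(mm);
        vma = expand_stack(mm, addr);   /* NULL => mmap_lock already dropped */
        if (!vma)
                return false;

        /* addr is now covered by vma, mmap_lock held for reading */
        mmap_read_unlock(mm);
        return true;
}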
*/ - if (new->vm_ops && new->vm_ops->close) - new->vm_ops->close(new); - if (new->vm_file) - fput(new->vm_file); - unlink_anon_vmas(new); - out_free_mpol: - mpol_put(pol); - out_free_vma: - kmem_cache_free(vm_area_cachep, new); - out_err: - return err; +success: + mmap_write_downgrade(mm); + return vma; } -/* - * Split a vma into two pieces at address 'addr', a new vma is allocated - * either for the first part or the tail. +/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls. + * @mm: The mm_struct + * @start: The start address to munmap + * @len: The length to be munmapped. + * @uf: The userfaultfd list_head + * + * Return: 0 on success, error otherwise. */ -int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr, int new_below) +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, + struct list_head *uf) { - if (mm->map_count >= sysctl_max_map_count) - return -ENOMEM; + VMA_ITERATOR(vmi, mm, start); - return __split_vma(mm, vma, addr, new_below); + return do_vmi_munmap(&vmi, mm, start, len, uf, false); } -/* Munmap is split into 2 main parts -- this part which finds - * what needs doing, and the areas themselves, which do the - * work. This now handles partial unmappings. - * Jeremy Fitzhardinge <jeremy@goop.org> - */ -int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) +int vm_munmap(unsigned long start, size_t len) { - unsigned long end; - struct vm_area_struct *vma, *prev, *last; + return __vm_munmap(start, len, false); +} +EXPORT_SYMBOL(vm_munmap); - if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) - return -EINVAL; +SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) +{ + addr = untagged_addr(addr); + return __vm_munmap(addr, len, true); +} - if ((len = PAGE_ALIGN(len)) == 0) - return -EINVAL; - /* Find the first overlapping VMA */ - vma = find_vma(mm, start); - if (!vma) - return 0; - prev = vma->vm_prev; - /* we have start < vma->vm_end */ +/* + * Emulation of deprecated remap_file_pages() syscall. + */ +SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, + unsigned long, prot, unsigned long, pgoff, unsigned long, flags) +{ - /* if it doesn't overlap, we have nothing.. */ - end = start + len; - if (vma->vm_start >= end) - return 0; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long populate = 0; + unsigned long ret = -EINVAL; + struct file *file; + vm_flags_t vm_flags; - /* - * If we need to split any vma, do it now to save pain later. - * - * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially - * unmapped vm_area_struct will remain in use: so lower split_vma - * places tmp vma above, and higher split_vma places tmp vma below. - */ - if (start > vma->vm_start) { - int error; + pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n", + current->comm, current->pid); - /* - * Make sure that map_count on return from munmap() will - * not exceed its limit; but let map_count go just above - * its limit temporarily, to help free resources as expected. - */ - if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) - return -ENOMEM; + if (prot) + return ret; + start = start & PAGE_MASK; + size = size & PAGE_MASK; - error = __split_vma(mm, vma, start, 0); - if (error) - return error; - prev = vma; - } + if (start + size <= start) + return ret; - /* Does it split the last one? 
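/*
 * Hedged sketch, not part of the patch: the calling convention the
 * do_munmap() wrapper above expects -- mmap_lock held for writing, a
 * userfaultfd list_head supplied, and completion run after the lock is
 * dropped (the same pattern vm_brk_flags() below uses). The function name
 * is made up for illustration.
 */
static int unmap_range(struct mm_struct *mm, unsigned long start, size_t len)
{
        LIST_HEAD(uf);
        int ret;

        if (mmap_write_lock_killable(mm))
                return -EINTR;
        ret = do_munmap(mm, start, len, &uf);
        mmap_write_unlock(mm);
        userfaultfd_unmap_complete(mm, &uf);

        return ret;
}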
*/ - last = find_vma(mm, end); - if (last && end > last->vm_start) { - int error = __split_vma(mm, last, end, 1); - if (error) - return error; - } - vma = prev? prev->vm_next: mm->mmap; + /* Does pgoff wrap? */ + if (pgoff + (size >> PAGE_SHIFT) < pgoff) + return ret; - /* - * unlock any mlock()ed ranges before detaching vmas - */ - if (mm->locked_vm) { - struct vm_area_struct *tmp = vma; - while (tmp && tmp->vm_start < end) { - if (tmp->vm_flags & VM_LOCKED) { - mm->locked_vm -= vma_pages(tmp); - munlock_vma_pages_all(tmp); - } - tmp = tmp->vm_next; - } - } + if (mmap_read_lock_killable(mm)) + return -EINTR; /* - * Remove the vma's, and unmap the actual pages + * Look up VMA under read lock first so we can perform the security + * without holding locks (which can be problematic). We reacquire a + * write lock later and check nothing changed underneath us. */ - detach_vmas_to_be_unmapped(mm, vma, prev, end); - unmap_region(mm, vma, prev, start, end); + vma = vma_lookup(mm, start); - /* Fix up all other VM information */ - remove_vma_list(mm, vma); + if (!vma || !(vma->vm_flags & VM_SHARED)) { + mmap_read_unlock(mm); + return -EINVAL; + } - return 0; -} + prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; + prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; + prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; -int vm_munmap(unsigned long start, size_t len) -{ - int ret; - struct mm_struct *mm = current->mm; + flags &= MAP_NONBLOCK; + flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; + if (vma->vm_flags & VM_LOCKED) + flags |= MAP_LOCKED; - down_write(&mm->mmap_sem); - ret = do_munmap(mm, start, len); - up_write(&mm->mmap_sem); - return ret; -} -EXPORT_SYMBOL(vm_munmap); + /* Save vm_flags used to calculate prot and flags, and recheck later. */ + vm_flags = vma->vm_flags; + file = get_file(vma->vm_file); -SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) -{ - profile_munmap(addr); - return vm_munmap(addr, len); -} + mmap_read_unlock(mm); -static inline void verify_mm_writelocked(struct mm_struct *mm) -{ -#ifdef CONFIG_DEBUG_VM - if (unlikely(down_read_trylock(&mm->mmap_sem))) { - WARN_ON(1); - up_read(&mm->mmap_sem); + /* Call outside mmap_lock to be consistent with other callers. */ + ret = security_mmap_file(file, prot, flags); + if (ret) { + fput(file); + return ret; } -#endif -} -/* - * this is really a simplified "do_mmap". it only handles - * anonymous maps. eventually we may be able to do some - * brk-specific accounting here. - */ -static unsigned long do_brk(unsigned long addr, unsigned long len) -{ - struct mm_struct * mm = current->mm; - struct vm_area_struct * vma, * prev; - unsigned long flags; - struct rb_node ** rb_link, * rb_parent; - pgoff_t pgoff = addr >> PAGE_SHIFT; - int error; + ret = -EINVAL; - len = PAGE_ALIGN(len); - if (!len) - return addr; + /* OK security check passed, take write lock + let it rip. */ + if (mmap_write_lock_killable(mm)) { + fput(file); + return -EINTR; + } - flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; + vma = vma_lookup(mm, start); - error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); - if (error & ~PAGE_MASK) - return error; + if (!vma) + goto out; - /* - * mlock MCL_FUTURE? - */ - if (mm->def_flags & VM_LOCKED) { - unsigned long locked, lock_limit; - locked = len >> PAGE_SHIFT; - locked += mm->locked_vm; - lock_limit = rlimit(RLIMIT_MEMLOCK); - lock_limit >>= PAGE_SHIFT; - if (locked > lock_limit && !capable(CAP_IPC_LOCK)) - return -EAGAIN; - } + /* Make sure things didn't change under us. 
*/ + if (vma->vm_flags != vm_flags) + goto out; + if (vma->vm_file != file) + goto out; - /* - * mm->mmap_sem is required to protect against another thread - * changing the mappings in case we sleep. - */ - verify_mm_writelocked(mm); + if (start + size > vma->vm_end) { + VMA_ITERATOR(vmi, mm, vma->vm_end); + struct vm_area_struct *next, *prev = vma; - /* - * Clear old maps. this also does some error checking for us - */ - munmap_back: - if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { - if (do_munmap(mm, addr, len)) - return -ENOMEM; - goto munmap_back; - } + for_each_vma_range(vmi, next, start + size) { + /* hole between vmas ? */ + if (next->vm_start != prev->vm_end) + goto out; - /* Check against address space limits *after* clearing old maps... */ - if (!may_expand_vm(mm, len >> PAGE_SHIFT)) - return -ENOMEM; + if (next->vm_file != vma->vm_file) + goto out; - if (mm->map_count > sysctl_max_map_count) - return -ENOMEM; + if (next->vm_flags != vma->vm_flags) + goto out; - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) - return -ENOMEM; + if (start + size <= next->vm_end) + break; - /* Can we just expand an old private anonymous mapping? */ - vma = vma_merge(mm, prev, addr, addr + len, flags, - NULL, NULL, pgoff, NULL); - if (vma) - goto out; + prev = next; + } - /* - * create a vma struct for an anonymous mapping - */ - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); - if (!vma) { - vm_unacct_memory(len >> PAGE_SHIFT); - return -ENOMEM; + if (!next) + goto out; } - INIT_LIST_HEAD(&vma->anon_vma_chain); - vma->vm_mm = mm; - vma->vm_start = addr; - vma->vm_end = addr + len; - vma->vm_pgoff = pgoff; - vma->vm_flags = flags; - vma->vm_page_prot = vm_get_page_prot(flags); - vma_link(mm, vma, prev, rb_link, rb_parent); + ret = do_mmap(vma->vm_file, start, size, + prot, flags, 0, pgoff, &populate, NULL); out: - perf_event_mmap(vma); - mm->total_vm += len >> PAGE_SHIFT; - if (flags & VM_LOCKED) - mm->locked_vm += (len >> PAGE_SHIFT); - return addr; + mmap_write_unlock(mm); + fput(file); + if (populate) + mm_populate(ret, populate); + if (!IS_ERR_VALUE(ret)) + ret = 0; + return ret; } -unsigned long vm_brk(unsigned long addr, unsigned long len) +int vm_brk_flags(unsigned long addr, unsigned long request, vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; - unsigned long ret; + struct vm_area_struct *vma = NULL; + unsigned long len; + int ret; bool populate; + LIST_HEAD(uf); + VMA_ITERATOR(vmi, mm, addr); + + len = PAGE_ALIGN(request); + if (len < request) + return -ENOMEM; + if (!len) + return 0; + + /* Until we need other flags, refuse anything except VM_EXEC. */ + if ((vm_flags & (~VM_EXEC)) != 0) + return -EINVAL; + + if (mmap_write_lock_killable(mm)) + return -EINTR; + + ret = check_brk_limits(addr, len); + if (ret) + goto limits_failed; + + ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0); + if (ret) + goto munmap_failed; - down_write(&mm->mmap_sem); - ret = do_brk(addr, len); + vma = vma_prev(&vmi); + ret = do_brk_flags(&vmi, vma, addr, len, vm_flags); populate = ((mm->def_flags & VM_LOCKED) != 0); - up_write(&mm->mmap_sem); - if (populate) + mmap_write_unlock(mm); + userfaultfd_unmap_complete(mm, &uf); + if (populate && !ret) mm_populate(addr, len); return ret; + +munmap_failed: +limits_failed: + mmap_write_unlock(mm); + return ret; } -EXPORT_SYMBOL(vm_brk); +EXPORT_SYMBOL(vm_brk_flags); /* Release all mmaps. 
*/ void exit_mmap(struct mm_struct *mm) @@ -2688,199 +1253,175 @@ void exit_mmap(struct mm_struct *mm) struct mmu_gather tlb; struct vm_area_struct *vma; unsigned long nr_accounted = 0; + VMA_ITERATOR(vmi, mm, 0); + int count = 0; /* mm's last user has gone, and its about to be pulled down */ mmu_notifier_release(mm); - if (mm->locked_vm) { - vma = mm->mmap; - while (vma) { - if (vma->vm_flags & VM_LOCKED) - munlock_vma_pages_all(vma); - vma = vma->vm_next; - } - } - + mmap_read_lock(mm); arch_exit_mmap(mm); - vma = mm->mmap; - if (!vma) /* Can happen if dup_mmap() received an OOM */ - return; + vma = vma_next(&vmi); + if (!vma || unlikely(xa_is_zero(vma))) { + /* Can happen if dup_mmap() received an OOM */ + mmap_read_unlock(mm); + mmap_write_lock(mm); + goto destroy; + } - lru_add_drain(); flush_cache_mm(mm); - tlb_gather_mmu(&tlb, mm, 1); + tlb_gather_mmu_fullmm(&tlb, mm); /* update_hiwater_rss(mm) here? but nobody should be looking */ - /* Use -1 here to ensure all VMAs in the mm are unmapped */ - unmap_vmas(&tlb, vma, 0, -1); + /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ + unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX); + mmap_read_unlock(mm); - free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); - tlb_finish_mmu(&tlb, 0, -1); + /* + * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper + * because the memory has been already freed. + */ + mm_flags_set(MMF_OOM_SKIP, mm); + mmap_write_lock(mm); + mt_clear_in_rcu(&mm->mm_mt); + vma_iter_set(&vmi, vma->vm_end); + free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS, + USER_PGTABLES_CEILING, true); + tlb_finish_mmu(&tlb); /* - * Walk the list again, actually closing and freeing it, - * with preemption enabled, without holding any MM locks. + * Walk the list again, actually closing and freeing it, with preemption + * enabled, without holding any MM locks besides the unreachable + * mmap_write_lock. */ - while (vma) { + vma_iter_set(&vmi, vma->vm_end); + do { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); - vma = remove_vma(vma); - } + vma_mark_detached(vma); + remove_vma(vma); + count++; + cond_resched(); + vma = vma_next(&vmi); + } while (vma && likely(!xa_is_zero(vma))); + + BUG_ON(count != mm->map_count); + + trace_exit_mmap(mm); +destroy: + __mt_destroy(&mm->mm_mt); + mmap_write_unlock(mm); vm_unacct_memory(nr_accounted); - - WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); } -/* Insert vm structure into process list sorted by address - * and into the inode's i_mmap tree. If vm_file is non-NULL - * then i_mmap_mutex is taken here. +/* + * Return true if the calling process may expand its vm space by the passed + * number of pages */ -int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) +bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) { - struct vm_area_struct *prev; - struct rb_node **rb_link, *rb_parent; + if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) + return false; - /* - * The vm_pgoff of a purely anonymous vma should be irrelevant - * until its first write fault, when page's anon_vma and index - * are set. But now set the vm_pgoff it will almost certainly - * end up with (unless mremap moves it elsewhere before that - * first wfault), so /proc/pid/maps tells a consistent story. - * - * By setting it to reflect the virtual start address of the - * vma, merges and splits can happen in a seamless way, just - * using the existing file pgoff checks and manipulations. 
- * Similarly in do_mmap_pgoff and in do_brk. - */ - if (!vma->vm_file) { - BUG_ON(vma->anon_vma); - vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; + if (is_data_mapping(flags) && + mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { + /* Workaround for Valgrind */ + if (rlimit(RLIMIT_DATA) == 0 && + mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) + return true; + + pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n", + current->comm, current->pid, + (mm->data_vm + npages) << PAGE_SHIFT, + rlimit(RLIMIT_DATA), + ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data"); + + if (!ignore_rlimit_data) + return false; } - if (find_vma_links(mm, vma->vm_start, vma->vm_end, - &prev, &rb_link, &rb_parent)) - return -ENOMEM; - if ((vma->vm_flags & VM_ACCOUNT) && - security_vm_enough_memory_mm(mm, vma_pages(vma))) - return -ENOMEM; - vma_link(mm, vma, prev, rb_link, rb_parent); - return 0; + return true; } +void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) +{ + WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); + + if (is_exec_mapping(flags)) + mm->exec_vm += npages; + else if (is_stack_mapping(flags)) + mm->stack_vm += npages; + else if (is_data_mapping(flags)) + mm->data_vm += npages; +} + +static vm_fault_t special_mapping_fault(struct vm_fault *vmf); + /* - * Copy the vma structure to a new location in the same mm, - * prior to moving page table entries, to effect an mremap move. + * Close hook, called for unmap() and on the old vma for mremap(). + * + * Having a close hook prevents vma merging regardless of flags. */ -struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, - unsigned long addr, unsigned long len, pgoff_t pgoff, - bool *need_rmap_locks) +static void special_mapping_close(struct vm_area_struct *vma) { - struct vm_area_struct *vma = *vmap; - unsigned long vma_start = vma->vm_start; - struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *new_vma, *prev; - struct rb_node **rb_link, *rb_parent; - struct mempolicy *pol; - bool faulted_in_anon_vma = true; - - /* - * If anonymous vma has not yet been faulted, update new pgoff - * to match new location, to increase its chance of merging. - */ - if (unlikely(!vma->vm_file && !vma->anon_vma)) { - pgoff = addr >> PAGE_SHIFT; - faulted_in_anon_vma = false; - } + const struct vm_special_mapping *sm = vma->vm_private_data; - if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) - return NULL; /* should never get here */ - new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, - vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); - if (new_vma) { - /* - * Source vma may have been merged into new_vma - */ - if (unlikely(vma_start >= new_vma->vm_start && - vma_start < new_vma->vm_end)) { - /* - * The only way we can get a vma_merge with - * self during an mremap is if the vma hasn't - * been faulted in yet and we were allowed to - * reset the dst vma->vm_pgoff to the - * destination address of the mremap to allow - * the merge to happen. mremap must change the - * vm_pgoff linearity between src and dst vmas - * (in turn preventing a vma_merge) to be - * safe. It is only safe to keep the vm_pgoff - * linear if there are no pages mapped yet. 
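/*
 * Hedged sketch, not part of the patch: the pairing convention callers of
 * may_expand_vm() and vm_stat_account() above follow -- check the rlimits
 * before creating a mapping, then account the pages once the VMA is in
 * place. The function name is made up for illustration.
 */
static int account_new_mapping(struct mm_struct *mm, vm_flags_t vm_flags,
                               unsigned long len)
{
        unsigned long npages = len >> PAGE_SHIFT;

        if (!may_expand_vm(mm, vm_flags, npages))
                return -ENOMEM;

        /* ... create and link the VMA here ... */

        vm_stat_account(mm, vm_flags, npages);
        return 0;
}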
- */ - VM_BUG_ON(faulted_in_anon_vma); - *vmap = vma = new_vma; - } - *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); - } else { - new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); - if (new_vma) { - *new_vma = *vma; - new_vma->vm_start = addr; - new_vma->vm_end = addr + len; - new_vma->vm_pgoff = pgoff; - pol = mpol_dup(vma_policy(vma)); - if (IS_ERR(pol)) - goto out_free_vma; - vma_set_policy(new_vma, pol); - INIT_LIST_HEAD(&new_vma->anon_vma_chain); - if (anon_vma_clone(new_vma, vma)) - goto out_free_mempol; - if (new_vma->vm_file) - get_file(new_vma->vm_file); - if (new_vma->vm_ops && new_vma->vm_ops->open) - new_vma->vm_ops->open(new_vma); - vma_link(mm, new_vma, prev, rb_link, rb_parent); - *need_rmap_locks = false; - } - } - return new_vma; + if (sm->close) + sm->close(sm, vma); +} - out_free_mempol: - mpol_put(pol); - out_free_vma: - kmem_cache_free(vm_area_cachep, new_vma); - return NULL; +static const char *special_mapping_name(struct vm_area_struct *vma) +{ + return ((struct vm_special_mapping *)vma->vm_private_data)->name; } -/* - * Return true if the calling process may expand its vm space by the passed - * number of pages - */ -int may_expand_vm(struct mm_struct *mm, unsigned long npages) +static int special_mapping_mremap(struct vm_area_struct *new_vma) { - unsigned long cur = mm->total_vm; /* pages */ - unsigned long lim; + struct vm_special_mapping *sm = new_vma->vm_private_data; - lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT; + if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) + return -EFAULT; - if (cur + npages > lim) - return 0; - return 1; + if (sm->mremap) + return sm->mremap(sm, new_vma); + + return 0; } +static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) +{ + /* + * Forbid splitting special mappings - kernel has expectations over + * the number of pages in mapping. Together with VM_DONTEXPAND + * the size of vma should stay the same over the special mapping's + * lifetime. + */ + return -EINVAL; +} + +static const struct vm_operations_struct special_mapping_vmops = { + .close = special_mapping_close, + .fault = special_mapping_fault, + .mremap = special_mapping_mremap, + .name = special_mapping_name, + /* vDSO code relies that VVAR can't be accessed remotely */ + .access = NULL, + .may_split = special_mapping_split, +}; -static int special_mapping_fault(struct vm_area_struct *vma, - struct vm_fault *vmf) +static vm_fault_t special_mapping_fault(struct vm_fault *vmf) { + struct vm_area_struct *vma = vmf->vma; pgoff_t pgoff; struct page **pages; + struct vm_special_mapping *sm = vma->vm_private_data; - /* - * special mappings have no vm_file, and in that case, the mm - * uses vm_pgoff internally. So we have to subtract it from here. - * We are allowed to do this because we are the mm; do not copy - * this code into drivers! - */ - pgoff = vmf->pgoff - vma->vm_pgoff; + if (sm->fault) + return sm->fault(sm, vmf->vma, vmf); - for (pages = vma->vm_private_data; pgoff && *pages; ++pages) + pages = sm->pages; + + for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) pgoff--; if (*pages) { @@ -2893,238 +1434,133 @@ static int special_mapping_fault(struct vm_area_struct *vma, return VM_FAULT_SIGBUS; } -/* - * Having a close hook prevents vma merging regardless of flags. - */ -static void special_mapping_close(struct vm_area_struct *vma) -{ -} - -static const struct vm_operations_struct special_mapping_vmops = { - .close = special_mapping_close, - .fault = special_mapping_fault, -}; - -/* - * Called with mm->mmap_sem held for writing. 
- * Insert a new vma covering the given region, with the given flags. - * Its pages are supplied by the given array of struct page *. - * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. - * The region past the last page supplied will always produce SIGBUS. - * The array pointer and the pages it points to are assumed to stay alive - * for as long as this mapping might exist. - */ -int install_special_mapping(struct mm_struct *mm, - unsigned long addr, unsigned long len, - unsigned long vm_flags, struct page **pages) +static struct vm_area_struct *__install_special_mapping( + struct mm_struct *mm, + unsigned long addr, unsigned long len, + vm_flags_t vm_flags, void *priv, + const struct vm_operations_struct *ops) { int ret; struct vm_area_struct *vma; - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + vma = vm_area_alloc(mm); if (unlikely(vma == NULL)) - return -ENOMEM; - - INIT_LIST_HEAD(&vma->anon_vma_chain); - vma->vm_mm = mm; - vma->vm_start = addr; - vma->vm_end = addr + len; + return ERR_PTR(-ENOMEM); - vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND; + vma_set_range(vma, addr, addr + len, 0); + vm_flags |= mm->def_flags | VM_DONTEXPAND; + if (pgtable_supports_soft_dirty()) + vm_flags |= VM_SOFTDIRTY; + vm_flags_init(vma, vm_flags & ~VM_LOCKED_MASK); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - vma->vm_ops = &special_mapping_vmops; - vma->vm_private_data = pages; + vma->vm_ops = ops; + vma->vm_private_data = priv; ret = insert_vm_struct(mm, vma); if (ret) goto out; - mm->total_vm += len >> PAGE_SHIFT; + vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); perf_event_mmap(vma); - return 0; + return vma; out: - kmem_cache_free(vm_area_cachep, vma); - return ret; -} - -static DEFINE_MUTEX(mm_all_locks_mutex); - -static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) -{ - if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) { - /* - * The LSB of head.next can't change from under us - * because we hold the mm_all_locks_mutex. - */ - down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem); - /* - * We can safely modify head.next after taking the - * anon_vma->root->rwsem. If some other vma in this mm shares - * the same anon_vma we won't take it again. - * - * No need of atomic instructions here, head.next - * can't change from under us thanks to the - * anon_vma->root->rwsem. - */ - if (__test_and_set_bit(0, (unsigned long *) - &anon_vma->root->rb_root.rb_node)) - BUG(); - } + vm_area_free(vma); + return ERR_PTR(ret); } -static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) +bool vma_is_special_mapping(const struct vm_area_struct *vma, + const struct vm_special_mapping *sm) { - if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { - /* - * AS_MM_ALL_LOCKS can't change from under us because - * we hold the mm_all_locks_mutex. - * - * Operations on ->flags have to be atomic because - * even if AS_MM_ALL_LOCKS is stable thanks to the - * mm_all_locks_mutex, there may be other cpus - * changing other bitflags in parallel to us. - */ - if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) - BUG(); - mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem); - } + return vma->vm_private_data == sm && + vma->vm_ops == &special_mapping_vmops; } /* - * This operation locks against the VM for all pte/vma/mm related - * operations that could ever happen on a certain mm. This includes - * vmtruncate, try_to_unmap, and all page faults. 
- * - * The caller must take the mmap_sem in write mode before calling - * mm_take_all_locks(). The caller isn't allowed to release the - * mmap_sem until mm_drop_all_locks() returns. - * - * mmap_sem in write mode is required in order to block all operations - * that could modify pagetables and free pages without need of - * altering the vma layout (for example populate_range() with - * nonlinear vmas). It's also needed in write mode to avoid new - * anon_vmas to be associated with existing vmas. - * - * A single task can't take more than one mm_take_all_locks() in a row - * or it would deadlock. - * - * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in - * mapping->flags avoid to take the same lock twice, if more than one - * vma in this mm is backed by the same anon_vma or address_space. - * - * We can take all the locks in random order because the VM code - * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never - * takes more than one of them in a row. Secondly we're protected - * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex. - * - * mm_take_all_locks() and mm_drop_all_locks are expensive operations - * that may have to take thousand of locks. - * - * mm_take_all_locks() can fail if it's interrupted by signals. + * Called with mm->mmap_lock held for writing. + * Insert a new vma covering the given region, with the given flags. + * Its pages are supplied by the given array of struct page *. + * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. + * The region past the last page supplied will always produce SIGBUS. + * The array pointer and the pages it points to are assumed to stay alive + * for as long as this mapping might exist. */ -int mm_take_all_locks(struct mm_struct *mm) +struct vm_area_struct *_install_special_mapping( + struct mm_struct *mm, + unsigned long addr, unsigned long len, + vm_flags_t vm_flags, const struct vm_special_mapping *spec) { - struct vm_area_struct *vma; - struct anon_vma_chain *avc; - - BUG_ON(down_read_trylock(&mm->mmap_sem)); - - mutex_lock(&mm_all_locks_mutex); - - for (vma = mm->mmap; vma; vma = vma->vm_next) { - if (signal_pending(current)) - goto out_unlock; - if (vma->vm_file && vma->vm_file->f_mapping) - vm_lock_mapping(mm, vma->vm_file->f_mapping); - } - - for (vma = mm->mmap; vma; vma = vma->vm_next) { - if (signal_pending(current)) - goto out_unlock; - if (vma->anon_vma) - list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) - vm_lock_anon_vma(mm, avc->anon_vma); - } - - return 0; - -out_unlock: - mm_drop_all_locks(mm); - return -EINTR; + return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, + &special_mapping_vmops); } -static void vm_unlock_anon_vma(struct anon_vma *anon_vma) -{ - if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) { - /* - * The LSB of head.next can't change to 0 from under - * us because we hold the mm_all_locks_mutex. - * - * We must however clear the bitflag before unlocking - * the vma so the users using the anon_vma->rb_root will - * never see our bitflag. - * - * No need of atomic instructions here, head.next - * can't change from under us until we release the - * anon_vma->root->rwsem. 
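/*
 * Hedged sketch, not part of the patch: a vDSO-style caller of
 * _install_special_mapping() above. The mapping name, page array and flags
 * are made up for illustration; the page array is NULL-terminated, as the
 * fault handler above expects, and the caller holds mmap_lock for writing.
 */
static struct page *example_pages[2];   /* [0] filled at init, [1] = NULL */

static const struct vm_special_mapping example_mapping = {
        .name  = "[example]",
        .pages = example_pages,
};

static int map_example_page(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;

        vma = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_MAYREAD | VM_DONTDUMP,
                                       &example_mapping);
        return PTR_ERR_OR_ZERO(vma);
}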
- */ - if (!__test_and_clear_bit(0, (unsigned long *) - &anon_vma->root->rb_root.rb_node)) - BUG(); - anon_vma_unlock_write(anon_vma); - } -} - -static void vm_unlock_mapping(struct address_space *mapping) -{ - if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { - /* - * AS_MM_ALL_LOCKS can't change to 0 from under us - * because we hold the mm_all_locks_mutex. - */ - mutex_unlock(&mapping->i_mmap_mutex); - if (!test_and_clear_bit(AS_MM_ALL_LOCKS, - &mapping->flags)) - BUG(); - } -} - -/* - * The mmap_sem cannot be released by the caller until - * mm_drop_all_locks() returns. - */ -void mm_drop_all_locks(struct mm_struct *mm) -{ - struct vm_area_struct *vma; - struct anon_vma_chain *avc; - - BUG_ON(down_read_trylock(&mm->mmap_sem)); - BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); - - for (vma = mm->mmap; vma; vma = vma->vm_next) { - if (vma->anon_vma) - list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) - vm_unlock_anon_vma(avc->anon_vma); - if (vma->vm_file && vma->vm_file->f_mapping) - vm_unlock_mapping(vma->vm_file->f_mapping); - } +#ifdef CONFIG_SYSCTL +#if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ + defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) +int sysctl_legacy_va_layout; +#endif - mutex_unlock(&mm_all_locks_mutex); -} +static const struct ctl_table mmap_table[] = { + { + .procname = "max_map_count", + .data = &sysctl_max_map_count, + .maxlen = sizeof(sysctl_max_map_count), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + }, +#if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ + defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) + { + .procname = "legacy_va_layout", + .data = &sysctl_legacy_va_layout, + .maxlen = sizeof(sysctl_legacy_va_layout), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + }, +#endif +#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS + { + .procname = "mmap_rnd_bits", + .data = &mmap_rnd_bits, + .maxlen = sizeof(mmap_rnd_bits), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = (void *)&mmap_rnd_bits_min, + .extra2 = (void *)&mmap_rnd_bits_max, + }, +#endif +#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS + { + .procname = "mmap_rnd_compat_bits", + .data = &mmap_rnd_compat_bits, + .maxlen = sizeof(mmap_rnd_compat_bits), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = (void *)&mmap_rnd_compat_bits_min, + .extra2 = (void *)&mmap_rnd_compat_bits_max, + }, +#endif +}; +#endif /* CONFIG_SYSCTL */ /* - * initialise the VMA slab + * initialise the percpu counter for VM, initialise VMA state. */ void __init mmap_init(void) { int ret; - ret = percpu_counter_init(&vm_committed_as, 0); + ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); VM_BUG_ON(ret); +#ifdef CONFIG_SYSCTL + register_sysctl_init("vm", mmap_table); +#endif + vma_state_init(); } /* @@ -3141,12 +1577,12 @@ static int init_user_reserve(void) { unsigned long free_kbytes; - free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); + free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); - sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); + sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K); return 0; } -module_init(init_user_reserve) +subsys_initcall(init_user_reserve); /* * Initialise sysctl_admin_reserve_kbytes. 
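/*
 * Hedged note, not part of the patch: once registered under "vm" in
 * mmap_init() above, the table entries surface as e.g.
 * /proc/sys/vm/max_map_count and /proc/sys/vm/mmap_rnd_bits, with
 * proc_dointvec_minmax() enforcing the extra1/extra2 bounds.
 */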
@@ -3162,12 +1598,12 @@ static int init_admin_reserve(void) { unsigned long free_kbytes; - free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); + free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); - sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); + sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K); return 0; } -module_init(init_admin_reserve) +subsys_initcall(init_admin_reserve); /* * Reinititalise user and admin reserves if memory is added or removed. @@ -3196,17 +1632,17 @@ static int reserve_mem_notifier(struct notifier_block *nb, case MEM_ONLINE: /* Default max is 128MB. Leave alone if modified by operator. */ tmp = sysctl_user_reserve_kbytes; - if (0 < tmp && tmp < (1UL << 17)) + if (tmp > 0 && tmp < SZ_128K) init_user_reserve(); /* Default max is 8MB. Leave alone if modified by operator. */ tmp = sysctl_admin_reserve_kbytes; - if (0 < tmp && tmp < (1UL << 13)) + if (tmp > 0 && tmp < SZ_8K) init_admin_reserve(); break; case MEM_OFFLINE: - free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); + free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); if (sysctl_user_reserve_kbytes > free_kbytes) { init_user_reserve(); @@ -3226,15 +1662,221 @@ static int reserve_mem_notifier(struct notifier_block *nb, return NOTIFY_OK; } -static struct notifier_block reserve_mem_nb = { - .notifier_call = reserve_mem_notifier, -}; - static int __meminit init_reserve_notifier(void) { - if (register_hotmemory_notifier(&reserve_mem_nb)) - printk("Failed registering memory add/remove notifier for admin reserve"); + if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI)) + pr_err("Failed registering memory add/remove notifier for admin reserve\n"); return 0; } -module_init(init_reserve_notifier) +subsys_initcall(init_reserve_notifier); + +/* + * Obtain a read lock on mm->mmap_lock, if the specified address is below the + * start of the VMA, the intent is to perform a write, and it is a + * downward-growing stack, then attempt to expand the stack to contain it. + * + * This function is intended only for obtaining an argument page from an ELF + * image, and is almost certainly NOT what you want to use for any other + * purpose. + * + * IMPORTANT - VMA fields are accessed without an mmap lock being held, so the + * VMA referenced must not be linked in any user-visible tree, i.e. it must be a + * new VMA being mapped. + * + * The function assumes that addr is either contained within the VMA or below + * it, and makes no attempt to validate this value beyond that. + * + * Returns true if the read lock was obtained and a stack was perhaps expanded, + * false if the stack expansion failed. + * + * On stack expansion the function temporarily acquires an mmap write lock + * before downgrading it. 
+ */ +bool mmap_read_lock_maybe_expand(struct mm_struct *mm, + struct vm_area_struct *new_vma, + unsigned long addr, bool write) +{ + if (!write || addr >= new_vma->vm_start) { + mmap_read_lock(mm); + return true; + } + + if (!(new_vma->vm_flags & VM_GROWSDOWN)) + return false; + + mmap_write_lock(mm); + if (expand_downwards(new_vma, addr)) { + mmap_write_unlock(mm); + return false; + } + + mmap_write_downgrade(mm); + return true; +} + +__latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) +{ + struct vm_area_struct *mpnt, *tmp; + int retval; + unsigned long charge = 0; + LIST_HEAD(uf); + VMA_ITERATOR(vmi, mm, 0); + + if (mmap_write_lock_killable(oldmm)) + return -EINTR; + flush_cache_dup_mm(oldmm); + uprobe_dup_mmap(oldmm, mm); + /* + * Not linked in yet - no deadlock potential: + */ + mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING); + + /* No ordering required: file already has been exposed. */ + dup_mm_exe_file(mm, oldmm); + + mm->total_vm = oldmm->total_vm; + mm->data_vm = oldmm->data_vm; + mm->exec_vm = oldmm->exec_vm; + mm->stack_vm = oldmm->stack_vm; + + /* Use __mt_dup() to efficiently build an identical maple tree. */ + retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL); + if (unlikely(retval)) + goto out; + + mt_clear_in_rcu(vmi.mas.tree); + for_each_vma(vmi, mpnt) { + struct file *file; + + retval = vma_start_write_killable(mpnt); + if (retval < 0) + goto loop_out; + if (mpnt->vm_flags & VM_DONTCOPY) { + retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start, + mpnt->vm_end, GFP_KERNEL); + if (retval) + goto loop_out; + + vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); + continue; + } + charge = 0; + if (mpnt->vm_flags & VM_ACCOUNT) { + unsigned long len = vma_pages(mpnt); + + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ + goto fail_nomem; + charge = len; + } + + tmp = vm_area_dup(mpnt); + if (!tmp) + goto fail_nomem; + retval = vma_dup_policy(mpnt, tmp); + if (retval) + goto fail_nomem_policy; + tmp->vm_mm = mm; + retval = dup_userfaultfd(tmp, &uf); + if (retval) + goto fail_nomem_anon_vma_fork; + if (tmp->vm_flags & VM_WIPEONFORK) { + /* + * VM_WIPEONFORK gets a clean slate in the child. + * Don't prepare anon_vma until fault since we don't + * copy page for current vma. + */ + tmp->anon_vma = NULL; + } else if (anon_vma_fork(tmp, mpnt)) + goto fail_nomem_anon_vma_fork; + vm_flags_clear(tmp, VM_LOCKED_MASK); + /* + * Copy/update hugetlb private vma information. + */ + if (is_vm_hugetlb_page(tmp)) + hugetlb_dup_vma_private(tmp); + + /* + * Link the vma into the MT. After using __mt_dup(), memory + * allocation is not necessary here, so it cannot fail. 
+ */ + vma_iter_bulk_store(&vmi, tmp); + + mm->map_count++; + + if (tmp->vm_ops && tmp->vm_ops->open) + tmp->vm_ops->open(tmp); + + file = tmp->vm_file; + if (file) { + struct address_space *mapping = file->f_mapping; + + get_file(file); + i_mmap_lock_write(mapping); + if (vma_is_shared_maywrite(tmp)) + mapping_allow_writable(mapping); + flush_dcache_mmap_lock(mapping); + /* insert tmp into the share list, just after mpnt */ + vma_interval_tree_insert_after(tmp, mpnt, + &mapping->i_mmap); + flush_dcache_mmap_unlock(mapping); + i_mmap_unlock_write(mapping); + } + + if (!(tmp->vm_flags & VM_WIPEONFORK)) + retval = copy_page_range(tmp, mpnt); + + if (retval) { + mpnt = vma_next(&vmi); + goto loop_out; + } + } + /* a new mm has just been created */ + retval = arch_dup_mmap(oldmm, mm); +loop_out: + vma_iter_free(&vmi); + if (!retval) { + mt_set_in_rcu(vmi.mas.tree); + ksm_fork(mm, oldmm); + khugepaged_fork(mm, oldmm); + } else { + + /* + * The entire maple tree has already been duplicated. If the + * mmap duplication fails, mark the failure point with + * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered, + * stop releasing VMAs that have not been duplicated after this + * point. + */ + if (mpnt) { + mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1); + mas_store(&vmi.mas, XA_ZERO_ENTRY); + /* Avoid OOM iterating a broken tree */ + mm_flags_set(MMF_OOM_SKIP, mm); + } + /* + * The mm_struct is going to exit, but the locks will be dropped + * first. Set the mm_struct as unstable is advisable as it is + * not fully initialised. + */ + mm_flags_set(MMF_UNSTABLE, mm); + } +out: + mmap_write_unlock(mm); + flush_tlb_mm(oldmm); + mmap_write_unlock(oldmm); + if (!retval) + dup_userfaultfd_complete(&uf); + else + dup_userfaultfd_fail(&uf); + return retval; + +fail_nomem_anon_vma_fork: + mpol_put(vma_policy(tmp)); +fail_nomem_policy: + vm_area_free(tmp); +fail_nomem: + retval = -ENOMEM; + vm_unacct_memory(charge); + goto loop_out; +} |
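The VM_DONTCOPY and VM_WIPEONFORK branches in dup_mmap() above are what back the user-visible madvise(MADV_DONTFORK) and madvise(MADV_WIPEONFORK) semantics: the former drops the VMA from the child entirely, the latter keeps the VMA but clears its anon_vma and skips copy_page_range(), so the child sees zero-filled pages. A small user-space demonstration of the WIPEONFORK case, not part of this patch (MADV_WIPEONFORK needs Linux 4.14+; the fallback define mirrors the asm-generic UAPI value):

/*
 * Illustrative only, not part of this patch: MADV_WIPEONFORK keeps the VMA
 * in the child, but dup_mmap() clears its anon_vma and skips
 * copy_page_range(), so the child reads back zeroes instead of the
 * parent's data.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef MADV_WIPEONFORK
#define MADV_WIPEONFORK 18      /* include/uapi/asm-generic/mman-common.h */
#endif

int main(void)
{
        size_t len = (size_t)sysconf(_SC_PAGESIZE);
        pid_t pid;
        char *normal = mmap(NULL, len, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        char *wipe = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (normal == MAP_FAILED || wipe == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        strcpy(normal, "copied to child");
        strcpy(wipe, "wiped in child");

        if (madvise(wipe, len, MADV_WIPEONFORK)) {
                perror("madvise(MADV_WIPEONFORK)");
                return 1;
        }

        pid = fork();
        if (pid == 0) {
                /* Child: both VMAs exist, but the wiped one is zero-filled. */
                printf("child:  normal=\"%s\" wipe=\"%s\"\n", normal, wipe);
                _exit(0);
        }
        waitpid(pid, NULL, 0);
        printf("parent: normal=\"%s\" wipe=\"%s\"\n", normal, wipe);
        return 0;
}

The parent prints both strings unchanged, while the child prints the first string and an empty second one, matching the dup_mmap() path that leaves tmp->anon_vma NULL and skips the page copy for VM_WIPEONFORK VMAs.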
