Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	929
1 file changed, 341 insertions(+), 588 deletions(-)
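One pattern recurs throughout the hunks that follow: direct bitops on mm->flags (test_bit(), set_bit()) are replaced by the mm_flags_test()/mm_flags_set() accessors, for example for MMF_TOPDOWN and MMF_OOM_SKIP. A minimal before/after sketch of that conversion, assuming the accessors simply wrap the bitops over the mm_struct flags storage (the "old" form is how the tree looked before this series and will not build against the new layout):

#include <linux/mm_types.h>	/* struct mm_struct, MMF_* flag bits */

/* Old style: callers reach into the flags word directly. */
static bool topdown_layout_old(struct mm_struct *mm)
{
	return test_bit(MMF_TOPDOWN, &mm->flags);
}

/* New style: the accessor hides how the flags are stored. */
static bool topdown_layout_new(struct mm_struct *mm)
{
	return mm_flags_test(MMF_TOPDOWN, mm);
}

/* Same pattern for setters, as in exit_mmap() and dup_mmap() below. */
static void hide_from_oom_killer(struct mm_struct *mm)
{
	mm_flags_set(MMF_OOM_SKIP, mm);	/* was: set_bit(MMF_OOM_SKIP, &mm->flags) */
}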
diff --git a/mm/mmap.c b/mm/mmap.c index 386429f7db5a..4bdb9ffa9e25 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -47,6 +47,7 @@ #include <linux/oom.h> #include <linux/sched/mm.h> #include <linux/ksm.h> +#include <linux/memfd.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> @@ -79,7 +80,7 @@ core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); /* Update vma->vm_page_prot to reflect vma->vm_flags. */ void vma_set_page_prot(struct vm_area_struct *vma) { - unsigned long vm_flags = vma->vm_flags; + vm_flags_t vm_flags = vma->vm_flags; pgprot_t vm_page_prot; vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); @@ -110,8 +111,7 @@ static int check_brk_limits(unsigned long addr, unsigned long len) return mlock_future_ok(current->mm, current->mm->def_flags, len) ? 0 : -EAGAIN; } -static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, - unsigned long addr, unsigned long request, unsigned long flags); + SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long newbrk, oldbrk, origbrk; @@ -127,18 +127,15 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) origbrk = mm->brk; + min_brk = mm->start_brk; #ifdef CONFIG_COMPAT_BRK /* * CONFIG_COMPAT_BRK can still be overridden by setting * randomize_va_space to 2, which will still cause mm->start_brk * to be arbitrarily shifted */ - if (current->brk_randomized) - min_brk = mm->start_brk; - else + if (!current->brk_randomized) min_brk = mm->end_data; -#else - min_brk = mm->start_brk; #endif if (brk < min_brk) goto out; @@ -228,12 +225,12 @@ static inline unsigned long round_hint_to_min(unsigned long hint) return hint; } -bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, +bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags, unsigned long bytes) { unsigned long locked_pages, limit_pages; - if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) + if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) return true; locked_pages = bytes >> PAGE_SHIFT; @@ -277,8 +274,62 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode, return true; } -/* +/** + * do_mmap() - Perform a userland memory mapping into the current process + * address space of length @len with protection bits @prot, mmap flags @flags + * (from which VMA flags will be inferred), and any additional VMA flags to + * apply @vm_flags. If this is a file-backed mapping then the file is specified + * in @file and page offset into the file via @pgoff. + * + * This function does not perform security checks on the file and assumes, if + * @uf is non-NULL, the caller has provided a list head to track unmap events + * for userfaultfd @uf. + * + * It also simply indicates whether memory population is required by setting + * @populate, which must be non-NULL, expecting the caller to actually perform + * this task itself if appropriate. + * + * This function will invoke architecture-specific (and if provided and + * relevant, file system-specific) logic to determine the most appropriate + * unmapped area in which to place the mapping if not MAP_FIXED. + * + * Callers which require userland mmap() behaviour should invoke vm_mmap(), + * which is also exported for module use. + * + * Those which require this behaviour less security checks, userfaultfd and + * populate behaviour, and who handle the mmap write lock themselves, should + * call this function. 
+ * + * Note that the returned address may reside within a merged VMA if an + * appropriate merge were to take place, so it doesn't necessarily specify the + * start of a VMA, rather only the start of a valid mapped range of length + * @len bytes, rounded down to the nearest page size. + * * The caller must write-lock current->mm->mmap_lock. + * + * @file: An optional struct file pointer describing the file which is to be + * mapped, if a file-backed mapping. + * @addr: If non-zero, hints at (or if @flags has MAP_FIXED set, specifies) the + * address at which to perform this mapping. See mmap (2) for details. Must be + * page-aligned. + * @len: The length of the mapping. Will be page-aligned and must be at least 1 + * page in size. + * @prot: Protection bits describing access required to the mapping. See mmap + * (2) for details. + * @flags: Flags specifying how the mapping should be performed, see mmap (2) + * for details. + * @vm_flags: VMA flags which should be set by default, or 0 otherwise. + * @pgoff: Page offset into the @file if file-backed, should be 0 otherwise. + * @populate: A pointer to a value which will be set to 0 if no population of + * the range is required, or the number of bytes to populate if it is. Must be + * non-NULL. See mmap (2) for details as to under what circumstances population + * of the range occurs. + * @uf: An optional pointer to a list head to track userfaultfd unmap events + * should unmapping events arise. If provided, it is up to the caller to manage + * this. + * + * Returns: Either an error, or the address at which the requested mapping has + * been performed. */ unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, @@ -291,6 +342,8 @@ unsigned long do_mmap(struct file *file, unsigned long addr, *populate = 0; + mmap_assert_write_locked(mm); + if (!len) return -EINVAL; @@ -369,6 +422,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, if (file) { struct inode *inode = file_inode(file); unsigned long flags_mask; + int err; if (!file_mmap_ok(file, inode, pgoff, len)) return -EOVERFLOW; @@ -418,7 +472,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, vm_flags &= ~VM_MAYEXEC; } - if (!file->f_op->mmap) + if (!can_mmap_file(file)) return -ENODEV; if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; @@ -427,6 +481,14 @@ unsigned long do_mmap(struct file *file, unsigned long addr, default: return -EINVAL; } + + /* + * Check to see if we are violating any seals and update VMA + * flags if necessary to avoid future seal violations. + */ + err = memfd_check_seals_mmap(file, &vm_flags); + if (err) + return (unsigned long)err; } else { switch (flags & MAP_TYPE) { case MAP_SHARED: @@ -577,115 +639,6 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) } #endif /* __ARCH_WANT_SYS_OLD_MMAP */ -/** - * unmapped_area() - Find an area between the low_limit and the high_limit with - * the correct alignment and offset, all from @info. Note: current->mm is used - * for the search. - * - * @info: The unmapped area information including the range [low_limit - - * high_limit), the alignment offset and mask. - * - * Return: A memory address or -ENOMEM. 
- */ -static unsigned long unmapped_area(struct vm_unmapped_area_info *info) -{ - unsigned long length, gap; - unsigned long low_limit, high_limit; - struct vm_area_struct *tmp; - VMA_ITERATOR(vmi, current->mm, 0); - - /* Adjust search length to account for worst case alignment overhead */ - length = info->length + info->align_mask + info->start_gap; - if (length < info->length) - return -ENOMEM; - - low_limit = info->low_limit; - if (low_limit < mmap_min_addr) - low_limit = mmap_min_addr; - high_limit = info->high_limit; -retry: - if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length)) - return -ENOMEM; - - /* - * Adjust for the gap first so it doesn't interfere with the - * later alignment. The first step is the minimum needed to - * fulill the start gap, the next steps is the minimum to align - * that. It is the minimum needed to fulill both. - */ - gap = vma_iter_addr(&vmi) + info->start_gap; - gap += (info->align_offset - gap) & info->align_mask; - tmp = vma_next(&vmi); - if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ - if (vm_start_gap(tmp) < gap + length - 1) { - low_limit = tmp->vm_end; - vma_iter_reset(&vmi); - goto retry; - } - } else { - tmp = vma_prev(&vmi); - if (tmp && vm_end_gap(tmp) > gap) { - low_limit = vm_end_gap(tmp); - vma_iter_reset(&vmi); - goto retry; - } - } - - return gap; -} - -/** - * unmapped_area_topdown() - Find an area between the low_limit and the - * high_limit with the correct alignment and offset at the highest available - * address, all from @info. Note: current->mm is used for the search. - * - * @info: The unmapped area information including the range [low_limit - - * high_limit), the alignment offset and mask. - * - * Return: A memory address or -ENOMEM. - */ -static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) -{ - unsigned long length, gap, gap_end; - unsigned long low_limit, high_limit; - struct vm_area_struct *tmp; - VMA_ITERATOR(vmi, current->mm, 0); - - /* Adjust search length to account for worst case alignment overhead */ - length = info->length + info->align_mask + info->start_gap; - if (length < info->length) - return -ENOMEM; - - low_limit = info->low_limit; - if (low_limit < mmap_min_addr) - low_limit = mmap_min_addr; - high_limit = info->high_limit; -retry: - if (vma_iter_area_highest(&vmi, low_limit, high_limit, length)) - return -ENOMEM; - - gap = vma_iter_end(&vmi) - info->length; - gap -= (gap - info->align_offset) & info->align_mask; - gap_end = vma_iter_end(&vmi); - tmp = vma_next(&vmi); - if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ - if (vm_start_gap(tmp) < gap_end) { - high_limit = vm_start_gap(tmp); - vma_iter_reset(&vmi); - goto retry; - } - } else { - tmp = vma_prev(&vmi); - if (tmp && vm_end_gap(tmp) > gap) { - high_limit = tmp->vm_start; - vma_iter_reset(&vmi); - goto retry; - } - } - - return gap; -} - /* * Determine if the allocation needs to ensure that there is no * existing mapping within it's guard gaps, for use as start_gap. 
@@ -844,12 +797,11 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, } #endif -unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp, - unsigned long addr, unsigned long len, - unsigned long pgoff, unsigned long flags, - vm_flags_t vm_flags) +unsigned long mm_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) { - if (test_bit(MMF_TOPDOWN, &mm->flags)) + if (mm_flags_test(MMF_TOPDOWN, current->mm)) return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags, vm_flags); return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags); @@ -888,13 +840,14 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, if (get_area) { addr = get_area(file, addr, len, pgoff, flags); - } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) + } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file + && !addr /* no hint */ && IS_ALIGNED(len, PMD_SIZE)) { /* Ensures that larger anonymous mappings are THP aligned. */ addr = thp_get_unmapped_area_vmflags(file, addr, len, pgoff, flags, vm_flags); } else { - addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len, + addr = mm_get_unmapped_area_vmflags(file, addr, len, pgoff, flags, vm_flags); } if (IS_ERR_VALUE(addr)) @@ -910,13 +863,10 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, } unsigned long -mm_get_unmapped_area(struct mm_struct *mm, struct file *file, - unsigned long addr, unsigned long len, +mm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { - if (test_bit(MMF_TOPDOWN, &mm->flags)) - return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0); - return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0); + return mm_get_unmapped_area_vmflags(file, addr, len, pgoff, flags, 0); } EXPORT_SYMBOL(mm_get_unmapped_area); @@ -984,211 +934,6 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, return vma; } -/* - * Verify that the stack growth is acceptable and - * update accounting. This is shared with both the - * grow-up and grow-down cases. - */ -static int acct_stack_growth(struct vm_area_struct *vma, - unsigned long size, unsigned long grow) -{ - struct mm_struct *mm = vma->vm_mm; - unsigned long new_start; - - /* address space limit tests */ - if (!may_expand_vm(mm, vma->vm_flags, grow)) - return -ENOMEM; - - /* Stack limit test */ - if (size > rlimit(RLIMIT_STACK)) - return -ENOMEM; - - /* mlock limit tests */ - if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) - return -ENOMEM; - - /* Check to ensure the stack will not grow into a hugetlb-only region */ - new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : - vma->vm_end - size; - if (is_hugepage_only_range(vma->vm_mm, new_start, size)) - return -EFAULT; - - /* - * Overcommit.. This must be the final test, as it will - * update security statistics. - */ - if (security_vm_enough_memory_mm(mm, grow)) - return -ENOMEM; - - return 0; -} - -#if defined(CONFIG_STACK_GROWSUP) -/* - * PA-RISC uses this for its stack. - * vma is the last one with address > vma->vm_end. Have to extend vma. 
- */ -static int expand_upwards(struct vm_area_struct *vma, unsigned long address) -{ - struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *next; - unsigned long gap_addr; - int error = 0; - VMA_ITERATOR(vmi, mm, vma->vm_start); - - if (!(vma->vm_flags & VM_GROWSUP)) - return -EFAULT; - - mmap_assert_write_locked(mm); - - /* Guard against exceeding limits of the address space. */ - address &= PAGE_MASK; - if (address >= (TASK_SIZE & PAGE_MASK)) - return -ENOMEM; - address += PAGE_SIZE; - - /* Enforce stack_guard_gap */ - gap_addr = address + stack_guard_gap; - - /* Guard against overflow */ - if (gap_addr < address || gap_addr > TASK_SIZE) - gap_addr = TASK_SIZE; - - next = find_vma_intersection(mm, vma->vm_end, gap_addr); - if (next && vma_is_accessible(next)) { - if (!(next->vm_flags & VM_GROWSUP)) - return -ENOMEM; - /* Check that both stack segments have the same anon_vma? */ - } - - if (next) - vma_iter_prev_range_limit(&vmi, address); - - vma_iter_config(&vmi, vma->vm_start, address); - if (vma_iter_prealloc(&vmi, vma)) - return -ENOMEM; - - /* We must make sure the anon_vma is allocated. */ - if (unlikely(anon_vma_prepare(vma))) { - vma_iter_free(&vmi); - return -ENOMEM; - } - - /* Lock the VMA before expanding to prevent concurrent page faults */ - vma_start_write(vma); - /* We update the anon VMA tree. */ - anon_vma_lock_write(vma->anon_vma); - - /* Somebody else might have raced and expanded it already */ - if (address > vma->vm_end) { - unsigned long size, grow; - - size = address - vma->vm_start; - grow = (address - vma->vm_end) >> PAGE_SHIFT; - - error = -ENOMEM; - if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { - error = acct_stack_growth(vma, size, grow); - if (!error) { - if (vma->vm_flags & VM_LOCKED) - mm->locked_vm += grow; - vm_stat_account(mm, vma->vm_flags, grow); - anon_vma_interval_tree_pre_update_vma(vma); - vma->vm_end = address; - /* Overwrite old entry in mtree. */ - vma_iter_store(&vmi, vma); - anon_vma_interval_tree_post_update_vma(vma); - - perf_event_mmap(vma); - } - } - } - anon_vma_unlock_write(vma->anon_vma); - vma_iter_free(&vmi); - validate_mm(mm); - return error; -} -#endif /* CONFIG_STACK_GROWSUP */ - -/* - * vma is the first one with address < vma->vm_start. Have to extend vma. - * mmap_lock held for writing. - */ -int expand_downwards(struct vm_area_struct *vma, unsigned long address) -{ - struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *prev; - int error = 0; - VMA_ITERATOR(vmi, mm, vma->vm_start); - - if (!(vma->vm_flags & VM_GROWSDOWN)) - return -EFAULT; - - mmap_assert_write_locked(mm); - - address &= PAGE_MASK; - if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) - return -EPERM; - - /* Enforce stack_guard_gap */ - prev = vma_prev(&vmi); - /* Check that both stack segments have the same anon_vma? */ - if (prev) { - if (!(prev->vm_flags & VM_GROWSDOWN) && - vma_is_accessible(prev) && - (address - prev->vm_end < stack_guard_gap)) - return -ENOMEM; - } - - if (prev) - vma_iter_next_range_limit(&vmi, vma->vm_start); - - vma_iter_config(&vmi, address, vma->vm_end); - if (vma_iter_prealloc(&vmi, vma)) - return -ENOMEM; - - /* We must make sure the anon_vma is allocated. */ - if (unlikely(anon_vma_prepare(vma))) { - vma_iter_free(&vmi); - return -ENOMEM; - } - - /* Lock the VMA before expanding to prevent concurrent page faults */ - vma_start_write(vma); - /* We update the anon VMA tree. 
*/ - anon_vma_lock_write(vma->anon_vma); - - /* Somebody else might have raced and expanded it already */ - if (address < vma->vm_start) { - unsigned long size, grow; - - size = vma->vm_end - address; - grow = (vma->vm_start - address) >> PAGE_SHIFT; - - error = -ENOMEM; - if (grow <= vma->vm_pgoff) { - error = acct_stack_growth(vma, size, grow); - if (!error) { - if (vma->vm_flags & VM_LOCKED) - mm->locked_vm += grow; - vm_stat_account(mm, vma->vm_flags, grow); - anon_vma_interval_tree_pre_update_vma(vma); - vma->vm_start = address; - vma->vm_pgoff -= grow; - /* Overwrite old entry in mtree. */ - vma_iter_store(&vmi, vma); - anon_vma_interval_tree_post_update_vma(vma); - - perf_event_mmap(vma); - } - } - } - anon_vma_unlock_write(vma->anon_vma); - vma_iter_free(&vmi); - validate_mm(mm); - return error; -} - /* enforced gap between the expanding stack and other mappings. */ unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; @@ -1320,58 +1065,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, return do_vmi_munmap(&vmi, mm, start, len, uf, false); } -unsigned long mmap_region(struct file *file, unsigned long addr, - unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, - struct list_head *uf) -{ - unsigned long ret; - bool writable_file_mapping = false; - - /* Check to see if MDWE is applicable. */ - if (map_deny_write_exec(vm_flags, vm_flags)) - return -EACCES; - - /* Allow architectures to sanity-check the vm_flags. */ - if (!arch_validate_flags(vm_flags)) - return -EINVAL; - - /* Map writable and ensure this isn't a sealed memfd. */ - if (file && is_shared_maywrite(vm_flags)) { - int error = mapping_map_writable(file->f_mapping); - - if (error) - return error; - writable_file_mapping = true; - } - - ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf); - - /* Clear our write mapping regardless of error. */ - if (writable_file_mapping) - mapping_unmap_writable(file->f_mapping); - - validate_mm(current->mm); - return ret; -} - -static int __vm_munmap(unsigned long start, size_t len, bool unlock) -{ - int ret; - struct mm_struct *mm = current->mm; - LIST_HEAD(uf); - VMA_ITERATOR(vmi, mm, start); - - if (mmap_write_lock_killable(mm)) - return -EINTR; - - ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); - if (ret || !unlock) - mmap_write_unlock(mm); - - userfaultfd_unmap_complete(mm, &uf); - return ret; -} - int vm_munmap(unsigned long start, size_t len) { return __vm_munmap(start, len, false); @@ -1507,89 +1200,7 @@ out: return ret; } -/* - * do_brk_flags() - Increase the brk vma if the flags match. - * @vmi: The vma iterator - * @addr: The start address - * @len: The length of the increase - * @vma: The vma, - * @flags: The VMA Flags - * - * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags - * do not match then create a new anonymous VMA. Eventually we may be able to - * do some brk-specific accounting here. - */ -static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, - unsigned long addr, unsigned long len, unsigned long flags) -{ - struct mm_struct *mm = current->mm; - - /* - * Check against address space limits by the changed size - * Note: This happens *after* clearing old mappings in some code paths. 
- */ - flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; - if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) - return -ENOMEM; - - if (mm->map_count > sysctl_max_map_count) - return -ENOMEM; - - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) - return -ENOMEM; - - /* - * Expand the existing vma if possible; Note that singular lists do not - * occur after forking, so the expand will only happen on new VMAs. - */ - if (vma && vma->vm_end == addr) { - VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr)); - - vmg.prev = vma; - /* vmi is positioned at prev, which this mode expects. */ - vmg.merge_flags = VMG_FLAG_JUST_EXPAND; - - if (vma_merge_new_range(&vmg)) - goto out; - else if (vmg_nomem(&vmg)) - goto unacct_fail; - } - - if (vma) - vma_iter_next_range(vmi); - /* create a vma struct for an anonymous mapping */ - vma = vm_area_alloc(mm); - if (!vma) - goto unacct_fail; - - vma_set_anonymous(vma); - vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); - vm_flags_init(vma, flags); - vma->vm_page_prot = vm_get_page_prot(flags); - vma_start_write(vma); - if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) - goto mas_store_fail; - - mm->map_count++; - validate_mm(mm); - ksm_add_vma(vma); -out: - perf_event_mmap(vma); - mm->total_vm += len >> PAGE_SHIFT; - mm->data_vm += len >> PAGE_SHIFT; - if (flags & VM_LOCKED) - mm->locked_vm += (len >> PAGE_SHIFT); - vm_flags_set(vma, VM_SOFTDIRTY); - return 0; - -mas_store_fail: - vm_area_free(vma); -unacct_fail: - vm_unacct_memory(len >> PAGE_SHIFT); - return -ENOMEM; -} - -int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) +int vm_brk_flags(unsigned long addr, unsigned long request, vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; @@ -1606,7 +1217,7 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) return 0; /* Until we need other flags, refuse anything except VM_EXEC. */ - if ((flags & (~VM_EXEC)) != 0) + if ((vm_flags & (~VM_EXEC)) != 0) return -EINVAL; if (mmap_write_lock_killable(mm)) @@ -1621,7 +1232,7 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) goto munmap_failed; vma = vma_prev(&vmi); - ret = do_brk_flags(&vmi, vma, addr, len, flags); + ret = do_brk_flags(&vmi, vma, addr, len, vm_flags); populate = ((mm->def_flags & VM_LOCKED) != 0); mmap_write_unlock(mm); userfaultfd_unmap_complete(mm, &uf); @@ -1659,19 +1270,18 @@ void exit_mmap(struct mm_struct *mm) goto destroy; } - lru_add_drain(); flush_cache_mm(mm); tlb_gather_mmu_fullmm(&tlb, mm); /* update_hiwater_rss(mm) here? but nobody should be looking */ /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ - unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false); + unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX); mmap_read_unlock(mm); /* * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper * because the memory has been already freed. 
*/ - set_bit(MMF_OOM_SKIP, &mm->flags); + mm_flags_set(MMF_OOM_SKIP, mm); mmap_write_lock(mm); mt_clear_in_rcu(&mm->mm_mt); vma_iter_set(&vmi, vma->vm_end); @@ -1688,7 +1298,8 @@ void exit_mmap(struct mm_struct *mm) do { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); - remove_vma(vma, /* unreachable = */ true); + vma_mark_detached(vma); + remove_vma(vma); count++; cond_resched(); vma = vma_next(&vmi); @@ -1703,48 +1314,6 @@ destroy: vm_unacct_memory(nr_accounted); } -/* Insert vm structure into process list sorted by address - * and into the inode's i_mmap tree. If vm_file is non-NULL - * then i_mmap_rwsem is taken here. - */ -int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) -{ - unsigned long charged = vma_pages(vma); - - - if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) - return -ENOMEM; - - if ((vma->vm_flags & VM_ACCOUNT) && - security_vm_enough_memory_mm(mm, charged)) - return -ENOMEM; - - /* - * The vm_pgoff of a purely anonymous vma should be irrelevant - * until its first write fault, when page's anon_vma and index - * are set. But now set the vm_pgoff it will almost certainly - * end up with (unless mremap moves it elsewhere before that - * first wfault), so /proc/pid/maps tells a consistent story. - * - * By setting it to reflect the virtual start address of the - * vma, merges and splits can happen in a seamless way, just - * using the existing file pgoff checks and manipulations. - * Similarly in do_mmap and in do_brk_flags. - */ - if (vma_is_anonymous(vma)) { - BUG_ON(vma->anon_vma); - vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; - } - - if (vma_link(mm, vma)) { - if (vma->vm_flags & VM_ACCOUNT) - vm_unacct_memory(charged); - return -ENOMEM; - } - - return 0; -} - /* * Return true if the calling process may expand its vm space by the passed * number of pages @@ -1868,7 +1437,7 @@ static vm_fault_t special_mapping_fault(struct vm_fault *vmf) static struct vm_area_struct *__install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, - unsigned long vm_flags, void *priv, + vm_flags_t vm_flags, void *priv, const struct vm_operations_struct *ops) { int ret; @@ -1879,8 +1448,10 @@ static struct vm_area_struct *__install_special_mapping( return ERR_PTR(-ENOMEM); vma_set_range(vma, addr, addr + len, 0); - vm_flags_init(vma, (vm_flags | mm->def_flags | - VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK); + vm_flags |= mm->def_flags | VM_DONTEXPAND; + if (pgtable_supports_soft_dirty()) + vm_flags |= VM_SOFTDIRTY; + vm_flags_init(vma, vm_flags & ~VM_LOCKED_MASK); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); vma->vm_ops = ops; @@ -1920,14 +1491,65 @@ bool vma_is_special_mapping(const struct vm_area_struct *vma, struct vm_area_struct *_install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, - unsigned long vm_flags, const struct vm_special_mapping *spec) + vm_flags_t vm_flags, const struct vm_special_mapping *spec) { return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, &special_mapping_vmops); } +#ifdef CONFIG_SYSCTL +#if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ + defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) +int sysctl_legacy_va_layout; +#endif + +static const struct ctl_table mmap_table[] = { + { + .procname = "max_map_count", + .data = &sysctl_max_map_count, + .maxlen = sizeof(sysctl_max_map_count), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + }, +#if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ + 
defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) + { + .procname = "legacy_va_layout", + .data = &sysctl_legacy_va_layout, + .maxlen = sizeof(sysctl_legacy_va_layout), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + }, +#endif +#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS + { + .procname = "mmap_rnd_bits", + .data = &mmap_rnd_bits, + .maxlen = sizeof(mmap_rnd_bits), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = (void *)&mmap_rnd_bits_min, + .extra2 = (void *)&mmap_rnd_bits_max, + }, +#endif +#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS + { + .procname = "mmap_rnd_compat_bits", + .data = &mmap_rnd_compat_bits, + .maxlen = sizeof(mmap_rnd_compat_bits), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = (void *)&mmap_rnd_compat_bits_min, + .extra2 = (void *)&mmap_rnd_compat_bits_max, + }, +#endif +}; +#endif /* CONFIG_SYSCTL */ + /* - * initialise the percpu counter for VM + * initialise the percpu counter for VM, initialise VMA state. */ void __init mmap_init(void) { @@ -1935,6 +1557,10 @@ void __init mmap_init(void) ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); VM_BUG_ON(ret); +#ifdef CONFIG_SYSCTL + register_sysctl_init("vm", mmap_table); +#endif + vma_state_init(); } /* @@ -2046,84 +1672,211 @@ static int __meminit init_reserve_notifier(void) subsys_initcall(init_reserve_notifier); /* - * Relocate a VMA downwards by shift bytes. There cannot be any VMAs between - * this VMA and its relocated range, which will now reside at [vma->vm_start - - * shift, vma->vm_end - shift). + * Obtain a read lock on mm->mmap_lock, if the specified address is below the + * start of the VMA, the intent is to perform a write, and it is a + * downward-growing stack, then attempt to expand the stack to contain it. + * + * This function is intended only for obtaining an argument page from an ELF + * image, and is almost certainly NOT what you want to use for any other + * purpose. + * + * IMPORTANT - VMA fields are accessed without an mmap lock being held, so the + * VMA referenced must not be linked in any user-visible tree, i.e. it must be a + * new VMA being mapped. + * + * The function assumes that addr is either contained within the VMA or below + * it, and makes no attempt to validate this value beyond that. * - * This function is almost certainly NOT what you want for anything other than - * early executable temporary stack relocation. + * Returns true if the read lock was obtained and a stack was perhaps expanded, + * false if the stack expansion failed. + * + * On stack expansion the function temporarily acquires an mmap write lock + * before downgrading it. */ -int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) +bool mmap_read_lock_maybe_expand(struct mm_struct *mm, + struct vm_area_struct *new_vma, + unsigned long addr, bool write) { - /* - * The process proceeds as follows: - * - * 1) Use shift to calculate the new vma endpoints. - * 2) Extend vma to cover both the old and new ranges. This ensures the - * arguments passed to subsequent functions are consistent. - * 3) Move vma's page tables to the new range. - * 4) Free up any cleared pgd range. - * 5) Shrink the vma to cover only the new range. 
- */ + if (!write || addr >= new_vma->vm_start) { + mmap_read_lock(mm); + return true; + } - struct mm_struct *mm = vma->vm_mm; - unsigned long old_start = vma->vm_start; - unsigned long old_end = vma->vm_end; - unsigned long length = old_end - old_start; - unsigned long new_start = old_start - shift; - unsigned long new_end = old_end - shift; - VMA_ITERATOR(vmi, mm, new_start); - VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff); - struct vm_area_struct *next; - struct mmu_gather tlb; + if (!(new_vma->vm_flags & VM_GROWSDOWN)) + return false; - BUG_ON(new_start > new_end); + mmap_write_lock(mm); + if (expand_downwards(new_vma, addr)) { + mmap_write_unlock(mm); + return false; + } - /* - * ensure there are no vmas between where we want to go - * and where we are - */ - if (vma != vma_next(&vmi)) - return -EFAULT; + mmap_write_downgrade(mm); + return true; +} - vma_iter_prev_range(&vmi); - /* - * cover the whole range: [new_start, old_end) - */ - vmg.vma = vma; - if (vma_expand(&vmg)) - return -ENOMEM; +__latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) +{ + struct vm_area_struct *mpnt, *tmp; + int retval; + unsigned long charge = 0; + LIST_HEAD(uf); + VMA_ITERATOR(vmi, mm, 0); + if (mmap_write_lock_killable(oldmm)) + return -EINTR; + flush_cache_dup_mm(oldmm); + uprobe_dup_mmap(oldmm, mm); /* - * move the page tables downwards, on failure we rely on - * process cleanup to remove whatever mess we made. + * Not linked in yet - no deadlock potential: */ - if (length != move_page_tables(vma, old_start, - vma, new_start, length, false, true)) - return -ENOMEM; + mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING); + + /* No ordering required: file already has been exposed. */ + dup_mm_exe_file(mm, oldmm); + + mm->total_vm = oldmm->total_vm; + mm->data_vm = oldmm->data_vm; + mm->exec_vm = oldmm->exec_vm; + mm->stack_vm = oldmm->stack_vm; + + /* Use __mt_dup() to efficiently build an identical maple tree. */ + retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL); + if (unlikely(retval)) + goto out; - lru_add_drain(); - tlb_gather_mmu(&tlb, mm); - next = vma_next(&vmi); - if (new_end > old_start) { + mt_clear_in_rcu(vmi.mas.tree); + for_each_vma(vmi, mpnt) { + struct file *file; + + retval = vma_start_write_killable(mpnt); + if (retval < 0) + goto loop_out; + if (mpnt->vm_flags & VM_DONTCOPY) { + retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start, + mpnt->vm_end, GFP_KERNEL); + if (retval) + goto loop_out; + + vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); + continue; + } + charge = 0; + if (mpnt->vm_flags & VM_ACCOUNT) { + unsigned long len = vma_pages(mpnt); + + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ + goto fail_nomem; + charge = len; + } + + tmp = vm_area_dup(mpnt); + if (!tmp) + goto fail_nomem; + retval = vma_dup_policy(mpnt, tmp); + if (retval) + goto fail_nomem_policy; + tmp->vm_mm = mm; + retval = dup_userfaultfd(tmp, &uf); + if (retval) + goto fail_nomem_anon_vma_fork; + if (tmp->vm_flags & VM_WIPEONFORK) { + /* + * VM_WIPEONFORK gets a clean slate in the child. + * Don't prepare anon_vma until fault since we don't + * copy page for current vma. + */ + tmp->anon_vma = NULL; + } else if (anon_vma_fork(tmp, mpnt)) + goto fail_nomem_anon_vma_fork; + vm_flags_clear(tmp, VM_LOCKED_MASK); /* - * when the old and new regions overlap clear from new_end. + * Copy/update hugetlb private vma information. */ - free_pgd_range(&tlb, new_end, old_end, new_end, - next ? 
next->vm_start : USER_PGTABLES_CEILING); + if (is_vm_hugetlb_page(tmp)) + hugetlb_dup_vma_private(tmp); + + /* + * Link the vma into the MT. After using __mt_dup(), memory + * allocation is not necessary here, so it cannot fail. + */ + vma_iter_bulk_store(&vmi, tmp); + + mm->map_count++; + + if (tmp->vm_ops && tmp->vm_ops->open) + tmp->vm_ops->open(tmp); + + file = tmp->vm_file; + if (file) { + struct address_space *mapping = file->f_mapping; + + get_file(file); + i_mmap_lock_write(mapping); + if (vma_is_shared_maywrite(tmp)) + mapping_allow_writable(mapping); + flush_dcache_mmap_lock(mapping); + /* insert tmp into the share list, just after mpnt */ + vma_interval_tree_insert_after(tmp, mpnt, + &mapping->i_mmap); + flush_dcache_mmap_unlock(mapping); + i_mmap_unlock_write(mapping); + } + + if (!(tmp->vm_flags & VM_WIPEONFORK)) + retval = copy_page_range(tmp, mpnt); + + if (retval) { + mpnt = vma_next(&vmi); + goto loop_out; + } + } + /* a new mm has just been created */ + retval = arch_dup_mmap(oldmm, mm); +loop_out: + vma_iter_free(&vmi); + if (!retval) { + mt_set_in_rcu(vmi.mas.tree); + ksm_fork(mm, oldmm); + khugepaged_fork(mm, oldmm); } else { + /* - * otherwise, clean from old_start; this is done to not touch - * the address space in [new_end, old_start) some architectures - * have constraints on va-space that make this illegal (IA64) - - * for the others its just a little faster. + * The entire maple tree has already been duplicated. If the + * mmap duplication fails, mark the failure point with + * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered, + * stop releasing VMAs that have not been duplicated after this + * point. */ - free_pgd_range(&tlb, old_start, old_end, new_end, - next ? next->vm_start : USER_PGTABLES_CEILING); + if (mpnt) { + mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1); + mas_store(&vmi.mas, XA_ZERO_ENTRY); + /* Avoid OOM iterating a broken tree */ + mm_flags_set(MMF_OOM_SKIP, mm); + } + /* + * The mm_struct is going to exit, but the locks will be dropped + * first. Set the mm_struct as unstable is advisable as it is + * not fully initialised. + */ + mm_flags_set(MMF_UNSTABLE, mm); } - tlb_finish_mmu(&tlb); +out: + mmap_write_unlock(mm); + flush_tlb_mm(oldmm); + mmap_write_unlock(oldmm); + if (!retval) + dup_userfaultfd_complete(&uf); + else + dup_userfaultfd_fail(&uf); + return retval; - vma_prev(&vmi); - /* Shrink the vma to just the new range */ - return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff); +fail_nomem_anon_vma_fork: + mpol_put(vma_policy(tmp)); +fail_nomem_policy: + vm_area_free(tmp); +fail_nomem: + retval = -ENOMEM; + vm_unacct_memory(charge); + goto loop_out; } |
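The new kerneldoc on do_mmap() above directs in-kernel users who just want userland mmap() semantics to vm_mmap(), which is exported for modules and handles the mmap write lock, security checks and population itself. A hedged sketch of such a caller; the length, protection and flag values are purely illustrative:

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/err.h>
#include <linux/printk.h>

/*
 * Map one page of private anonymous memory into current->mm via vm_mmap(),
 * rather than calling do_mmap() directly, as the kerneldoc above recommends.
 */
static unsigned long map_one_anon_page(void)
{
	unsigned long addr;

	addr = vm_mmap(NULL /* anonymous */, 0 /* no address hint */, PAGE_SIZE,
		       PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, 0 /* byte offset */);

	/* On failure the return value encodes a negative errno. */
	if (IS_ERR_VALUE(addr))
		pr_warn("vm_mmap failed: %ld\n", (long)addr);

	return addr;
}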
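mmap_read_lock_maybe_expand(), added in the last hunks, is documented as being meant for fetching argument pages while an ELF image is being set up: it takes the mmap read lock, and if the access is a write below a not-yet-visible VM_GROWSDOWN VMA it first expands the stack under a temporarily held write lock. A sketch of a caller in that spirit; struct arg_ctx and its fields are illustrative stand-ins, not the real linux_binprm layout:

#include <linux/mm.h>
#include <linux/errno.h>

/*
 * Illustrative context for an exec-time caller; the real user would be the
 * binfmt/exec code operating on a freshly created, not yet user-visible
 * stack VMA.
 */
struct arg_ctx {
	struct mm_struct *mm;
	struct vm_area_struct *stack_vma;	/* new VMA, not linked anywhere visible */
};

static int prepare_arg_access(struct arg_ctx *ctx, unsigned long addr, bool write)
{
	/*
	 * On success the mmap read lock is held; the stack may have been
	 * grown down to cover @addr under a briefly held write lock.
	 */
	if (!mmap_read_lock_maybe_expand(ctx->mm, ctx->stack_vma, addr, write))
		return -EFAULT;

	/* ... access the page here, e.g. via get_user_pages_remote() ... */

	mmap_read_unlock(ctx->mm);
	return 0;
}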