Diffstat (limited to 'mm/util.c')
-rw-r--r--	mm/util.c	885
1 file changed, 630 insertions(+), 255 deletions(-)
diff --git a/mm/util.c b/mm/util.c
index 7e43369064c8..97cae40c0209 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -12,6 +12,7 @@
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/sysctl.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
@@ -23,10 +24,15 @@
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>
+#include <linux/fsnotify.h>
+#include <linux/page_idle.h>
#include <linux/uaccess.h>
+#include <kunit/visibility.h>
+
#include "internal.h"
+#include "swap.h"
/**
* kfree_const - conditionally free memory
@@ -42,26 +48,41 @@ void kfree_const(const void *x)
EXPORT_SYMBOL(kfree_const);
/**
- * kstrdup - allocate space for and copy an existing string
- * @s: the string to duplicate
+ * __kmemdup_nul - Create a NUL-terminated string from @s, which might be unterminated.
+ * @s: The data to copy
+ * @len: The size of the data, not including the NUL terminator
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
*
- * Return: newly allocated copy of @s or %NULL in case of error
+ * Return: newly allocated copy of @s with NUL-termination or %NULL in
+ * case of error
*/
-char *kstrdup(const char *s, gfp_t gfp)
+static __always_inline char *__kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
- size_t len;
char *buf;
- if (!s)
+ /* '+1' for the NUL terminator */
+ buf = kmalloc_track_caller(len + 1, gfp);
+ if (!buf)
return NULL;
- len = strlen(s) + 1;
- buf = kmalloc_track_caller(len, gfp);
- if (buf)
- memcpy(buf, s, len);
+ memcpy(buf, s, len);
+ /* Ensure the buf is always NUL-terminated, regardless of @s. */
+ buf[len] = '\0';
return buf;
}
+
+/**
+ * kstrdup - allocate space for and copy an existing string
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Return: newly allocated copy of @s or %NULL in case of error
+ */
+noinline
+char *kstrdup(const char *s, gfp_t gfp)
+{
+ return s ? __kmemdup_nul(s, strlen(s), gfp) : NULL;
+}
EXPORT_SYMBOL(kstrdup);
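
For reference, a minimal (hypothetical) caller, not part of this patch: kstrdup() returns NULL on allocation failure or when @s is NULL, and the copy is released with kfree().

#include <linux/slab.h>
#include <linux/string.h>

static char *dup_label(const char *name)
{
	char *copy = kstrdup(name, GFP_KERNEL);

	/* NULL if name was NULL or the allocation failed. */
	return copy;		/* caller frees with kfree() */
}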
/**
@@ -96,19 +117,7 @@ EXPORT_SYMBOL(kstrdup_const);
*/
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
- size_t len;
- char *buf;
-
- if (!s)
- return NULL;
-
- len = strnlen(s, max);
- buf = kmalloc_track_caller(len+1, gfp);
- if (buf) {
- memcpy(buf, s, len);
- buf[len] = '\0';
- }
- return buf;
+ return s ? __kmemdup_nul(s, strnlen(s, max), gfp) : NULL;
}
EXPORT_SYMBOL(kstrndup);
@@ -119,18 +128,57 @@ EXPORT_SYMBOL(kstrndup);
* @len: memory region length
* @gfp: GFP mask to use
*
- * Return: newly allocated copy of @src or %NULL in case of error
+ * Return: newly allocated copy of @src or %NULL in case of error,
+ * result is physically contiguous. Use kfree() to free.
+ */
+void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
+{
+ void *p;
+
+ p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
+ if (p)
+ memcpy(p, src, len);
+ return p;
+}
+EXPORT_SYMBOL(kmemdup_noprof);
+
+/**
+ * kmemdup_array - duplicate a given array.
+ *
+ * @src: array to duplicate.
+ * @count: number of elements to duplicate from array.
+ * @element_size: size of each element of array.
+ * @gfp: GFP mask to use.
+ *
+ * Return: duplicated array of @src or %NULL in case of error,
+ * result is physically contiguous. Use kfree() to free.
+ */
+void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
+{
+ return kmemdup(src, size_mul(element_size, count), gfp);
+}
+EXPORT_SYMBOL(kmemdup_array);
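
Illustrative caller (not from this patch); size_mul() saturates on overflow, so an absurd @count fails the allocation rather than silently truncating.

#include <linux/slab.h>
#include <linux/types.h>

static u32 *dup_ids(const u32 *ids, size_t count)
{
	/* count * sizeof(*ids) is overflow-checked inside kmemdup_array(). */
	return kmemdup_array(ids, count, sizeof(*ids), GFP_KERNEL);
}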
+
+/**
+ * kvmemdup - duplicate region of memory
+ *
+ * @src: memory region to duplicate
+ * @len: memory region length
+ * @gfp: GFP mask to use
+ *
+ * Return: newly allocated copy of @src or %NULL in case of error,
+ * result may be not physically contiguous. Use kvfree() to free.
*/
-void *kmemdup(const void *src, size_t len, gfp_t gfp)
+void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
void *p;
- p = kmalloc_track_caller(len, gfp);
+ p = kvmalloc(len, gfp);
if (p)
memcpy(p, src, len);
return p;
}
-EXPORT_SYMBOL(kmemdup);
+EXPORT_SYMBOL(kvmemdup);
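
A caller sketch, assuming a @len large enough for the vmalloc fallback to matter; since the copy may be vmalloc-backed it must be freed with kvfree().

#include <linux/mm.h>
#include <linux/slab.h>

static void *dup_blob(const void *blob, size_t len)
{
	void *copy = kvmemdup(blob, len, GFP_KERNEL);

	return copy;		/* free with kvfree(), not kfree() */
}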
/**
* kmemdup_nul - Create a NUL-terminated string from unterminated data
@@ -143,19 +191,19 @@ EXPORT_SYMBOL(kmemdup);
*/
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
- char *buf;
+ return s ? __kmemdup_nul(s, len, gfp) : NULL;
+}
+EXPORT_SYMBOL(kmemdup_nul);
- if (!s)
- return NULL;
+static kmem_buckets *user_buckets __ro_after_init;
- buf = kmalloc_track_caller(len + 1, gfp);
- if (buf) {
- memcpy(buf, s, len);
- buf[len] = '\0';
- }
- return buf;
+static int __init init_user_buckets(void)
+{
+ user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);
+
+ return 0;
}
-EXPORT_SYMBOL(kmemdup_nul);
+subsys_initcall(init_user_buckets);
/**
* memdup_user - duplicate memory region from user space
@@ -170,7 +218,7 @@ void *memdup_user(const void __user *src, size_t len)
{
void *p;
- p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
+ p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -196,7 +244,7 @@ void *vmemdup_user(const void __user *src, size_t len)
{
void *p;
- p = kvmalloc(len, GFP_USER);
+ p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -252,12 +300,7 @@ void *memdup_user_nul(const void __user *src, size_t len)
{
char *p;
- /*
- * Always use GFP_KERNEL, since copy_from_user() can sleep and
- * cause pagefault, which makes it pointless to use GFP_NOFS
- * or GFP_ATOMIC.
- */
- p = kmalloc_track_caller(len + 1, GFP_KERNEL);
+ p = kmem_buckets_alloc_track_caller(user_buckets, len + 1, GFP_USER | __GFP_NOWARN);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -271,40 +314,8 @@ void *memdup_user_nul(const void __user *src, size_t len)
}
EXPORT_SYMBOL(memdup_user_nul);
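
A typical (hypothetical) consumer, e.g. a write handler copying a user string; note that the memdup_user*() helpers return ERR_PTR() rather than NULL on failure.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	char *kbuf;

	/* A real handler would bound count before copying. */
	kbuf = memdup_user_nul(ubuf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	/* kbuf holds count bytes plus a NUL terminator; parse it here. */
	kfree(kbuf);
	return count;
}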
-void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev)
-{
- struct vm_area_struct *next;
-
- vma->vm_prev = prev;
- if (prev) {
- next = prev->vm_next;
- prev->vm_next = vma;
- } else {
- next = mm->mmap;
- mm->mmap = vma;
- }
- vma->vm_next = next;
- if (next)
- next->vm_prev = vma;
-}
-
-void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
-{
- struct vm_area_struct *prev, *next;
-
- next = vma->vm_next;
- prev = vma->vm_prev;
- if (prev)
- prev->vm_next = next;
- else
- mm->mmap = next;
- if (next)
- next->vm_prev = prev;
-}
-
/* Check if the vma is being used as a stack by this task */
-int vma_is_stack_for_current(struct vm_area_struct *vma)
+int vma_is_stack_for_current(const struct vm_area_struct *vma)
{
struct task_struct * __maybe_unused t = current;
@@ -343,8 +354,40 @@ unsigned long randomize_stack_top(unsigned long stack_top)
#endif
}
+/**
+ * randomize_page - Generate a random, page aligned address
+ * @start: The smallest acceptable address the caller will take.
+ * @range: The size of the area, starting at @start, within which the
+ * random address must fall.
+ *
+ * If @start + @range would overflow, @range is capped.
+ *
+ * NOTE: Historical use of randomize_range, which this replaces, presumed that
+ * @start was already page aligned. We now align it regardless.
+ *
+ * Return: A page aligned address within [start, start + range). On error,
+ * @start is returned.
+ */
+unsigned long randomize_page(unsigned long start, unsigned long range)
+{
+ if (!PAGE_ALIGNED(start)) {
+ range -= PAGE_ALIGN(start) - start;
+ start = PAGE_ALIGN(start);
+ }
+
+ if (start > ULONG_MAX - range)
+ range = ULONG_MAX - start;
+
+ range >>= PAGE_SHIFT;
+
+ if (range == 0)
+ return start;
+
+ return start + (get_random_long() % range << PAGE_SHIFT);
+}
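
For example, on a 4 KiB-page system a call with start = 0x1000 and range = 4 * PAGE_SIZE leaves four page-sized slots, so the result is one of 0x1000, 0x2000, 0x3000 or 0x4000, always below start + range.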
+
#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
-unsigned long arch_randomize_brk(struct mm_struct *mm)
+unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
/* Is the current task 32bit ? */
if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
@@ -367,12 +410,15 @@ unsigned long arch_mmap_rnd(void)
return rnd << PAGE_SHIFT;
}
-static int mmap_is_legacy(struct rlimit *rlim_stack)
+static int mmap_is_legacy(const struct rlimit *rlim_stack)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
- if (rlim_stack->rlim_cur == RLIM_INFINITY)
+ /* On parisc the stack always grows up - so an unlimited stack should
+ * not be an indicator to use the legacy memory layout. */
+ if (rlim_stack->rlim_cur == RLIM_INFINITY &&
+ !IS_ENABLED(CONFIG_STACK_GROWSUP))
return 1;
return sysctl_legacy_va_layout;
@@ -385,8 +431,17 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+static unsigned long mmap_base(const unsigned long rnd, const struct rlimit *rlim_stack)
{
+#ifdef CONFIG_STACK_GROWSUP
+ /*
+ * For an upwards growing stack the calculation is much simpler.
+ * Memory for the maximum stack size is reserved at the top of the
+ * task. mmap_base starts directly below the stack and grows
+ * downwards.
+ */
+ return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
+#else
unsigned long gap = rlim_stack->rlim_cur;
unsigned long pad = stack_guard_gap;
@@ -398,15 +453,16 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
if (gap + pad > gap)
gap += pad;
- if (gap < MIN_GAP)
+ if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
gap = MIN_GAP;
else if (gap > MAX_GAP)
gap = MAX_GAP;
return PAGE_ALIGN(STACK_TOP - gap - rnd);
+#endif
}
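
Worked example with typical defaults (4 KiB pages, 8 MiB RLIMIT_STACK, 1 MiB stack_guard_gap): gap = 9 MiB, which is below MIN_GAP (128 MiB), so the base becomes PAGE_ALIGN(STACK_TOP - 128 MiB - rnd).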
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
unsigned long random_factor = 0UL;
@@ -415,19 +471,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
if (mmap_is_legacy(rlim_stack)) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
- mm->get_unmapped_area = arch_get_unmapped_area;
+ mm_flags_clear(MMF_TOPDOWN, mm);
} else {
mm->mmap_base = mmap_base(random_factor, rlim_stack);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm_flags_set(MMF_TOPDOWN, mm);
}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
+ mm_flags_clear(MMF_TOPDOWN, mm);
}
#endif
+#ifdef CONFIG_MMU
+EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
+#endif
/**
* __account_locked_vm - account locked pages to an mm's locked_vm
@@ -445,7 +504,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
* * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
*/
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
- struct task_struct *task, bool bypass_rlim)
+ const struct task_struct *task, bool bypass_rlim)
{
unsigned long locked_vm, limit;
int ret = 0;
@@ -507,16 +566,19 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long pgoff)
{
+ loff_t off = (loff_t)pgoff << PAGE_SHIFT;
unsigned long ret;
struct mm_struct *mm = current->mm;
unsigned long populate;
LIST_HEAD(uf);
ret = security_mmap_file(file, prot, flag);
+ if (!ret)
+ ret = fsnotify_mmap_perm(file, prot, off, len);
if (!ret) {
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
+ ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
&uf);
mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
@@ -526,6 +588,23 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
return ret;
}
+/*
+ * Perform a userland memory mapping into the current process address space. See
+ * the comment for do_mmap() for more details on this operation in general.
+ *
+ * This differs from do_mmap() in that:
+ *
+ * a. An offset parameter is provided rather than pgoff; it is checked both
+ *    for overflow and for page alignment.
+ * b. mmap locking is performed on the caller's behalf.
+ * c. Userfaultfd unmap events and memory population are handled.
+ *
+ * This means that this function performs essentially the same work as if
+ * userland were invoking mmap(2).
+ *
+ * Returns either an error, or the address at which the requested mapping has
+ * been performed.
+ */
unsigned long vm_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset)
@@ -540,153 +619,62 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
EXPORT_SYMBOL(vm_mmap);
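
An illustrative in-kernel caller (not from this patch), mapping the first 1 MiB of @file read/write into the current task:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sizes.h>

static long map_window(struct file *file)
{
	unsigned long addr = vm_mmap(file, 0, SZ_1M, PROT_READ | PROT_WRITE,
				     MAP_SHARED, 0);

	if (IS_ERR_VALUE(addr))
		return (long)addr;	/* negative errno */
	return 0;
}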
/**
- * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
- * failure, fall back to non-contiguous (vmalloc) allocation.
- * @size: size of the request.
- * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
- * @node: numa node to allocate from
- *
- * Uses kmalloc to get the memory but if the allocation fails then falls back
- * to the vmalloc allocator. Use kvfree for freeing the memory.
- *
- * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
- * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
- * preferable to the vmalloc fallback, due to visible performance drawbacks.
- *
- * Return: pointer to the allocated memory of %NULL in case of failure
+ * __vmalloc_array - allocate memory for a virtually contiguous array.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kmalloc).
*/
-void *kvmalloc_node(size_t size, gfp_t flags, int node)
+void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
- gfp_t kmalloc_flags = flags;
- void *ret;
-
- /*
- * We want to attempt a large physically contiguous block first because
- * it is less likely to fragment multiple larger blocks and therefore
- * contribute to a long term fragmentation less than vmalloc fallback.
- * However make sure that larger requests are not too disruptive - no
- * OOM killer and no allocation failure warnings as we have a fallback.
- */
- if (size > PAGE_SIZE) {
- kmalloc_flags |= __GFP_NOWARN;
+ size_t bytes;
- if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
- kmalloc_flags |= __GFP_NORETRY;
-
- /* nofail semantic is implemented by the vmalloc fallback */
- kmalloc_flags &= ~__GFP_NOFAIL;
- }
-
- ret = kmalloc_node(size, kmalloc_flags, node);
-
- /*
- * It doesn't really make sense to fallback to vmalloc for sub page
- * requests
- */
- if (ret || size <= PAGE_SIZE)
- return ret;
-
- /* Don't even allow crazy sizes */
- if (WARN_ON_ONCE(size > INT_MAX))
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
-
- return __vmalloc_node(size, 1, flags, node,
- __builtin_return_address(0));
+ return __vmalloc_noprof(bytes, flags);
}
-EXPORT_SYMBOL(kvmalloc_node);
+EXPORT_SYMBOL(__vmalloc_array_noprof);
/**
- * kvfree() - Free memory.
- * @addr: Pointer to allocated memory.
- *
- * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
- * It is slightly more efficient to use kfree() or vfree() if you are certain
- * that you know which one to use.
- *
- * Context: Either preemptible task context or not-NMI interrupt.
+ * vmalloc_array - allocate memory for a virtually contiguous array.
+ * @n: number of elements.
+ * @size: element size.
*/
-void kvfree(const void *addr)
+void *vmalloc_array_noprof(size_t n, size_t size)
{
- if (is_vmalloc_addr(addr))
- vfree(addr);
- else
- kfree(addr);
+ return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
-EXPORT_SYMBOL(kvfree);
+EXPORT_SYMBOL(vmalloc_array_noprof);
/**
- * kvfree_sensitive - Free a data object containing sensitive information.
- * @addr: address of the data object to be freed.
- * @len: length of the data object.
- *
- * Use the special memzero_explicit() function to clear the content of a
- * kvmalloc'ed object containing sensitive data to make sure that the
- * compiler won't optimize out the data clearing.
+ * __vcalloc - allocate and zero memory for a virtually contiguous array.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kmalloc).
*/
-void kvfree_sensitive(const void *addr, size_t len)
+void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
- if (likely(!ZERO_OR_NULL_PTR(addr))) {
- memzero_explicit((void *)addr, len);
- kvfree(addr);
- }
-}
-EXPORT_SYMBOL(kvfree_sensitive);
-
-void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
-{
- void *newp;
-
- if (oldsize >= newsize)
- return (void *)p;
- newp = kvmalloc(newsize, flags);
- if (!newp)
- return NULL;
- memcpy(newp, p, oldsize);
- kvfree(p);
- return newp;
-}
-EXPORT_SYMBOL(kvrealloc);
-
-/* Neutral page->mapping pointer to address_space or anon_vma or other */
-void *page_rmapping(struct page *page)
-{
- return folio_raw_mapping(page_folio(page));
+ return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
+EXPORT_SYMBOL(__vcalloc_noprof);
/**
- * folio_mapped - Is this folio mapped into userspace?
- * @folio: The folio.
- *
- * Return: True if any page in this folio is referenced by user page tables.
+ * vcalloc - allocate and zero memory for a virtually contiguous array.
+ * @n: number of elements.
+ * @size: element size.
*/
-bool folio_mapped(struct folio *folio)
+void *vcalloc_noprof(size_t n, size_t size)
{
- long i, nr;
-
- if (!folio_test_large(folio))
- return atomic_read(&folio->_mapcount) >= 0;
- if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
- return true;
- if (folio_test_hugetlb(folio))
- return false;
-
- nr = folio_nr_pages(folio);
- for (i = 0; i < nr; i++) {
- if (atomic_read(&folio_page(folio, i)->_mapcount) >= 0)
- return true;
- }
- return false;
+ return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
-EXPORT_SYMBOL(folio_mapped);
+EXPORT_SYMBOL(vcalloc_noprof);
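
These _noprof variants back the vmalloc_array()/vcalloc() wrappers used with allocation profiling; a caller sketch with a purely hypothetical element type:

#include <linux/types.h>
#include <linux/vmalloc.h>

struct demo_entry { u64 key, val; };	/* hypothetical */

static struct demo_entry *alloc_entries(size_t n)
{
	/* n * sizeof(*tbl) is overflow-checked; memory is zeroed. */
	struct demo_entry *tbl = vcalloc(n, sizeof(*tbl));

	return tbl;		/* free with vfree() */
}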
-struct anon_vma *page_anon_vma(struct page *page)
+struct anon_vma *folio_anon_vma(const struct folio *folio)
{
- struct folio *folio = page_folio(page);
unsigned long mapping = (unsigned long)folio->mapping;
- if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
+ if ((mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
return NULL;
- return (void *)(mapping - PAGE_MAPPING_ANON);
+ return (void *)(mapping - FOLIO_MAPPING_ANON);
}
/**
@@ -701,7 +689,7 @@ struct anon_vma *page_anon_vma(struct page *page)
* You can call this for folios which aren't in the swap cache or page
* cache and it will return NULL.
*/
-struct address_space *folio_mapping(struct folio *folio)
+struct address_space *folio_mapping(const struct folio *folio)
{
struct address_space *mapping;
@@ -710,36 +698,16 @@ struct address_space *folio_mapping(struct folio *folio)
return NULL;
if (unlikely(folio_test_swapcache(folio)))
- return swap_address_space(folio_swap_entry(folio));
+ return swap_address_space(folio->swap);
mapping = folio->mapping;
- if ((unsigned long)mapping & PAGE_MAPPING_ANON)
+ if ((unsigned long)mapping & FOLIO_MAPPING_FLAGS)
return NULL;
- return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
+ return mapping;
}
EXPORT_SYMBOL(folio_mapping);
-/* Slow path of page_mapcount() for compound pages */
-int __page_mapcount(struct page *page)
-{
- int ret;
-
- ret = atomic_read(&page->_mapcount) + 1;
- /*
- * For file THP page->_mapcount contains total number of mapping
- * of the page: no need to look into compound_mapcount.
- */
- if (!PageAnon(page) && !PageHuge(page))
- return ret;
- page = compound_head(page);
- ret += atomic_read(compound_mapcount_ptr(page)) + 1;
- if (PageDoubleMap(page))
- ret--;
- return ret;
-}
-EXPORT_SYMBOL_GPL(__page_mapcount);
-
/**
* folio_copy - Copy the contents of one folio to another.
* @dst: Folio to copy to.
@@ -762,16 +730,36 @@ void folio_copy(struct folio *dst, struct folio *src)
cond_resched();
}
}
+EXPORT_SYMBOL(folio_copy);
+
+int folio_mc_copy(struct folio *dst, struct folio *src)
+{
+ long nr = folio_nr_pages(src);
+ long i = 0;
+
+ for (;;) {
+ if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
+ return -EHWPOISON;
+ if (++i == nr)
+ break;
+ cond_resched();
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(folio_mc_copy);
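
Sketch of the intended use (e.g. a migration copy path): unlike folio_copy(), the machine-check-aware variant can fail, and the caller must handle -EHWPOISON.

#include <linux/mm.h>

static int copy_folio_safe(struct folio *dst, struct folio *src)
{
	int err = folio_mc_copy(dst, src);

	if (err)	/* -EHWPOISON: a source page could not be read */
		return err;
	return 0;
}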
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
-int sysctl_overcommit_ratio __read_mostly = 50;
-unsigned long sysctl_overcommit_kbytes __read_mostly;
+static int sysctl_overcommit_ratio __read_mostly = 50;
+static unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
-int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos)
+#ifdef CONFIG_SYSCTL
+
+static int overcommit_ratio_handler(const struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
@@ -786,8 +774,8 @@ static void sync_overcommit_as(struct work_struct *dummy)
percpu_counter_sync(&vm_committed_as);
}
-int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos)
+static int overcommit_policy_handler(const struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
int new_policy = -1;
@@ -822,8 +810,8 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
return ret;
}
-int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos)
+static int overcommit_kbytes_handler(const struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
@@ -833,6 +821,54 @@ int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
return ret;
}
+static const struct ctl_table util_sysctl_table[] = {
+ {
+ .procname = "overcommit_memory",
+ .data = &sysctl_overcommit_memory,
+ .maxlen = sizeof(sysctl_overcommit_memory),
+ .mode = 0644,
+ .proc_handler = overcommit_policy_handler,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+ {
+ .procname = "overcommit_ratio",
+ .data = &sysctl_overcommit_ratio,
+ .maxlen = sizeof(sysctl_overcommit_ratio),
+ .mode = 0644,
+ .proc_handler = overcommit_ratio_handler,
+ },
+ {
+ .procname = "overcommit_kbytes",
+ .data = &sysctl_overcommit_kbytes,
+ .maxlen = sizeof(sysctl_overcommit_kbytes),
+ .mode = 0644,
+ .proc_handler = overcommit_kbytes_handler,
+ },
+ {
+ .procname = "user_reserve_kbytes",
+ .data = &sysctl_user_reserve_kbytes,
+ .maxlen = sizeof(sysctl_user_reserve_kbytes),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {
+ .procname = "admin_reserve_kbytes",
+ .data = &sysctl_admin_reserve_kbytes,
+ .maxlen = sizeof(sysctl_admin_reserve_kbytes),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+};
+
+static int __init init_vm_util_sysctls(void)
+{
+ register_sysctl_init("vm", util_sysctl_table);
+ return 0;
+}
+subsys_initcall(init_vm_util_sysctls);
+#endif /* CONFIG_SYSCTL */
+
/*
* Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
*/
@@ -881,7 +917,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
* succeed and -ENOMEM implies there is not.
*
* We currently support three overcommit policies, which are set via the
- * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
+ * vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst
*
* Strict overcommit modes added 2002 Feb 26 by Alan Cox.
* Additional code 2002 Jul 20 by Robert Love.
@@ -891,9 +927,10 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
* Note this is a helper function intended to be used by LSMs which
* wish to use this logic.
*/
-int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin)
{
long allowed;
+ unsigned long bytes_failed;
vm_acct_memory(pages);
@@ -928,6 +965,9 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
if (percpu_counter_read_positive(&vm_committed_as) < allowed)
return 0;
error:
+ bytes_failed = pages << PAGE_SHIFT;
+ pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
+ __func__, current->pid, current->comm, bytes_failed);
vm_unacct_memory(pages);
return -ENOMEM;
@@ -997,11 +1037,11 @@ int __weak memcmp_pages(struct page *page1, struct page *page2)
char *addr1, *addr2;
int ret;
- addr1 = kmap_atomic(page1);
- addr2 = kmap_atomic(page2);
+ addr1 = kmap_local_page(page1);
+ addr2 = kmap_local_page(page2);
ret = memcmp(addr1, addr2, PAGE_SIZE);
- kunmap_atomic(addr2);
- kunmap_atomic(addr1);
+ kunmap_local(addr2);
+ kunmap_local(addr1);
return ret;
}
@@ -1021,15 +1061,15 @@ void mem_dump_obj(void *object)
{
const char *type;
- if (kmem_valid_obj(object)) {
- kmem_dump_obj(object);
+ if (kmem_dump_obj(object))
return;
- }
if (vmalloc_dump_obj(object))
return;
- if (virt_addr_valid(object))
+ if (is_vmalloc_addr(object))
+ type = "vmalloc memory";
+ else if (virt_addr_valid(object))
type = "non-slab/vmalloc memory";
else if (object == NULL)
type = "NULL pointer";
@@ -1083,7 +1123,7 @@ void page_offline_end(void)
}
EXPORT_SYMBOL(page_offline_end);
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
long i, nr = folio_nr_pages(folio);
@@ -1093,3 +1133,338 @@ void flush_dcache_folio(struct folio *folio)
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif
+
+/**
+ * __compat_vma_mmap() - See description for compat_vma_mmap()
+ * for details. This is the same operation, only with a specific file operations
+ * struct which may or may not be the same as vma->vm_file->f_op.
+ * @f_op: The file operations whose .mmap_prepare() hook is specified.
+ * @file: The file which backs or will back the mapping.
+ * @vma: The VMA to apply the .mmap_prepare() hook to.
+ * Returns: 0 on success or error.
+ */
+int __compat_vma_mmap(const struct file_operations *f_op,
+ struct file *file, struct vm_area_struct *vma)
+{
+ struct vm_area_desc desc = {
+ .mm = vma->vm_mm,
+ .file = file,
+ .start = vma->vm_start,
+ .end = vma->vm_end,
+
+ .pgoff = vma->vm_pgoff,
+ .vm_file = vma->vm_file,
+ .vm_flags = vma->vm_flags,
+ .page_prot = vma->vm_page_prot,
+
+ .action.type = MMAP_NOTHING, /* Default */
+ };
+ int err;
+
+ err = f_op->mmap_prepare(&desc);
+ if (err)
+ return err;
+
+ mmap_action_prepare(&desc.action, &desc);
+ set_vma_from_desc(vma, &desc);
+ return mmap_action_complete(&desc.action, vma);
+}
+EXPORT_SYMBOL(__compat_vma_mmap);
+
+/**
+ * compat_vma_mmap() - Apply the file's .mmap_prepare() hook to an
+ * existing VMA and execute any requested actions.
+ * @file: The file which possesses an f_op->mmap_prepare() hook.
+ * @vma: The VMA to apply the .mmap_prepare() hook to.
+ *
+ * Ordinarily, .mmap_prepare() is invoked directly upon mmap(). However, certain
+ * stacked filesystems invoke a nested mmap hook of an underlying file.
+ *
+ * Until all filesystems are converted to use .mmap_prepare(), we must be
+ * conservative and continue to invoke these stacked filesystems using the
+ * deprecated .mmap() hook.
+ *
+ * However we have a problem if the underlying file system possesses an
+ * .mmap_prepare() hook, as we are in a different context when we invoke the
+ * .mmap() hook, already having a VMA to deal with.
+ *
+ * compat_vma_mmap() is a compatibility function that takes VMA state,
+ * establishes a struct vm_area_desc descriptor, passes it to the underlying
+ * .mmap_prepare() hook and applies any changes performed by it.
+ *
+ * Once the conversion of filesystems is complete this function will no longer
+ * be required and will be removed.
+ *
+ * Returns: 0 on success or error.
+ */
+int compat_vma_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return __compat_vma_mmap(file->f_op, file, vma);
+}
+EXPORT_SYMBOL(compat_vma_mmap);
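
A hedged sketch of how a stacked filesystem's legacy .mmap() hook might bridge to an underlying file; the private_data lookup for the real file is purely illustrative.

#include <linux/fs.h>
#include <linux/mm.h>

static int stackfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct file *realfile = file->private_data;	/* hypothetical lookup */

	if (realfile->f_op->mmap_prepare)
		return compat_vma_mmap(realfile, vma);
	return call_mmap(realfile, vma);
}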
+
+static void set_ps_flags(struct page_snapshot *ps, const struct folio *folio,
+ const struct page *page)
+{
+ /*
+ * Only the first page of a high-order buddy page has PageBuddy() set.
+ * So we have to check manually whether this page is part of a high-
+ * order buddy page.
+ */
+ if (PageBuddy(page))
+ ps->flags |= PAGE_SNAPSHOT_PG_BUDDY;
+ else if (page_count(page) == 0 && is_free_buddy_page(page))
+ ps->flags |= PAGE_SNAPSHOT_PG_BUDDY;
+
+ if (folio_test_idle(folio))
+ ps->flags |= PAGE_SNAPSHOT_PG_IDLE;
+}
+
+/**
+ * snapshot_page() - Create a snapshot of a struct page
+ * @ps: Pointer to a struct page_snapshot to store the page snapshot
+ * @page: The page to snapshot
+ *
+ * Create a snapshot of the page and store both its struct page and struct
+ * folio representations in @ps.
+ *
+ * A snapshot is marked as "faithful" if the compound state of @page was
+ * stable and allowed safe reconstruction of the folio representation. In
+ * rare cases where this is not possible (e.g. due to folio splitting),
+ * snapshot_page() falls back to treating @page as a single page and the
+ * snapshot is marked as "unfaithful". The snapshot_page_is_faithful()
+ * helper can be used to check for this condition.
+ */
+void snapshot_page(struct page_snapshot *ps, const struct page *page)
+{
+ unsigned long head, nr_pages = 1;
+ struct folio *foliop;
+ int loops = 5;
+
+ ps->pfn = page_to_pfn(page);
+ ps->flags = PAGE_SNAPSHOT_FAITHFUL;
+
+again:
+ memset(&ps->folio_snapshot, 0, sizeof(struct folio));
+ memcpy(&ps->page_snapshot, page, sizeof(*page));
+ head = ps->page_snapshot.compound_head;
+ if ((head & 1) == 0) {
+ ps->idx = 0;
+ foliop = (struct folio *)&ps->page_snapshot;
+ if (!folio_test_large(foliop)) {
+ set_ps_flags(ps, page_folio(page), page);
+ memcpy(&ps->folio_snapshot, foliop,
+ sizeof(struct page));
+ return;
+ }
+ foliop = (struct folio *)page;
+ } else {
+ foliop = (struct folio *)(head - 1);
+ ps->idx = folio_page_idx(foliop, page);
+ }
+
+ if (ps->idx < MAX_FOLIO_NR_PAGES) {
+ memcpy(&ps->folio_snapshot, foliop, 2 * sizeof(struct page));
+ nr_pages = folio_nr_pages(&ps->folio_snapshot);
+ if (nr_pages > 1)
+ memcpy(&ps->folio_snapshot.__page_2, &foliop->__page_2,
+ sizeof(struct page));
+ set_ps_flags(ps, foliop, page);
+ }
+
+ if (ps->idx > nr_pages) {
+ if (loops-- > 0)
+ goto again;
+ clear_compound_head(&ps->page_snapshot);
+ foliop = (struct folio *)&ps->page_snapshot;
+ memcpy(&ps->folio_snapshot, foliop, sizeof(struct page));
+ ps->flags = 0;
+ ps->idx = 0;
+ }
+}
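
Illustrative diagnostic use; snapshot_page_is_faithful() is the helper named in the kerneldoc above, and the struct fields match those filled in here.

#include <linux/mm.h>
#include <linux/printk.h>

static void dump_pfn(const struct page *page)
{
	struct page_snapshot ps;

	snapshot_page(&ps, page);
	if (snapshot_page_is_faithful(&ps))
		pr_info("pfn %#lx: order %u folio\n", ps.pfn,
			folio_order(&ps.folio_snapshot));
	else
		pr_info("pfn %#lx: treated as a single page\n", ps.pfn);
}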
+
+static int mmap_action_finish(struct mmap_action *action,
+ const struct vm_area_struct *vma, int err)
+{
+ /*
+ * If an error occurs, unmap the VMA altogether and return an error. We
+ * only clear the newly allocated VMA, since this function is only
+ * invoked if we do NOT merge, so we only clean up the VMA we created.
+ */
+ if (err) {
+ const size_t len = vma_pages(vma) << PAGE_SHIFT;
+
+ do_munmap(current->mm, vma->vm_start, len, NULL);
+
+ if (action->error_hook) {
+ /* We may want to filter the error. */
+ err = action->error_hook(err);
+
+ /* The caller should not clear the error. */
+ VM_WARN_ON_ONCE(!err);
+ }
+ return err;
+ }
+
+ if (action->success_hook)
+ return action->success_hook(vma);
+
+ return 0;
+}
+
+#ifdef CONFIG_MMU
+/**
+ * mmap_action_prepare - Perform preparatory setup for a VMA descriptor
+ * action which needs to be performed.
+ * @action: The action to perform.
+ * @desc: The VMA descriptor to prepare for @action.
+ */
+void mmap_action_prepare(struct mmap_action *action,
+ struct vm_area_desc *desc)
+{
+ switch (action->type) {
+ case MMAP_NOTHING:
+ break;
+ case MMAP_REMAP_PFN:
+ remap_pfn_range_prepare(desc, action->remap.start_pfn);
+ break;
+ case MMAP_IO_REMAP_PFN:
+ io_remap_pfn_range_prepare(desc, action->remap.start_pfn,
+ action->remap.size);
+ break;
+ }
+}
+EXPORT_SYMBOL(mmap_action_prepare);
+
+/**
+ * mmap_action_complete - Execute VMA descriptor action.
+ * @action: The action to perform.
+ * @vma: The VMA to perform the action upon.
+ *
+ * Similar to mmap_action_prepare().
+ *
+ * Return: 0 on success, or error, at which point the VMA will be unmapped.
+ */
+int mmap_action_complete(struct mmap_action *action,
+ struct vm_area_struct *vma)
+{
+ int err = 0;
+
+ switch (action->type) {
+ case MMAP_NOTHING:
+ break;
+ case MMAP_REMAP_PFN:
+ err = remap_pfn_range_complete(vma, action->remap.start,
+ action->remap.start_pfn, action->remap.size,
+ action->remap.pgprot);
+ break;
+ case MMAP_IO_REMAP_PFN:
+ err = io_remap_pfn_range_complete(vma, action->remap.start,
+ action->remap.start_pfn, action->remap.size,
+ action->remap.pgprot);
+ break;
+ }
+
+ return mmap_action_finish(action, vma, err);
+}
+EXPORT_SYMBOL(mmap_action_complete);
+#else
+void mmap_action_prepare(struct mmap_action *action,
+ struct vm_area_desc *desc)
+{
+ switch (action->type) {
+ case MMAP_NOTHING:
+ break;
+ case MMAP_REMAP_PFN:
+ case MMAP_IO_REMAP_PFN:
+ WARN_ON_ONCE(1); /* nommu cannot handle these. */
+ break;
+ }
+}
+EXPORT_SYMBOL(mmap_action_prepare);
+
+int mmap_action_complete(struct mmap_action *action,
+ struct vm_area_struct *vma)
+{
+ int err = 0;
+
+ switch (action->type) {
+ case MMAP_NOTHING:
+ break;
+ case MMAP_REMAP_PFN:
+ case MMAP_IO_REMAP_PFN:
+ WARN_ON_ONCE(1); /* nommu cannot handle this. */
+
+ err = -EINVAL;
+ break;
+ }
+
+ return mmap_action_finish(action, vma, err);
+}
+EXPORT_SYMBOL(mmap_action_complete);
+#endif
+
+#ifdef CONFIG_MMU
+/**
+ * folio_pte_batch - detect a PTE batch for a large folio
+ * @folio: The large folio to detect a PTE batch for.
+ * @ptep: Page table pointer for the first entry.
+ * @pte: Page table entry for the first page.
+ * @max_nr: The maximum number of table entries to consider.
+ *
+ * This is a simplified variant of folio_pte_batch_flags().
+ *
+ * Detect a PTE batch: consecutive (present) PTEs that map consecutive
+ * pages of the same large folio in a single VMA and a single page table.
+ *
+ * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
+ * the accessed bit, writable bit, dirty bit and soft-dirty bit.
+ *
+ * ptep must map any page of the folio. max_nr must be at least one and
+ * must be limited by the caller so scanning cannot exceed a single VMA and
+ * a single page table.
+ *
+ * Return: the number of table entries in the batch.
+ */
+unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
+ unsigned int max_nr)
+{
+ return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr, 0);
+}
+#endif /* CONFIG_MMU */
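
A hedged usage sketch: the caller holds the PTE lock and has already bounded max_nr to the current VMA and page table, as required above.

#include <linux/mm.h>
#include <linux/pgtable.h>

static unsigned int demo_batch(struct folio *folio, pte_t *ptep,
			       unsigned int max_nr)
{
	pte_t pte = ptep_get(ptep);

	/* Small folios cannot form a batch. */
	if (!folio_test_large(folio))
		return 1;
	return folio_pte_batch(folio, ptep, pte, max_nr);
}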
+
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+/**
+ * page_range_contiguous - test whether the page range is contiguous
+ * @page: the start of the page range.
+ * @nr_pages: the number of pages in the range.
+ *
+ * Test whether the page range is contiguous, such that the pages can be
+ * iterated naively, corresponding to iterating a contiguous PFN range.
+ *
+ * This function should primarily be used only for debug checks, or when
+ * working with page ranges that are not naturally contiguous (unlike, e.g.,
+ * pages within a folio, which are).
+ *
+ * Returns true if contiguous, otherwise false.
+ */
+bool page_range_contiguous(const struct page *page, unsigned long nr_pages)
+{
+ const unsigned long start_pfn = page_to_pfn(page);
+ const unsigned long end_pfn = start_pfn + nr_pages;
+ unsigned long pfn;
+
+ /*
+ * The memmap is allocated per memory section, so no need to check
+ * within the first section. However, we need to check each other
+ * spanned memory section once, making sure the first page in a
+ * section could similarly be reached by just iterating pages.
+ */
+ for (pfn = ALIGN(start_pfn, PAGES_PER_SECTION);
+ pfn < end_pfn; pfn += PAGES_PER_SECTION)
+ if (unlikely(page + (pfn - start_pfn) != pfn_to_page(pfn)))
+ return false;
+ return true;
+}
+EXPORT_SYMBOL(page_range_contiguous);
+#endif
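
Illustrative debug check over a range that may cross a SPARSEMEM section boundary:

#include <linux/mm.h>
#include <linux/printk.h>

static void check_range(const struct page *page, unsigned long nr_pages)
{
	if (!page_range_contiguous(page, nr_pages))
		pr_warn("memmap not contiguous at pfn %#lx (+%lu pages)\n",
			page_to_pfn(page), nr_pages);
}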