Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  6238
1 file changed, 4201 insertions(+), 2037 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 243e710c6039..f7c565f11a98 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1,12 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009 Red Hat, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/mm.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
@@ -14,24 +16,45 @@
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
-#include <linux/kthread.h>
+#include <linux/swapops.h>
+#include <linux/backing-dev.h>
+#include <linux/dax.h>
+#include <linux/mm_types.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
+#include <linux/memremap.h>
#include <linux/pagemap.h>
+#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
+#include <linux/userfaultfd_k.h>
+#include <linux/page_idle.h>
+#include <linux/shmem_fs.h>
+#include <linux/oom.h>
+#include <linux/numa.h>
+#include <linux/page_owner.h>
+#include <linux/sched/sysctl.h>
+#include <linux/memory-tiers.h>
+#include <linux/compat.h>
+#include <linux/pgalloc.h>
+#include <linux/pgalloc_tag.h>
+#include <linux/pagewalk.h>
#include <asm/tlb.h>
-#include <asm/pgalloc.h>
#include "internal.h"
+#include "swap.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/thp.h>
/*
- * By default transparent hugepage support is enabled for all mappings
- * and khugepaged scans all mappings. Defrag is only invoked by
- * khugepaged hugepage allocations and by page faults inside
- * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
- * allocations.
+ * By default, transparent hugepage support is disabled in order to avoid
+ * risking an increased memory footprint for applications that are not
+ * guaranteed to benefit from it. When transparent hugepage support is
+ * enabled, it is for all mappings, and khugepaged scans all mappings.
+ * Defrag is invoked by khugepaged hugepage allocations and by page faults
+ * for all hugepage allocations.
*/
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
@@ -40,169 +63,182 @@ unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
-/* default scan 8*512 pte (or vmas) every 30 second */
-static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
-static unsigned int khugepaged_pages_collapsed;
-static unsigned int khugepaged_full_scans;
-static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
-/* during fragmentation poll the hugepage allocator once every minute */
-static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
-static struct task_struct *khugepaged_thread __read_mostly;
-static DEFINE_MUTEX(khugepaged_mutex);
-static DEFINE_SPINLOCK(khugepaged_mm_lock);
-static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
-/*
- * default collapse hugepages if there is at least one pte mapped like
- * it would have happened if the vma was large enough during page
- * fault.
- */
-static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
-
-static int khugepaged(void *none);
-static int khugepaged_slab_init(void);
+static struct shrinker *deferred_split_shrinker;
+static unsigned long deferred_split_count(struct shrinker *shrink,
+ struct shrink_control *sc);
+static unsigned long deferred_split_scan(struct shrinker *shrink,
+ struct shrink_control *sc);
+static bool split_underused_thp = true;
-#define MM_SLOTS_HASH_BITS 10
-static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
+static atomic_t huge_zero_refcount;
+struct folio *huge_zero_folio __read_mostly;
+unsigned long huge_zero_pfn __read_mostly = ~0UL;
+unsigned long huge_anon_orders_always __read_mostly;
+unsigned long huge_anon_orders_madvise __read_mostly;
+unsigned long huge_anon_orders_inherit __read_mostly;
+static bool anon_orders_configured __initdata;
+
+static inline bool file_thp_enabled(struct vm_area_struct *vma)
+{
+ struct inode *inode;
-static struct kmem_cache *mm_slot_cache __read_mostly;
+ if (!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
+ return false;
-/**
- * struct mm_slot - hash lookup from mm to mm_slot
- * @hash: hash collision list
- * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
- * @mm: the mm that this information is valid for
- */
-struct mm_slot {
- struct hlist_node hash;
- struct list_head mm_node;
- struct mm_struct *mm;
-};
+ if (!vma->vm_file)
+ return false;
-/**
- * struct khugepaged_scan - cursor for scanning
- * @mm_head: the head of the mm list to scan
- * @mm_slot: the current mm_slot we are scanning
- * @address: the next address inside that to be scanned
- *
- * There is only the one khugepaged_scan instance of this cursor structure.
- */
-struct khugepaged_scan {
- struct list_head mm_head;
- struct mm_slot *mm_slot;
- unsigned long address;
-};
-static struct khugepaged_scan khugepaged_scan = {
- .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
-};
+ inode = file_inode(vma->vm_file);
+ return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
+}
-static int set_recommended_min_free_kbytes(void)
+unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ vm_flags_t vm_flags,
+ enum tva_type type,
+ unsigned long orders)
{
- struct zone *zone;
- int nr_zones = 0;
- unsigned long recommended_min;
+ const bool smaps = type == TVA_SMAPS;
+ const bool in_pf = type == TVA_PAGEFAULT;
+ const bool forced_collapse = type == TVA_FORCED_COLLAPSE;
+ unsigned long supported_orders;
+
+ /* Check the intersection of requested and supported orders. */
+ if (vma_is_anonymous(vma))
+ supported_orders = THP_ORDERS_ALL_ANON;
+ else if (vma_is_special_huge(vma))
+ supported_orders = THP_ORDERS_ALL_SPECIAL;
+ else
+ supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
- if (!khugepaged_enabled())
+ orders &= supported_orders;
+ if (!orders)
return 0;
- for_each_populated_zone(zone)
- nr_zones++;
+ if (!vma->vm_mm) /* vdso */
+ return 0;
+
+ if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags, forced_collapse))
+ return 0;
- /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
- recommended_min = pageblock_nr_pages * nr_zones * 2;
+ /* khugepaged doesn't collapse DAX vma, but page fault is fine. */
+ if (vma_is_dax(vma))
+ return in_pf ? orders : 0;
/*
- * Make sure that on average at least two pageblocks are almost free
- * of another type, one for a migratetype to fall back to and a
- * second to avoid subsequent fallbacks of other types There are 3
- * MIGRATE_TYPES we care about.
+ * khugepaged special VMA and hugetlb VMA.
+ * Must be checked after dax since some dax mappings may have
+ * VM_MIXEDMAP set.
*/
- recommended_min += pageblock_nr_pages * nr_zones *
- MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
+ if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
+ return 0;
- /* don't ever allow to reserve more than 5% of the lowmem */
- recommended_min = min(recommended_min,
- (unsigned long) nr_free_buffer_pages() / 20);
- recommended_min <<= (PAGE_SHIFT-10);
+ /*
+ * Check alignment for file vma and size for both file and anon vma by
+ * filtering out the unsuitable orders.
+ *
+ * Skip the check for page fault. Huge fault does the check in fault
+ * handlers.
+ */
+ if (!in_pf) {
+ int order = highest_order(orders);
+ unsigned long addr;
+
+ while (orders) {
+ addr = vma->vm_end - (PAGE_SIZE << order);
+ if (thp_vma_suitable_order(vma, addr, order))
+ break;
+ order = next_order(&orders, order);
+ }
- if (recommended_min > min_free_kbytes)
- min_free_kbytes = recommended_min;
- setup_per_zone_wmarks();
- return 0;
-}
-late_initcall(set_recommended_min_free_kbytes);
+ if (!orders)
+ return 0;
+ }
-static int start_khugepaged(void)
-{
- int err = 0;
- if (khugepaged_enabled()) {
- if (!khugepaged_thread)
- khugepaged_thread = kthread_run(khugepaged, NULL,
- "khugepaged");
- if (unlikely(IS_ERR(khugepaged_thread))) {
- printk(KERN_ERR
- "khugepaged: kthread_run(khugepaged) failed\n");
- err = PTR_ERR(khugepaged_thread);
- khugepaged_thread = NULL;
- }
+ /*
+ * Enabled via shmem mount options or sysfs settings.
+ * Must be done before hugepage flags check since shmem has its
+ * own flags.
+ */
+ if (!in_pf && shmem_file(vma->vm_file))
+ return orders & shmem_allowable_huge_orders(file_inode(vma->vm_file),
+ vma, vma->vm_pgoff, 0,
+ forced_collapse);
- if (!list_empty(&khugepaged_scan.mm_head))
- wake_up_interruptible(&khugepaged_wait);
+ if (!vma_is_anonymous(vma)) {
+ /*
+ * Enforce THP collapse requirements as necessary. Anonymous vmas
+ * were already handled in thp_vma_allowable_orders().
+ */
+ if (!forced_collapse &&
+ (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
+ !hugepage_global_always())))
+ return 0;
- set_recommended_min_free_kbytes();
- } else if (khugepaged_thread) {
- kthread_stop(khugepaged_thread);
- khugepaged_thread = NULL;
+ /*
+ * Trust that ->huge_fault() handlers know what they are doing
+ * in fault path.
+ */
+ if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
+ return orders;
+ /* Only regular file is valid in collapse path */
+ if (((!in_pf || smaps)) && file_thp_enabled(vma))
+ return orders;
+ return 0;
}
- return err;
-}
-
-static atomic_t huge_zero_refcount;
-static struct page *huge_zero_page __read_mostly;
+ if (vma_is_temporary_stack(vma))
+ return 0;
-static inline bool is_huge_zero_page(struct page *page)
-{
- return ACCESS_ONCE(huge_zero_page) == page;
-}
+ /*
+ * THPeligible bit of smaps should show 1 for proper VMAs even
+ * though anon_vma is not initialized yet.
+ *
+ * Allow page fault since anon_vma may be not initialized until
+ * the first page fault.
+ */
+ if (!vma->anon_vma)
+ return (smaps || in_pf) ? orders : 0;
-static inline bool is_huge_zero_pmd(pmd_t pmd)
-{
- return is_huge_zero_page(pmd_page(pmd));
+ return orders;
}
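
As a worked illustration of the size filtering above (orders hypothetical): if a caller requests orders = BIT(PMD_ORDER) | BIT(4) but the VMA is only large enough to hold a suitably aligned order-4 range, the while loop drops PMD_ORDER from the mask via next_order() and only the surviving order-4 bit reaches the shmem/file/anonymous checks that follow; if no requested order fits, the function returns 0.
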
-static struct page *get_huge_zero_page(void)
+static bool get_huge_zero_folio(void)
{
- struct page *zero_page;
+ struct folio *zero_folio;
retry:
if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
- return ACCESS_ONCE(huge_zero_page);
+ return true;
- zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
+ zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO | __GFP_ZEROTAGS) &
+ ~__GFP_MOVABLE,
HPAGE_PMD_ORDER);
- if (!zero_page) {
+ if (!zero_folio) {
count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
- return NULL;
+ return false;
}
- count_vm_event(THP_ZERO_PAGE_ALLOC);
+ /* Ensure zero folio won't have large_rmappable flag set. */
+ folio_clear_large_rmappable(zero_folio);
preempt_disable();
- if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
+ if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
preempt_enable();
- __free_page(zero_page);
+ folio_put(zero_folio);
goto retry;
}
+ WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));
/* We take additional reference here. It will be put back by shrinker */
atomic_set(&huge_zero_refcount, 2);
preempt_enable();
- return ACCESS_ONCE(huge_zero_page);
+ count_vm_event(THP_ZERO_PAGE_ALLOC);
+ return true;
}
-static void put_huge_zero_page(void)
+static void put_huge_zero_folio(void)
{
/*
* Counter should never go to zero here. Only shrinker can put
@@ -211,108 +247,109 @@ static void put_huge_zero_page(void)
BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
-static int shrink_huge_zero_page(struct shrinker *shrink,
- struct shrink_control *sc)
+struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
{
- if (!sc->nr_to_scan)
- /* we can free zero page only if last reference remains */
- return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+ if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
+ return huge_zero_folio;
- if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
- struct page *zero_page = xchg(&huge_zero_page, NULL);
- BUG_ON(zero_page == NULL);
- __free_page(zero_page);
- }
+ if (mm_flags_test(MMF_HUGE_ZERO_FOLIO, mm))
+ return READ_ONCE(huge_zero_folio);
- return 0;
+ if (!get_huge_zero_folio())
+ return NULL;
+
+ if (mm_flags_test_and_set(MMF_HUGE_ZERO_FOLIO, mm))
+ put_huge_zero_folio();
+
+ return READ_ONCE(huge_zero_folio);
}
-static struct shrinker huge_zero_page_shrinker = {
- .shrink = shrink_huge_zero_page,
- .seeks = DEFAULT_SEEKS,
-};
+void mm_put_huge_zero_folio(struct mm_struct *mm)
+{
+ if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
+ return;
-#ifdef CONFIG_SYSFS
+ if (mm_flags_test(MMF_HUGE_ZERO_FOLIO, mm))
+ put_huge_zero_folio();
+}
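
A minimal sketch of how the get/put helpers above are meant to pair up; the caller below is hypothetical (in this file the get side is reached from the huge-zero-page read-fault path further down, and the put side runs once per mm on teardown):

	/* Hypothetical caller, for illustration only. */
	static vm_fault_t map_huge_zero_example(struct vm_fault *vmf)
	{
		struct mm_struct *mm = vmf->vma->vm_mm;
		struct folio *zero_folio;

		/*
		 * The first successful call per mm takes one reference and sets
		 * MMF_HUGE_ZERO_FOLIO; with CONFIG_PERSISTENT_HUGE_ZERO_FOLIO
		 * the boot-time folio is returned directly.
		 */
		zero_folio = mm_get_huge_zero_folio(mm);
		if (!zero_folio)
			return VM_FAULT_FALLBACK;	/* allocation failed */

		/* ... install a read-only PMD mapping of zero_folio ... */
		return 0;
	}

	/* On mm teardown, the single per-mm reference is dropped: */
	/*	mm_put_huge_zero_folio(mm); */
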
-static ssize_t double_flag_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf,
- enum transparent_hugepage_flag enabled,
- enum transparent_hugepage_flag req_madv)
+static unsigned long shrink_huge_zero_folio_count(struct shrinker *shrink,
+ struct shrink_control *sc)
{
- if (test_bit(enabled, &transparent_hugepage_flags)) {
- VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
- return sprintf(buf, "[always] madvise never\n");
- } else if (test_bit(req_madv, &transparent_hugepage_flags))
- return sprintf(buf, "always [madvise] never\n");
- else
- return sprintf(buf, "always madvise [never]\n");
+ /* we can free zero page only if last reference remains */
+ return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}
-static ssize_t double_flag_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count,
- enum transparent_hugepage_flag enabled,
- enum transparent_hugepage_flag req_madv)
-{
- if (!memcmp("always", buf,
- min(sizeof("always")-1, count))) {
- set_bit(enabled, &transparent_hugepage_flags);
- clear_bit(req_madv, &transparent_hugepage_flags);
- } else if (!memcmp("madvise", buf,
- min(sizeof("madvise")-1, count))) {
- clear_bit(enabled, &transparent_hugepage_flags);
- set_bit(req_madv, &transparent_hugepage_flags);
- } else if (!memcmp("never", buf,
- min(sizeof("never")-1, count))) {
- clear_bit(enabled, &transparent_hugepage_flags);
- clear_bit(req_madv, &transparent_hugepage_flags);
- } else
- return -EINVAL;
- return count;
+static unsigned long shrink_huge_zero_folio_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
+ struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
+ BUG_ON(zero_folio == NULL);
+ WRITE_ONCE(huge_zero_pfn, ~0UL);
+ folio_put(zero_folio);
+ return HPAGE_PMD_NR;
+ }
+
+ return 0;
}
+static struct shrinker *huge_zero_folio_shrinker;
+
+#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return double_flag_show(kobj, attr, buf,
- TRANSPARENT_HUGEPAGE_FLAG,
- TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
+ const char *output;
+
+ if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
+ output = "[always] madvise never";
+ else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+ &transparent_hugepage_flags))
+ output = "always [madvise] never";
+ else
+ output = "always madvise [never]";
+
+ return sysfs_emit(buf, "%s\n", output);
}
+
static ssize_t enabled_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- ssize_t ret;
-
- ret = double_flag_store(kobj, attr, buf, count,
- TRANSPARENT_HUGEPAGE_FLAG,
- TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
+ ssize_t ret = count;
+
+ if (sysfs_streq(buf, "always")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+ } else if (sysfs_streq(buf, "madvise")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ } else if (sysfs_streq(buf, "never")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ } else
+ ret = -EINVAL;
if (ret > 0) {
- int err;
-
- mutex_lock(&khugepaged_mutex);
- err = start_khugepaged();
- mutex_unlock(&khugepaged_mutex);
-
+ int err = start_stop_khugepaged();
if (err)
ret = err;
}
-
return ret;
}
-static struct kobj_attribute enabled_attr =
- __ATTR(enabled, 0644, enabled_show, enabled_store);
-static ssize_t single_flag_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf,
- enum transparent_hugepage_flag flag)
+static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
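
These two handlers back the global "enabled" file that hugepage_init_sysfs() creates below under /sys/kernel/mm/transparent_hugepage/: reading it lists the three policies with the active one bracketed (e.g. "always [madvise] never"), and writing "always", "madvise" or "never" selects the policy and then starts or stops khugepaged via start_stop_khugepaged().
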
+
+ssize_t single_hugepage_flag_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf,
+ enum transparent_hugepage_flag flag)
{
- return sprintf(buf, "%d\n",
- !!test_bit(flag, &transparent_hugepage_flags));
+ return sysfs_emit(buf, "%d\n",
+ !!test_bit(flag, &transparent_hugepage_flags));
}
-static ssize_t single_flag_store(struct kobject *kobj,
+ssize_t single_hugepage_flag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count,
enum transparent_hugepage_flag flag)
@@ -334,262 +371,458 @@ static ssize_t single_flag_store(struct kobject *kobj,
return count;
}
-/*
- * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
- * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
- * memory just to allocate one more hugepage.
- */
static ssize_t defrag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return double_flag_show(kobj, attr, buf,
- TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
- TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
+ const char *output;
+
+ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
+ &transparent_hugepage_flags))
+ output = "[always] defer defer+madvise madvise never";
+ else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
+ &transparent_hugepage_flags))
+ output = "always [defer] defer+madvise madvise never";
+ else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
+ &transparent_hugepage_flags))
+ output = "always defer [defer+madvise] madvise never";
+ else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
+ &transparent_hugepage_flags))
+ output = "always defer defer+madvise [madvise] never";
+ else
+ output = "always defer defer+madvise madvise [never]";
+
+ return sysfs_emit(buf, "%s\n", output);
}
+
static ssize_t defrag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- return double_flag_store(kobj, attr, buf, count,
- TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
- TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
+ if (sysfs_streq(buf, "always")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ } else if (sysfs_streq(buf, "defer+madvise")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ } else if (sysfs_streq(buf, "defer")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ } else if (sysfs_streq(buf, "madvise")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ } else if (sysfs_streq(buf, "never")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ } else
+ return -EINVAL;
+
+ return count;
}
-static struct kobj_attribute defrag_attr =
- __ATTR(defrag, 0644, defrag_show, defrag_store);
+static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
static ssize_t use_zero_page_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
- return single_flag_show(kobj, attr, buf,
- TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+ return single_hugepage_flag_show(kobj, attr, buf,
+ TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
- return single_flag_store(kobj, attr, buf, count,
+ return single_hugepage_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
-static struct kobj_attribute use_zero_page_attr =
- __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
-#ifdef CONFIG_DEBUG_VM
-static ssize_t debug_cow_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
+
+static ssize_t hpage_pmd_size_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return single_flag_show(kobj, attr, buf,
- TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
+ return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
-static ssize_t debug_cow_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static struct kobj_attribute hpage_pmd_size_attr =
+ __ATTR_RO(hpage_pmd_size);
+
+static ssize_t split_underused_thp_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return single_flag_store(kobj, attr, buf, count,
- TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
+ return sysfs_emit(buf, "%d\n", split_underused_thp);
}
-static struct kobj_attribute debug_cow_attr =
- __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
-#endif /* CONFIG_DEBUG_VM */
+
+static ssize_t split_underused_thp_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err = kstrtobool(buf, &split_underused_thp);
+
+ if (err < 0)
+ return err;
+
+ return count;
+}
+
+static struct kobj_attribute split_underused_thp_attr = __ATTR(
+ shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store);
static struct attribute *hugepage_attr[] = {
&enabled_attr.attr,
&defrag_attr.attr,
&use_zero_page_attr.attr,
-#ifdef CONFIG_DEBUG_VM
- &debug_cow_attr.attr,
+ &hpage_pmd_size_attr.attr,
+#ifdef CONFIG_SHMEM
+ &shmem_enabled_attr.attr,
#endif
+ &split_underused_thp_attr.attr,
NULL,
};
-static struct attribute_group hugepage_attr_group = {
+static const struct attribute_group hugepage_attr_group = {
.attrs = hugepage_attr,
};
-static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
+static void thpsize_release(struct kobject *kobj);
+static DEFINE_SPINLOCK(huge_anon_orders_lock);
+static LIST_HEAD(thpsize_list);
+
+static ssize_t anon_enabled_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
+ int order = to_thpsize(kobj)->order;
+ const char *output;
+
+ if (test_bit(order, &huge_anon_orders_always))
+ output = "[always] inherit madvise never";
+ else if (test_bit(order, &huge_anon_orders_inherit))
+ output = "always [inherit] madvise never";
+ else if (test_bit(order, &huge_anon_orders_madvise))
+ output = "always inherit [madvise] never";
+ else
+ output = "always inherit madvise [never]";
+
+ return sysfs_emit(buf, "%s\n", output);
}
-static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t anon_enabled_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
- unsigned long msecs;
- int err;
-
- err = strict_strtoul(buf, 10, &msecs);
- if (err || msecs > UINT_MAX)
- return -EINVAL;
+ int order = to_thpsize(kobj)->order;
+ ssize_t ret = count;
+
+ if (sysfs_streq(buf, "always")) {
+ spin_lock(&huge_anon_orders_lock);
+ clear_bit(order, &huge_anon_orders_inherit);
+ clear_bit(order, &huge_anon_orders_madvise);
+ set_bit(order, &huge_anon_orders_always);
+ spin_unlock(&huge_anon_orders_lock);
+ } else if (sysfs_streq(buf, "inherit")) {
+ spin_lock(&huge_anon_orders_lock);
+ clear_bit(order, &huge_anon_orders_always);
+ clear_bit(order, &huge_anon_orders_madvise);
+ set_bit(order, &huge_anon_orders_inherit);
+ spin_unlock(&huge_anon_orders_lock);
+ } else if (sysfs_streq(buf, "madvise")) {
+ spin_lock(&huge_anon_orders_lock);
+ clear_bit(order, &huge_anon_orders_always);
+ clear_bit(order, &huge_anon_orders_inherit);
+ set_bit(order, &huge_anon_orders_madvise);
+ spin_unlock(&huge_anon_orders_lock);
+ } else if (sysfs_streq(buf, "never")) {
+ spin_lock(&huge_anon_orders_lock);
+ clear_bit(order, &huge_anon_orders_always);
+ clear_bit(order, &huge_anon_orders_inherit);
+ clear_bit(order, &huge_anon_orders_madvise);
+ spin_unlock(&huge_anon_orders_lock);
+ } else
+ ret = -EINVAL;
- khugepaged_scan_sleep_millisecs = msecs;
- wake_up_interruptible(&khugepaged_wait);
+ if (ret > 0) {
+ int err;
- return count;
+ err = start_stop_khugepaged();
+ if (err)
+ ret = err;
+ }
+ return ret;
}
-static struct kobj_attribute scan_sleep_millisecs_attr =
- __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
- scan_sleep_millisecs_store);
-static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
-}
+static struct kobj_attribute anon_enabled_attr =
+ __ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store);
-static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned long msecs;
- int err;
+static struct attribute *anon_ctrl_attrs[] = {
+ &anon_enabled_attr.attr,
+ NULL,
+};
- err = strict_strtoul(buf, 10, &msecs);
- if (err || msecs > UINT_MAX)
- return -EINVAL;
+static const struct attribute_group anon_ctrl_attr_grp = {
+ .attrs = anon_ctrl_attrs,
+};
- khugepaged_alloc_sleep_millisecs = msecs;
- wake_up_interruptible(&khugepaged_wait);
+static struct attribute *file_ctrl_attrs[] = {
+#ifdef CONFIG_SHMEM
+ &thpsize_shmem_enabled_attr.attr,
+#endif
+ NULL,
+};
- return count;
-}
-static struct kobj_attribute alloc_sleep_millisecs_attr =
- __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
- alloc_sleep_millisecs_store);
+static const struct attribute_group file_ctrl_attr_grp = {
+ .attrs = file_ctrl_attrs,
+};
-static ssize_t pages_to_scan_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
-}
-static ssize_t pages_to_scan_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- int err;
- unsigned long pages;
+static struct attribute *any_ctrl_attrs[] = {
+ NULL,
+};
- err = strict_strtoul(buf, 10, &pages);
- if (err || !pages || pages > UINT_MAX)
- return -EINVAL;
+static const struct attribute_group any_ctrl_attr_grp = {
+ .attrs = any_ctrl_attrs,
+};
- khugepaged_pages_to_scan = pages;
+static const struct kobj_type thpsize_ktype = {
+ .release = &thpsize_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
- return count;
-}
-static struct kobj_attribute pages_to_scan_attr =
- __ATTR(pages_to_scan, 0644, pages_to_scan_show,
- pages_to_scan_store);
+DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};
-static ssize_t pages_collapsed_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
{
- return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
-}
-static struct kobj_attribute pages_collapsed_attr =
- __ATTR_RO(pages_collapsed);
+ unsigned long sum = 0;
+ int cpu;
-static ssize_t full_scans_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%u\n", khugepaged_full_scans);
-}
-static struct kobj_attribute full_scans_attr =
- __ATTR_RO(full_scans);
+ for_each_possible_cpu(cpu) {
+ struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
-static ssize_t khugepaged_defrag_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return single_flag_show(kobj, attr, buf,
- TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
-}
-static ssize_t khugepaged_defrag_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- return single_flag_store(kobj, attr, buf, count,
- TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
+ sum += this->stats[order][item];
+ }
+
+ return sum;
}
-static struct kobj_attribute khugepaged_defrag_attr =
- __ATTR(defrag, 0644, khugepaged_defrag_show,
- khugepaged_defrag_store);
-/*
- * max_ptes_none controls if khugepaged should collapse hugepages over
- * any unmapped ptes in turn potentially increasing the memory
- * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
- * reduce the available free memory in the system as it
- * runs. Increasing max_ptes_none will instead potentially reduce the
- * free memory in the system during the khugepaged scan.
- */
-static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+#define DEFINE_MTHP_STAT_ATTR(_name, _index) \
+static ssize_t _name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ int order = to_thpsize(kobj)->order; \
+ \
+ return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index)); \
+} \
+static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+
+DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
+DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
+DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
+DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
+DEFINE_MTHP_STAT_ATTR(swpin_fallback, MTHP_STAT_SWPIN_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(swpin_fallback_charge, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
+DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
+DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
+#ifdef CONFIG_SHMEM
+DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC);
+DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE);
+#endif
+DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
+DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
+DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
+DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
+DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
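
For reference, the first invocation above, DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC), expands to roughly the following per-size show routine and read-only attribute (whitespace approximate):

	static ssize_t anon_fault_alloc_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
	{
		int order = to_thpsize(kobj)->order;

		return sysfs_emit(buf, "%lu\n",
				  sum_mthp_stat(order, MTHP_STAT_ANON_FAULT_ALLOC));
	}
	static struct kobj_attribute anon_fault_alloc_attr = __ATTR_RO(anon_fault_alloc);
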
+
+static struct attribute *anon_stats_attrs[] = {
+ &anon_fault_alloc_attr.attr,
+ &anon_fault_fallback_attr.attr,
+ &anon_fault_fallback_charge_attr.attr,
+#ifndef CONFIG_SHMEM
+ &zswpout_attr.attr,
+ &swpin_attr.attr,
+ &swpin_fallback_attr.attr,
+ &swpin_fallback_charge_attr.attr,
+ &swpout_attr.attr,
+ &swpout_fallback_attr.attr,
+#endif
+ &split_deferred_attr.attr,
+ &nr_anon_attr.attr,
+ &nr_anon_partially_mapped_attr.attr,
+ NULL,
+};
+
+static struct attribute_group anon_stats_attr_grp = {
+ .name = "stats",
+ .attrs = anon_stats_attrs,
+};
+
+static struct attribute *file_stats_attrs[] = {
+#ifdef CONFIG_SHMEM
+ &shmem_alloc_attr.attr,
+ &shmem_fallback_attr.attr,
+ &shmem_fallback_charge_attr.attr,
+#endif
+ NULL,
+};
+
+static struct attribute_group file_stats_attr_grp = {
+ .name = "stats",
+ .attrs = file_stats_attrs,
+};
+
+static struct attribute *any_stats_attrs[] = {
+#ifdef CONFIG_SHMEM
+ &zswpout_attr.attr,
+ &swpin_attr.attr,
+ &swpin_fallback_attr.attr,
+ &swpin_fallback_charge_attr.attr,
+ &swpout_attr.attr,
+ &swpout_fallback_attr.attr,
+#endif
+ &split_attr.attr,
+ &split_failed_attr.attr,
+ NULL,
+};
+
+static struct attribute_group any_stats_attr_grp = {
+ .name = "stats",
+ .attrs = any_stats_attrs,
+};
+
+static int sysfs_add_group(struct kobject *kobj,
+ const struct attribute_group *grp)
{
- return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
+ int ret = -ENOENT;
+
+ /*
+ * If the group is named, try to merge first, assuming the subdirectory
+ * was already created. This avoids the warning emitted by
+ * sysfs_create_group() if the directory already exists.
+ */
+ if (grp->name)
+ ret = sysfs_merge_group(kobj, grp);
+ if (ret)
+ ret = sysfs_create_group(kobj, grp);
+
+ return ret;
}
-static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+
+static struct thpsize *thpsize_create(int order, struct kobject *parent)
{
- int err;
- unsigned long max_ptes_none;
+ unsigned long size = (PAGE_SIZE << order) / SZ_1K;
+ struct thpsize *thpsize;
+ int ret = -ENOMEM;
- err = strict_strtoul(buf, 10, &max_ptes_none);
- if (err || max_ptes_none > HPAGE_PMD_NR-1)
- return -EINVAL;
+ thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
+ if (!thpsize)
+ goto err;
- khugepaged_max_ptes_none = max_ptes_none;
+ thpsize->order = order;
- return count;
+ ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
+ "hugepages-%lukB", size);
+ if (ret) {
+ kfree(thpsize);
+ goto err;
+ }
+
+
+ ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp);
+ if (ret)
+ goto err_put;
+
+ ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp);
+ if (ret)
+ goto err_put;
+
+ if (BIT(order) & THP_ORDERS_ALL_ANON) {
+ ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp);
+ if (ret)
+ goto err_put;
+
+ ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp);
+ if (ret)
+ goto err_put;
+ }
+
+ if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) {
+ ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp);
+ if (ret)
+ goto err_put;
+
+ ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp);
+ if (ret)
+ goto err_put;
+ }
+
+ return thpsize;
+err_put:
+ kobject_put(&thpsize->kobj);
+err:
+ return ERR_PTR(ret);
}
-static struct kobj_attribute khugepaged_max_ptes_none_attr =
- __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
- khugepaged_max_ptes_none_store);
-
-static struct attribute *khugepaged_attr[] = {
- &khugepaged_defrag_attr.attr,
- &khugepaged_max_ptes_none_attr.attr,
- &pages_to_scan_attr.attr,
- &pages_collapsed_attr.attr,
- &full_scans_attr.attr,
- &scan_sleep_millisecs_attr.attr,
- &alloc_sleep_millisecs_attr.attr,
- NULL,
-};
-static struct attribute_group khugepaged_attr_group = {
- .attrs = khugepaged_attr,
- .name = "khugepaged",
-};
+static void thpsize_release(struct kobject *kobj)
+{
+ kfree(to_thpsize(kobj));
+}
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
int err;
+ struct thpsize *thpsize;
+ unsigned long orders;
+ int order;
+
+ /*
+ * Default to setting PMD-sized THP to inherit the global setting and
+ * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
+ * constant so we have to do this here.
+ */
+ if (!anon_orders_configured)
+ huge_anon_orders_inherit = BIT(PMD_ORDER);
*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
if (unlikely(!*hugepage_kobj)) {
- printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n");
+ pr_err("failed to create transparent hugepage kobject\n");
return -ENOMEM;
}
err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
if (err) {
- printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
+ pr_err("failed to register transparent hugepage group\n");
goto delete_obj;
}
err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
if (err) {
- printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
+ pr_err("failed to register transparent hugepage group\n");
goto remove_hp_group;
}
+ orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
+ order = highest_order(orders);
+ while (orders) {
+ thpsize = thpsize_create(order, *hugepage_kobj);
+ if (IS_ERR(thpsize)) {
+ pr_err("failed to create thpsize for order %d\n", order);
+ err = PTR_ERR(thpsize);
+ goto remove_all;
+ }
+ list_add(&thpsize->node, &thpsize_list);
+ order = next_order(&orders, order);
+ }
+
return 0;
+remove_all:
+ hugepage_exit_sysfs(*hugepage_kobj);
+ return err;
remove_hp_group:
sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
@@ -599,6 +832,13 @@ delete_obj:
static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
+ struct thpsize *thpsize, *tmp;
+
+ list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
+ list_del(&thpsize->node);
+ kobject_put(&thpsize->kobj);
+ }
+
sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
kobject_put(hugepage_kobj);
@@ -614,42 +854,104 @@ static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
}
#endif /* CONFIG_SYSFS */
+static int __init thp_shrinker_init(void)
+{
+ deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
+ SHRINKER_MEMCG_AWARE |
+ SHRINKER_NONSLAB,
+ "thp-deferred_split");
+ if (!deferred_split_shrinker)
+ return -ENOMEM;
+
+ deferred_split_shrinker->count_objects = deferred_split_count;
+ deferred_split_shrinker->scan_objects = deferred_split_scan;
+ shrinker_register(deferred_split_shrinker);
+
+ if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO)) {
+ /*
+ * Bump the reference of the huge_zero_folio and do not
+ * initialize the shrinker.
+ *
+ * huge_zero_folio will always be NULL on failure. We assume
+ * that get_huge_zero_folio() will most likely not fail as
+ * thp_shrinker_init() is invoked early on during boot.
+ */
+ if (!get_huge_zero_folio())
+ pr_warn("Allocating persistent huge zero folio failed\n");
+ return 0;
+ }
+
+ huge_zero_folio_shrinker = shrinker_alloc(0, "thp-zero");
+ if (!huge_zero_folio_shrinker) {
+ shrinker_free(deferred_split_shrinker);
+ return -ENOMEM;
+ }
+
+ huge_zero_folio_shrinker->count_objects = shrink_huge_zero_folio_count;
+ huge_zero_folio_shrinker->scan_objects = shrink_huge_zero_folio_scan;
+ shrinker_register(huge_zero_folio_shrinker);
+
+ return 0;
+}
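
Note the two cases handled above: with CONFIG_PERSISTENT_HUGE_ZERO_FOLIO the boot-time get_huge_zero_folio() reference is never dropped, so the huge zero folio stays resident for the lifetime of the system and no "thp-zero" shrinker is registered; otherwise the shrinker may free the folio once only its own reference remains (see shrink_huge_zero_folio_count() and shrink_huge_zero_folio_scan() above).
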
+
+static void __init thp_shrinker_exit(void)
+{
+ shrinker_free(huge_zero_folio_shrinker);
+ shrinker_free(deferred_split_shrinker);
+}
+
static int __init hugepage_init(void)
{
int err;
struct kobject *hugepage_kobj;
if (!has_transparent_hugepage()) {
- transparent_hugepage_flags = 0;
+ transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
return -EINVAL;
}
+ /*
+ * hugepages can't be allocated by the buddy allocator
+ */
+ MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
+
err = hugepage_init_sysfs(&hugepage_kobj);
if (err)
- return err;
+ goto err_sysfs;
- err = khugepaged_slab_init();
+ err = khugepaged_init();
if (err)
- goto out;
+ goto err_slab;
- register_shrinker(&huge_zero_page_shrinker);
+ err = thp_shrinker_init();
+ if (err)
+ goto err_shrinker;
/*
* By default disable transparent hugepages on smaller systems,
* where the extra memory used could hurt more than TLB overhead
* is likely to save. The admin can still enable it through /sys.
*/
- if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
+ if (totalram_pages() < MB_TO_PAGES(512)) {
transparent_hugepage_flags = 0;
+ return 0;
+ }
- start_khugepaged();
+ err = start_stop_khugepaged();
+ if (err)
+ goto err_khugepaged;
return 0;
-out:
+err_khugepaged:
+ thp_shrinker_exit();
+err_shrinker:
+ khugepaged_destroy();
+err_slab:
hugepage_exit_sysfs(hugepage_kobj);
+err_sysfs:
return err;
}
-module_init(hugepage_init)
+subsys_initcall(hugepage_init);
static int __init setup_transparent_hugepage(char *str)
{
@@ -677,2123 +979,3985 @@ static int __init setup_transparent_hugepage(char *str)
}
out:
if (!ret)
- printk(KERN_WARNING
- "transparent_hugepage= cannot parse, ignored\n");
+ pr_warn("transparent_hugepage= cannot parse, ignored\n");
return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
+static char str_dup[PAGE_SIZE] __initdata;
+static int __init setup_thp_anon(char *str)
+{
+ char *token, *range, *policy, *subtoken;
+ unsigned long always, inherit, madvise;
+ char *start_size, *end_size;
+ int start, end, nr;
+ char *p;
+
+ if (!str || strlen(str) + 1 > PAGE_SIZE)
+ goto err;
+ strscpy(str_dup, str);
+
+ always = huge_anon_orders_always;
+ madvise = huge_anon_orders_madvise;
+ inherit = huge_anon_orders_inherit;
+ p = str_dup;
+ while ((token = strsep(&p, ";")) != NULL) {
+ range = strsep(&token, ":");
+ policy = token;
+
+ if (!policy)
+ goto err;
+
+ while ((subtoken = strsep(&range, ",")) != NULL) {
+ if (strchr(subtoken, '-')) {
+ start_size = strsep(&subtoken, "-");
+ end_size = subtoken;
+
+ start = get_order_from_str(start_size, THP_ORDERS_ALL_ANON);
+ end = get_order_from_str(end_size, THP_ORDERS_ALL_ANON);
+ } else {
+ start_size = end_size = subtoken;
+ start = end = get_order_from_str(subtoken,
+ THP_ORDERS_ALL_ANON);
+ }
+
+ if (start == -EINVAL) {
+ pr_err("invalid size %s in thp_anon boot parameter\n", start_size);
+ goto err;
+ }
+
+ if (end == -EINVAL) {
+ pr_err("invalid size %s in thp_anon boot parameter\n", end_size);
+ goto err;
+ }
+
+ if (start < 0 || end < 0 || start > end)
+ goto err;
+
+ nr = end - start + 1;
+ if (!strcmp(policy, "always")) {
+ bitmap_set(&always, start, nr);
+ bitmap_clear(&inherit, start, nr);
+ bitmap_clear(&madvise, start, nr);
+ } else if (!strcmp(policy, "madvise")) {
+ bitmap_set(&madvise, start, nr);
+ bitmap_clear(&inherit, start, nr);
+ bitmap_clear(&always, start, nr);
+ } else if (!strcmp(policy, "inherit")) {
+ bitmap_set(&inherit, start, nr);
+ bitmap_clear(&madvise, start, nr);
+ bitmap_clear(&always, start, nr);
+ } else if (!strcmp(policy, "never")) {
+ bitmap_clear(&inherit, start, nr);
+ bitmap_clear(&madvise, start, nr);
+ bitmap_clear(&always, start, nr);
+ } else {
+ pr_err("invalid policy %s in thp_anon boot parameter\n", policy);
+ goto err;
+ }
+ }
+ }
+
+ huge_anon_orders_always = always;
+ huge_anon_orders_madvise = madvise;
+ huge_anon_orders_inherit = inherit;
+ anon_orders_configured = true;
+ return 1;
+
+err:
+ pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str);
+ return 0;
+}
+__setup("thp_anon=", setup_thp_anon);
+
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
- pmd = pmd_mkwrite(pmd);
+ pmd = pmd_mkwrite(pmd, vma);
return pmd;
}
-static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
+static struct deferred_split *split_queue_node(int nid)
{
- pmd_t entry;
- entry = mk_pmd(page, vma->vm_page_prot);
- entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
- entry = pmd_mkhuge(entry);
- return entry;
+ struct pglist_data *pgdata = NODE_DATA(nid);
+
+ return &pgdata->deferred_split_queue;
}
-static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long haddr, pmd_t *pmd,
- struct page *page)
+#ifdef CONFIG_MEMCG
+static inline
+struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
+ struct deferred_split *queue)
{
- pgtable_t pgtable;
+ if (mem_cgroup_disabled())
+ return NULL;
+ if (split_queue_node(folio_nid(folio)) == queue)
+ return NULL;
+ return container_of(queue, struct mem_cgroup, deferred_split_queue);
+}
- VM_BUG_ON(!PageCompound(page));
- pgtable = pte_alloc_one(mm, haddr);
- if (unlikely(!pgtable))
- return VM_FAULT_OOM;
+static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
+{
+ return memcg ? &memcg->deferred_split_queue : split_queue_node(nid);
+}
+#else
+static inline
+struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
+ struct deferred_split *queue)
+{
+ return NULL;
+}
+
+static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
+{
+ return split_queue_node(nid);
+}
+#endif
+
+static struct deferred_split *split_queue_lock(int nid, struct mem_cgroup *memcg)
+{
+ struct deferred_split *queue;
- clear_huge_page(page, haddr, HPAGE_PMD_NR);
+retry:
+ queue = memcg_split_queue(nid, memcg);
+ spin_lock(&queue->split_queue_lock);
/*
- * The memory barrier inside __SetPageUptodate makes sure that
- * clear_huge_page writes become visible before the set_pmd_at()
+ * There is a period between setting memcg to dying and reparenting
+ * deferred split queue, and during this period the THPs in the deferred
+ * split queue will be hidden from the shrinker side.
+ */
+ if (unlikely(memcg_is_dying(memcg))) {
+ spin_unlock(&queue->split_queue_lock);
+ memcg = parent_mem_cgroup(memcg);
+ goto retry;
+ }
+
+ return queue;
+}
+
+static struct deferred_split *
+split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags)
+{
+ struct deferred_split *queue;
+
+retry:
+ queue = memcg_split_queue(nid, memcg);
+ spin_lock_irqsave(&queue->split_queue_lock, *flags);
+ if (unlikely(memcg_is_dying(memcg))) {
+ spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
+ memcg = parent_mem_cgroup(memcg);
+ goto retry;
+ }
+
+ return queue;
+}
+
+static struct deferred_split *folio_split_queue_lock(struct folio *folio)
+{
+ return split_queue_lock(folio_nid(folio), folio_memcg(folio));
+}
+
+static struct deferred_split *
+folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
+{
+ return split_queue_lock_irqsave(folio_nid(folio), folio_memcg(folio), flags);
+}
+
+static inline void split_queue_unlock(struct deferred_split *queue)
+{
+ spin_unlock(&queue->split_queue_lock);
+}
+
+static inline void split_queue_unlock_irqrestore(struct deferred_split *queue,
+ unsigned long flags)
+{
+ spin_unlock_irqrestore(&queue->split_queue_lock, flags);
+}
+
+static inline bool is_transparent_hugepage(const struct folio *folio)
+{
+ if (!folio_test_large(folio))
+ return false;
+
+ return is_huge_zero_folio(folio) ||
+ folio_test_large_rmappable(folio);
+}
+
+static unsigned long __thp_get_unmapped_area(struct file *filp,
+ unsigned long addr, unsigned long len,
+ loff_t off, unsigned long flags, unsigned long size,
+ vm_flags_t vm_flags)
+{
+ loff_t off_end = off + len;
+ loff_t off_align = round_up(off, size);
+ unsigned long len_pad, ret, off_sub;
+
+ if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
+ return 0;
+
+ if (off_end <= off_align || (off_end - off_align) < size)
+ return 0;
+
+ len_pad = len + size;
+ if (len_pad < len || (off + len_pad) < off)
+ return 0;
+
+ ret = mm_get_unmapped_area_vmflags(filp, addr, len_pad,
+ off >> PAGE_SHIFT, flags, vm_flags);
+
+ /*
+ * The failure might be due to length padding. The caller will retry
+ * without the padding.
+ */
+ if (IS_ERR_VALUE(ret))
+ return 0;
+
+ /*
+ * Do not try to align to THP boundary if allocation at the address
+ * hint succeeds.
+ */
+ if (ret == addr)
+ return addr;
+
+ off_sub = (off - ret) & (size - 1);
+
+ if (mm_flags_test(MMF_TOPDOWN, current->mm) && !off_sub)
+ return ret + size;
+
+ ret += off_sub;
+ return ret;
+}
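
A worked example of the alignment arithmetic above (addresses hypothetical): with size = PMD_SIZE = 2 MiB, off = 0x340000 and a 2 MiB-aligned allocator result ret that differs from the caller's hint, off_sub = (off - ret) & (size - 1) = 0x140000, so ret + 0x140000 is returned and the mapping start agrees with the file offset modulo 2 MiB, allowing PMD-sized page-cache mappings; the extra size of padding in len_pad guarantees the shifted range still fits, and the early returns skip the whole exercise when the request cannot cover one full aligned chunk anyway.
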
+
+unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
+{
+ unsigned long ret;
+ loff_t off = (loff_t)pgoff << PAGE_SHIFT;
+
+ ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
+ if (ret)
+ return ret;
+
+ return mm_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags,
+ vm_flags);
+}
+
+unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
+}
+EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
+
+static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ gfp_t gfp = vma_thp_gfp_mask(vma);
+ const int order = HPAGE_PMD_ORDER;
+ struct folio *folio;
+
+ folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK);
+
+ if (unlikely(!folio)) {
+ count_vm_event(THP_FAULT_FALLBACK);
+ count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
+ return NULL;
+ }
+
+ VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
+ folio_put(folio);
+ count_vm_event(THP_FAULT_FALLBACK);
+ count_vm_event(THP_FAULT_FALLBACK_CHARGE);
+ count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
+ count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
+ return NULL;
+ }
+ folio_throttle_swaprate(folio, gfp);
+
+ /*
+ * When a folio is not zeroed during allocation (__GFP_ZERO not used)
+ * or user folios require special handling, folio_zero_user() is used to
+ * make sure that the page corresponding to the faulting address will be
+ * hot in the cache after zeroing.
+ */
+ if (user_alloc_needs_zeroing())
+ folio_zero_user(folio, addr);
+ /*
+ * The memory barrier inside __folio_mark_uptodate makes sure that
+ * folio_zero_user writes become visible before the set_pmd_at()
* write.
*/
- __SetPageUptodate(page);
+ __folio_mark_uptodate(folio);
+ return folio;
+}
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_none(*pmd))) {
- spin_unlock(&mm->page_table_lock);
- mem_cgroup_uncharge_page(page);
- put_page(page);
- pte_free(mm, pgtable);
+void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
+ struct vm_area_struct *vma, unsigned long haddr)
+{
+ pmd_t entry;
+
+ entry = folio_mk_pmd(folio, vma->vm_page_prot);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+ folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
+ folio_add_lru_vma(folio, vma);
+ set_pmd_at(vma->vm_mm, haddr, pmd, entry);
+ update_mmu_cache_pmd(vma, haddr, pmd);
+ deferred_split_folio(folio, false);
+}
+
+static void map_anon_folio_pmd_pf(struct folio *folio, pmd_t *pmd,
+ struct vm_area_struct *vma, unsigned long haddr)
+{
+ map_anon_folio_pmd_nopf(folio, pmd, vma, haddr);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ count_vm_event(THP_FAULT_ALLOC);
+ count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
+ count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
+}
+
+static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
+{
+ unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+ struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
+ pgtable_t pgtable;
+ vm_fault_t ret = 0;
+
+ folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
+ if (unlikely(!folio))
+ return VM_FAULT_FALLBACK;
+
+ pgtable = pte_alloc_one(vma->vm_mm);
+ if (unlikely(!pgtable)) {
+ ret = VM_FAULT_OOM;
+ goto release;
+ }
+
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ if (unlikely(!pmd_none(*vmf->pmd))) {
+ goto unlock_release;
} else {
- pmd_t entry;
- entry = mk_huge_pmd(page, vma);
- page_add_new_anon_rmap(page, vma, haddr);
- pgtable_trans_huge_deposit(mm, pmd, pgtable);
- set_pmd_at(mm, haddr, pmd, entry);
- add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
- mm->nr_ptes++;
- spin_unlock(&mm->page_table_lock);
+ ret = check_stable_address_space(vma->vm_mm);
+ if (ret)
+ goto unlock_release;
+
+ /* Deliver the page fault to userland */
+ if (userfaultfd_missing(vma)) {
+ spin_unlock(vmf->ptl);
+ folio_put(folio);
+ pte_free(vma->vm_mm, pgtable);
+ ret = handle_userfault(vmf, VM_UFFD_MISSING);
+ VM_BUG_ON(ret & VM_FAULT_FALLBACK);
+ return ret;
+ }
+ pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+ map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
+ mm_inc_nr_ptes(vma->vm_mm);
+ spin_unlock(vmf->ptl);
}
return 0;
-}
+unlock_release:
+ spin_unlock(vmf->ptl);
+release:
+ if (pgtable)
+ pte_free(vma->vm_mm, pgtable);
+ folio_put(folio);
+ return ret;
-static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
-{
- return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}
-static inline struct page *alloc_hugepage_vma(int defrag,
- struct vm_area_struct *vma,
- unsigned long haddr, int nd,
- gfp_t extra_gfp)
+vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
{
- return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
- HPAGE_PMD_ORDER, vma, haddr, nd);
+ struct vm_area_struct *vma = vmf->vma;
+ vm_fault_t ret = 0;
+ spinlock_t *ptl;
+ softleaf_t entry;
+ struct page *page;
+ struct folio *folio;
+
+ if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+ vma_end_read(vma);
+ return VM_FAULT_RETRY;
+ }
+
+ ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) {
+ spin_unlock(ptl);
+ return 0;
+ }
+
+ entry = softleaf_from_pmd(vmf->orig_pmd);
+ page = softleaf_to_page(entry);
+ folio = page_folio(page);
+ vmf->page = page;
+ vmf->pte = NULL;
+ if (folio_trylock(folio)) {
+ folio_get(folio);
+ spin_unlock(ptl);
+ ret = page_pgmap(page)->ops->migrate_to_ram(vmf);
+ folio_unlock(folio);
+ folio_put(folio);
+ } else {
+ spin_unlock(ptl);
+ }
+
+ return ret;
}
-#ifndef CONFIG_NUMA
-static inline struct page *alloc_hugepage(int defrag)
+/*
+ * always: directly stall for all thp allocations
+ * defer: wake kswapd and fail if not immediately available
+ * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
+ * fail if not immediately available
+ * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
+ * available
+ * never: never stall for any thp allocation
+ */
+gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
- return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
- HPAGE_PMD_ORDER);
+ const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
+
+ /* Always do synchronous compaction */
+ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
+ return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
+
+ /* Kick kcompactd and fail quickly */
+ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
+ return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
+
+ /* Synchronous compaction if madvised, otherwise kick kcompactd */
+ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
+ return GFP_TRANSHUGE_LIGHT |
+ (vma_madvised ? __GFP_DIRECT_RECLAIM :
+ __GFP_KSWAPD_RECLAIM);
+
+ /* Only do synchronous compaction if madvised */
+ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
+ return GFP_TRANSHUGE_LIGHT |
+ (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
+
+ return GFP_TRANSHUGE_LIGHT;
}
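
For example, under the "defer+madvise" policy a VMA with VM_HUGEPAGE set (madvised) is allocated with GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM, i.e. synchronous compaction, while any other VMA only gets GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM, which kicks kcompactd and fails quickly if no huge page is immediately available.
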
-#endif
-static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+/* Caller must hold page table lock. */
+static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
- struct page *zero_page)
+ struct folio *zero_folio)
{
pmd_t entry;
- if (!pmd_none(*pmd))
- return false;
- entry = mk_pmd(zero_page, vma->vm_page_prot);
- entry = pmd_wrprotect(entry);
- entry = pmd_mkhuge(entry);
+ entry = folio_mk_pmd(zero_folio, vma->vm_page_prot);
+ entry = pmd_mkspecial(entry);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, haddr, pmd, entry);
- mm->nr_ptes++;
- return true;
+ mm_inc_nr_ptes(mm);
}
-int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmd,
- unsigned int flags)
+vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
- struct page *page;
- unsigned long haddr = address & HPAGE_PMD_MASK;
- pte_t *pte;
+ struct vm_area_struct *vma = vmf->vma;
+ unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+ vm_fault_t ret;
- if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
- if (unlikely(anon_vma_prepare(vma)))
- return VM_FAULT_OOM;
- if (unlikely(khugepaged_enter(vma)))
+ if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
+ return VM_FAULT_FALLBACK;
+ ret = vmf_anon_prepare(vmf);
+ if (ret)
+ return ret;
+ khugepaged_enter_vma(vma, vma->vm_flags);
+
+ if (!(vmf->flags & FAULT_FLAG_WRITE) &&
+ !mm_forbids_zeropage(vma->vm_mm) &&
+ transparent_hugepage_use_zero_page()) {
+ pgtable_t pgtable;
+ struct folio *zero_folio;
+ vm_fault_t ret;
+
+ pgtable = pte_alloc_one(vma->vm_mm);
+ if (unlikely(!pgtable))
return VM_FAULT_OOM;
- if (!(flags & FAULT_FLAG_WRITE) &&
- transparent_hugepage_use_zero_page()) {
- pgtable_t pgtable;
- struct page *zero_page;
- bool set;
- pgtable = pte_alloc_one(mm, haddr);
- if (unlikely(!pgtable))
- return VM_FAULT_OOM;
- zero_page = get_huge_zero_page();
- if (unlikely(!zero_page)) {
- pte_free(mm, pgtable);
- count_vm_event(THP_FAULT_FALLBACK);
- goto out;
- }
- spin_lock(&mm->page_table_lock);
- set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
- zero_page);
- spin_unlock(&mm->page_table_lock);
- if (!set) {
- pte_free(mm, pgtable);
- put_huge_zero_page();
- }
- return 0;
- }
- page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
- vma, haddr, numa_node_id(), 0);
- if (unlikely(!page)) {
+ zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
+ if (unlikely(!zero_folio)) {
+ pte_free(vma->vm_mm, pgtable);
count_vm_event(THP_FAULT_FALLBACK);
- goto out;
+ return VM_FAULT_FALLBACK;
}
- count_vm_event(THP_FAULT_ALLOC);
- if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
- put_page(page);
- goto out;
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ ret = 0;
+ if (pmd_none(*vmf->pmd)) {
+ ret = check_stable_address_space(vma->vm_mm);
+ if (ret) {
+ spin_unlock(vmf->ptl);
+ pte_free(vma->vm_mm, pgtable);
+ } else if (userfaultfd_missing(vma)) {
+ spin_unlock(vmf->ptl);
+ pte_free(vma->vm_mm, pgtable);
+ ret = handle_userfault(vmf, VM_UFFD_MISSING);
+ VM_BUG_ON(ret & VM_FAULT_FALLBACK);
+ } else {
+ set_huge_zero_folio(pgtable, vma->vm_mm, vma,
+ haddr, vmf->pmd, zero_folio);
+ update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
+ spin_unlock(vmf->ptl);
+ }
+ } else {
+ spin_unlock(vmf->ptl);
+ pte_free(vma->vm_mm, pgtable);
}
- if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
- page))) {
- mem_cgroup_uncharge_page(page);
- put_page(page);
- goto out;
+ return ret;
+ }
+
+ return __do_huge_pmd_anonymous_page(vmf);
+}
+
+struct folio_or_pfn {
+ union {
+ struct folio *folio;
+ unsigned long pfn;
+ };
+ bool is_folio;
+};
+
+static vm_fault_t insert_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, struct folio_or_pfn fop, pgprot_t prot,
+ bool write)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pgtable_t pgtable = NULL;
+ spinlock_t *ptl;
+ pmd_t entry;
+
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
+ if (arch_needs_pgtable_deposit()) {
+ pgtable = pte_alloc_one(vma->vm_mm);
+ if (!pgtable)
+ return VM_FAULT_OOM;
+ }
+
+ ptl = pmd_lock(mm, pmd);
+ if (!pmd_none(*pmd)) {
+ const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
+ fop.pfn;
+
+ if (write) {
+ if (pmd_pfn(*pmd) != pfn) {
+ WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
+ goto out_unlock;
+ }
+ entry = pmd_mkyoung(*pmd);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+ if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
+ update_mmu_cache_pmd(vma, addr, pmd);
}
+ goto out_unlock;
+ }
- return 0;
+ if (fop.is_folio) {
+ entry = folio_mk_pmd(fop.folio, vma->vm_page_prot);
+
+ if (is_huge_zero_folio(fop.folio)) {
+ entry = pmd_mkspecial(entry);
+ } else {
+ folio_get(fop.folio);
+ folio_add_file_rmap_pmd(fop.folio, &fop.folio->page, vma);
+ add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR);
+ }
+ } else {
+ entry = pmd_mkhuge(pfn_pmd(fop.pfn, prot));
+ entry = pmd_mkspecial(entry);
}
-out:
+ if (write) {
+ entry = pmd_mkyoung(pmd_mkdirty(entry));
+ entry = maybe_pmd_mkwrite(entry, vma);
+ }
+
+ if (pgtable) {
+ pgtable_trans_huge_deposit(mm, pmd, pgtable);
+ mm_inc_nr_ptes(mm);
+ pgtable = NULL;
+ }
+
+ set_pmd_at(mm, addr, pmd, entry);
+ update_mmu_cache_pmd(vma, addr, pmd);
+
+out_unlock:
+ spin_unlock(ptl);
+ if (pgtable)
+ pte_free(mm, pgtable);
+ return VM_FAULT_NOPAGE;
+}
+
+/**
+ * vmf_insert_pfn_pmd - insert a pmd size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @write: whether it's a write fault
+ *
+ * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
+ *
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
+ bool write)
+{
+ unsigned long addr = vmf->address & PMD_MASK;
+ struct vm_area_struct *vma = vmf->vma;
+ pgprot_t pgprot = vma->vm_page_prot;
+ struct folio_or_pfn fop = {
+ .pfn = pfn,
+ };
+
/*
- * Use __pte_alloc instead of pte_alloc_map, because we can't
- * run pte_offset_map on the pmd, if an huge pmd could
- * materialize from under us from a different thread.
+ * If we had pmd_special, we could avoid all these restrictions,
+ * but we need to be consistent with PTEs and architectures that
+ * can't support a 'special' bit.
*/
- if (unlikely(pmd_none(*pmd)) &&
- unlikely(__pte_alloc(mm, vma, pmd, address)))
- return VM_FAULT_OOM;
- /* if an huge pmd materialized from under us just retry later */
- if (unlikely(pmd_trans_huge(*pmd)))
- return 0;
+ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+ BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+ (VM_PFNMAP|VM_MIXEDMAP));
+ BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+
+ pfnmap_setup_cachemode_pfn(pfn, &pgprot);
+
+ return insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write);
+}
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
+
+vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
+ bool write)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ unsigned long addr = vmf->address & PMD_MASK;
+ struct folio_or_pfn fop = {
+ .folio = folio,
+ .is_folio = true,
+ };
+
+ if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER))
+ return VM_FAULT_SIGBUS;
+
+ return insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot, write);
+}
+EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd);
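
A sketch (an assumption, not from this patch) of the typical consumer: a device huge_fault handler mapping a PMD-sized pfn, assuming the huge_fault(vmf, order) callback form; example_lookup_pfn() is a hypothetical driver helper.

static vm_fault_t example_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	unsigned long pfn;

	if (order != PMD_ORDER)
		return VM_FAULT_FALLBACK;

	/* Hypothetical driver-specific pfn lookup. */
	pfn = example_lookup_pfn(vmf);
	return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}

static const struct vm_operations_struct example_vm_ops = {
	.huge_fault	= example_huge_fault,
};
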
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
+{
+ if (likely(vma->vm_flags & VM_WRITE))
+ pud = pud_mkwrite(pud);
+ return pud;
+}
+
+static vm_fault_t insert_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t *pud, struct folio_or_pfn fop, pgprot_t prot, bool write)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ spinlock_t *ptl;
+ pud_t entry;
+
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
+ ptl = pud_lock(mm, pud);
+ if (!pud_none(*pud)) {
+ const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
+ fop.pfn;
+
+ if (write) {
+ if (WARN_ON_ONCE(pud_pfn(*pud) != pfn))
+ goto out_unlock;
+ entry = pud_mkyoung(*pud);
+ entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
+ if (pudp_set_access_flags(vma, addr, pud, entry, 1))
+ update_mmu_cache_pud(vma, addr, pud);
+ }
+ goto out_unlock;
+ }
+
+ if (fop.is_folio) {
+ entry = folio_mk_pud(fop.folio, vma->vm_page_prot);
+
+ folio_get(fop.folio);
+ folio_add_file_rmap_pud(fop.folio, &fop.folio->page, vma);
+ add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PUD_NR);
+ } else {
+ entry = pud_mkhuge(pfn_pud(fop.pfn, prot));
+ entry = pud_mkspecial(entry);
+ }
+ if (write) {
+ entry = pud_mkyoung(pud_mkdirty(entry));
+ entry = maybe_pud_mkwrite(entry, vma);
+ }
+ set_pud_at(mm, addr, pud, entry);
+ update_mmu_cache_pud(vma, addr, pud);
+out_unlock:
+ spin_unlock(ptl);
+ return VM_FAULT_NOPAGE;
+}
+
+/**
+ * vmf_insert_pfn_pud - insert a pud size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @write: whether it's a write fault
+ *
+ * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
+ *
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
+ bool write)
+{
+ unsigned long addr = vmf->address & PUD_MASK;
+ struct vm_area_struct *vma = vmf->vma;
+ pgprot_t pgprot = vma->vm_page_prot;
+ struct folio_or_pfn fop = {
+ .pfn = pfn,
+ };
+
/*
- * A regular pmd is established and it can't morph into a huge pmd
- * from under us anymore at this point because we hold the mmap_sem
- * read mode and khugepaged takes it in write mode. So now it's
- * safe to run pte_offset_map().
+ * If we had pud_special, we could avoid all these restrictions,
+ * but we need to be consistent with PTEs and architectures that
+ * can't support a 'special' bit.
*/
- pte = pte_offset_map(pmd, address);
- return handle_pte_fault(mm, vma, address, pte, pmd, flags);
+ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+ BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+ (VM_PFNMAP|VM_MIXEDMAP));
+ BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+
+ pfnmap_setup_cachemode_pfn(pfn, &pgprot);
+
+ return insert_pud(vma, addr, vmf->pud, fop, pgprot, write);
+}
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
+
+/**
+ * vmf_insert_folio_pud - insert a pud size folio mapped by a pud entry
+ * @vmf: Structure describing the fault
+ * @folio: folio to insert
+ * @write: whether it's a write fault
+ *
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
+ bool write)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ unsigned long addr = vmf->address & PUD_MASK;
+ struct folio_or_pfn fop = {
+ .folio = folio,
+ .is_folio = true,
+ };
+
+ if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER))
+ return VM_FAULT_SIGBUS;
+
+ return insert_pud(vma, addr, vmf->pud, fop, vma->vm_page_prot, write);
+}
+EXPORT_SYMBOL_GPL(vmf_insert_folio_pud);
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
+/**
+ * touch_pmd - Mark page table pmd entry as accessed and dirty (for write)
+ * @vma: The VMA covering @addr
+ * @addr: The virtual address
+ * @pmd: pmd pointer into the page table mapping @addr
+ * @write: Whether it's a write access
+ *
+ * Return: whether the pmd entry is changed
+ */
+bool touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, bool write)
+{
+ pmd_t entry;
+
+ entry = pmd_mkyoung(*pmd);
+ if (write)
+ entry = pmd_mkdirty(entry);
+ if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
+ pmd, entry, write)) {
+ update_mmu_cache_pmd(vma, addr, pmd);
+ return true;
+ }
+
+ return false;
+}
+
+static void copy_huge_non_present_pmd(
+ struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+ pmd_t pmd, pgtable_t pgtable)
+{
+ softleaf_t entry = softleaf_from_pmd(pmd);
+ struct folio *src_folio;
+
+ VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(pmd));
+
+ if (softleaf_is_migration_write(entry) ||
+ softleaf_is_migration_read_exclusive(entry)) {
+ entry = make_readable_migration_entry(swp_offset(entry));
+ pmd = swp_entry_to_pmd(entry);
+ if (pmd_swp_soft_dirty(*src_pmd))
+ pmd = pmd_swp_mksoft_dirty(pmd);
+ if (pmd_swp_uffd_wp(*src_pmd))
+ pmd = pmd_swp_mkuffd_wp(pmd);
+ set_pmd_at(src_mm, addr, src_pmd, pmd);
+ } else if (softleaf_is_device_private(entry)) {
+ /*
+ * For device private entries, since there are no
+ * read exclusive entries, writable = !readable
+ */
+ if (softleaf_is_device_private_write(entry)) {
+ entry = make_readable_device_private_entry(swp_offset(entry));
+ pmd = swp_entry_to_pmd(entry);
+
+ if (pmd_swp_soft_dirty(*src_pmd))
+ pmd = pmd_swp_mksoft_dirty(pmd);
+ if (pmd_swp_uffd_wp(*src_pmd))
+ pmd = pmd_swp_mkuffd_wp(pmd);
+ set_pmd_at(src_mm, addr, src_pmd, pmd);
+ }
+
+ src_folio = softleaf_to_folio(entry);
+ VM_WARN_ON(!folio_test_large(src_folio));
+
+ folio_get(src_folio);
+ /*
+ * folio_try_dup_anon_rmap_pmd does not fail for
+ * device private entries.
+ */
+ folio_try_dup_anon_rmap_pmd(src_folio, &src_folio->page,
+ dst_vma, src_vma);
+ }
+
+ add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ mm_inc_nr_ptes(dst_mm);
+ pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
+ if (!userfaultfd_wp(dst_vma))
+ pmd = pmd_swp_clear_uffd_wp(pmd);
+ set_pmd_at(dst_mm, addr, dst_pmd, pmd);
}
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
- struct vm_area_struct *vma)
+ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
+ spinlock_t *dst_ptl, *src_ptl;
struct page *src_page;
+ struct folio *src_folio;
pmd_t pmd;
- pgtable_t pgtable;
- int ret;
+ pgtable_t pgtable = NULL;
+ int ret = -ENOMEM;
+
+ pmd = pmdp_get_lockless(src_pmd);
+ if (unlikely(pmd_present(pmd) && pmd_special(pmd) &&
+ !is_huge_zero_pmd(pmd))) {
+ dst_ptl = pmd_lock(dst_mm, dst_pmd);
+ src_ptl = pmd_lockptr(src_mm, src_pmd);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+ /*
+ * No need to recheck the pmd, it can't change with write
+ * mmap lock held here.
+ *
+	 * Meanwhile, make sure it's not a CoW VMA with a writable
+	 * mapping; otherwise either the anon page wrongly had the
+	 * special bit applied, or we made the PRIVATE mapping able
+	 * to wrongly write to the backing MMIO.
+ */
+ VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
+ goto set_pmd;
+ }
- ret = -ENOMEM;
- pgtable = pte_alloc_one(dst_mm, addr);
+	/* Skip if it can be re-filled on fault */
+ if (!vma_is_anonymous(dst_vma))
+ return 0;
+
+ pgtable = pte_alloc_one(dst_mm);
if (unlikely(!pgtable))
goto out;
- spin_lock(&dst_mm->page_table_lock);
- spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
+ dst_ptl = pmd_lock(dst_mm, dst_pmd);
+ src_ptl = pmd_lockptr(src_mm, src_pmd);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
ret = -EAGAIN;
pmd = *src_pmd;
+
+ if (unlikely(thp_migration_supported() &&
+ pmd_is_valid_softleaf(pmd))) {
+ copy_huge_non_present_pmd(dst_mm, src_mm, dst_pmd, src_pmd, addr,
+ dst_vma, src_vma, pmd, pgtable);
+ ret = 0;
+ goto out_unlock;
+ }
+
if (unlikely(!pmd_trans_huge(pmd))) {
pte_free(dst_mm, pgtable);
goto out_unlock;
}
/*
- * mm->page_table_lock is enough to be sure that huge zero pmd is not
+	 * When the page table lock is held, the huge zero pmd should not be
* under splitting since we don't split the page itself, only pmd to
* a page table.
*/
if (is_huge_zero_pmd(pmd)) {
- struct page *zero_page;
- bool set;
/*
- * get_huge_zero_page() will never allocate a new page here,
- * since we already have a zero page to copy. It just takes a
- * reference.
+ * mm_get_huge_zero_folio() will never allocate a new
+ * folio here, since we already have a zero page to
+ * copy. It just takes a reference.
*/
- zero_page = get_huge_zero_page();
- set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
- zero_page);
- BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
- ret = 0;
- goto out_unlock;
+ mm_get_huge_zero_folio(dst_mm);
+ goto out_zero_page;
}
- if (unlikely(pmd_trans_splitting(pmd))) {
- /* split huge page running from under us */
- spin_unlock(&src_mm->page_table_lock);
- spin_unlock(&dst_mm->page_table_lock);
- pte_free(dst_mm, pgtable);
- wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
- goto out;
- }
src_page = pmd_page(pmd);
- VM_BUG_ON(!PageHead(src_page));
- get_page(src_page);
- page_dup_rmap(src_page);
- add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
+ src_folio = page_folio(src_page);
- pmdp_set_wrprotect(src_mm, addr, src_pmd);
- pmd = pmd_mkold(pmd_wrprotect(pmd));
+ folio_get(src_folio);
+ if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) {
+		/* Page may be pinned: split and retry the fault on PTEs. */
+ folio_put(src_folio);
+ pte_free(dst_mm, pgtable);
+ spin_unlock(src_ptl);
+ spin_unlock(dst_ptl);
+ __split_huge_pmd(src_vma, src_pmd, addr, false);
+ return -EAGAIN;
+ }
+ add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+out_zero_page:
+ mm_inc_nr_ptes(dst_mm);
pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
+ pmdp_set_wrprotect(src_mm, addr, src_pmd);
+ if (!userfaultfd_wp(dst_vma))
+ pmd = pmd_clear_uffd_wp(pmd);
+ pmd = pmd_wrprotect(pmd);
+set_pmd:
+ pmd = pmd_mkold(pmd);
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
- dst_mm->nr_ptes++;
ret = 0;
out_unlock:
- spin_unlock(&src_mm->page_table_lock);
- spin_unlock(&dst_mm->page_table_lock);
+ spin_unlock(src_ptl);
+ spin_unlock(dst_ptl);
out:
return ret;
}
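
A userspace-visible sketch (assumption, not from this patch) of when this path runs: fork() duplicates a THP mapping through copy_huge_pmd(), leaving the pmd write-protected in both processes so that the first write takes the CoW path in do_huge_pmd_wp_page().

#include <unistd.h>
#include <sys/mman.h>

/* Hint for THP, then fork; the huge pmd is copied write-protected. */
static pid_t fork_with_thp(void *buf, size_t len)
{
	madvise(buf, len, MADV_HUGEPAGE);
	return fork();
}
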
-void huge_pmd_set_accessed(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmd, pmd_t orig_pmd,
- int dirty)
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+void touch_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t *pud, bool write)
{
- pmd_t entry;
- unsigned long haddr;
-
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(*pmd, orig_pmd)))
- goto unlock;
-
- entry = pmd_mkyoung(orig_pmd);
- haddr = address & HPAGE_PMD_MASK;
- if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
- update_mmu_cache_pmd(vma, address, pmd);
-
-unlock:
- spin_unlock(&mm->page_table_lock);
+ pud_t _pud;
+
+ _pud = pud_mkyoung(*pud);
+ if (write)
+ _pud = pud_mkdirty(_pud);
+ if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
+ pud, _pud, write))
+ update_mmu_cache_pud(vma, addr, pud);
}
-static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
+int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+ struct vm_area_struct *vma)
{
- pgtable_t pgtable;
- pmd_t _pmd;
- struct page *page;
- int i, ret = 0;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
+ spinlock_t *dst_ptl, *src_ptl;
+ pud_t pud;
+ int ret;
- page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
- if (!page) {
- ret |= VM_FAULT_OOM;
- goto out;
- }
+ dst_ptl = pud_lock(dst_mm, dst_pud);
+ src_ptl = pud_lockptr(src_mm, src_pud);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
- if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
- put_page(page);
- ret |= VM_FAULT_OOM;
- goto out;
+ ret = -EAGAIN;
+ pud = *src_pud;
+ if (unlikely(!pud_trans_huge(pud)))
+ goto out_unlock;
+
+ /*
+ * TODO: once we support anonymous pages, use
+ * folio_try_dup_anon_rmap_*() and split if duplicating fails.
+ */
+ if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
+ pudp_set_wrprotect(src_mm, addr, src_pud);
+ pud = pud_wrprotect(pud);
}
+ pud = pud_mkold(pud);
+ set_pud_at(dst_mm, addr, dst_pud, pud);
- clear_user_highpage(page, address);
- __SetPageUptodate(page);
+ ret = 0;
+out_unlock:
+ spin_unlock(src_ptl);
+ spin_unlock(dst_ptl);
+ return ret;
+}
- mmun_start = haddr;
- mmun_end = haddr + HPAGE_PMD_SIZE;
- mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
+{
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(*pmd, orig_pmd)))
- goto out_free_page;
+ vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
+ if (unlikely(!pud_same(*vmf->pud, orig_pud)))
+ goto unlock;
- pmdp_clear_flush(vma, haddr, pmd);
- /* leave pmd empty until pte is filled */
+ touch_pud(vmf->vma, vmf->address, vmf->pud, write);
+unlock:
+ spin_unlock(vmf->ptl);
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
- pgtable = pgtable_trans_huge_withdraw(mm, pmd);
- pmd_populate(mm, &_pmd, pgtable);
+bool huge_pmd_set_accessed(struct vm_fault *vmf)
+{
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
- for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
- pte_t *pte, entry;
- if (haddr == (address & PAGE_MASK)) {
- entry = mk_pte(page, vma->vm_page_prot);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- page_add_new_anon_rmap(page, vma, haddr);
- } else {
- entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
- entry = pte_mkspecial(entry);
- }
- pte = pte_offset_map(&_pmd, haddr);
- VM_BUG_ON(!pte_none(*pte));
- set_pte_at(mm, haddr, pte, entry);
- pte_unmap(pte);
- }
- smp_wmb(); /* make pte visible before pmd */
- pmd_populate(mm, pmd, pgtable);
- spin_unlock(&mm->page_table_lock);
- put_huge_zero_page();
- inc_mm_counter(mm, MM_ANONPAGES);
+ if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
+ return false;
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+ return touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
+}
- ret |= VM_FAULT_WRITE;
-out:
+static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
+{
+ unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+ struct vm_area_struct *vma = vmf->vma;
+ struct mmu_notifier_range range;
+ struct folio *folio;
+ vm_fault_t ret = 0;
+
+ folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
+ if (unlikely(!folio))
+ return VM_FAULT_FALLBACK;
+
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr,
+ haddr + HPAGE_PMD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd)))
+ goto release;
+ ret = check_stable_address_space(vma->vm_mm);
+ if (ret)
+ goto release;
+ (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
+ map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
+ goto unlock;
+release:
+ folio_put(folio);
+unlock:
+ spin_unlock(vmf->ptl);
+ mmu_notifier_invalidate_range_end(&range);
return ret;
-out_free_page:
- spin_unlock(&mm->page_table_lock);
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
- mem_cgroup_uncharge_page(page);
- put_page(page);
- goto out;
}
-static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmd, pmd_t orig_pmd,
- struct page *page,
- unsigned long haddr)
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
{
- pgtable_t pgtable;
- pmd_t _pmd;
- int ret = 0, i;
- struct page **pages;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
-
- pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
- GFP_KERNEL);
- if (unlikely(!pages)) {
- ret |= VM_FAULT_OOM;
- goto out;
+ const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
+ struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
+ struct page *page;
+ unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+ pmd_t orig_pmd = vmf->orig_pmd;
+
+ vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
+ VM_BUG_ON_VMA(!vma->anon_vma, vma);
+
+ if (is_huge_zero_pmd(orig_pmd)) {
+ vm_fault_t ret = do_huge_zero_wp_pmd(vmf);
+
+ if (!(ret & VM_FAULT_FALLBACK))
+ return ret;
+
+		/* Fall back to splitting the PMD if a THP cannot be allocated */
+ goto fallback;
}
- for (i = 0; i < HPAGE_PMD_NR; i++) {
- pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
- __GFP_OTHER_NODE,
- vma, address, page_to_nid(page));
- if (unlikely(!pages[i] ||
- mem_cgroup_newpage_charge(pages[i], mm,
- GFP_KERNEL))) {
- if (pages[i])
- put_page(pages[i]);
- mem_cgroup_uncharge_start();
- while (--i >= 0) {
- mem_cgroup_uncharge_page(pages[i]);
- put_page(pages[i]);
- }
- mem_cgroup_uncharge_end();
- kfree(pages);
- ret |= VM_FAULT_OOM;
- goto out;
+ spin_lock(vmf->ptl);
+
+ if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
+ spin_unlock(vmf->ptl);
+ return 0;
+ }
+
+ page = pmd_page(orig_pmd);
+ folio = page_folio(page);
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+
+ /* Early check when only holding the PT lock. */
+ if (PageAnonExclusive(page))
+ goto reuse;
+
+ if (!folio_trylock(folio)) {
+ folio_get(folio);
+ spin_unlock(vmf->ptl);
+ folio_lock(folio);
+ spin_lock(vmf->ptl);
+ if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
+ spin_unlock(vmf->ptl);
+ folio_unlock(folio);
+ folio_put(folio);
+ return 0;
}
+ folio_put(folio);
}
- for (i = 0; i < HPAGE_PMD_NR; i++) {
- copy_user_highpage(pages[i], page + i,
- haddr + PAGE_SIZE * i, vma);
- __SetPageUptodate(pages[i]);
- cond_resched();
+ /* Recheck after temporarily dropping the PT lock. */
+ if (PageAnonExclusive(page)) {
+ folio_unlock(folio);
+ goto reuse;
}
- mmun_start = haddr;
- mmun_end = haddr + HPAGE_PMD_SIZE;
- mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+ /*
+ * See do_wp_page(): we can only reuse the folio exclusively if
+ * there are no additional references. Note that we always drain
+ * the LRU cache immediately after adding a THP.
+ */
+ if (folio_ref_count(folio) >
+ 1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
+ goto unlock_fallback;
+ if (folio_test_swapcache(folio))
+ folio_free_swap(folio);
+ if (folio_ref_count(folio) == 1) {
+ pmd_t entry;
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(*pmd, orig_pmd)))
- goto out_free_pages;
- VM_BUG_ON(!PageHead(page));
+ folio_move_anon_rmap(folio, vma);
+ SetPageAnonExclusive(page);
+ folio_unlock(folio);
+reuse:
+ if (unlikely(unshare)) {
+ spin_unlock(vmf->ptl);
+ return 0;
+ }
+ entry = pmd_mkyoung(orig_pmd);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+ if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
+ update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
+ spin_unlock(vmf->ptl);
+ return 0;
+ }
+
+unlock_fallback:
+ folio_unlock(folio);
+ spin_unlock(vmf->ptl);
+fallback:
+ __split_huge_pmd(vma, vmf->pmd, vmf->address, false);
+ return VM_FAULT_FALLBACK;
+}
- pmdp_clear_flush(vma, haddr, pmd);
- /* leave pmd empty until pte is filled */
+static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t pmd)
+{
+ struct page *page;
- pgtable = pgtable_trans_huge_withdraw(mm, pmd);
- pmd_populate(mm, &_pmd, pgtable);
+ if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
+ return false;
- for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
- pte_t *pte, entry;
- entry = mk_pte(pages[i], vma->vm_page_prot);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- page_add_new_anon_rmap(pages[i], vma, haddr);
- pte = pte_offset_map(&_pmd, haddr);
- VM_BUG_ON(!pte_none(*pte));
- set_pte_at(mm, haddr, pte, entry);
- pte_unmap(pte);
+ /* Don't touch entries that are not even readable (NUMA hinting). */
+ if (pmd_protnone(pmd))
+ return false;
+
+ /* Do we need write faults for softdirty tracking? */
+ if (pmd_needs_soft_dirty_wp(vma, pmd))
+ return false;
+
+ /* Do we need write faults for uffd-wp tracking? */
+ if (userfaultfd_huge_pmd_wp(vma, pmd))
+ return false;
+
+ if (!(vma->vm_flags & VM_SHARED)) {
+ /* See can_change_pte_writable(). */
+ page = vm_normal_page_pmd(vma, addr, pmd);
+ return page && PageAnon(page) && PageAnonExclusive(page);
}
- kfree(pages);
- smp_wmb(); /* make pte visible before pmd */
- pmd_populate(mm, pmd, pgtable);
- page_remove_rmap(page);
- spin_unlock(&mm->page_table_lock);
+ /* See can_change_pte_writable(). */
+ return pmd_dirty(pmd);
+}
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+/* NUMA hinting page fault entry point for trans huge pmds */
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
+ unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+ int nid = NUMA_NO_NODE;
+ int target_nid, last_cpupid;
+ pmd_t pmd, old_pmd;
+ bool writable = false;
+ int flags = 0;
+
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ old_pmd = pmdp_get(vmf->pmd);
+
+ if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
+ spin_unlock(vmf->ptl);
+ return 0;
+ }
- ret |= VM_FAULT_WRITE;
- put_page(page);
+ pmd = pmd_modify(old_pmd, vma->vm_page_prot);
-out:
- return ret;
+ /*
+ * Detect now whether the PMD could be writable; this information
+ * is only valid while holding the PT lock.
+ */
+ writable = pmd_write(pmd);
+ if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
+ can_change_pmd_writable(vma, vmf->address, pmd))
+ writable = true;
+
+ folio = vm_normal_folio_pmd(vma, haddr, pmd);
+ if (!folio)
+ goto out_map;
+
+ nid = folio_nid(folio);
+
+ target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
+ &last_cpupid);
+ if (target_nid == NUMA_NO_NODE)
+ goto out_map;
+ if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
+ flags |= TNF_MIGRATE_FAIL;
+ goto out_map;
+ }
+ /* The folio is isolated and isolation code holds a folio reference. */
+ spin_unlock(vmf->ptl);
+ writable = false;
+
+ if (!migrate_misplaced_folio(folio, target_nid)) {
+ flags |= TNF_MIGRATED;
+ nid = target_nid;
+ task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
+ return 0;
+ }
-out_free_pages:
- spin_unlock(&mm->page_table_lock);
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
- mem_cgroup_uncharge_start();
- for (i = 0; i < HPAGE_PMD_NR; i++) {
- mem_cgroup_uncharge_page(pages[i]);
- put_page(pages[i]);
+ flags |= TNF_MIGRATE_FAIL;
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
+ spin_unlock(vmf->ptl);
+ return 0;
}
- mem_cgroup_uncharge_end();
- kfree(pages);
- goto out;
+out_map:
+ /* Restore the PMD */
+ pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
+ pmd = pmd_mkyoung(pmd);
+ if (writable)
+ pmd = pmd_mkwrite(pmd, vma);
+ set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
+ update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
+ spin_unlock(vmf->ptl);
+
+ if (nid != NUMA_NO_NODE)
+ task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
+ return 0;
}
-int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
+/*
+ * Return true if MADV_FREE was applied successfully to the entire pmd page.
+ * Otherwise, return false.
+ */
+bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr, unsigned long next)
{
- int ret = 0;
- struct page *page = NULL, *new_page;
- unsigned long haddr;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
+ spinlock_t *ptl;
+ pmd_t orig_pmd;
+ struct folio *folio;
+ struct mm_struct *mm = tlb->mm;
+ bool ret = false;
+
+ tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
- VM_BUG_ON(!vma->anon_vma);
- haddr = address & HPAGE_PMD_MASK;
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (!ptl)
+ goto out_unlocked;
+
+ orig_pmd = *pmd;
if (is_huge_zero_pmd(orig_pmd))
- goto alloc;
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(*pmd, orig_pmd)))
- goto out_unlock;
+ goto out;
- page = pmd_page(orig_pmd);
- VM_BUG_ON(!PageCompound(page) || !PageHead(page));
- if (page_mapcount(page) == 1) {
- pmd_t entry;
- entry = pmd_mkyoung(orig_pmd);
- entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
- if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
- update_mmu_cache_pmd(vma, address, pmd);
- ret |= VM_FAULT_WRITE;
- goto out_unlock;
+ if (unlikely(!pmd_present(orig_pmd))) {
+ VM_BUG_ON(thp_migration_supported() &&
+ !pmd_is_migration_entry(orig_pmd));
+ goto out;
}
- get_page(page);
- spin_unlock(&mm->page_table_lock);
-alloc:
- if (transparent_hugepage_enabled(vma) &&
- !transparent_hugepage_debug_cow())
- new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
- vma, haddr, numa_node_id(), 0);
- else
- new_page = NULL;
- if (unlikely(!new_page)) {
- count_vm_event(THP_FAULT_FALLBACK);
- if (is_huge_zero_pmd(orig_pmd)) {
- ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
- address, pmd, orig_pmd, haddr);
- } else {
- ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
- pmd, orig_pmd, page, haddr);
- if (ret & VM_FAULT_OOM)
- split_huge_page(page);
- put_page(page);
- }
+ folio = pmd_folio(orig_pmd);
+ /*
+	 * If other processes are mapping this folio, we cannot discard
+	 * the folio unless they all do MADV_FREE, so skip it.
+ */
+ if (folio_maybe_mapped_shared(folio))
goto out;
- }
- count_vm_event(THP_FAULT_ALLOC);
- if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
- put_page(new_page);
- if (page) {
- split_huge_page(page);
- put_page(page);
- }
- ret |= VM_FAULT_OOM;
+ if (!folio_trylock(folio))
goto out;
+
+ /*
+	 * If the user wants to discard only part of the THP's pages, split it
+	 * so MADV_FREE will deactivate just those pages.
+ */
+ if (next - addr != HPAGE_PMD_SIZE) {
+ folio_get(folio);
+ spin_unlock(ptl);
+ split_folio(folio);
+ folio_unlock(folio);
+ folio_put(folio);
+ goto out_unlocked;
}
- if (is_huge_zero_pmd(orig_pmd))
- clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
- else
- copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
- __SetPageUptodate(new_page);
+ if (folio_test_dirty(folio))
+ folio_clear_dirty(folio);
+ folio_unlock(folio);
- mmun_start = haddr;
- mmun_end = haddr + HPAGE_PMD_SIZE;
- mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+ if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
+ pmdp_invalidate(vma, addr, pmd);
+ orig_pmd = pmd_mkold(orig_pmd);
+ orig_pmd = pmd_mkclean(orig_pmd);
- spin_lock(&mm->page_table_lock);
- if (page)
- put_page(page);
- if (unlikely(!pmd_same(*pmd, orig_pmd))) {
- spin_unlock(&mm->page_table_lock);
- mem_cgroup_uncharge_page(new_page);
- put_page(new_page);
- goto out_mn;
- } else {
- pmd_t entry;
- entry = mk_huge_pmd(new_page, vma);
- pmdp_clear_flush(vma, haddr, pmd);
- page_add_new_anon_rmap(new_page, vma, haddr);
- set_pmd_at(mm, haddr, pmd, entry);
- update_mmu_cache_pmd(vma, address, pmd);
- if (is_huge_zero_pmd(orig_pmd)) {
- add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
- put_huge_zero_page();
- } else {
- VM_BUG_ON(!PageHead(page));
- page_remove_rmap(page);
- put_page(page);
- }
- ret |= VM_FAULT_WRITE;
+ set_pmd_at(mm, addr, pmd, orig_pmd);
+ tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
}
- spin_unlock(&mm->page_table_lock);
-out_mn:
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+
+ folio_mark_lazyfree(folio);
+ ret = true;
out:
- return ret;
-out_unlock:
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
+out_unlocked:
return ret;
}
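
A userspace sketch (assumption, not from this patch): this fast path is only taken when the MADV_FREE range covers a whole, PMD-aligned huge page; partial ranges go through the split path above.

#include <sys/mman.h>

/* Lazily free a THP-backed buffer; addr and len assumed PMD-aligned. */
static int lazyfree(void *addr, size_t len)
{
	return madvise(addr, len, MADV_FREE);
}
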
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
- unsigned long addr,
- pmd_t *pmd,
- unsigned int flags)
+static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
- struct mm_struct *mm = vma->vm_mm;
- struct page *page = NULL;
-
- assert_spin_locked(&mm->page_table_lock);
-
- if (flags & FOLL_WRITE && !pmd_write(*pmd))
- goto out;
-
- /* Avoid dumping huge zero page */
- if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
- return ERR_PTR(-EFAULT);
-
- page = pmd_page(*pmd);
- VM_BUG_ON(!PageHead(page));
- if (flags & FOLL_TOUCH) {
- pmd_t _pmd;
- /*
- * We should set the dirty bit only for FOLL_WRITE but
- * for now the dirty bit in the pmd is meaningless.
- * And if the dirty bit will become meaningful and
- * we'll only set it with FOLL_WRITE, an atomic
- * set_bit will be required on the pmd to set the
- * young bit, instead of the current set_pmd_at.
- */
- _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
- if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
- pmd, _pmd, 1))
- update_mmu_cache_pmd(vma, addr, pmd);
- }
- if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
- if (page->mapping && trylock_page(page)) {
- lru_add_drain();
- if (page->mapping)
- mlock_vma_page(page);
- unlock_page(page);
- }
- }
- page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
- VM_BUG_ON(!PageCompound(page));
- if (flags & FOLL_GET)
- get_page_foll(page);
+ pgtable_t pgtable;
-out:
- return page;
+ pgtable = pgtable_trans_huge_withdraw(mm, pmd);
+ pte_free(mm, pgtable);
+ mm_dec_nr_ptes(mm);
}
-/* NUMA hinting page fault entry point for trans huge pmds */
-int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pmd_t pmd, pmd_t *pmdp)
+int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr)
{
- struct page *page;
- unsigned long haddr = addr & HPAGE_PMD_MASK;
- int target_nid;
- int current_nid = -1;
- bool migrated;
-
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(pmd, *pmdp)))
- goto out_unlock;
+ pmd_t orig_pmd;
+ spinlock_t *ptl;
- page = pmd_page(pmd);
- get_page(page);
- current_nid = page_to_nid(page);
- count_vm_numa_event(NUMA_HINT_FAULTS);
- if (current_nid == numa_node_id())
- count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+ tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
- target_nid = mpol_misplaced(page, vma, haddr);
- if (target_nid == -1) {
- put_page(page);
- goto clear_pmdnuma;
- }
+ ptl = __pmd_trans_huge_lock(pmd, vma);
+ if (!ptl)
+ return 0;
+ /*
+	 * For architectures like ppc64 we look at the deposited pgtable
+	 * when calling pmdp_huge_get_and_clear. So do the
+	 * pgtable_trans_huge_withdraw only after finishing the
+	 * pmdp-related operations.
+ */
+ orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
+ tlb->fullmm);
+ arch_check_zapped_pmd(vma, orig_pmd);
+ tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+ if (!vma_is_dax(vma) && vma_is_special_huge(vma)) {
+ if (arch_needs_pgtable_deposit())
+ zap_deposited_table(tlb->mm, pmd);
+ spin_unlock(ptl);
+ } else if (is_huge_zero_pmd(orig_pmd)) {
+ if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
+ zap_deposited_table(tlb->mm, pmd);
+ spin_unlock(ptl);
+ } else {
+ struct folio *folio = NULL;
+ int flush_needed = 1;
- /* Acquire the page lock to serialise THP migrations */
- spin_unlock(&mm->page_table_lock);
- lock_page(page);
+ if (pmd_present(orig_pmd)) {
+ struct page *page = pmd_page(orig_pmd);
- /* Confirm the PTE did not while locked */
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(pmd, *pmdp))) {
- unlock_page(page);
- put_page(page);
- goto out_unlock;
- }
- spin_unlock(&mm->page_table_lock);
+ folio = page_folio(page);
+ folio_remove_rmap_pmd(folio, page, vma);
+ WARN_ON_ONCE(folio_mapcount(folio) < 0);
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ } else if (pmd_is_valid_softleaf(orig_pmd)) {
+ const softleaf_t entry = softleaf_from_pmd(orig_pmd);
- /* Migrate the THP to the requested node */
- migrated = migrate_misplaced_transhuge_page(mm, vma,
- pmdp, pmd, addr, page, target_nid);
- if (!migrated)
- goto check_same;
+ folio = softleaf_to_folio(entry);
+ flush_needed = 0;
- task_numa_fault(target_nid, HPAGE_PMD_NR, true);
- return 0;
+ if (!thp_migration_supported())
+ WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
+ }
-check_same:
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(pmd, *pmdp)))
- goto out_unlock;
-clear_pmdnuma:
- pmd = pmd_mknonnuma(pmd);
- set_pmd_at(mm, haddr, pmdp, pmd);
- VM_BUG_ON(pmd_numa(*pmdp));
- update_mmu_cache_pmd(vma, addr, pmdp);
-out_unlock:
- spin_unlock(&mm->page_table_lock);
- if (current_nid != -1)
- task_numa_fault(current_nid, HPAGE_PMD_NR, false);
- return 0;
-}
+ if (folio_test_anon(folio)) {
+ zap_deposited_table(tlb->mm, pmd);
+ add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+ } else {
+ if (arch_needs_pgtable_deposit())
+ zap_deposited_table(tlb->mm, pmd);
+ add_mm_counter(tlb->mm, mm_counter_file(folio),
+ -HPAGE_PMD_NR);
-int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long addr)
-{
- int ret = 0;
+ /*
+ * Use flush_needed to indicate whether the PMD entry
+ * is present, instead of checking pmd_present() again.
+ */
+ if (flush_needed && pmd_young(orig_pmd) &&
+ likely(vma_has_recency(vma)))
+ folio_mark_accessed(folio);
+ }
- if (__pmd_trans_huge_lock(pmd, vma) == 1) {
- struct page *page;
- pgtable_t pgtable;
- pmd_t orig_pmd;
- /*
- * For architectures like ppc64 we look at deposited pgtable
- * when calling pmdp_get_and_clear. So do the
- * pgtable_trans_huge_withdraw after finishing pmdp related
- * operations.
- */
- orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
- tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
- pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
- if (is_huge_zero_pmd(orig_pmd)) {
- tlb->mm->nr_ptes--;
- spin_unlock(&tlb->mm->page_table_lock);
- put_huge_zero_page();
- } else {
- page = pmd_page(orig_pmd);
- page_remove_rmap(page);
- VM_BUG_ON(page_mapcount(page) < 0);
- add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
- VM_BUG_ON(!PageHead(page));
- tlb->mm->nr_ptes--;
- spin_unlock(&tlb->mm->page_table_lock);
- tlb_remove_page(tlb, page);
+ if (folio_is_device_private(folio)) {
+ folio_remove_rmap_pmd(folio, &folio->page, vma);
+ WARN_ON_ONCE(folio_mapcount(folio) < 0);
+ folio_put(folio);
}
- pte_free(tlb->mm, pgtable);
- ret = 1;
+
+ spin_unlock(ptl);
+ if (flush_needed)
+ tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
}
- return ret;
+ return 1;
}
-int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- unsigned char *vec)
+#ifndef pmd_move_must_withdraw
+static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
+ spinlock_t *old_pmd_ptl,
+ struct vm_area_struct *vma)
{
- int ret = 0;
+ /*
+	 * With the split pmd lock we also need to move the preallocated
+	 * PTE page table if new_pmd is on a different PMD page table.
+ *
+ * We also don't deposit and withdraw tables for file pages.
+ */
+ return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
+}
+#endif
- if (__pmd_trans_huge_lock(pmd, vma) == 1) {
- /*
- * All logical pages in the range are present
- * if backed by a huge page.
- */
- spin_unlock(&vma->vm_mm->page_table_lock);
- memset(vec, 1, (end - addr) >> PAGE_SHIFT);
- ret = 1;
+static pmd_t move_soft_dirty_pmd(pmd_t pmd)
+{
+ if (pgtable_supports_soft_dirty()) {
+ if (unlikely(pmd_is_migration_entry(pmd)))
+ pmd = pmd_swp_mksoft_dirty(pmd);
+ else if (pmd_present(pmd))
+ pmd = pmd_mksoft_dirty(pmd);
}
- return ret;
+ return pmd;
}
-int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
- unsigned long old_addr,
- unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd)
+static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
{
- int ret = 0;
- pmd_t pmd;
+ if (pmd_none(pmd))
+ return pmd;
+ if (pmd_present(pmd))
+ pmd = pmd_clear_uffd_wp(pmd);
+ else
+ pmd = pmd_swp_clear_uffd_wp(pmd);
- struct mm_struct *mm = vma->vm_mm;
+ return pmd;
+}
- if ((old_addr & ~HPAGE_PMD_MASK) ||
- (new_addr & ~HPAGE_PMD_MASK) ||
- old_end - old_addr < HPAGE_PMD_SIZE ||
- (new_vma->vm_flags & VM_NOHUGEPAGE))
- goto out;
+bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
+{
+ spinlock_t *old_ptl, *new_ptl;
+ pmd_t pmd;
+ struct mm_struct *mm = vma->vm_mm;
+ bool force_flush = false;
/*
* The destination pmd shouldn't be established, free_pgtables()
- * should have release it.
+ * should have released it; but move_page_tables() might have already
+ * inserted a page table, if racing against shmem/file collapse.
*/
- if (WARN_ON(!pmd_none(*new_pmd))) {
+ if (!pmd_none(*new_pmd)) {
VM_BUG_ON(pmd_trans_huge(*new_pmd));
- goto out;
+ return false;
}
- ret = __pmd_trans_huge_lock(old_pmd, vma);
- if (ret == 1) {
- pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
+ /*
+ * We don't have to worry about the ordering of src and dst
+ * ptlocks because exclusive mmap_lock prevents deadlock.
+ */
+ old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
+ if (old_ptl) {
+ new_ptl = pmd_lockptr(mm, new_pmd);
+ if (new_ptl != old_ptl)
+ spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+ pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
+ if (pmd_present(pmd))
+ force_flush = true;
VM_BUG_ON(!pmd_none(*new_pmd));
- set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
- spin_unlock(&mm->page_table_lock);
+
+ if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
+ pgtable_t pgtable;
+ pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
+ pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
+ }
+ pmd = move_soft_dirty_pmd(pmd);
+ if (vma_has_uffd_without_event_remap(vma))
+ pmd = clear_uffd_wp_pmd(pmd);
+ set_pmd_at(mm, new_addr, new_pmd, pmd);
+ if (force_flush)
+ flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
+ spin_unlock(old_ptl);
+ return true;
}
-out:
- return ret;
+ return false;
}
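
A userspace sketch (assumption, not from this patch): move_huge_pmd() is exercised by mremap() when both the old and new addresses are PMD-aligned, so the huge pmd can be moved as a whole instead of being split.

#define _GNU_SOURCE
#include <sys/mman.h>

/* Relocate a THP-backed mapping; old and new_hint assumed PMD-aligned. */
static void *move_mapping(void *old, size_t len, void *new_hint)
{
	return mremap(old, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, new_hint);
}
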
-int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, pgprot_t newprot, int prot_numa)
+static void change_non_present_huge_pmd(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmd, bool uffd_wp,
+ bool uffd_wp_resolve)
{
- struct mm_struct *mm = vma->vm_mm;
- int ret = 0;
+ softleaf_t entry = softleaf_from_pmd(*pmd);
+ const struct folio *folio = softleaf_to_folio(entry);
+ pmd_t newpmd;
- if (__pmd_trans_huge_lock(pmd, vma) == 1) {
- pmd_t entry;
- entry = pmdp_get_and_clear(mm, addr, pmd);
- if (!prot_numa) {
- entry = pmd_modify(entry, newprot);
- BUG_ON(pmd_write(entry));
- } else {
- struct page *page = pmd_page(*pmd);
-
- /* only check non-shared pages */
- if (page_mapcount(page) == 1 &&
- !pmd_numa(*pmd)) {
- entry = pmd_mknuma(entry);
- }
- }
- set_pmd_at(mm, addr, pmd, entry);
- spin_unlock(&vma->vm_mm->page_table_lock);
- ret = 1;
+ VM_WARN_ON(!pmd_is_valid_softleaf(*pmd));
+ if (softleaf_is_migration_write(entry)) {
+ /*
+		 * A protection check is difficult, so just
+		 * be safe and disable write access.
+ */
+ if (folio_test_anon(folio))
+ entry = make_readable_exclusive_migration_entry(swp_offset(entry));
+ else
+ entry = make_readable_migration_entry(swp_offset(entry));
+ newpmd = swp_entry_to_pmd(entry);
+ if (pmd_swp_soft_dirty(*pmd))
+ newpmd = pmd_swp_mksoft_dirty(newpmd);
+ } else if (softleaf_is_device_private_write(entry)) {
+ entry = make_readable_device_private_entry(swp_offset(entry));
+ newpmd = swp_entry_to_pmd(entry);
+ } else {
+ newpmd = *pmd;
}
- return ret;
+ if (uffd_wp)
+ newpmd = pmd_swp_mkuffd_wp(newpmd);
+ else if (uffd_wp_resolve)
+ newpmd = pmd_swp_clear_uffd_wp(newpmd);
+ if (!pmd_same(*pmd, newpmd))
+ set_pmd_at(mm, addr, pmd, newpmd);
}
/*
- * Returns 1 if a given pmd maps a stable (not under splitting) thp.
- * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
- *
- * Note that if it returns 1, this routine returns without unlocking page
- * table locks. So callers must unlock them.
+ * Returns
+ * - 0 if PMD could not be locked
+ * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
+ * or if prot_numa but THP migration is not supported
+ * - HPAGE_PMD_NR if protections changed and TLB flush necessary
*/
-int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
-{
- spin_lock(&vma->vm_mm->page_table_lock);
- if (likely(pmd_trans_huge(*pmd))) {
- if (unlikely(pmd_trans_splitting(*pmd))) {
- spin_unlock(&vma->vm_mm->page_table_lock);
- wait_split_huge_page(vma->anon_vma, pmd);
- return -1;
- } else {
- /* Thp mapped by 'pmd' is stable, so we can
- * handle it as it is. */
- return 1;
- }
+int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ spinlock_t *ptl;
+ pmd_t oldpmd, entry;
+ bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
+ bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
+ bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
+ int ret = 1;
+
+ tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
+
+ if (prot_numa && !thp_migration_supported())
+ return 1;
+
+ ptl = __pmd_trans_huge_lock(pmd, vma);
+ if (!ptl)
+ return 0;
+
+ if (thp_migration_supported() && pmd_is_valid_softleaf(*pmd)) {
+ change_non_present_huge_pmd(mm, addr, pmd, uffd_wp,
+ uffd_wp_resolve);
+ goto unlock;
}
- spin_unlock(&vma->vm_mm->page_table_lock);
- return 0;
-}
-pmd_t *page_check_address_pmd(struct page *page,
- struct mm_struct *mm,
- unsigned long address,
- enum page_check_address_pmd_flag flag)
-{
- pmd_t *pmd, *ret = NULL;
+ if (prot_numa) {
- if (address & ~HPAGE_PMD_MASK)
- goto out;
+ /*
+ * Avoid trapping faults against the zero page. The read-only
+ * data is likely to be read-cached on the local CPU and
+ * local/remote hits to the zero page are not interesting.
+ */
+ if (is_huge_zero_pmd(*pmd))
+ goto unlock;
- pmd = mm_find_pmd(mm, address);
- if (!pmd)
- goto out;
- if (pmd_none(*pmd))
- goto out;
- if (pmd_page(*pmd) != page)
- goto out;
+ if (pmd_protnone(*pmd))
+ goto unlock;
+
+ if (!folio_can_map_prot_numa(pmd_folio(*pmd), vma,
+ vma_is_single_threaded_private(vma)))
+ goto unlock;
+ }
/*
- * split_vma() may create temporary aliased mappings. There is
- * no risk as long as all huge pmd are found and have their
- * splitting bit set before __split_huge_page_refcount
- * runs. Finding the same huge pmd more than once during the
- * same rmap walk is not a problem.
+	 * In the prot_numa case we are under mmap_read_lock(mm). It's
+	 * critical not to clear the pmd intermittently, to avoid racing
+	 * with MADV_DONTNEED, which also runs under mmap_read_lock(mm):
+ *
+ * CPU0: CPU1:
+ * change_huge_pmd(prot_numa=1)
+ * pmdp_huge_get_and_clear_notify()
+ * madvise_dontneed()
+ * zap_pmd_range()
+ * pmd_trans_huge(*pmd) == 0 (without ptl)
+ * // skip the pmd
+ * set_pmd_at();
+ * // pmd is re-established
+ *
+	 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
+	 * which may break userspace.
+ *
+ * pmdp_invalidate_ad() is required to make sure we don't miss
+ * dirty/young flags set by hardware.
*/
- if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
- pmd_trans_splitting(*pmd))
- goto out;
- if (pmd_trans_huge(*pmd)) {
- VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
- !pmd_trans_splitting(*pmd));
- ret = pmd;
- }
-out:
- return ret;
-}
+ oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
-static int __split_huge_page_splitting(struct page *page,
- struct vm_area_struct *vma,
- unsigned long address)
-{
- struct mm_struct *mm = vma->vm_mm;
- pmd_t *pmd;
- int ret = 0;
- /* For mmu_notifiers */
- const unsigned long mmun_start = address;
- const unsigned long mmun_end = address + HPAGE_PMD_SIZE;
-
- mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
- spin_lock(&mm->page_table_lock);
- pmd = page_check_address_pmd(page, mm, address,
- PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
- if (pmd) {
+ entry = pmd_modify(oldpmd, newprot);
+ if (uffd_wp)
+ entry = pmd_mkuffd_wp(entry);
+ else if (uffd_wp_resolve)
/*
- * We can't temporarily set the pmd to null in order
- * to split it, the pmd must remain marked huge at all
- * times or the VM won't take the pmd_trans_huge paths
- * and it won't wait on the anon_vma->root->rwsem to
- * serialize against split_huge_page*.
+		 * Leave the write bit to be handled by the page
+		 * fault handler, so that things like COW can be
+		 * handled properly.
*/
- pmdp_splitting_flush(vma, address, pmd);
- ret = 1;
- }
- spin_unlock(&mm->page_table_lock);
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+ entry = pmd_clear_uffd_wp(entry);
+
+ /* See change_pte_range(). */
+ if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
+ can_change_pmd_writable(vma, addr, entry))
+ entry = pmd_mkwrite(entry, vma);
+
+ ret = HPAGE_PMD_NR;
+ set_pmd_at(mm, addr, pmd, entry);
+ if (huge_pmd_needs_flush(oldpmd, entry))
+ tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
+unlock:
+ spin_unlock(ptl);
return ret;
}
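
A userspace sketch (assumption, not from this patch): change_huge_pmd() is reached from mprotect(), userfaultfd write-protection and NUMA-hinting updates; a plain mprotect() over a THP-backed range changes the single huge pmd in place rather than splitting it.

#include <sys/mman.h>

/* Drop write permission on a THP-backed region; addr/len assumed PMD-aligned. */
static int make_readonly(void *addr, size_t len)
{
	return mprotect(addr, len, PROT_READ);
}
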
-static void __split_huge_page_refcount(struct page *page,
- struct list_head *list)
+/*
+ * Returns:
+ *
+ * - 0: if pud leaf changed from under us
+ * - 1: if pud can be skipped
+ * - HPAGE_PUD_NR: if pud was successfully processed
+ */
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags)
{
- int i;
- struct zone *zone = page_zone(page);
- struct lruvec *lruvec;
- int tail_count = 0;
-
- /* prevent PageLRU to go away from under us, and freeze lru stats */
- spin_lock_irq(&zone->lru_lock);
- lruvec = mem_cgroup_page_lruvec(page, zone);
-
- compound_lock(page);
- /* complete memcg works before add pages to LRU */
- mem_cgroup_split_huge_fixup(page);
-
- for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
- struct page *page_tail = page + i;
-
- /* tail_page->_mapcount cannot change */
- BUG_ON(page_mapcount(page_tail) < 0);
- tail_count += page_mapcount(page_tail);
- /* check for overflow */
- BUG_ON(tail_count < 0);
- BUG_ON(atomic_read(&page_tail->_count) != 0);
- /*
- * tail_page->_count is zero and not changing from
- * under us. But get_page_unless_zero() may be running
- * from under us on the tail_page. If we used
- * atomic_set() below instead of atomic_add(), we
- * would then run atomic_set() concurrently with
- * get_page_unless_zero(), and atomic_set() is
- * implemented in C not using locked ops. spin_unlock
- * on x86 sometime uses locked ops because of PPro
- * errata 66, 92, so unless somebody can guarantee
- * atomic_set() here would be safe on all archs (and
- * not only on x86), it's safer to use atomic_add().
- */
- atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
- &page_tail->_count);
+ struct mm_struct *mm = vma->vm_mm;
+ pud_t oldpud, entry;
+ spinlock_t *ptl;
- /* after clearing PageTail the gup refcount can be released */
- smp_mb();
+ tlb_change_page_size(tlb, HPAGE_PUD_SIZE);
- /*
- * retain hwpoison flag of the poisoned tail page:
- * fix for the unsuitable process killed on Guest Machine(KVM)
- * by the memory-failure.
- */
- page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
- page_tail->flags |= (page->flags &
- ((1L << PG_referenced) |
- (1L << PG_swapbacked) |
- (1L << PG_mlocked) |
- (1L << PG_uptodate)));
- page_tail->flags |= (1L << PG_dirty);
-
- /* clear PageTail before overwriting first_page */
- smp_wmb();
+ /* NUMA balancing doesn't apply to dax */
+ if (cp_flags & MM_CP_PROT_NUMA)
+ return 1;
- /*
- * __split_huge_page_splitting() already set the
- * splitting bit in all pmd that could map this
- * hugepage, that will ensure no CPU can alter the
- * mapcount on the head page. The mapcount is only
- * accounted in the head page and it has to be
- * transferred to all tail pages in the below code. So
- * for this code to be safe, the split the mapcount
- * can't change. But that doesn't mean userland can't
- * keep changing and reading the page contents while
- * we transfer the mapcount, so the pmd splitting
- * status is achieved setting a reserved bit in the
- * pmd, not by clearing the present bit.
- */
- page_tail->_mapcount = page->_mapcount;
-
- BUG_ON(page_tail->mapping);
- page_tail->mapping = page->mapping;
-
- page_tail->index = page->index + i;
- page_nid_xchg_last(page_tail, page_nid_last(page));
-
- BUG_ON(!PageAnon(page_tail));
- BUG_ON(!PageUptodate(page_tail));
- BUG_ON(!PageDirty(page_tail));
- BUG_ON(!PageSwapBacked(page_tail));
-
- lru_add_page_tail(page, page_tail, lruvec, list);
- }
- atomic_sub(tail_count, &page->_count);
- BUG_ON(atomic_read(&page->_count) <= 0);
-
- __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
- __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
-
- ClearPageCompound(page);
- compound_unlock(page);
- spin_unlock_irq(&zone->lru_lock);
-
- for (i = 1; i < HPAGE_PMD_NR; i++) {
- struct page *page_tail = page + i;
- BUG_ON(page_count(page_tail) <= 0);
- /*
- * Tail pages may be freed if there wasn't any mapping
- * like if add_to_swap() is running on a lru page that
- * had its mapping zapped. And freeing these pages
- * requires taking the lru_lock so we do the put_page
- * of the tail pages after the split is complete.
- */
- put_page(page_tail);
- }
+ /*
+	 * Userfault-wp on huge entries only works with anonymous memory,
+	 * and we don't have anonymous PUDs yet.
+ */
+ if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL))
+ return 1;
+
+ ptl = __pud_trans_huge_lock(pudp, vma);
+ if (!ptl)
+ return 0;
/*
- * Only the head page (now become a regular page) is required
- * to be pinned by the caller.
+ * Can't clear PUD or it can race with concurrent zapping. See
+ * change_huge_pmd().
*/
- BUG_ON(page_count(page) <= 0);
+ oldpud = pudp_invalidate(vma, addr, pudp);
+ entry = pud_modify(oldpud, newprot);
+ set_pud_at(mm, addr, pudp, entry);
+ tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE);
+
+ spin_unlock(ptl);
+ return HPAGE_PUD_NR;
}
+#endif
-static int __split_huge_page_map(struct page *page,
- struct vm_area_struct *vma,
- unsigned long address)
+#ifdef CONFIG_USERFAULTFD
+/*
+ * The PT lock for src_pmd and the dst_vma/src_vma locks (for reading) are
+ * taken by the caller; this function must return after releasing the page
+ * table lock. Just move the page from src_pmd to dst_pmd if possible.
+ * Return zero on success, -EAGAIN if the operation needs to be repeated by
+ * the caller, or another error code in case of failure.
+ */
+int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
+ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+ unsigned long dst_addr, unsigned long src_addr)
{
- struct mm_struct *mm = vma->vm_mm;
- pmd_t *pmd, _pmd;
- int ret = 0, i;
- pgtable_t pgtable;
- unsigned long haddr;
-
- spin_lock(&mm->page_table_lock);
- pmd = page_check_address_pmd(page, mm, address,
- PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
- if (pmd) {
- pgtable = pgtable_trans_huge_withdraw(mm, pmd);
- pmd_populate(mm, &_pmd, pgtable);
-
- haddr = address;
- for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
- pte_t *pte, entry;
- BUG_ON(PageCompound(page+i));
- entry = mk_pte(page + i, vma->vm_page_prot);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- if (!pmd_write(*pmd))
- entry = pte_wrprotect(entry);
- else
- BUG_ON(page_mapcount(page) != 1);
- if (!pmd_young(*pmd))
- entry = pte_mkold(entry);
- if (pmd_numa(*pmd))
- entry = pte_mknuma(entry);
- pte = pte_offset_map(&_pmd, haddr);
- BUG_ON(!pte_none(*pte));
- set_pte_at(mm, haddr, pte, entry);
- pte_unmap(pte);
- }
+ pmd_t _dst_pmd, src_pmdval;
+ struct page *src_page;
+ struct folio *src_folio;
+ spinlock_t *src_ptl, *dst_ptl;
+ pgtable_t src_pgtable;
+ struct mmu_notifier_range range;
+ int err = 0;
- smp_wmb(); /* make pte visible before pmd */
- /*
- * Up to this point the pmd is present and huge and
- * userland has the whole access to the hugepage
- * during the split (which happens in place). If we
- * overwrite the pmd with the not-huge version
- * pointing to the pte here (which of course we could
- * if all CPUs were bug free), userland could trigger
- * a small page size TLB miss on the small sized TLB
- * while the hugepage TLB entry is still established
- * in the huge TLB. Some CPU doesn't like that. See
- * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
- * Erratum 383 on page 93. Intel should be safe but is
- * also warns that it's only safe if the permission
- * and cache attributes of the two entries loaded in
- * the two TLB is identical (which should be the case
- * here). But it is generally safer to never allow
- * small and huge TLB entries for the same virtual
- * address to be loaded simultaneously. So instead of
- * doing "pmd_populate(); flush_tlb_range();" we first
- * mark the current pmd notpresent (atomically because
- * here the pmd_trans_huge and pmd_trans_splitting
- * must remain set at all times on the pmd until the
- * split is complete for this pmd), then we flush the
- * SMP TLB and finally we write the non-huge version
- * of the pmd entry with pmd_populate.
- */
- pmdp_invalidate(vma, address, pmd);
- pmd_populate(mm, pmd, pgtable);
- ret = 1;
+ src_pmdval = *src_pmd;
+ src_ptl = pmd_lockptr(mm, src_pmd);
+
+ lockdep_assert_held(src_ptl);
+ vma_assert_locked(src_vma);
+ vma_assert_locked(dst_vma);
+
+ /* Sanity checks before the operation */
+ if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
+ WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
+ spin_unlock(src_ptl);
+ return -EINVAL;
}
- spin_unlock(&mm->page_table_lock);
- return ret;
-}
+ if (!pmd_trans_huge(src_pmdval)) {
+ spin_unlock(src_ptl);
+ if (pmd_is_migration_entry(src_pmdval)) {
+ pmd_migration_entry_wait(mm, &src_pmdval);
+ return -EAGAIN;
+ }
+ return -ENOENT;
+ }
-/* must be called with anon_vma->root->rwsem held */
-static void __split_huge_page(struct page *page,
- struct anon_vma *anon_vma,
- struct list_head *list)
-{
- int mapcount, mapcount2;
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- struct anon_vma_chain *avc;
+ src_page = pmd_page(src_pmdval);
- BUG_ON(!PageHead(page));
- BUG_ON(PageTail(page));
+ if (!is_huge_zero_pmd(src_pmdval)) {
+ if (unlikely(!PageAnonExclusive(src_page))) {
+ spin_unlock(src_ptl);
+ return -EBUSY;
+ }
+
+ src_folio = page_folio(src_page);
+ folio_get(src_folio);
+ } else
+ src_folio = NULL;
+
+ spin_unlock(src_ptl);
+
+ flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
+ src_addr + HPAGE_PMD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
+
+ if (src_folio)
+ folio_lock(src_folio);
- mapcount = 0;
- anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
- struct vm_area_struct *vma = avc->vma;
- unsigned long addr = vma_address(page, vma);
- BUG_ON(is_vma_temporary_stack(vma));
- mapcount += __split_huge_page_splitting(page, vma, addr);
+ dst_ptl = pmd_lockptr(mm, dst_pmd);
+ double_pt_lock(src_ptl, dst_ptl);
+ if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
+ !pmd_same(*dst_pmd, dst_pmdval))) {
+ err = -EAGAIN;
+ goto unlock_ptls;
}
- /*
- * It is critical that new vmas are added to the tail of the
- * anon_vma list. This guarantes that if copy_huge_pmd() runs
- * and establishes a child pmd before
- * __split_huge_page_splitting() freezes the parent pmd (so if
- * we fail to prevent copy_huge_pmd() from running until the
- * whole __split_huge_page() is complete), we will still see
- * the newly established pmd of the child later during the
- * walk, to be able to set it as pmd_trans_splitting too.
- */
- if (mapcount != page_mapcount(page))
- printk(KERN_ERR "mapcount %d page_mapcount %d\n",
- mapcount, page_mapcount(page));
- BUG_ON(mapcount != page_mapcount(page));
+ if (src_folio) {
+ if (folio_maybe_dma_pinned(src_folio) ||
+ !PageAnonExclusive(&src_folio->page)) {
+ err = -EBUSY;
+ goto unlock_ptls;
+ }
- __split_huge_page_refcount(page, list);
+ if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
+ WARN_ON_ONCE(!folio_test_anon(src_folio))) {
+ err = -EBUSY;
+ goto unlock_ptls;
+ }
+
+ src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
+ /* Folio got pinned from under us. Put it back and fail the move. */
+ if (folio_maybe_dma_pinned(src_folio)) {
+ set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
+ err = -EBUSY;
+ goto unlock_ptls;
+ }
- mapcount2 = 0;
- anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
- struct vm_area_struct *vma = avc->vma;
- unsigned long addr = vma_address(page, vma);
- BUG_ON(is_vma_temporary_stack(vma));
- mapcount2 += __split_huge_page_map(page, vma, addr);
+ folio_move_anon_rmap(src_folio, dst_vma);
+ src_folio->index = linear_page_index(dst_vma, dst_addr);
+
+ _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
+		/* Follow mremap() behavior and treat the entry as dirty after the move */
+ _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
+ } else {
+ src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
+ _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
}
- if (mapcount != mapcount2)
- printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
- mapcount, mapcount2, page_mapcount(page));
- BUG_ON(mapcount != mapcount2);
+ set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
+
+ src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
+ pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
+unlock_ptls:
+ double_pt_unlock(src_ptl, dst_ptl);
+ /* unblock rmap walks */
+ if (src_folio)
+ folio_unlock(src_folio);
+ mmu_notifier_invalidate_range_end(&range);
+ if (src_folio)
+ folio_put(src_folio);
+ return err;
}
+#endif /* CONFIG_USERFAULTFD */
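/*
 * Illustrative userspace sketch (not part of this patch): a PMD-aligned,
 * PMD-sized UFFDIO_MOVE request over anonymous THP memory is the case that
 * ends up being served by move_pages_huge_pmd() above. The struct and ioctl
 * names come from include/uapi/linux/userfaultfd.h; uffd setup and error
 * handling are omitted, and the 2MiB size is an assumption for x86-64.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

#define PMD_SIZE_ASSUMED	(2UL * 1024 * 1024)

static long uffd_move_one_pmd(int uffd, unsigned long src, unsigned long dst)
{
	struct uffdio_move mv = {
		.src  = src,			/* assumed PMD-aligned */
		.dst  = dst,			/* assumed PMD-aligned */
		.len  = PMD_SIZE_ASSUMED,
		.mode = 0,
	};

	if (ioctl(uffd, UFFDIO_MOVE, &mv) == -1)
		return -1;

	/* mv.move reports how many bytes were actually moved */
	return (long)mv.move;
}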
/*
- * Split a hugepage into normal pages. This doesn't change the position of head
- * page. If @list is null, tail pages will be added to LRU list, otherwise, to
- * @list. Both head page and tail pages will inherit mapping, flags, and so on
- * from the hugepage.
- * Return 0 if the hugepage is split successfully otherwise return 1.
+ * Returns the page table lock pointer if a given pmd maps a thp, NULL otherwise.
+ *
+ * Note that if it returns the page table lock pointer, this routine returns
+ * without unlocking the page table lock. So callers must unlock it.
*/
-int split_huge_page_to_list(struct page *page, struct list_head *list)
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
- struct anon_vma *anon_vma;
- int ret = 1;
+ spinlock_t *ptl;
- BUG_ON(is_huge_zero_page(page));
- BUG_ON(!PageAnon(page));
+ ptl = pmd_lock(vma->vm_mm, pmd);
+ if (likely(pmd_is_huge(*pmd)))
+ return ptl;
+ spin_unlock(ptl);
+ return NULL;
+}
- /*
- * The caller does not necessarily hold an mmap_sem that would prevent
- * the anon_vma disappearing so we first we take a reference to it
- * and then lock the anon_vma for write. This is similar to
- * page_lock_anon_vma_read except the write lock is taken to serialise
- * against parallel split or collapse operations.
- */
- anon_vma = page_get_anon_vma(page);
- if (!anon_vma)
- goto out;
- anon_vma_lock_write(anon_vma);
+/*
+ * Returns the page table lock pointer if a given pud maps a thp, NULL otherwise.
+ *
+ * Note that if it returns the page table lock pointer, this routine returns
+ * without unlocking the page table lock. So callers must unlock it.
+ */
+spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
+{
+ spinlock_t *ptl;
- ret = 0;
- if (!PageCompound(page))
- goto out_unlock;
+ ptl = pud_lock(vma->vm_mm, pud);
+ if (likely(pud_trans_huge(*pud)))
+ return ptl;
+ spin_unlock(ptl);
+ return NULL;
+}
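/*
 * Illustrative caller pattern (sketch, not part of this patch): both helpers
 * above return with the page table lock held on success, so the caller owns
 * the unlock. The commented-out do_something_with_huge_pmd() is a
 * hypothetical stand-in for whatever work is done under the lock.
 */
static void example_walk_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return;		/* not a huge pmd (anymore), nothing to do */

	/* *pmd is guaranteed huge and stable while ptl is held */
	/* do_something_with_huge_pmd(vma, pmd); */

	spin_unlock(ptl);
}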
- BUG_ON(!PageSwapBacked(page));
- __split_huge_page(page, anon_vma, list);
- count_vm_event(THP_SPLIT);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pud, unsigned long addr)
+{
+ spinlock_t *ptl;
+ pud_t orig_pud;
+
+ ptl = __pud_trans_huge_lock(pud, vma);
+ if (!ptl)
+ return 0;
+
+ orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
+ arch_check_zapped_pud(vma, orig_pud);
+ tlb_remove_pud_tlb_entry(tlb, pud, addr);
+ if (!vma_is_dax(vma) && vma_is_special_huge(vma)) {
+ spin_unlock(ptl);
+ /* No zero page support yet */
+ } else {
+ struct page *page = NULL;
+ struct folio *folio;
+
+ /* No support for anonymous PUD pages or migration yet */
+ VM_WARN_ON_ONCE(vma_is_anonymous(vma) ||
+ !pud_present(orig_pud));
+
+ page = pud_page(orig_pud);
+ folio = page_folio(page);
+ folio_remove_rmap_pud(folio, page, vma);
+ add_mm_counter(tlb->mm, mm_counter_file(folio), -HPAGE_PUD_NR);
+
+ spin_unlock(ptl);
+ tlb_remove_page_size(tlb, page, HPAGE_PUD_SIZE);
+ }
+ return 1;
+}
+
+static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long haddr)
+{
+ struct folio *folio;
+ struct page *page;
+ pud_t old_pud;
+
+ VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
+ VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
+ VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
+ VM_BUG_ON(!pud_trans_huge(*pud));
+
+ count_vm_event(THP_SPLIT_PUD);
+
+ old_pud = pudp_huge_clear_flush(vma, haddr, pud);
+
+ if (!vma_is_dax(vma))
+ return;
+
+ page = pud_page(old_pud);
+ folio = page_folio(page);
+
+ if (!folio_test_dirty(folio) && pud_dirty(old_pud))
+ folio_mark_dirty(folio);
+ if (!folio_test_referenced(folio) && pud_young(old_pud))
+ folio_set_referenced(folio);
+ folio_remove_rmap_pud(folio, page, vma);
+ folio_put(folio);
+ add_mm_counter(vma->vm_mm, mm_counter_file(folio),
+ -HPAGE_PUD_NR);
+}
+
+void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address)
+{
+ spinlock_t *ptl;
+ struct mmu_notifier_range range;
+
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
+ address & HPAGE_PUD_MASK,
+ (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
+ ptl = pud_lock(vma->vm_mm, pud);
+ if (unlikely(!pud_trans_huge(*pud)))
+ goto out;
+ __split_huge_pud_locked(vma, pud, range.start);
- BUG_ON(PageCompound(page));
-out_unlock:
- anon_vma_unlock_write(anon_vma);
- put_anon_vma(anon_vma);
out:
- return ret;
+ spin_unlock(ptl);
+ mmu_notifier_invalidate_range_end(&range);
+}
+#else
+void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address)
+{
}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
+static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
+ unsigned long haddr, pmd_t *pmd)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pgtable_t pgtable;
+ pmd_t _pmd, old_pmd;
+ unsigned long addr;
+ pte_t *pte;
+ int i;
+
+ /*
+	 * Leave the pmd empty until the ptes are filled. Note that it is fine
+	 * to delay notification until mmu_notifier_invalidate_range_end(), as
+	 * we are replacing a write-protected huge zero pmd with
+	 * write-protected zero ptes.
+ *
+ * See Documentation/mm/mmu_notifier.rst
+ */
+ old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
+
+ pgtable = pgtable_trans_huge_withdraw(mm, pmd);
+ pmd_populate(mm, &_pmd, pgtable);
+
+ pte = pte_offset_map(&_pmd, haddr);
+ VM_BUG_ON(!pte);
+ for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
+ pte_t entry;
-#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
+ entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
+ entry = pte_mkspecial(entry);
+ if (pmd_uffd_wp(old_pmd))
+ entry = pte_mkuffd_wp(entry);
+ VM_BUG_ON(!pte_none(ptep_get(pte)));
+ set_pte_at(mm, addr, pte, entry);
+ pte++;
+ }
+ pte_unmap(pte - 1);
+ smp_wmb(); /* make pte visible before pmd */
+ pmd_populate(mm, pmd, pgtable);
+}
-int hugepage_madvise(struct vm_area_struct *vma,
- unsigned long *vm_flags, int advice)
+static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long haddr, bool freeze)
{
struct mm_struct *mm = vma->vm_mm;
+ struct folio *folio;
+ struct page *page;
+ pgtable_t pgtable;
+ pmd_t old_pmd, _pmd;
+ bool soft_dirty, uffd_wp = false, young = false, write = false;
+ bool anon_exclusive = false, dirty = false;
+ unsigned long addr;
+ pte_t *pte;
+ int i;
+
+ VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
+ VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
+ VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
+
+ VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(*pmd) && !pmd_trans_huge(*pmd));
- switch (advice) {
- case MADV_HUGEPAGE:
+ count_vm_event(THP_SPLIT_PMD);
+
+ if (!vma_is_anonymous(vma)) {
+ old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
/*
- * Be somewhat over-protective like KSM for now!
+		 * We are going to unmap this huge page, so just go ahead
+		 * and zap it
*/
- if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
- return -EINVAL;
- if (mm->def_flags & VM_NOHUGEPAGE)
- return -EINVAL;
- *vm_flags &= ~VM_NOHUGEPAGE;
- *vm_flags |= VM_HUGEPAGE;
+ if (arch_needs_pgtable_deposit())
+ zap_deposited_table(mm, pmd);
+ if (!vma_is_dax(vma) && vma_is_special_huge(vma))
+ return;
+ if (unlikely(pmd_is_migration_entry(old_pmd))) {
+ const softleaf_t old_entry = softleaf_from_pmd(old_pmd);
+
+ folio = softleaf_to_folio(old_entry);
+ } else if (is_huge_zero_pmd(old_pmd)) {
+ return;
+ } else {
+ page = pmd_page(old_pmd);
+ folio = page_folio(page);
+ if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
+ folio_mark_dirty(folio);
+ if (!folio_test_referenced(folio) && pmd_young(old_pmd))
+ folio_set_referenced(folio);
+ folio_remove_rmap_pmd(folio, page, vma);
+ folio_put(folio);
+ }
+ add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
+ return;
+ }
+
+ if (is_huge_zero_pmd(*pmd)) {
/*
- * If the vma become good for khugepaged to scan,
- * register it here without waiting a page fault that
- * may not happen any time soon.
+		 * FIXME: Do we want to invalidate the secondary mmu by calling
+		 * mmu_notifier_arch_invalidate_secondary_tlbs()? See the comments
+		 * inside __split_huge_pmd() below.
+		 *
+		 * We are going from a write-protected huge zero page to
+		 * write-protected small zero pages, so it does not seem useful
+		 * to invalidate the secondary mmu at this time.
*/
- if (unlikely(khugepaged_enter_vma_merge(vma)))
- return -ENOMEM;
- break;
- case MADV_NOHUGEPAGE:
+ return __split_huge_zero_page_pmd(vma, haddr, pmd);
+ }
+
+ if (pmd_is_migration_entry(*pmd)) {
+ softleaf_t entry;
+
+ old_pmd = *pmd;
+ entry = softleaf_from_pmd(old_pmd);
+ page = softleaf_to_page(entry);
+ folio = page_folio(page);
+
+ soft_dirty = pmd_swp_soft_dirty(old_pmd);
+ uffd_wp = pmd_swp_uffd_wp(old_pmd);
+
+ write = softleaf_is_migration_write(entry);
+ if (PageAnon(page))
+ anon_exclusive = softleaf_is_migration_read_exclusive(entry);
+ young = softleaf_is_migration_young(entry);
+ dirty = softleaf_is_migration_dirty(entry);
+ } else if (pmd_is_device_private_entry(*pmd)) {
+ softleaf_t entry;
+
+ old_pmd = *pmd;
+ entry = softleaf_from_pmd(old_pmd);
+ page = softleaf_to_page(entry);
+ folio = page_folio(page);
+
+ soft_dirty = pmd_swp_soft_dirty(old_pmd);
+ uffd_wp = pmd_swp_uffd_wp(old_pmd);
+
+ write = softleaf_is_device_private_write(entry);
+ anon_exclusive = PageAnonExclusive(page);
+
+ /*
+ * Device private THP should be treated the same as regular
+ * folios w.r.t anon exclusive handling. See the comments for
+ * folio handling and anon_exclusive below.
+ */
+ if (freeze && anon_exclusive &&
+ folio_try_share_anon_rmap_pmd(folio, page))
+ freeze = false;
+ if (!freeze) {
+ rmap_t rmap_flags = RMAP_NONE;
+
+ folio_ref_add(folio, HPAGE_PMD_NR - 1);
+ if (anon_exclusive)
+ rmap_flags |= RMAP_EXCLUSIVE;
+
+ folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
+ vma, haddr, rmap_flags);
+ }
+ } else {
/*
- * Be somewhat over-protective like KSM for now!
+ * Up to this point the pmd is present and huge and userland has
+ * the whole access to the hugepage during the split (which
+ * happens in place). If we overwrite the pmd with the not-huge
+ * version pointing to the pte here (which of course we could if
+ * all CPUs were bug free), userland could trigger a small page
+ * size TLB miss on the small sized TLB while the hugepage TLB
+		 * entry is still established in the huge TLB. Some CPUs don't
+		 * like that. See
+		 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
+		 * 383 on page 105. Intel should be safe but it also warns that
+		 * it's only safe if the permission and cache attributes of the
+		 * two entries loaded in the two TLBs are identical (which should
+		 * be the case here). But it is generally safer to never allow
+ * small and huge TLB entries for the same virtual address to be
+ * loaded simultaneously. So instead of doing "pmd_populate();
+ * flush_pmd_tlb_range();" we first mark the current pmd
+ * notpresent (atomically because here the pmd_trans_huge must
+ * remain set at all times on the pmd until the split is
+ * complete for this pmd), then we flush the SMP TLB and finally
+ * we write the non-huge version of the pmd entry with
+ * pmd_populate.
*/
- if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
- return -EINVAL;
- *vm_flags &= ~VM_HUGEPAGE;
- *vm_flags |= VM_NOHUGEPAGE;
+ old_pmd = pmdp_invalidate(vma, haddr, pmd);
+ page = pmd_page(old_pmd);
+ folio = page_folio(page);
+ if (pmd_dirty(old_pmd)) {
+ dirty = true;
+ folio_set_dirty(folio);
+ }
+ write = pmd_write(old_pmd);
+ young = pmd_young(old_pmd);
+ soft_dirty = pmd_soft_dirty(old_pmd);
+ uffd_wp = pmd_uffd_wp(old_pmd);
+
+ VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+
/*
- * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
- * this vma even if we leave the mm registered in khugepaged if
- * it got registered before VM_NOHUGEPAGE was set.
+ * Without "freeze", we'll simply split the PMD, propagating the
+ * PageAnonExclusive() flag for each PTE by setting it for
+ * each subpage -- no need to (temporarily) clear.
+ *
+ * With "freeze" we want to replace mapped pages by
+ * migration entries right away. This is only possible if we
+ * managed to clear PageAnonExclusive() -- see
+ * set_pmd_migration_entry().
+ *
+ * In case we cannot clear PageAnonExclusive(), split the PMD
+ * only and let try_to_migrate_one() fail later.
+ *
+ * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
*/
- break;
+ anon_exclusive = PageAnonExclusive(page);
+ if (freeze && anon_exclusive &&
+ folio_try_share_anon_rmap_pmd(folio, page))
+ freeze = false;
+ if (!freeze) {
+ rmap_t rmap_flags = RMAP_NONE;
+
+ folio_ref_add(folio, HPAGE_PMD_NR - 1);
+ if (anon_exclusive)
+ rmap_flags |= RMAP_EXCLUSIVE;
+ folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
+ vma, haddr, rmap_flags);
+ }
}
- return 0;
-}
+ /*
+ * Withdraw the table only after we mark the pmd entry invalid.
+	 * This is critical for some architectures (Power).
+ */
+ pgtable = pgtable_trans_huge_withdraw(mm, pmd);
+ pmd_populate(mm, &_pmd, pgtable);
-static int __init khugepaged_slab_init(void)
-{
- mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
- sizeof(struct mm_slot),
- __alignof__(struct mm_slot), 0, NULL);
- if (!mm_slot_cache)
- return -ENOMEM;
+ pte = pte_offset_map(&_pmd, haddr);
+ VM_BUG_ON(!pte);
- return 0;
+ /*
+ * Note that NUMA hinting access restrictions are not transferred to
+ * avoid any possibility of altering permissions across VMAs.
+ */
+ if (freeze || pmd_is_migration_entry(old_pmd)) {
+ pte_t entry;
+ swp_entry_t swp_entry;
+
+ for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
+ if (write)
+ swp_entry = make_writable_migration_entry(
+ page_to_pfn(page + i));
+ else if (anon_exclusive)
+ swp_entry = make_readable_exclusive_migration_entry(
+ page_to_pfn(page + i));
+ else
+ swp_entry = make_readable_migration_entry(
+ page_to_pfn(page + i));
+ if (young)
+ swp_entry = make_migration_entry_young(swp_entry);
+ if (dirty)
+ swp_entry = make_migration_entry_dirty(swp_entry);
+ entry = swp_entry_to_pte(swp_entry);
+ if (soft_dirty)
+ entry = pte_swp_mksoft_dirty(entry);
+ if (uffd_wp)
+ entry = pte_swp_mkuffd_wp(entry);
+ VM_WARN_ON(!pte_none(ptep_get(pte + i)));
+ set_pte_at(mm, addr, pte + i, entry);
+ }
+ } else if (pmd_is_device_private_entry(old_pmd)) {
+ pte_t entry;
+ swp_entry_t swp_entry;
+
+ for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
+ /*
+ * anon_exclusive was already propagated to the relevant
+ * pages corresponding to the pte entries when freeze
+ * is false.
+ */
+ if (write)
+ swp_entry = make_writable_device_private_entry(
+ page_to_pfn(page + i));
+ else
+ swp_entry = make_readable_device_private_entry(
+ page_to_pfn(page + i));
+ /*
+			 * Young and dirty bits are not propagated via swp_entry
+ */
+ entry = swp_entry_to_pte(swp_entry);
+ if (soft_dirty)
+ entry = pte_swp_mksoft_dirty(entry);
+ if (uffd_wp)
+ entry = pte_swp_mkuffd_wp(entry);
+ VM_WARN_ON(!pte_none(ptep_get(pte + i)));
+ set_pte_at(mm, addr, pte + i, entry);
+ }
+ } else {
+ pte_t entry;
+
+ entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
+ if (write)
+ entry = pte_mkwrite(entry, vma);
+ if (!young)
+ entry = pte_mkold(entry);
+ /* NOTE: this may set soft-dirty too on some archs */
+ if (dirty)
+ entry = pte_mkdirty(entry);
+ if (soft_dirty)
+ entry = pte_mksoft_dirty(entry);
+ if (uffd_wp)
+ entry = pte_mkuffd_wp(entry);
+
+ for (i = 0; i < HPAGE_PMD_NR; i++)
+ VM_WARN_ON(!pte_none(ptep_get(pte + i)));
+
+ set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
+ }
+ pte_unmap(pte);
+
+ if (!pmd_is_migration_entry(*pmd))
+ folio_remove_rmap_pmd(folio, page, vma);
+ if (freeze)
+ put_page(page);
+
+ smp_wmb(); /* make pte visible before pmd */
+ pmd_populate(mm, pmd, pgtable);
}
-static inline struct mm_slot *alloc_mm_slot(void)
+void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmd, bool freeze)
{
- if (!mm_slot_cache) /* initialization failed */
- return NULL;
- return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
+ VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
+ if (pmd_trans_huge(*pmd) || pmd_is_valid_softleaf(*pmd))
+ __split_huge_pmd_locked(vma, pmd, address, freeze);
}
-static inline void free_mm_slot(struct mm_slot *mm_slot)
+void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long address, bool freeze)
{
- kmem_cache_free(mm_slot_cache, mm_slot);
+ spinlock_t *ptl;
+ struct mmu_notifier_range range;
+
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
+ address & HPAGE_PMD_MASK,
+ (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
+ ptl = pmd_lock(vma->vm_mm, pmd);
+ split_huge_pmd_locked(vma, range.start, pmd, freeze);
+ spin_unlock(ptl);
+ mmu_notifier_invalidate_range_end(&range);
}
-static struct mm_slot *get_mm_slot(struct mm_struct *mm)
+void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
+ bool freeze)
{
- struct mm_slot *mm_slot;
+ pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
- hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
- if (mm == mm_slot->mm)
- return mm_slot;
+ if (!pmd)
+ return;
- return NULL;
+ __split_huge_pmd(vma, pmd, address, freeze);
}
-static void insert_to_mm_slots_hash(struct mm_struct *mm,
- struct mm_slot *mm_slot)
+static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
{
- mm_slot->mm = mm;
- hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
+ /*
+ * If the new address isn't hpage aligned and it could previously
+	 * contain a hugepage: check if we need to split a huge pmd.
+ */
+ if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
+ range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
+ ALIGN(address, HPAGE_PMD_SIZE)))
+ split_huge_pmd_address(vma, address, false);
}
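/*
 * Illustrative helper (sketch, not part of this patch): the range tested
 * above is the PMD-sized region covering the unaligned address. For example,
 * with a 2MiB PMD, address 0x1234000 maps to [0x1200000, 0x1400000), while an
 * already aligned address yields an empty range and needs no split.
 */
static inline void pmd_covering_range(unsigned long address,
				      unsigned long *start, unsigned long *end)
{
	*start = ALIGN_DOWN(address, HPAGE_PMD_SIZE);
	*end   = ALIGN(address, HPAGE_PMD_SIZE);
}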
-static inline int khugepaged_test_exit(struct mm_struct *mm)
+void vma_adjust_trans_huge(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ struct vm_area_struct *next)
{
- return atomic_read(&mm->mm_users) == 0;
+ /* Check if we need to split start first. */
+ split_huge_pmd_if_needed(vma, start);
+
+ /* Check if we need to split end next. */
+ split_huge_pmd_if_needed(vma, end);
+
+ /* If we're incrementing next->vm_start, we might need to split it. */
+ if (next)
+ split_huge_pmd_if_needed(next, end);
}
-int __khugepaged_enter(struct mm_struct *mm)
+static void unmap_folio(struct folio *folio)
{
- struct mm_slot *mm_slot;
- int wakeup;
+ enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC |
+ TTU_BATCH_FLUSH;
- mm_slot = alloc_mm_slot();
- if (!mm_slot)
- return -ENOMEM;
+ VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
- /* __khugepaged_exit() must not run from under us */
- VM_BUG_ON(khugepaged_test_exit(mm));
- if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
- free_mm_slot(mm_slot);
- return 0;
+ if (folio_test_pmd_mappable(folio))
+ ttu_flags |= TTU_SPLIT_HUGE_PMD;
+
+ /*
+ * Anon pages need migration entries to preserve them, but file
+ * pages can simply be left unmapped, then faulted back on demand.
+ * If that is ever changed (perhaps for mlock), update remap_page().
+ */
+ if (folio_test_anon(folio))
+ try_to_migrate(folio, ttu_flags);
+ else
+ try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
+
+ try_to_unmap_flush();
+}
+
+static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp,
+ struct folio *folio)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int ref_count, map_count;
+ pmd_t orig_pmd = *pmdp;
+
+ if (pmd_dirty(orig_pmd))
+ folio_set_dirty(folio);
+ if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
+ folio_set_swapbacked(folio);
+ return false;
}
- spin_lock(&khugepaged_mm_lock);
- insert_to_mm_slots_hash(mm, mm_slot);
+ orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
+
/*
- * Insert just behind the scanning cursor, to let the area settle
- * down a little.
+ * Syncing against concurrent GUP-fast:
+ * - clear PMD; barrier; read refcount
+ * - inc refcount; barrier; read PMD
*/
- wakeup = list_empty(&khugepaged_scan.mm_head);
- list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
- spin_unlock(&khugepaged_mm_lock);
+ smp_mb();
- atomic_inc(&mm->mm_count);
- if (wakeup)
- wake_up_interruptible(&khugepaged_wait);
+ ref_count = folio_ref_count(folio);
+ map_count = folio_mapcount(folio);
- return 0;
+ /*
+ * Order reads for folio refcount and dirty flag
+ * (see comments in __remove_mapping()).
+ */
+ smp_rmb();
+
+ /*
+ * If the folio or its PMD is redirtied at this point, or if there
+	 * are unexpected references, we will give up on discarding this
+	 * folio and remap it instead.
+ *
+ * The only folio refs must be one from isolation plus the rmap(s).
+ */
+ if (pmd_dirty(orig_pmd))
+ folio_set_dirty(folio);
+ if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
+ folio_set_swapbacked(folio);
+ set_pmd_at(mm, addr, pmdp, orig_pmd);
+ return false;
+ }
+
+ if (ref_count != map_count + 1) {
+ set_pmd_at(mm, addr, pmdp, orig_pmd);
+ return false;
+ }
+
+ folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma);
+ zap_deposited_table(mm, pmdp);
+ add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+ if (vma->vm_flags & VM_LOCKED)
+ mlock_drain_local();
+ folio_put(folio);
+
+ return true;
}
-int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmdp, struct folio *folio)
{
- unsigned long hstart, hend;
- if (!vma->anon_vma)
- /*
- * Not yet faulted in so we will register later in the
- * page fault if needed.
- */
- return 0;
- if (vma->vm_ops)
- /* khugepaged not yet working on file or special mappings */
- return 0;
- VM_BUG_ON(vma->vm_flags & VM_NO_THP);
- hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
- hend = vma->vm_end & HPAGE_PMD_MASK;
- if (hstart < hend)
- return khugepaged_enter(vma);
- return 0;
+ VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
+ VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
+
+ return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
}
-void __khugepaged_exit(struct mm_struct *mm)
+static void remap_page(struct folio *folio, unsigned long nr, int flags)
{
- struct mm_slot *mm_slot;
- int free = 0;
+ int i = 0;
- spin_lock(&khugepaged_mm_lock);
- mm_slot = get_mm_slot(mm);
- if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
- hash_del(&mm_slot->hash);
- list_del(&mm_slot->mm_node);
- free = 1;
+ /* If unmap_folio() uses try_to_migrate() on file, remove this check */
+ if (!folio_test_anon(folio))
+ return;
+ for (;;) {
+ remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
+ i += folio_nr_pages(folio);
+ if (i >= nr)
+ break;
+ folio = folio_next(folio);
}
- spin_unlock(&khugepaged_mm_lock);
+}
- if (free) {
- clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
- free_mm_slot(mm_slot);
- mmdrop(mm);
- } else if (mm_slot) {
- /*
- * This is required to serialize against
- * khugepaged_test_exit() (which is guaranteed to run
- * under mmap sem read mode). Stop here (after we
- * return all pagetables will be destroyed) until
- * khugepaged has finished working on the pagetables
- * under the mmap_sem.
- */
- down_write(&mm->mmap_sem);
- up_write(&mm->mmap_sem);
+static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
+ struct lruvec *lruvec, struct list_head *list)
+{
+ VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio);
+ lockdep_assert_held(&lruvec->lru_lock);
+
+ if (folio_is_device_private(folio))
+ return;
+
+ if (list) {
+ /* page reclaim is reclaiming a huge page */
+ VM_WARN_ON(folio_test_lru(folio));
+ folio_get(new_folio);
+ list_add_tail(&new_folio->lru, list);
+ } else {
+ /* head is still on lru (and we have it frozen) */
+ VM_WARN_ON(!folio_test_lru(folio));
+ if (folio_test_unevictable(folio))
+ new_folio->mlock_count = 0;
+ else
+ list_add_tail(&new_folio->lru, &folio->lru);
+ folio_set_lru(new_folio);
}
}
-static void release_pte_page(struct page *page)
+/* Racy check whether the huge page can be split */
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
- /* 0 stands for page_is_file_cache(page) == false */
- dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
- unlock_page(page);
- putback_lru_page(page);
+ int extra_pins;
+
+ /* Additional pins from page cache */
+ if (folio_test_anon(folio))
+ extra_pins = folio_test_swapcache(folio) ?
+ folio_nr_pages(folio) : 0;
+ else
+ extra_pins = folio_nr_pages(folio);
+ if (pextra_pins)
+ *pextra_pins = extra_pins;
+ return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
+ caller_pins;
}
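/*
 * Illustrative arithmetic (sketch, not part of this patch): the racy check
 * above succeeds only when every folio reference is accounted for by the
 * mappings, the expected extra pins (swap cache or page cache entries) and
 * the caller's own pins, i.e. no unexpected references such as GUP pins
 * remain. The helper name is hypothetical and only mirrors that test.
 */
static inline bool refs_allow_split(int ref_count, int map_count,
				    int extra_pins, int caller_pins)
{
	return map_count == ref_count - extra_pins - caller_pins;
}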
-static void release_pte_pages(pte_t *pte, pte_t *_pte)
+static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
{
- while (--_pte >= pte) {
- pte_t pteval = *_pte;
- if (!pte_none(pteval))
- release_pte_page(pte_page(pteval));
- }
+ for (; nr_pages; page++, nr_pages--)
+ if (PageHWPoison(page))
+ return true;
+ return false;
}
-static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *pte)
+/*
+ * It splits @folio into @new_order folios and copies the @folio metadata to
+ * all the resulting folios.
+ */
+static void __split_folio_to_order(struct folio *folio, int old_order,
+ int new_order)
{
- struct page *page;
- pte_t *_pte;
- int referenced = 0, none = 0;
- for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
- _pte++, address += PAGE_SIZE) {
- pte_t pteval = *_pte;
- if (pte_none(pteval)) {
- if (++none <= khugepaged_max_ptes_none)
- continue;
- else
- goto out;
- }
- if (!pte_present(pteval) || !pte_write(pteval))
- goto out;
- page = vm_normal_page(vma, address, pteval);
- if (unlikely(!page))
- goto out;
+	/* Scan poisoned pages when splitting a poisoned folio into large folios */
+ const bool handle_hwpoison = folio_test_has_hwpoisoned(folio) && new_order;
+ long new_nr_pages = 1 << new_order;
+ long nr_pages = 1 << old_order;
+ long i;
+
+ folio_clear_has_hwpoisoned(folio);
+
+ /* Check first new_nr_pages since the loop below skips them */
+ if (handle_hwpoison &&
+ page_range_has_hwpoisoned(folio_page(folio, 0), new_nr_pages))
+ folio_set_has_hwpoisoned(folio);
+ /*
+	 * Skip the first new_nr_pages, since the new folio made from them
+	 * already has all the flags of the original folio.
+ */
+ for (i = new_nr_pages; i < nr_pages; i += new_nr_pages) {
+ struct page *new_head = &folio->page + i;
+ /*
+ * Careful: new_folio is not a "real" folio before we cleared PageTail.
+ * Don't pass it around before clear_compound_head().
+ */
+ struct folio *new_folio = (struct folio *)new_head;
- VM_BUG_ON(PageCompound(page));
- BUG_ON(!PageAnon(page));
- VM_BUG_ON(!PageSwapBacked(page));
+ VM_BUG_ON_PAGE(atomic_read(&new_folio->_mapcount) != -1, new_head);
- /* cannot use mapcount: can't collapse if there's a gup pin */
- if (page_count(page) != 1)
- goto out;
/*
- * We can do it before isolate_lru_page because the
- * page can't be freed from under us. NOTE: PG_lock
- * is needed to serialize against split_huge_page
- * when invoked from the VM.
+ * Clone page flags before unfreezing refcount.
+ *
+		 * A successful get_page_unless_zero() might be followed by flag
+		 * changes, for example lock_page() setting PG_waiters.
+ *
+ * Note that for mapped sub-pages of an anonymous THP,
+ * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
+ * the migration entry instead from where remap_page() will restore it.
+ * We can still have PG_anon_exclusive set on effectively unmapped and
+ * unreferenced sub-pages of an anonymous THP: we can simply drop
+ * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
*/
- if (!trylock_page(page))
- goto out;
+ new_folio->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
+ new_folio->flags.f |= (folio->flags.f &
+ ((1L << PG_referenced) |
+ (1L << PG_swapbacked) |
+ (1L << PG_swapcache) |
+ (1L << PG_mlocked) |
+ (1L << PG_uptodate) |
+ (1L << PG_active) |
+ (1L << PG_workingset) |
+ (1L << PG_locked) |
+ (1L << PG_unevictable) |
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
+ (1L << PG_arch_2) |
+#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
+ (1L << PG_arch_3) |
+#endif
+ (1L << PG_dirty) |
+ LRU_GEN_MASK | LRU_REFS_MASK));
+
+ if (handle_hwpoison &&
+ page_range_has_hwpoisoned(new_head, new_nr_pages))
+ folio_set_has_hwpoisoned(new_folio);
+
+ new_folio->mapping = folio->mapping;
+ new_folio->index = folio->index + i;
+
+ if (folio_test_swapcache(folio))
+ new_folio->swap.val = folio->swap.val + i;
+
+ /* Page flags must be visible before we make the page non-compound. */
+ smp_wmb();
+
/*
- * Isolate the page to avoid collapsing an hugepage
- * currently in use by the VM.
+ * Clear PageTail before unfreezing page refcount.
+ *
+		 * A successful get_page_unless_zero() might be followed by
+		 * put_page(), which needs a correct compound_head().
*/
- if (isolate_lru_page(page)) {
- unlock_page(page);
- goto out;
+ clear_compound_head(new_head);
+ if (new_order) {
+ prep_compound_page(new_head, new_order);
+ folio_set_large_rmappable(new_folio);
}
- /* 0 stands for page_is_file_cache(page) == false */
- inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
- VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(PageLRU(page));
- /* If there is no mapped pte young don't collapse the page */
- if (pte_young(pteval) || PageReferenced(page) ||
- mmu_notifier_test_young(vma->vm_mm, address))
- referenced = 1;
+ if (folio_test_young(folio))
+ folio_set_young(new_folio);
+ if (folio_test_idle(folio))
+ folio_set_idle(new_folio);
+#ifdef CONFIG_MEMCG
+ new_folio->memcg_data = folio->memcg_data;
+#endif
+
+ folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
}
- if (likely(referenced))
- return 1;
-out:
- release_pte_pages(pte, _pte);
- return 0;
+
+ if (new_order)
+ folio_set_order(folio, new_order);
+ else
+ ClearPageCompound(&folio->page);
}
-static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
- struct vm_area_struct *vma,
- unsigned long address,
- spinlock_t *ptl)
+/**
+ * __split_unmapped_folio() - splits an unmapped @folio to lower order folios in
+ * two ways: uniform split or non-uniform split.
+ * @folio: the to-be-split folio
+ * @new_order: the smallest order of the after split folios (since buddy
+ * allocator like split generates folios with orders from @folio's
+ * order - 1 to new_order).
+ * @split_at: in buddy allocator like split, the folio containing @split_at
+ * will be split until its order becomes @new_order.
+ * @xas: xa_state pointing to folio->mapping->i_pages and locked by caller
+ * @mapping: @folio->mapping
+ * @split_type: if the split is uniform or not (buddy allocator like split)
+ *
+ * 1. uniform split: the given @folio is split into multiple @new_order small
+ *    folios, where all small folios have the same order. This is done when
+ *    split_type is SPLIT_TYPE_UNIFORM.
+ * 2. buddy allocator like (non-uniform) split: the given @folio is split into
+ * half and one of the half (containing the given page) is split into half
+ * until the given @folio's order becomes @new_order. This is done when
+ * split_type is SPLIT_TYPE_NON_UNIFORM.
+ *
+ * The high level flow for these two methods are:
+ *
+ * 1. uniform split: @xas is split with no expectation of failure and a single
+ * __split_folio_to_order() is called to split the @folio into @new_order
+ * along with stats update.
+ * 2. non-uniform split: folio_order - @new_order calls to
+ * __split_folio_to_order() are expected to be made in a for loop to split
+ * the @folio to one lower order at a time. The folio containing @split_at
+ * is split in each iteration. @xas is split into half in each iteration and
+ * can fail. A failed @xas split leaves split folios as is without merging
+ * them back.
+ *
+ * After splitting, the caller's folio reference will be transferred to the
+ * folio containing @split_at. The caller needs to unlock and/or free
+ * after-split folios if necessary.
+ *
+ * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
+ * split but not to @new_order, the caller needs to check)
+ */
+static int __split_unmapped_folio(struct folio *folio, int new_order,
+ struct page *split_at, struct xa_state *xas,
+ struct address_space *mapping, enum split_type split_type)
{
- pte_t *_pte;
- for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
- pte_t pteval = *_pte;
- struct page *src_page;
+ const bool is_anon = folio_test_anon(folio);
+ int old_order = folio_order(folio);
+ int start_order = split_type == SPLIT_TYPE_UNIFORM ? new_order : old_order - 1;
+ int split_order;
- if (pte_none(pteval)) {
- clear_user_highpage(page, address);
- add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
- } else {
- src_page = pte_page(pteval);
- copy_user_highpage(page, src_page, address, vma);
- VM_BUG_ON(page_mapcount(src_page) != 1);
- release_pte_page(src_page);
- /*
- * ptl mostly unnecessary, but preempt has to
- * be disabled to update the per-cpu stats
- * inside page_remove_rmap().
- */
- spin_lock(ptl);
+ /*
+ * split to new_order one order at a time. For uniform split,
+ * folio is split to new_order directly.
+ */
+ for (split_order = start_order;
+ split_order >= new_order;
+ split_order--) {
+ int nr_new_folios = 1UL << (old_order - split_order);
+
+ /* order-1 anonymous folio is not supported */
+ if (is_anon && split_order == 1)
+ continue;
+
+ if (mapping) {
/*
- * paravirt calls inside pte_clear here are
- * superfluous.
+ * uniform split has xas_split_alloc() called before
+ * irq is disabled to allocate enough memory, whereas
+ * non-uniform split can handle ENOMEM.
*/
- pte_clear(vma->vm_mm, address, _pte);
- page_remove_rmap(src_page);
- spin_unlock(ptl);
- free_page_and_swap_cache(src_page);
+ if (split_type == SPLIT_TYPE_UNIFORM)
+ xas_split(xas, folio, old_order);
+ else {
+ xas_set_order(xas, folio->index, split_order);
+ xas_try_split(xas, folio, old_order);
+ if (xas_error(xas))
+ return xas_error(xas);
+ }
}
- address += PAGE_SIZE;
- page++;
+ folio_split_memcg_refs(folio, old_order, split_order);
+ split_page_owner(&folio->page, old_order, split_order);
+ pgalloc_tag_split(folio, old_order, split_order);
+ __split_folio_to_order(folio, old_order, split_order);
+
+ if (is_anon) {
+ mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
+ mod_mthp_stat(split_order, MTHP_STAT_NR_ANON, nr_new_folios);
+ }
+ /*
+ * If uniform split, the process is complete.
+ * If non-uniform, continue splitting the folio at @split_at
+ * as long as the next @split_order is >= @new_order.
+ */
+ folio = page_folio(split_at);
+ old_order = split_order;
}
-}
-static void khugepaged_alloc_sleep(void)
-{
- wait_event_freezable_timeout(khugepaged_wait, false,
- msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+ return 0;
}
-#ifdef CONFIG_NUMA
-static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+bool folio_split_supported(struct folio *folio, unsigned int new_order,
+ enum split_type split_type, bool warns)
{
- if (IS_ERR(*hpage)) {
- if (!*wait)
+ if (folio_test_anon(folio)) {
+ /* order-1 is not supported for anonymous THP. */
+ VM_WARN_ONCE(warns && new_order == 1,
+ "Cannot split to order-1 folio");
+ if (new_order == 1)
+ return false;
+ } else if (split_type == SPLIT_TYPE_NON_UNIFORM || new_order) {
+ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+ !mapping_large_folio_support(folio->mapping)) {
+ /*
+ * We can always split a folio down to a single page
+ * (new_order == 0) uniformly.
+ *
+ * For any other scenario
+ * a) uniform split targeting a large folio
+ * (new_order > 0)
+ * b) any non-uniform split
+ * we must confirm that the file system supports large
+ * folios.
+ *
+ * Note that we might still have THPs in such
+ * mappings, which is created from khugepaged when
+ * CONFIG_READ_ONLY_THP_FOR_FS is enabled. But in that
+ * case, the mapping does not actually support large
+ * folios properly.
+ */
+ VM_WARN_ONCE(warns,
+ "Cannot split file folio to non-0 order");
return false;
+ }
+ }
- *wait = false;
- *hpage = NULL;
- khugepaged_alloc_sleep();
- } else if (*hpage) {
- put_page(*hpage);
- *hpage = NULL;
+ /*
+	 * A swapcache folio can only be split to order 0.
+ *
+ * non-uniform split creates after-split folios with orders from
+ * folio_order(folio) - 1 to new_order, making it not suitable for any
+ * swapcache folio split. Only uniform split to order-0 can be used
+ * here.
+ */
+ if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) && folio_test_swapcache(folio)) {
+ VM_WARN_ONCE(warns,
+ "Cannot split swapcache folio to non-0 order");
+ return false;
}
return true;
}
-static struct page
-*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address,
- int node)
+static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
+ struct page *split_at, struct xa_state *xas,
+ struct address_space *mapping, bool do_lru,
+ struct list_head *list, enum split_type split_type,
+ pgoff_t end, int *nr_shmem_dropped, int extra_pins)
{
- VM_BUG_ON(*hpage);
+ struct folio *end_folio = folio_next(folio);
+ struct folio *new_folio, *next;
+ int old_order = folio_order(folio);
+ int ret = 0;
+ struct deferred_split *ds_queue;
+
+ VM_WARN_ON_ONCE(!mapping && end);
+ /* Prevent deferred_split_scan() touching ->_refcount */
+ ds_queue = folio_split_queue_lock(folio);
+ if (folio_ref_freeze(folio, 1 + extra_pins)) {
+ struct swap_cluster_info *ci = NULL;
+ struct lruvec *lruvec;
+ int expected_refs;
+
+ if (old_order > 1) {
+ if (!list_empty(&folio->_deferred_list)) {
+ ds_queue->split_queue_len--;
+ /*
+ * Reinitialize page_deferred_list after removing the
+ * page from the split_queue, otherwise a subsequent
+ * split will see list corruption when checking the
+ * page_deferred_list.
+ */
+ list_del_init(&folio->_deferred_list);
+ }
+ if (folio_test_partially_mapped(folio)) {
+ folio_clear_partially_mapped(folio);
+ mod_mthp_stat(old_order,
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ }
+ }
+ split_queue_unlock(ds_queue);
+ if (mapping) {
+ int nr = folio_nr_pages(folio);
+
+ if (folio_test_pmd_mappable(folio) &&
+ new_order < HPAGE_PMD_ORDER) {
+ if (folio_test_swapbacked(folio)) {
+ lruvec_stat_mod_folio(folio,
+ NR_SHMEM_THPS, -nr);
+ } else {
+ lruvec_stat_mod_folio(folio,
+ NR_FILE_THPS, -nr);
+ filemap_nr_thps_dec(mapping);
+ }
+ }
+ }
+
+ if (folio_test_swapcache(folio)) {
+ if (mapping) {
+ VM_WARN_ON_ONCE_FOLIO(mapping, folio);
+ return -EINVAL;
+ }
+
+ ci = swap_cluster_get_and_lock(folio);
+ }
+
+ /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
+ if (do_lru)
+ lruvec = folio_lruvec_lock(folio);
+
+ ret = __split_unmapped_folio(folio, new_order, split_at, xas,
+ mapping, split_type);
+
+ /*
+ * Unfreeze after-split folios and put them back to the right
+		 * list. @folio should be kept frozen until page cache
+ * entries are updated with all the other after-split folios
+ * to prevent others seeing stale page cache entries.
+ * As a result, new_folio starts from the next folio of
+ * @folio.
+ */
+ for (new_folio = folio_next(folio); new_folio != end_folio;
+ new_folio = next) {
+ unsigned long nr_pages = folio_nr_pages(new_folio);
+
+ next = folio_next(new_folio);
+
+ zone_device_private_split_cb(folio, new_folio);
+
+ expected_refs = folio_expected_ref_count(new_folio) + 1;
+ folio_ref_unfreeze(new_folio, expected_refs);
+
+ if (do_lru)
+ lru_add_split_folio(folio, new_folio, lruvec, list);
+
+ /*
+ * Anonymous folio with swap cache.
+ * NOTE: shmem in swap cache is not supported yet.
+ */
+ if (ci) {
+ __swap_cache_replace_folio(ci, folio, new_folio);
+ continue;
+ }
+
+ /* Anonymous folio without swap cache */
+ if (!mapping)
+ continue;
+
+ /* Add the new folio to the page cache. */
+ if (new_folio->index < end) {
+ __xa_store(&mapping->i_pages, new_folio->index,
+ new_folio, 0);
+ continue;
+ }
+
+ VM_WARN_ON_ONCE(!nr_shmem_dropped);
+ /* Drop folio beyond EOF: ->index >= end */
+ if (shmem_mapping(mapping) && nr_shmem_dropped)
+ *nr_shmem_dropped += nr_pages;
+ else if (folio_test_clear_dirty(new_folio))
+ folio_account_cleaned(
+ new_folio, inode_to_wb(mapping->host));
+ __filemap_remove_folio(new_folio, NULL);
+ folio_put_refs(new_folio, nr_pages);
+ }
+
+ zone_device_private_split_cb(folio, NULL);
+ /*
+ * Unfreeze @folio only after all page cache entries, which
+ * used to point to it, have been updated with new folios.
+ * Otherwise, a parallel folio_try_get() can grab @folio
+ * and its caller can see stale page cache entries.
+ */
+ expected_refs = folio_expected_ref_count(folio) + 1;
+ folio_ref_unfreeze(folio, expected_refs);
+
+ if (do_lru)
+ unlock_page_lruvec(lruvec);
+
+ if (ci)
+ swap_cluster_unlock(ci);
+ } else {
+ split_queue_unlock(ds_queue);
+ return -EAGAIN;
+ }
+
+ return ret;
+}
+
+/**
+ * __folio_split() - split a folio at @split_at to a @new_order folio
+ * @folio: folio to split
+ * @new_order: the order of the new folio
+ * @split_at: a page within the new folio
+ * @lock_at: a page within @folio to be left locked to caller
+ * @list: after-split folios will be put on it if non NULL
+ * @split_type: perform uniform split or not (non-uniform split)
+ *
+ * It calls __split_unmapped_folio() to perform uniform and non-uniform split.
+ * It is in charge of checking whether the split is supported or not and
+ * preparing @folio for __split_unmapped_folio().
+ *
+ * After splitting, the after-split folio containing @lock_at remains locked
+ * and others are unlocked:
+ * 1. for uniform split, @lock_at points to one of @folio's subpages;
+ * 2. for buddy allocator like (non-uniform) split, @lock_at points to @folio.
+ *
+ * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
+ * split but not to @new_order, the caller needs to check)
+ */
+static int __folio_split(struct folio *folio, unsigned int new_order,
+ struct page *split_at, struct page *lock_at,
+ struct list_head *list, enum split_type split_type)
+{
+ XA_STATE(xas, &folio->mapping->i_pages, folio->index);
+ struct folio *end_folio = folio_next(folio);
+ bool is_anon = folio_test_anon(folio);
+ struct address_space *mapping = NULL;
+ struct anon_vma *anon_vma = NULL;
+ int old_order = folio_order(folio);
+ struct folio *new_folio, *next;
+ int nr_shmem_dropped = 0;
+ int remap_flags = 0;
+ int extra_pins, ret;
+ pgoff_t end = 0;
+ bool is_hzp;
+
+ VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
+
+ if (folio != page_folio(split_at) || folio != page_folio(lock_at))
+ return -EINVAL;
+
/*
- * Allocate the page while the vma is still valid and under
- * the mmap_sem read mode so there is no memory allocation
- * later when we take the mmap_sem in write mode. This is more
- * friendly behavior (OTOH it may actually hide bugs) to
- * filesystems in userland with daemons allocating memory in
- * the userland I/O paths. Allocating memory with the
- * mmap_sem in read mode is good idea also to allow greater
- * scalability.
+ * Folios that just got truncated cannot get split. Signal to the
+ * caller that there was a race.
+ *
+ * TODO: this will also currently refuse shmem folios that are in the
+ * swapcache.
*/
- *hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
- node, __GFP_OTHER_NODE);
+ if (!is_anon && !folio->mapping)
+ return -EBUSY;
+
+ if (new_order >= old_order)
+ return -EINVAL;
+
+ if (!folio_split_supported(folio, new_order, split_type, /* warn = */ true))
+ return -EINVAL;
+
+ is_hzp = is_huge_zero_folio(folio);
+ if (is_hzp) {
+ pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
+ return -EBUSY;
+ }
+
+ if (folio_test_writeback(folio))
+ return -EBUSY;
+
+ if (is_anon) {
+ /*
+ * The caller does not necessarily hold an mmap_lock that would
+		 * prevent the anon_vma disappearing so we first take a
+ * reference to it and then lock the anon_vma for write. This
+ * is similar to folio_lock_anon_vma_read except the write lock
+ * is taken to serialise against parallel split or collapse
+ * operations.
+ */
+ anon_vma = folio_get_anon_vma(folio);
+ if (!anon_vma) {
+ ret = -EBUSY;
+ goto out;
+ }
+ anon_vma_lock_write(anon_vma);
+ mapping = NULL;
+ } else {
+ unsigned int min_order;
+ gfp_t gfp;
+
+ mapping = folio->mapping;
+ min_order = mapping_min_folio_order(folio->mapping);
+ if (new_order < min_order) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ gfp = current_gfp_context(mapping_gfp_mask(mapping) &
+ GFP_RECLAIM_MASK);
+
+ if (!filemap_release_folio(folio, gfp)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (split_type == SPLIT_TYPE_UNIFORM) {
+ xas_set_order(&xas, folio->index, new_order);
+ xas_split_alloc(&xas, folio, old_order, gfp);
+ if (xas_error(&xas)) {
+ ret = xas_error(&xas);
+ goto out;
+ }
+ }
+
+ anon_vma = NULL;
+ i_mmap_lock_read(mapping);
+
+ /*
+		 * __split_unmapped_folio() may need to trim off pages beyond
+ * EOF: but on 32-bit, i_size_read() takes an irq-unsafe
+ * seqlock, which cannot be nested inside the page tree lock.
+ * So note end now: i_size itself may be changed at any moment,
+ * but folio lock is good enough to serialize the trimming.
+ */
+ end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
+ if (shmem_mapping(mapping))
+ end = shmem_fallocend(mapping->host, end);
+ }
/*
- * After allocating the hugepage, release the mmap_sem read lock in
- * preparation for taking it in write mode.
+	 * Racy check whether we can split the folio, before unmap_folio()
+	 * splits the PMDs.
*/
- up_read(&mm->mmap_sem);
- if (unlikely(!*hpage)) {
- count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
- *hpage = ERR_PTR(-ENOMEM);
- return NULL;
+ if (!can_split_folio(folio, 1, &extra_pins)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ unmap_folio(folio);
+
+ /* block interrupt reentry in xa_lock and spinlock */
+ local_irq_disable();
+ if (mapping) {
+ /*
+ * Check if the folio is present in page cache.
+		 * We assume all tail pages are present too, if the folio is there.
+ */
+ xas_lock(&xas);
+ xas_reset(&xas);
+ if (xas_load(&xas) != folio) {
+ ret = -EAGAIN;
+ goto fail;
+ }
}
- count_vm_event(THP_COLLAPSE_ALLOC);
- return *hpage;
+ ret = __folio_freeze_and_split_unmapped(folio, new_order, split_at, &xas, mapping,
+ true, list, split_type, end, &nr_shmem_dropped,
+ extra_pins);
+fail:
+ if (mapping)
+ xas_unlock(&xas);
+
+ local_irq_enable();
+
+ if (nr_shmem_dropped)
+ shmem_uncharge(mapping->host, nr_shmem_dropped);
+
+ if (!ret && is_anon && !folio_is_device_private(folio))
+ remap_flags = RMP_USE_SHARED_ZEROPAGE;
+
+ remap_page(folio, 1 << old_order, remap_flags);
+
+ /*
+ * Unlock all after-split folios except the one containing
+ * @lock_at page. If @folio is not split, it will be kept locked.
+ */
+ for (new_folio = folio; new_folio != end_folio; new_folio = next) {
+ next = folio_next(new_folio);
+ if (new_folio == page_folio(lock_at))
+ continue;
+
+ folio_unlock(new_folio);
+ /*
+		 * Subpages may be freed if there wasn't any mapping, e.g. if
+		 * add_to_swap() is running on an lru page that had its mapping
+		 * zapped. Freeing these pages requires taking the lru_lock, so
+		 * we do the put_page of the tail pages after the split is
+		 * complete.
+ */
+ free_folio_and_swap_cache(new_folio);
+ }
+
+out_unlock:
+ if (anon_vma) {
+ anon_vma_unlock_write(anon_vma);
+ put_anon_vma(anon_vma);
+ }
+ if (mapping)
+ i_mmap_unlock_read(mapping);
+out:
+ xas_destroy(&xas);
+ if (old_order == HPAGE_PMD_ORDER)
+ count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
+ count_mthp_stat(old_order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
+ return ret;
}
-#else
-static struct page *khugepaged_alloc_hugepage(bool *wait)
+
+/**
+ * folio_split_unmapped() - split a large anon folio that is already unmapped
+ * @folio: folio to split
+ * @new_order: the order of folios after split
+ *
+ * This function is a helper for splitting folios that have already been
+ * unmapped. The use case is that the device or the CPU can refuse to migrate
+ * THP pages in the middle of migration, due to allocation issues on either
+ * side.
+ *
+ * The anon_vma lock is not required to be held, but mmap_read_lock() or
+ * mmap_write_lock() should be. @folio is expected to be locked by the
+ * caller. Device-private and non-device-private folios are supported, along
+ * with folios that are in the swapcache. @folio should also be unmapped and
+ * isolated from the LRU (if applicable).
+ *
+ * Upon return, the folio is not remapped, split folios are not added to LRU,
+ * free_folio_and_swap_cache() is not called, and new folios remain locked.
+ *
+ * Return: 0 on success, -EAGAIN if the folio cannot be split (e.g., due to
+ * insufficient reference count or extra pins).
+ */
+int folio_split_unmapped(struct folio *folio, unsigned int new_order)
{
- struct page *hpage;
+ int extra_pins, ret = 0;
- do {
- hpage = alloc_hugepage(khugepaged_defrag());
- if (!hpage) {
- count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
- if (!*wait)
- return NULL;
+ VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(!folio_test_anon(folio), folio);
- *wait = false;
- khugepaged_alloc_sleep();
- } else
- count_vm_event(THP_COLLAPSE_ALLOC);
- } while (unlikely(!hpage) && likely(khugepaged_enabled()));
+ if (!can_split_folio(folio, 1, &extra_pins))
+ return -EAGAIN;
- return hpage;
+ local_irq_disable();
+ ret = __folio_freeze_and_split_unmapped(folio, new_order, &folio->page, NULL,
+ NULL, false, NULL, SPLIT_TYPE_UNIFORM,
+ 0, NULL, extra_pins);
+ local_irq_enable();
+ return ret;
}
-static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+/*
+ * This function splits a large folio into smaller folios of order @new_order.
+ * @page can point to any page of the large folio to split. The split operation
+ * does not change the position of @page.
+ *
+ * Prerequisites:
+ *
+ * 1) The caller must hold a reference on the @page's owning folio, also known
+ * as the large folio.
+ *
+ * 2) The large folio must be locked.
+ *
+ * 3) The folio must not be pinned. Any unexpected folio references, including
+ * GUP pins, will result in the folio not getting split; instead, the caller
+ * will receive an -EAGAIN.
+ *
+ * 4) @new_order > 1, usually. Splitting anonymous folios to order-1 is not
+ * supported, because folio->_deferred_list, which is used by partially
+ * mapped folios, is stored in subpage 2, but an order-1 folio only has
+ * subpages 0 and 1. File-backed order-1 folios are supported, since they do
+ * not use _deferred_list.
+ *
+ * After splitting, the caller's folio reference will be transferred to @page,
+ * resulting in a raised refcount of @page after this call. The other pages may
+ * be freed if they are not mapped.
+ *
+ * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
+ *
+ * The after-split folios of @new_order will inherit the mapping, flags, and
+ * so on from the huge page.
+ *
+ * Returns 0 if the huge page was split successfully.
+ *
+ * Returns -EAGAIN if the folio has an unexpected reference (e.g., a GUP pin)
+ * or if the folio was concurrently removed from the page cache.
+ *
+ * Returns -EBUSY when trying to split the huge zeropage, if the folio is
+ * under writeback, if fs-specific folio metadata cannot currently be
+ * released, or if some unexpected race happened (e.g., anon VMA disappeared,
+ * truncation).
+ *
+ * Callers should ensure that the order respects the address space mapping
+ * min-order if one is set for non-anonymous folios.
+ *
+ * Returns -EINVAL when trying to split to an order that is incompatible
+ * with the folio. Splitting to order 0 is compatible with all folios.
+ */
+int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ unsigned int new_order)
{
- if (!*hpage)
- *hpage = khugepaged_alloc_hugepage(wait);
-
- if (unlikely(!*hpage))
- return false;
+ struct folio *folio = page_folio(page);
- return true;
+ return __folio_split(folio, new_order, &folio->page, page, list,
+ SPLIT_TYPE_UNIFORM);
}
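+
+/*
+ * Illustrative caller pattern, not part of this change: the caller pins and
+ * locks the folio, then handles -EAGAIN (unexpected references) or -EBUSY
+ * (writeback, truncation, ...) as described above:
+ *
+ *	folio_get(folio);
+ *	if (folio_trylock(folio)) {
+ *		err = __split_huge_page_to_list_to_order(&folio->page, NULL, 0);
+ *		folio_unlock(folio);
+ *	}
+ *	folio_put(folio);
+ */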
-static struct page
-*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address,
- int node)
+/**
+ * folio_split() - split a folio at @split_at to a @new_order folio
+ * @folio: folio to split
+ * @new_order: the order of the new folio
+ * @split_at: a page within the new folio
+ * @list: after-split folios are added to @list if not null, otherwise to LRU
+ * list
+ *
+ * It has the same prerequisites and return values as
+ * split_huge_page_to_list_to_order().
+ *
+ * Split a folio at @split_at to a @new_order folio, leaving the
+ * remaining subpages of the original folio as large as possible. For example,
+ * when splitting an order-9 folio at its third order-3 subpage to an order-3
+ * folio, there are 2^(9-3)=64 order-3 subpages in the order-9 folio. After
+ * the split, there will be a group of folios with different orders and the
+ * new folio containing @split_at is marked in braces:
+ * [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8].
+ *
+ * After the split, the folio is left locked for the caller.
+ *
+ * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might
+ * have been split, but not to @new_order; the caller needs to check the
+ * order of the resulting folios)
+ */
+int folio_split(struct folio *folio, unsigned int new_order,
+ struct page *split_at, struct list_head *list)
{
- up_read(&mm->mmap_sem);
- VM_BUG_ON(!*hpage);
- return *hpage;
+ return __folio_split(folio, new_order, split_at, &folio->page, list,
+ SPLIT_TYPE_NON_UNIFORM);
}
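+
+/*
+ * Illustrative sketch, not part of this change: split only the chunk that
+ * contains a given subpage of a locked order-9 folio into an order-3 folio,
+ * keeping the remainder as large as possible (see the example above). "idx"
+ * is a hypothetical page index within the folio:
+ *
+ *	struct page *split_at = folio_page(folio, idx);
+ *
+ *	if (!folio_split(folio, 3, split_at, NULL))
+ *		// the folio containing split_at is now order-3 and locked
+ */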
-#endif
-static bool hugepage_vma_check(struct vm_area_struct *vma)
+int min_order_for_split(struct folio *folio)
{
- if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
- (vma->vm_flags & VM_NOHUGEPAGE))
- return false;
+ if (folio_test_anon(folio))
+ return 0;
- if (!vma->anon_vma || vma->vm_ops)
- return false;
- if (is_vma_temporary_stack(vma))
- return false;
- VM_BUG_ON(vma->vm_flags & VM_NO_THP);
- return true;
+ if (!folio->mapping) {
+ if (folio_test_pmd_mappable(folio))
+ count_vm_event(THP_SPLIT_PAGE_FAILED);
+ return -EBUSY;
+ }
+
+ return mapping_min_folio_order(folio->mapping);
}
-static void collapse_huge_page(struct mm_struct *mm,
- unsigned long address,
- struct page **hpage,
- struct vm_area_struct *vma,
- int node)
+int split_folio_to_list(struct folio *folio, struct list_head *list)
{
- pmd_t *pmd, _pmd;
- pte_t *pte;
- pgtable_t pgtable;
- struct page *new_page;
- spinlock_t *ptl;
- int isolated;
- unsigned long hstart, hend;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
+ return split_huge_page_to_list_to_order(&folio->page, list, 0);
+}
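+
+/*
+ * Illustrative sketch, not part of this change: a caller that wants the
+ * smallest folios the mapping allows can combine the helpers above;
+ * min_order_for_split() returns 0 for anon folios, -EBUSY for truncated
+ * folios, and the mapping's minimum folio order otherwise:
+ *
+ *	int order = min_order_for_split(folio);
+ *
+ *	if (order < 0)
+ *		return order;
+ *	return split_huge_page_to_list_to_order(&folio->page, NULL, order);
+ */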
+
+/*
+ * __folio_unqueue_deferred_split() is not to be called directly:
+ * the folio_unqueue_deferred_split() inline wrapper in mm/internal.h
+ * limits its calls to those folios which may have a _deferred_list for
+ * queueing THP splits, and that list is (racily observed to be) non-empty.
+ *
+ * It is unsafe to call folio_unqueue_deferred_split() until the folio's
+ * refcount is zero: even when split_queue_lock is held, a non-empty
+ * _deferred_list might be in use on deferred_split_scan()'s unlocked
+ * on-stack list.
+ *
+ * If memory cgroups are enabled, split_queue_lock is in the mem_cgroup: it is
+ * therefore important to unqueue deferred split before changing folio memcg.
+ */
+bool __folio_unqueue_deferred_split(struct folio *folio)
+{
+ struct deferred_split *ds_queue;
+ unsigned long flags;
+ bool unqueued = false;
+
+ WARN_ON_ONCE(folio_ref_count(folio));
+ WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg_charged(folio));
+
+ ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
+ if (!list_empty(&folio->_deferred_list)) {
+ ds_queue->split_queue_len--;
+ if (folio_test_partially_mapped(folio)) {
+ folio_clear_partially_mapped(folio);
+ mod_mthp_stat(folio_order(folio),
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ }
+ list_del_init(&folio->_deferred_list);
+ unqueued = true;
+ }
+ split_queue_unlock_irqrestore(ds_queue, flags);
- VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+ return unqueued; /* useful for debug warnings */
+}
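+
+/*
+ * Rough shape of the folio_unqueue_deferred_split() wrapper described above
+ * (a sketch, not the verbatim mm/internal.h helper): it filters out folios
+ * that cannot be on a deferred split queue before taking the lock:
+ *
+ *	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
+ *		return false;
+ *	if (data_race(list_empty(&folio->_deferred_list)))
+ *		return false;
+ *	return __folio_unqueue_deferred_split(folio);
+ */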
+
+/* partially_mapped=false won't clear PG_partially_mapped folio flag */
+void deferred_split_folio(struct folio *folio, bool partially_mapped)
+{
+ struct deferred_split *ds_queue;
+ unsigned long flags;
- /* release the mmap_sem read lock. */
- new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
- if (!new_page)
+	/*
+	 * Order-1 folios have no space for a deferred list, and little
+	 * memory is wasted by not deferring the split of such small folios.
+	 */
+ if (folio_order(folio) <= 1)
return;
- if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
+ if (!partially_mapped && !split_underused_thp)
return;
/*
- * Prevent all access to pagetables with the exception of
- * gup_fast later hanlded by the ptep_clear_flush and the VM
- * handled by the anon_vma lock + PG_lock.
+ * Exclude swapcache: originally to avoid a corrupt deferred split
+ * queue. Nowadays that is fully prevented by memcg1_swapout();
+ * but if page reclaim is already handling the same folio, it is
+ * unnecessary to handle it again in the shrinker, so excluding
+ * swapcache here may still be a useful optimization.
*/
- down_write(&mm->mmap_sem);
- if (unlikely(khugepaged_test_exit(mm)))
- goto out;
+ if (folio_test_swapcache(folio))
+ return;
- vma = find_vma(mm, address);
- hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
- hend = vma->vm_end & HPAGE_PMD_MASK;
- if (address < hstart || address + HPAGE_PMD_SIZE > hend)
- goto out;
- if (!hugepage_vma_check(vma))
- goto out;
- pmd = mm_find_pmd(mm, address);
- if (!pmd)
- goto out;
- if (pmd_trans_huge(*pmd))
- goto out;
+ ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
+ if (partially_mapped) {
+ if (!folio_test_partially_mapped(folio)) {
+ folio_set_partially_mapped(folio);
+ if (folio_test_pmd_mappable(folio))
+ count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+ count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
- anon_vma_lock_write(vma->anon_vma);
+ }
+ } else {
+ /* partially mapped folios cannot become non-partially mapped */
+ VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
+ }
+ if (list_empty(&folio->_deferred_list)) {
+ struct mem_cgroup *memcg;
+
+ memcg = folio_split_queue_memcg(folio, ds_queue);
+ list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
+ ds_queue->split_queue_len++;
+ if (memcg)
+ set_shrinker_bit(memcg, folio_nid(folio),
+ shrinker_id(deferred_split_shrinker));
+ }
+ split_queue_unlock_irqrestore(ds_queue, flags);
+}
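+
+/*
+ * Illustrative call modes, not part of this change: rmap code that notices a
+ * partial unmap queues the folio and marks it partially mapped, while a
+ * freshly mapped anon THP may be queued for the underused shrinker without
+ * the flag:
+ *
+ *	deferred_split_folio(folio, true);	// partially unmapped THP
+ *	deferred_split_folio(folio, false);	// possibly-underused THP
+ */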
- pte = pte_offset_map(pmd, address);
- ptl = pte_lockptr(mm, pmd);
+static unsigned long deferred_split_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct pglist_data *pgdata = NODE_DATA(sc->nid);
+ struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
- mmun_start = address;
- mmun_end = address + HPAGE_PMD_SIZE;
- mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
- spin_lock(&mm->page_table_lock); /* probably unnecessary */
- /*
- * After this gup_fast can't run anymore. This also removes
- * any huge TLB entry from the CPU so we won't allow
- * huge and small TLB entries for the same virtual address
- * to avoid the risk of CPU bugs in that area.
- */
- _pmd = pmdp_clear_flush(vma, address, pmd);
- spin_unlock(&mm->page_table_lock);
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+#ifdef CONFIG_MEMCG
+ if (sc->memcg)
+ ds_queue = &sc->memcg->deferred_split_queue;
+#endif
+ return READ_ONCE(ds_queue->split_queue_len);
+}
- spin_lock(ptl);
- isolated = __collapse_huge_page_isolate(vma, address, pte);
- spin_unlock(ptl);
+static bool thp_underused(struct folio *folio)
+{
+ int num_zero_pages = 0, num_filled_pages = 0;
+ int i;
+
+ if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
+ return false;
+
+ if (folio_contain_hwpoisoned_page(folio))
+ return false;
- if (unlikely(!isolated)) {
- pte_unmap(pte);
- spin_lock(&mm->page_table_lock);
- BUG_ON(!pmd_none(*pmd));
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
+ if (++num_zero_pages > khugepaged_max_ptes_none)
+ return true;
+ } else {
+ /*
+			 * Another early exit path: bail out once the number
+			 * of non-zero-filled pages reaches the threshold.
+ */
+ if (++num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none)
+ return false;
+ }
+ }
+ return false;
+}
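+
+/*
+ * Worked example, assuming HPAGE_PMD_NR == 512: with khugepaged_max_ptes_none
+ * set to 256, a PMD-sized folio counts as underused once 257 of its subpages
+ * are zero-filled, and the scan above bails out once 256 non-zero subpages
+ * have been seen. With the khugepaged default of HPAGE_PMD_NR - 1, the
+ * function returns false immediately, effectively disabling underused-THP
+ * splitting.
+ */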
+
+static unsigned long deferred_split_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct deferred_split *ds_queue;
+ unsigned long flags;
+ struct folio *folio, *next;
+ int split = 0, i;
+ struct folio_batch fbatch;
+
+ folio_batch_init(&fbatch);
+
+retry:
+ ds_queue = split_queue_lock_irqsave(sc->nid, sc->memcg, &flags);
+ /* Take pin on all head pages to avoid freeing them under us */
+ list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
+ _deferred_list) {
+ if (folio_try_get(folio)) {
+ folio_batch_add(&fbatch, folio);
+ } else if (folio_test_partially_mapped(folio)) {
+ /* We lost race with folio_put() */
+ folio_clear_partially_mapped(folio);
+ mod_mthp_stat(folio_order(folio),
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ }
+ list_del_init(&folio->_deferred_list);
+ ds_queue->split_queue_len--;
+ if (!--sc->nr_to_scan)
+ break;
+ if (!folio_batch_space(&fbatch))
+ break;
+ }
+ split_queue_unlock_irqrestore(ds_queue, flags);
+
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ bool did_split = false;
+ bool underused = false;
+ struct deferred_split *fqueue;
+
+ folio = fbatch.folios[i];
+ if (!folio_test_partially_mapped(folio)) {
+ /*
+ * See try_to_map_unused_to_zeropage(): we cannot
+ * optimize zero-filled pages after splitting an
+ * mlocked folio.
+ */
+ if (folio_test_mlocked(folio))
+ goto next;
+ underused = thp_underused(folio);
+ if (!underused)
+ goto next;
+ }
+ if (!folio_trylock(folio))
+ goto next;
+ if (!split_folio(folio)) {
+ did_split = true;
+ if (underused)
+ count_vm_event(THP_UNDERUSED_SPLIT_PAGE);
+ split++;
+ }
+ folio_unlock(folio);
+next:
+ if (did_split || !folio_test_partially_mapped(folio))
+ continue;
/*
- * We can only use set_pmd_at when establishing
- * hugepmds and never for establishing regular pmds that
- * points to regular pagetables. Use pmd_populate for that
+		 * Only add the folio back to the queue if it is partially
+		 * mapped. If thp_underused() returned false, or if
+		 * split_folio() failed on an underused folio, consider it
+		 * used and don't add it back to the split queue.
*/
- pmd_populate(mm, pmd, pmd_pgtable(_pmd));
- spin_unlock(&mm->page_table_lock);
- anon_vma_unlock_write(vma->anon_vma);
- goto out;
+ fqueue = folio_split_queue_lock_irqsave(folio, &flags);
+ if (list_empty(&folio->_deferred_list)) {
+ list_add_tail(&folio->_deferred_list, &fqueue->split_queue);
+ fqueue->split_queue_len++;
+ }
+ split_queue_unlock_irqrestore(fqueue, flags);
+ }
+ folios_put(&fbatch);
+
+ if (sc->nr_to_scan && !list_empty(&ds_queue->split_queue)) {
+ cond_resched();
+ goto retry;
}
/*
- * All pages are isolated and locked so anon_vma rmap
- * can't run anymore.
+	 * Stop the shrinker if we didn't split any folio and the queue is now
+	 * empty. This can happen if the folios were freed under us.
*/
- anon_vma_unlock_write(vma->anon_vma);
-
- __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
- pte_unmap(pte);
- __SetPageUptodate(new_page);
- pgtable = pmd_pgtable(_pmd);
+ if (!split && list_empty(&ds_queue->split_queue))
+ return SHRINK_STOP;
+ return split;
+}
- _pmd = mk_huge_pmd(new_page, vma);
+#ifdef CONFIG_MEMCG
+void reparent_deferred_split_queue(struct mem_cgroup *memcg)
+{
+ struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+ struct deferred_split *ds_queue = &memcg->deferred_split_queue;
+ struct deferred_split *parent_ds_queue = &parent->deferred_split_queue;
+ int nid;
- /*
- * spin_lock() below is not the equivalent of smp_wmb(), so
- * this is needed to avoid the copy_huge_page writes to become
- * visible after the set_pmd_at() write.
- */
- smp_wmb();
+ spin_lock_irq(&ds_queue->split_queue_lock);
+ spin_lock_nested(&parent_ds_queue->split_queue_lock, SINGLE_DEPTH_NESTING);
- spin_lock(&mm->page_table_lock);
- BUG_ON(!pmd_none(*pmd));
- page_add_new_anon_rmap(new_page, vma, address);
- pgtable_trans_huge_deposit(mm, pmd, pgtable);
- set_pmd_at(mm, address, pmd, _pmd);
- update_mmu_cache_pmd(vma, address, pmd);
- spin_unlock(&mm->page_table_lock);
+ if (!ds_queue->split_queue_len)
+ goto unlock;
- *hpage = NULL;
+ list_splice_tail_init(&ds_queue->split_queue, &parent_ds_queue->split_queue);
+ parent_ds_queue->split_queue_len += ds_queue->split_queue_len;
+ ds_queue->split_queue_len = 0;
- khugepaged_pages_collapsed++;
-out_up_write:
- up_write(&mm->mmap_sem);
- return;
+ for_each_node(nid)
+ set_shrinker_bit(parent, nid, shrinker_id(deferred_split_shrinker));
-out:
- mem_cgroup_uncharge_page(new_page);
- goto out_up_write;
+unlock:
+ spin_unlock(&parent_ds_queue->split_queue_lock);
+ spin_unlock_irq(&ds_queue->split_queue_lock);
}
+#endif
-static int khugepaged_scan_pmd(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long address,
- struct page **hpage)
+#ifdef CONFIG_DEBUG_FS
+static void split_huge_pages_all(void)
{
- pmd_t *pmd;
- pte_t *pte, *_pte;
- int ret = 0, referenced = 0, none = 0;
+ struct zone *zone;
struct page *page;
- unsigned long _address;
- spinlock_t *ptl;
- int node = NUMA_NO_NODE;
-
- VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+ struct folio *folio;
+ unsigned long pfn, max_zone_pfn;
+ unsigned long total = 0, split = 0;
- pmd = mm_find_pmd(mm, address);
- if (!pmd)
- goto out;
- if (pmd_trans_huge(*pmd))
- goto out;
+ pr_debug("Split all THPs\n");
+ for_each_zone(zone) {
+ if (!managed_zone(zone))
+ continue;
+ max_zone_pfn = zone_end_pfn(zone);
+ for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
+ int nr_pages;
- pte = pte_offset_map_lock(mm, pmd, address, &ptl);
- for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
- _pte++, _address += PAGE_SIZE) {
- pte_t pteval = *_pte;
- if (pte_none(pteval)) {
- if (++none <= khugepaged_max_ptes_none)
+ page = pfn_to_online_page(pfn);
+ if (!page || PageTail(page))
continue;
- else
- goto out_unmap;
+ folio = page_folio(page);
+ if (!folio_try_get(folio))
+ continue;
+
+ if (unlikely(page_folio(page) != folio))
+ goto next;
+
+ if (zone != folio_zone(folio))
+ goto next;
+
+ if (!folio_test_large(folio)
+ || folio_test_hugetlb(folio)
+ || !folio_test_lru(folio))
+ goto next;
+
+ total++;
+ folio_lock(folio);
+ nr_pages = folio_nr_pages(folio);
+ if (!split_folio(folio))
+ split++;
+ pfn += nr_pages - 1;
+ folio_unlock(folio);
+next:
+ folio_put(folio);
+ cond_resched();
}
- if (!pte_present(pteval) || !pte_write(pteval))
- goto out_unmap;
- page = vm_normal_page(vma, _address, pteval);
- if (unlikely(!page))
- goto out_unmap;
- /*
- * Chose the node of the first page. This could
- * be more sophisticated and look at more pages,
- * but isn't for now.
- */
- if (node == NUMA_NO_NODE)
- node = page_to_nid(page);
- VM_BUG_ON(PageCompound(page));
- if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
- goto out_unmap;
- /* cannot use mapcount: can't collapse if there's a gup pin */
- if (page_count(page) != 1)
- goto out_unmap;
- if (pte_young(pteval) || PageReferenced(page) ||
- mmu_notifier_test_young(vma->vm_mm, address))
- referenced = 1;
- }
- if (referenced)
- ret = 1;
-out_unmap:
- pte_unmap_unlock(pte, ptl);
- if (ret)
- /* collapse_huge_page will return with the mmap_sem released */
- collapse_huge_page(mm, address, hpage, vma, node);
-out:
- return ret;
+ }
+
+ pr_debug("%lu of %lu THP split\n", split, total);
}
-static void collect_mm_slot(struct mm_slot *mm_slot)
+static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
{
- struct mm_struct *mm = mm_slot->mm;
-
- VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+ return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
+ is_vm_hugetlb_page(vma);
+}
- if (khugepaged_test_exit(mm)) {
- /* free mm_slot */
- hash_del(&mm_slot->hash);
- list_del(&mm_slot->mm_node);
+static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
+ unsigned long vaddr_end, unsigned int new_order,
+ long in_folio_offset)
+{
+ int ret = 0;
+ struct task_struct *task;
+ struct mm_struct *mm;
+ unsigned long total = 0, split = 0;
+ unsigned long addr;
- /*
- * Not strictly needed because the mm exited already.
- *
- * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
- */
+ vaddr_start &= PAGE_MASK;
+ vaddr_end &= PAGE_MASK;
- /* khugepaged_mm_lock actually not necessary for the below */
- free_mm_slot(mm_slot);
- mmdrop(mm);
+ task = find_get_task_by_vpid(pid);
+ if (!task) {
+ ret = -ESRCH;
+ goto out;
}
-}
-static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
- struct page **hpage)
- __releases(&khugepaged_mm_lock)
- __acquires(&khugepaged_mm_lock)
-{
- struct mm_slot *mm_slot;
- struct mm_struct *mm;
- struct vm_area_struct *vma;
- int progress = 0;
-
- VM_BUG_ON(!pages);
- VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
-
- if (khugepaged_scan.mm_slot)
- mm_slot = khugepaged_scan.mm_slot;
- else {
- mm_slot = list_entry(khugepaged_scan.mm_head.next,
- struct mm_slot, mm_node);
- khugepaged_scan.address = 0;
- khugepaged_scan.mm_slot = mm_slot;
- }
- spin_unlock(&khugepaged_mm_lock);
-
- mm = mm_slot->mm;
- down_read(&mm->mmap_sem);
- if (unlikely(khugepaged_test_exit(mm)))
- vma = NULL;
- else
- vma = find_vma(mm, khugepaged_scan.address);
+ /* Find the mm_struct */
+ mm = get_task_mm(task);
+ put_task_struct(task);
+
+ if (!mm) {
+ ret = -EINVAL;
+ goto out;
+ }
- progress++;
- for (; vma; vma = vma->vm_next) {
- unsigned long hstart, hend;
+ pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx], new_order: %u, in_folio_offset: %ld\n",
+ pid, vaddr_start, vaddr_end, new_order, in_folio_offset);
- cond_resched();
- if (unlikely(khugepaged_test_exit(mm))) {
- progress++;
+ mmap_read_lock(mm);
+ /*
+	 * Always advance addr by PAGE_SIZE, since a PTE page table could be
+	 * filled with PTE-mapped THPs, each of which is distinct.
+ */
+ for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
+ struct vm_area_struct *vma = vma_lookup(mm, addr);
+ struct folio_walk fw;
+ struct folio *folio;
+ struct address_space *mapping;
+ unsigned int target_order = new_order;
+
+ if (!vma)
break;
- }
- if (!hugepage_vma_check(vma)) {
-skip:
- progress++;
+
+ /* skip special VMA and hugetlb VMA */
+ if (vma_not_suitable_for_thp_split(vma)) {
+ addr = vma->vm_end;
continue;
}
- hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
- hend = vma->vm_end & HPAGE_PMD_MASK;
- if (hstart >= hend)
- goto skip;
- if (khugepaged_scan.address > hend)
- goto skip;
- if (khugepaged_scan.address < hstart)
- khugepaged_scan.address = hstart;
- VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
-
- while (khugepaged_scan.address < hend) {
- int ret;
- cond_resched();
- if (unlikely(khugepaged_test_exit(mm)))
- goto breakouterloop;
-
- VM_BUG_ON(khugepaged_scan.address < hstart ||
- khugepaged_scan.address + HPAGE_PMD_SIZE >
- hend);
- ret = khugepaged_scan_pmd(mm, vma,
- khugepaged_scan.address,
- hpage);
- /* move to next address */
- khugepaged_scan.address += HPAGE_PMD_SIZE;
- progress += HPAGE_PMD_NR;
- if (ret)
- /* we released mmap_sem so break loop */
- goto breakouterloop_mmap_sem;
- if (progress >= pages)
- goto breakouterloop;
+
+ folio = folio_walk_start(&fw, vma, addr, 0);
+ if (!folio)
+ continue;
+
+ if (!is_transparent_hugepage(folio))
+ goto next;
+
+ if (!folio_test_anon(folio)) {
+ mapping = folio->mapping;
+ target_order = max(new_order,
+ mapping_min_folio_order(mapping));
}
- }
-breakouterloop:
- up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
-breakouterloop_mmap_sem:
- spin_lock(&khugepaged_mm_lock);
- VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
- /*
- * Release the current mm_slot if this mm is about to die, or
- * if we scanned all vmas of this mm.
- */
- if (khugepaged_test_exit(mm) || !vma) {
+ if (target_order >= folio_order(folio))
+ goto next;
+
+ total++;
/*
- * Make sure that if mm_users is reaching zero while
- * khugepaged runs here, khugepaged_exit will find
- * mm_slot not pointing to the exiting mm.
+		 * For folios with private data, split_huge_page_to_list_to_order()
+		 * will try to drop it before the split and then check whether
+		 * the folio can be split. So skip the check here.
*/
- if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
- khugepaged_scan.mm_slot = list_entry(
- mm_slot->mm_node.next,
- struct mm_slot, mm_node);
- khugepaged_scan.address = 0;
+ if (!folio_test_private(folio) &&
+ !can_split_folio(folio, 0, NULL))
+ goto next;
+
+ if (!folio_trylock(folio))
+ goto next;
+ folio_get(folio);
+ folio_walk_end(&fw, vma);
+
+ if (!folio_test_anon(folio) && folio->mapping != mapping)
+ goto unlock;
+
+ if (in_folio_offset < 0 ||
+ in_folio_offset >= folio_nr_pages(folio)) {
+ if (!split_folio_to_order(folio, target_order))
+ split++;
} else {
- khugepaged_scan.mm_slot = NULL;
- khugepaged_full_scans++;
+ struct page *split_at = folio_page(folio,
+ in_folio_offset);
+ if (!folio_split(folio, target_order, split_at, NULL))
+ split++;
}
- collect_mm_slot(mm_slot);
+unlock:
+		folio_unlock(folio);
+ folio_put(folio);
+
+ cond_resched();
+ continue;
+next:
+ folio_walk_end(&fw, vma);
+ cond_resched();
}
+ mmap_read_unlock(mm);
+ mmput(mm);
- return progress;
-}
+ pr_debug("%lu of %lu THP split\n", split, total);
-static int khugepaged_has_work(void)
-{
- return !list_empty(&khugepaged_scan.mm_head) &&
- khugepaged_enabled();
+out:
+ return ret;
}
-static int khugepaged_wait_event(void)
+static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
+ pgoff_t off_end, unsigned int new_order,
+ long in_folio_offset)
{
- return !list_empty(&khugepaged_scan.mm_head) ||
- kthread_should_stop();
-}
+ struct filename *file;
+ struct file *candidate;
+ struct address_space *mapping;
+ int ret = -EINVAL;
+ pgoff_t index;
+ int nr_pages = 1;
+ unsigned long total = 0, split = 0;
+ unsigned int min_order;
+ unsigned int target_order;
+
+ file = getname_kernel(file_path);
+ if (IS_ERR(file))
+ return ret;
-static void khugepaged_do_scan(void)
-{
- struct page *hpage = NULL;
- unsigned int progress = 0, pass_through_head = 0;
- unsigned int pages = khugepaged_pages_to_scan;
- bool wait = true;
+ candidate = file_open_name(file, O_RDONLY, 0);
+ if (IS_ERR(candidate))
+ goto out;
- barrier(); /* write khugepaged_pages_to_scan to local stack */
+ pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx], new_order: %u, in_folio_offset: %ld\n",
+ file_path, off_start, off_end, new_order, in_folio_offset);
- while (progress < pages) {
- if (!khugepaged_prealloc_page(&hpage, &wait))
- break;
+ mapping = candidate->f_mapping;
+ min_order = mapping_min_folio_order(mapping);
+ target_order = max(new_order, min_order);
- cond_resched();
+ for (index = off_start; index < off_end; index += nr_pages) {
+ struct folio *folio = filemap_get_folio(mapping, index);
- if (unlikely(kthread_should_stop() || freezing(current)))
- break;
+ nr_pages = 1;
+ if (IS_ERR(folio))
+ continue;
- spin_lock(&khugepaged_mm_lock);
- if (!khugepaged_scan.mm_slot)
- pass_through_head++;
- if (khugepaged_has_work() &&
- pass_through_head < 2)
- progress += khugepaged_scan_mm_slot(pages - progress,
- &hpage);
- else
- progress = pages;
- spin_unlock(&khugepaged_mm_lock);
- }
+ if (!folio_test_large(folio))
+ goto next;
- if (!IS_ERR_OR_NULL(hpage))
- put_page(hpage);
-}
+ total++;
+ nr_pages = folio_nr_pages(folio);
-static void khugepaged_wait_work(void)
-{
- try_to_freeze();
+ if (target_order >= folio_order(folio))
+ goto next;
- if (khugepaged_has_work()) {
- if (!khugepaged_scan_sleep_millisecs)
- return;
+ if (!folio_trylock(folio))
+ goto next;
- wait_event_freezable_timeout(khugepaged_wait,
- kthread_should_stop(),
- msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
- return;
+ if (folio->mapping != mapping)
+ goto unlock;
+
+ if (in_folio_offset < 0 || in_folio_offset >= nr_pages) {
+ if (!split_folio_to_order(folio, target_order))
+ split++;
+ } else {
+ struct page *split_at = folio_page(folio,
+ in_folio_offset);
+ if (!folio_split(folio, target_order, split_at, NULL))
+ split++;
+ }
+
+unlock:
+ folio_unlock(folio);
+next:
+ folio_put(folio);
+ cond_resched();
}
- if (khugepaged_enabled())
- wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
+ filp_close(candidate, NULL);
+ ret = 0;
+
+ pr_debug("%lu of %lu file-backed THP split\n", split, total);
+out:
+ putname(file);
+ return ret;
}
-static int khugepaged(void *none)
+#define MAX_INPUT_BUF_SZ 255
+
+static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppops)
{
- struct mm_slot *mm_slot;
+ static DEFINE_MUTEX(split_debug_mutex);
+ ssize_t ret;
+ /*
+	 * holds pid, start_vaddr, end_vaddr, new_order or
+	 * file_path, off_start, off_end, new_order
+ */
+ char input_buf[MAX_INPUT_BUF_SZ];
+ int pid;
+ unsigned long vaddr_start, vaddr_end;
+ unsigned int new_order = 0;
+ long in_folio_offset = -1;
- set_freezable();
- set_user_nice(current, 19);
+ ret = mutex_lock_interruptible(&split_debug_mutex);
+ if (ret)
+ return ret;
- while (!kthread_should_stop()) {
- khugepaged_do_scan();
- khugepaged_wait_work();
- }
+ ret = -EFAULT;
- spin_lock(&khugepaged_mm_lock);
- mm_slot = khugepaged_scan.mm_slot;
- khugepaged_scan.mm_slot = NULL;
- if (mm_slot)
- collect_mm_slot(mm_slot);
- spin_unlock(&khugepaged_mm_lock);
- return 0;
-}
+ memset(input_buf, 0, MAX_INPUT_BUF_SZ);
+ if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
+ goto out;
-static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
- unsigned long haddr, pmd_t *pmd)
-{
- struct mm_struct *mm = vma->vm_mm;
- pgtable_t pgtable;
- pmd_t _pmd;
- int i;
+ input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
- pmdp_clear_flush(vma, haddr, pmd);
- /* leave pmd empty until pte is filled */
+ if (input_buf[0] == '/') {
+ char *tok;
+ char *tok_buf = input_buf;
+ char file_path[MAX_INPUT_BUF_SZ];
+ pgoff_t off_start = 0, off_end = 0;
+ size_t input_len = strlen(input_buf);
- pgtable = pgtable_trans_huge_withdraw(mm, pmd);
- pmd_populate(mm, &_pmd, pgtable);
+ tok = strsep(&tok_buf, ",");
+ if (tok && tok_buf) {
+ strscpy(file_path, tok);
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
- for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
- pte_t *pte, entry;
- entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
- entry = pte_mkspecial(entry);
- pte = pte_offset_map(&_pmd, haddr);
- VM_BUG_ON(!pte_none(*pte));
- set_pte_at(mm, haddr, pte, entry);
- pte_unmap(pte);
- }
- smp_wmb(); /* make pte visible before pmd */
- pmd_populate(mm, pmd, pgtable);
- put_huge_zero_page();
-}
+ ret = sscanf(tok_buf, "0x%lx,0x%lx,%d,%ld", &off_start, &off_end,
+ &new_order, &in_folio_offset);
+ if (ret != 2 && ret != 3 && ret != 4) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = split_huge_pages_in_file(file_path, off_start, off_end,
+ new_order, in_folio_offset);
+ if (!ret)
+ ret = input_len;
-void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmd)
-{
- struct page *page;
- struct mm_struct *mm = vma->vm_mm;
- unsigned long haddr = address & HPAGE_PMD_MASK;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
-
- BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
-
- mmun_start = haddr;
- mmun_end = haddr + HPAGE_PMD_SIZE;
- mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_trans_huge(*pmd))) {
- spin_unlock(&mm->page_table_lock);
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
- return;
+ goto out;
}
- if (is_huge_zero_pmd(*pmd)) {
- __split_huge_zero_page_pmd(vma, haddr, pmd);
- spin_unlock(&mm->page_table_lock);
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
- return;
+
+ ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d,%ld", &pid, &vaddr_start,
+ &vaddr_end, &new_order, &in_folio_offset);
+ if (ret == 1 && pid == 1) {
+ split_huge_pages_all();
+ ret = strlen(input_buf);
+ goto out;
+ } else if (ret != 3 && ret != 4 && ret != 5) {
+ ret = -EINVAL;
+ goto out;
}
- page = pmd_page(*pmd);
- VM_BUG_ON(!page_count(page));
- get_page(page);
- spin_unlock(&mm->page_table_lock);
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
- split_huge_page(page);
+ ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order,
+ in_folio_offset);
+ if (!ret)
+ ret = strlen(input_buf);
+out:
+ mutex_unlock(&split_debug_mutex);
+ return ret;
- put_page(page);
- BUG_ON(pmd_trans_huge(*pmd));
}
-void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd)
-{
- struct vm_area_struct *vma;
+static const struct file_operations split_huge_pages_fops = {
+ .owner = THIS_MODULE,
+ .write = split_huge_pages_write,
+};
- vma = find_vma(mm, address);
- BUG_ON(vma == NULL);
- split_huge_page_pmd(vma, address, pmd);
+static int __init split_huge_pages_debugfs(void)
+{
+ debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
+ &split_huge_pages_fops);
+ return 0;
}
+late_initcall(split_huge_pages_debugfs);
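+
+/*
+ * Illustrative debugfs usage, not part of this change (paths and pids are
+ * made up; debugfs is assumed to be mounted at /sys/kernel/debug):
+ *
+ *	echo 1 > /sys/kernel/debug/split_huge_pages
+ *		split all THPs in the system
+ *	echo "1234,0x7f0000000000,0x7f0000400000,2" > /sys/kernel/debug/split_huge_pages
+ *		split THPs mapped in that range of pid 1234 to order-2 folios
+ *	echo "/mnt/data/file,0x0,0x200,0" > /sys/kernel/debug/split_huge_pages
+ *		split file-backed THPs in page cache offsets [0x0, 0x200)
+ *
+ * A trailing in_folio_offset selects folio_split() instead of a uniform split.
+ */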
+#endif
-static void split_huge_page_address(struct mm_struct *mm,
- unsigned long address)
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+ struct page *page)
{
- pmd_t *pmd;
+ struct folio *folio = page_folio(page);
+ struct vm_area_struct *vma = pvmw->vma;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long address = pvmw->address;
+ bool anon_exclusive;
+ pmd_t pmdval;
+ swp_entry_t entry;
+ pmd_t pmdswp;
- VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
+ if (!(pvmw->pmd && !pvmw->pte))
+ return 0;
- pmd = mm_find_pmd(mm, address);
- if (!pmd)
- return;
- /*
- * Caller holds the mmap_sem write mode, so a huge pmd cannot
- * materialize from under us.
- */
- split_huge_page_pmd_mm(mm, address, pmd);
+ flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
+ if (unlikely(!pmd_present(*pvmw->pmd)))
+ pmdval = pmdp_huge_get_and_clear(vma->vm_mm, address, pvmw->pmd);
+ else
+ pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
+
+ /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
+ anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
+ if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
+ set_pmd_at(mm, address, pvmw->pmd, pmdval);
+ return -EBUSY;
+ }
+
+ if (pmd_dirty(pmdval))
+ folio_mark_dirty(folio);
+ if (pmd_write(pmdval))
+ entry = make_writable_migration_entry(page_to_pfn(page));
+ else if (anon_exclusive)
+ entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
+ else
+ entry = make_readable_migration_entry(page_to_pfn(page));
+ if (pmd_young(pmdval))
+ entry = make_migration_entry_young(entry);
+ if (pmd_dirty(pmdval))
+ entry = make_migration_entry_dirty(entry);
+ pmdswp = swp_entry_to_pmd(entry);
+ if (pmd_soft_dirty(pmdval))
+ pmdswp = pmd_swp_mksoft_dirty(pmdswp);
+ if (pmd_uffd_wp(pmdval))
+ pmdswp = pmd_swp_mkuffd_wp(pmdswp);
+ set_pmd_at(mm, address, pvmw->pmd, pmdswp);
+ folio_remove_rmap_pmd(folio, page, vma);
+ folio_put(folio);
+ trace_set_migration_pmd(address, pmd_val(pmdswp));
+
+ return 0;
}
-void __vma_adjust_trans_huge(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- long adjust_next)
+void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
- /*
- * If the new start address isn't hpage aligned and it could
- * previously contain an hugepage: check if we need to split
- * an huge pmd.
- */
- if (start & ~HPAGE_PMD_MASK &&
- (start & HPAGE_PMD_MASK) >= vma->vm_start &&
- (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
- split_huge_page_address(vma->vm_mm, start);
+ struct folio *folio = page_folio(new);
+ struct vm_area_struct *vma = pvmw->vma;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long address = pvmw->address;
+ unsigned long haddr = address & HPAGE_PMD_MASK;
+ pmd_t pmde;
+ softleaf_t entry;
- /*
- * If the new end address isn't hpage aligned and it could
- * previously contain an hugepage: check if we need to split
- * an huge pmd.
- */
- if (end & ~HPAGE_PMD_MASK &&
- (end & HPAGE_PMD_MASK) >= vma->vm_start &&
- (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
- split_huge_page_address(vma->vm_mm, end);
+ if (!(pvmw->pmd && !pvmw->pte))
+ return;
- /*
- * If we're also updating the vma->vm_next->vm_start, if the new
- * vm_next->vm_start isn't page aligned and it could previously
- * contain an hugepage: check if we need to split an huge pmd.
- */
- if (adjust_next > 0) {
- struct vm_area_struct *next = vma->vm_next;
- unsigned long nstart = next->vm_start;
- nstart += adjust_next << PAGE_SHIFT;
- if (nstart & ~HPAGE_PMD_MASK &&
- (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
- (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
- split_huge_page_address(next->vm_mm, nstart);
+ entry = softleaf_from_pmd(*pvmw->pmd);
+ folio_get(folio);
+ pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
+
+ if (pmd_swp_soft_dirty(*pvmw->pmd))
+ pmde = pmd_mksoft_dirty(pmde);
+ if (softleaf_is_migration_write(entry))
+ pmde = pmd_mkwrite(pmde, vma);
+ if (pmd_swp_uffd_wp(*pvmw->pmd))
+ pmde = pmd_mkuffd_wp(pmde);
+ if (!softleaf_is_migration_young(entry))
+ pmde = pmd_mkold(pmde);
+	/* NOTE: pmd_mkdirty() may also set the soft-dirty bit on some archs */
+ if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
+ pmde = pmd_mkdirty(pmde);
+
+ if (folio_is_device_private(folio)) {
+ swp_entry_t entry;
+
+ if (pmd_write(pmde))
+ entry = make_writable_device_private_entry(
+ page_to_pfn(new));
+ else
+ entry = make_readable_device_private_entry(
+ page_to_pfn(new));
+ pmde = swp_entry_to_pmd(entry);
+
+ if (pmd_swp_soft_dirty(*pvmw->pmd))
+ pmde = pmd_swp_mksoft_dirty(pmde);
+ if (pmd_swp_uffd_wp(*pvmw->pmd))
+ pmde = pmd_swp_mkuffd_wp(pmde);
+ }
+
+ if (folio_test_anon(folio)) {
+ rmap_t rmap_flags = RMAP_NONE;
+
+ if (!softleaf_is_migration_read(entry))
+ rmap_flags |= RMAP_EXCLUSIVE;
+
+ folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
+ } else {
+ folio_add_file_rmap_pmd(folio, new, vma);
}
+ VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
+ set_pmd_at(mm, haddr, pvmw->pmd, pmde);
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache_pmd(vma, address, pvmw->pmd);
+ trace_remove_migration_pmd(address, pmd_val(pmde));
}
+#endif