Diffstat (limited to 'mm/ksm.c')
| -rw-r--r-- | mm/ksm.c | 1591 |
1 file changed, 1043 insertions, 548 deletions
@@ -20,7 +20,7 @@
 #include <linux/mman.h>
 #include <linux/sched.h>
 #include <linux/sched/mm.h>
-#include <linux/sched/coredump.h>
+#include <linux/sched/cputime.h>
 #include <linux/rwsem.h>
 #include <linux/pagemap.h>
 #include <linux/rmap.h>
@@ -56,6 +56,8 @@
 #define DO_NUMA(x)	do { } while (0)
 #endif
 
+typedef u8 rmap_age_t;
+
 /**
  * DOC: Overview
  *
@@ -193,6 +195,8 @@ struct ksm_stable_node {
  * @node: rb node of this rmap_item in the unstable tree
  * @head: pointer to stable_node heading this list in the stable tree
  * @hlist: link into hlist of rmap_items hanging off that stable_node
+ * @age: number of scan iterations since creation
+ * @remaining_skips: how many scans to skip
  */
 struct ksm_rmap_item {
 	struct ksm_rmap_item *rmap_list;
@@ -205,6 +209,8 @@ struct ksm_rmap_item {
 	struct mm_struct *mm;
 	unsigned long address;		/* + low bits used for flags below */
 	unsigned int oldchecksum;	/* when unstable */
+	rmap_age_t age;
+	rmap_age_t remaining_skips;
 	union {
 		struct rb_node node;	/* when node of unstable tree */
 		struct {		/* when listed from stable tree */
@@ -242,6 +248,9 @@ static struct kmem_cache *rmap_item_cache;
 static struct kmem_cache *stable_node_cache;
 static struct kmem_cache *mm_slot_cache;
 
+/* Default number of pages to scan per batch */
+#define DEFAULT_PAGES_TO_SCAN 100
+
 /* The number of pages scanned */
 static unsigned long ksm_pages_scanned;
 
@@ -270,7 +279,7 @@ static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;
 static int ksm_max_page_sharing = 256;
 
 /* Number of pages ksmd should scan in one batch */
-static unsigned int ksm_thread_pages_to_scan = 100;
+static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
 
 /* Milliseconds ksmd should sleep between batches */
 static unsigned int ksm_thread_sleep_millisecs = 20;
@@ -281,8 +290,181 @@ static unsigned int zero_checksum __read_mostly;
 /* Whether to merge empty (zeroed) pages with actual zero pages */
 static bool ksm_use_zero_pages __read_mostly;
 
+/* Skip pages that couldn't be de-duplicated previously */
+/* Default to true at least temporarily, for testing */
+static bool ksm_smart_scan = true;
+
 /* The number of zero pages which is placed by KSM */
-unsigned long ksm_zero_pages;
+atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
+
+/* The number of pages that have been skipped due to "smart scanning" */
+static unsigned long ksm_pages_skipped;
+
+/* Don't scan more than max pages per batch. */
+static unsigned long ksm_advisor_max_pages_to_scan = 30000;
+
+/* Min CPU for scanning pages per scan */
+#define KSM_ADVISOR_MIN_CPU 10
+
+/* Max CPU for scanning pages per scan */
+static unsigned int ksm_advisor_max_cpu = 70;
+
+/* Target scan time in seconds to analyze all KSM candidate pages. */
+static unsigned long ksm_advisor_target_scan_time = 200;
+
+/* Exponentially weighted moving average. */
+#define EWMA_WEIGHT 30
+
+/**
+ * struct advisor_ctx - metadata for KSM advisor
+ * @start_scan: start time of the current scan
+ * @scan_time: scan time of previous scan
+ * @change: change in percent to pages_to_scan parameter
+ * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
+ */
+struct advisor_ctx {
+	ktime_t start_scan;
+	unsigned long scan_time;
+	unsigned long change;
+	unsigned long long cpu_time;
+};
+static struct advisor_ctx advisor_ctx;
+
+/* Define the different advisors */
+enum ksm_advisor_type {
+	KSM_ADVISOR_NONE,
+	KSM_ADVISOR_SCAN_TIME,
+};
+static enum ksm_advisor_type ksm_advisor;
+
+#ifdef CONFIG_SYSFS
+/*
+ * Only called through the sysfs control interface:
+ */
+
+/* At least scan this many pages per batch. */
+static unsigned long ksm_advisor_min_pages_to_scan = 500;
+
+static void set_advisor_defaults(void)
+{
+	if (ksm_advisor == KSM_ADVISOR_NONE) {
+		ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
+	} else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
+		advisor_ctx = (const struct advisor_ctx){ 0 };
+		ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
+	}
+}
+#endif /* CONFIG_SYSFS */
+
+static inline void advisor_start_scan(void)
+{
+	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+		advisor_ctx.start_scan = ktime_get();
+}
+
+/*
+ * Use previous scan time if available, otherwise use current scan time as an
+ * approximation for the previous scan time.
+ */
+static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
+					   unsigned long scan_time)
+{
+	return ctx->scan_time ? ctx->scan_time : scan_time;
+}
+
+/* Calculate exponential weighted moving average */
+static unsigned long ewma(unsigned long prev, unsigned long curr)
+{
+	return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
+}
+
+/*
+ * The scan time advisor is based on the current scan rate and the target
+ * scan rate.
+ *
+ *	new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
+ *
+ * To avoid perturbations it calculates a change factor of previous changes.
+ * A new change factor is calculated for each iteration and it uses an
+ * exponentially weighted moving average. The new pages_to_scan value is
+ * multiplied with that change factor:
+ *
+ *	new_pages_to_scan *= change factor
+ *
+ * The new_pages_to_scan value is limited by the cpu min and max values. It
+ * calculates the cpu percent for the last scan and calculates the new
+ * estimated cpu percent cost for the next scan. That value is capped by the
+ * cpu min and max setting.
+ *
+ * In addition the new pages_to_scan value is capped by the max and min
+ * limits.
+ */
+static void scan_time_advisor(void)
+{
+	unsigned int cpu_percent;
+	unsigned long cpu_time;
+	unsigned long cpu_time_diff;
+	unsigned long cpu_time_diff_ms;
+	unsigned long pages;
+	unsigned long per_page_cost;
+	unsigned long factor;
+	unsigned long change;
+	unsigned long last_scan_time;
+	unsigned long scan_time;
+
+	/* Convert scan time to seconds */
+	scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
+			    MSEC_PER_SEC);
+	scan_time = scan_time ? scan_time : 1;
+
+	/* Calculate CPU consumption of ksmd background thread */
+	cpu_time = task_sched_runtime(current);
+	cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
+	cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;
+
+	cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
+	cpu_percent = cpu_percent ? cpu_percent : 1;
+	last_scan_time = prev_scan_time(&advisor_ctx, scan_time);
+
+	/* Calculate scan time as percentage of target scan time */
+	factor = ksm_advisor_target_scan_time * 100 / scan_time;
+	factor = factor ? factor : 1;
+
+	/*
+	 * Calculate scan time as percentage of last scan time and use
+	 * exponentially weighted average to smooth it
+	 */
+	change = scan_time * 100 / last_scan_time;
+	change = change ? change : 1;
+	change = ewma(advisor_ctx.change, change);
+
+	/* Calculate new scan rate based on target scan rate. */
+	pages = ksm_thread_pages_to_scan * 100 / factor;
+	/* Update pages_to_scan by weighted change percentage. */
+	pages = pages * change / 100;
+
+	/* Cap new pages_to_scan value */
+	per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
+	per_page_cost = per_page_cost ? per_page_cost : 1;
+
+	pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
+	pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
+	pages = min(pages, ksm_advisor_max_pages_to_scan);
+
+	/* Update advisor context */
+	advisor_ctx.change = change;
+	advisor_ctx.scan_time = scan_time;
+	advisor_ctx.cpu_time = cpu_time;
+
+	ksm_thread_pages_to_scan = pages;
+	trace_ksm_advisor(scan_time, pages, cpu_percent);
+}
+
+static void advisor_stop_scan(void)
+{
+	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+		scan_time_advisor();
+}
 
 #ifdef CONFIG_NUMA
 /* Zeroed when merging across nodes is not allowed */
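The arithmetic in scan_time_advisor() is self-contained, so its steering behaviour can be replayed outside the kernel. Below is a minimal userspace sketch (not kernel code; all names are local to the example, and the CPU-percent capping step is omitted) showing how the ewma()-damped change factor pulls pages_to_scan up when scans take longer than the 200-second target:

#include <stdio.h>

#define EWMA_WEIGHT 30

static unsigned long ewma(unsigned long prev, unsigned long curr)
{
	return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
}

int main(void)
{
	unsigned long pages = 500;	/* like ksm_advisor_min_pages_to_scan */
	unsigned long target = 200;	/* like ksm_advisor_target_scan_time */
	unsigned long change = 100;	/* smoothed change factor, percent */
	/* pretend three successive scans took 800s, 600s, 400s: too slow */
	unsigned long scans[] = { 800, 600, 400 };
	unsigned long last = scans[0];

	for (int i = 0; i < 3; i++) {
		unsigned long scan_time = scans[i];
		unsigned long factor = target * 100 / scan_time;

		factor = factor ? factor : 1;
		change = ewma(change, scan_time * 100 / last);
		pages = pages * 100 / factor;	/* steer toward target */
		pages = pages * change / 100;	/* damp oscillation */
		last = scan_time;
		printf("scan_time=%lus -> pages_to_scan=%lu\n",
		       scan_time, pages);
	}
	return 0;
}

With these made-up inputs the batch size grows roughly 2000 -> 5575 -> 9366; in the kernel the result would additionally be clamped by the CPU cost and the min/max page limits shown above.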
@@ -305,21 +487,17 @@ static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
 static DEFINE_MUTEX(ksm_thread_mutex);
 static DEFINE_SPINLOCK(ksm_mmlist_lock);
 
-#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
-		sizeof(struct __struct), __alignof__(struct __struct),\
-		(__flags), NULL)
-
 static int __init ksm_slab_init(void)
 {
-	rmap_item_cache = KSM_KMEM_CACHE(ksm_rmap_item, 0);
+	rmap_item_cache = KMEM_CACHE(ksm_rmap_item, 0);
 	if (!rmap_item_cache)
 		goto out;
 
-	stable_node_cache = KSM_KMEM_CACHE(ksm_stable_node, 0);
+	stable_node_cache = KMEM_CACHE(ksm_stable_node, 0);
 	if (!stable_node_cache)
 		goto out_free1;
 
-	mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0);
+	mm_slot_cache = KMEM_CACHE(ksm_mm_slot, 0);
 	if (!mm_slot_cache)
 		goto out_free2;
 
@@ -429,35 +607,51 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
 	return atomic_read(&mm->mm_users) == 0;
 }
 
-static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
+static int break_ksm_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned long end,
 			struct mm_walk *walk)
 {
-	struct page *page = NULL;
+	unsigned long *found_addr = (unsigned long *) walk->private;
+	struct mm_struct *mm = walk->mm;
+	pte_t *start_ptep, *ptep;
 	spinlock_t *ptl;
-	pte_t *pte;
-	pte_t ptent;
-	int ret;
+	int found = 0;
 
-	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-	if (!pte)
+	if (ksm_test_exit(walk->mm))
 		return 0;
-	ptent = ptep_get(pte);
-	if (pte_present(ptent)) {
-		page = vm_normal_page(walk->vma, addr, ptent);
-	} else if (!pte_none(ptent)) {
-		swp_entry_t entry = pte_to_swp_entry(ptent);
 
-		/*
-		 * As KSM pages remain KSM pages until freed, no need to wait
-		 * here for migration to end.
-		 */
-		if (is_migration_entry(entry))
-			page = pfn_swap_entry_to_page(entry);
+	if (signal_pending(current))
+		return -ERESTARTSYS;
+
+	start_ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+	if (!start_ptep)
+		return 0;
+
+	for (ptep = start_ptep; addr < end; ptep++, addr += PAGE_SIZE) {
+		pte_t pte = ptep_get(ptep);
+		struct folio *folio = NULL;
+
+		if (pte_present(pte)) {
+			folio = vm_normal_folio(walk->vma, addr, pte);
+		} else if (!pte_none(pte)) {
+			const softleaf_t entry = softleaf_from_pte(pte);
+
+			/*
+			 * As KSM pages remain KSM pages until freed, no need to wait
+			 * here for migration to end.
+			 */
+			if (softleaf_is_migration(entry))
+				folio = softleaf_to_folio(entry);
+		}
+		/* return 1 if the page is a normal ksm page or KSM-placed zero page */
+		found = (folio && folio_test_ksm(folio)) ||
+			(pte_present(pte) && is_ksm_zero_pte(pte));
+		if (found) {
+			*found_addr = addr;
+			goto out_unlock;
+		}
 	}
-	/* return 1 if the page is an normal ksm page or KSM-placed zero page */
-	ret = (page && PageKsm(page)) || is_ksm_zero_pte(*pte);
-	pte_unmap_unlock(pte, ptl);
-	return ret;
+out_unlock:
+	pte_unmap_unlock(ptep, ptl);
+	return found;
 }
 
 static const struct mm_walk_ops break_ksm_ops = {
@@ -471,6 +665,18 @@ static const struct mm_walk_ops break_ksm_lock_vma_ops = {
 };
 
 /*
+ * Though it's very tempting to unmerge rmap_items from stable tree rather
+ * than check every pte of a given vma, the locking doesn't quite work for
+ * that - an rmap_item is assigned to the stable tree after inserting ksm
+ * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
+ * rmap_items from parent to child at fork time (so as not to waste time
+ * if exit comes before the next scan reaches it).
+ *
+ * Similarly, although we'd like to remove rmap_items (so updating counts
+ * and freeing memory) when unmerging an area, it's easier to leave that
+ * to the next pass of ksmd - consider, for example, how ksmd might be
+ * in cmp_and_merge_page on one of the rmap_items we would be removing.
+ *
 * We use break_ksm to break COW on a ksm page by triggering unsharing,
 * such that the ksm page will get replaced by an exclusive anonymous page.
 *
@@ -483,7 +689,8 @@ static const struct mm_walk_ops break_ksm_lock_vma_ops = {
 * of the process that owns 'vma'. We also do not want to enforce
 * protection keys here anyway.
 */
-static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
+static int break_ksm(struct vm_area_struct *vma, unsigned long addr,
+		     unsigned long end, bool lock_vma)
 {
 	vm_fault_t ret = 0;
 	const struct mm_walk_ops *ops = lock_vma ?
@@ -493,11 +700,9 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_v
 		int ksm_page;
 
 		cond_resched();
-		ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL);
-		if (WARN_ON_ONCE(ksm_page < 0))
+		ksm_page = walk_page_range_vma(vma, addr, end, ops, &addr);
+		if (ksm_page <= 0)
 			return ksm_page;
-		if (!ksm_page)
-			return 0;
 		ret = handle_mm_fault(vma, addr,
 				      FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE, NULL);
@@ -509,7 +714,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_v
 	 *
 	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
 	 * backing file, which also invalidates anonymous pages: that's
-	 * okay, that truncation will have unmapped the PageKsm for us.
+	 * okay, that truncation will have unmapped the KSM page for us.
 	 *
 	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
 	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
@@ -530,28 +735,32 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_v
 	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
 }
 
-static bool vma_ksm_compatible(struct vm_area_struct *vma)
+static bool ksm_compatible(const struct file *file, vm_flags_t vm_flags)
 {
-	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP |
-			     VM_IO      | VM_DONTEXPAND | VM_HUGETLB |
-			     VM_MIXEDMAP))
+	if (vm_flags & (VM_SHARED | VM_MAYSHARE | VM_SPECIAL |
+			VM_HUGETLB | VM_DROPPABLE))
 		return false;		/* just ignore the advice */
 
-	if (vma_is_dax(vma))
+	if (file_is_dax(file))
 		return false;
 
 #ifdef VM_SAO
-	if (vma->vm_flags & VM_SAO)
+	if (vm_flags & VM_SAO)
 		return false;
 #endif
 #ifdef VM_SPARC_ADI
-	if (vma->vm_flags & VM_SPARC_ADI)
+	if (vm_flags & VM_SPARC_ADI)
 		return false;
 #endif
 
 	return true;
 }
 
+static bool vma_ksm_compatible(struct vm_area_struct *vma)
+{
+	return ksm_compatible(vma->vm_file, vma->vm_flags);
+}
+
 static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
 		unsigned long addr)
 {
@@ -579,7 +788,7 @@ static void break_cow(struct ksm_rmap_item *rmap_item)
 	mmap_read_lock(mm);
 	vma = find_mergeable_vma(mm, addr);
 	if (vma)
-		break_ksm(vma, addr, false);
+		break_ksm(vma, addr, addr + PAGE_SIZE, false);
 	mmap_read_unlock(mm);
 }
 
@@ -588,26 +797,28 @@ static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
 	struct mm_struct *mm = rmap_item->mm;
 	unsigned long addr = rmap_item->address;
 	struct vm_area_struct *vma;
-	struct page *page;
+	struct page *page = NULL;
+	struct folio_walk fw;
+	struct folio *folio;
 
 	mmap_read_lock(mm);
 	vma = find_mergeable_vma(mm, addr);
 	if (!vma)
 		goto out;
-	page = follow_page(vma, addr, FOLL_GET);
-	if (IS_ERR_OR_NULL(page))
-		goto out;
-	if (is_zone_device_page(page))
-		goto out_putpage;
-	if (PageAnon(page)) {
+	folio = folio_walk_start(&fw, vma, addr, 0);
+	if (folio) {
+		if (!folio_is_zone_device(folio) &&
+		    folio_test_anon(folio)) {
+			folio_get(folio);
+			page = fw.page;
+		}
+		folio_walk_end(&fw, vma);
+	}
+out:
+	if (page) {
 		flush_anon_page(vma, page, addr);
 		flush_dcache_page(page);
-	} else {
-out_putpage:
-		put_page(page);
-out:
-		page = NULL;
 	}
 	mmap_read_unlock(mm);
 	return page;
@@ -707,14 +918,14 @@ static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
 	free_stable_node(stable_node);
 }
 
-enum get_ksm_page_flags {
-	GET_KSM_PAGE_NOLOCK,
-	GET_KSM_PAGE_LOCK,
-	GET_KSM_PAGE_TRYLOCK
+enum ksm_get_folio_flags {
+	KSM_GET_FOLIO_NOLOCK,
+	KSM_GET_FOLIO_LOCK,
+	KSM_GET_FOLIO_TRYLOCK
 };
 
 /*
- * get_ksm_page: checks if the page indicated by the stable node
+ * ksm_get_folio: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
@@ -732,19 +943,19 @@ enum get_ksm_page_flags {
 * a page to put something that might look like our key in page->mapping.
 * is on its way to being freed; but it is an anomaly to bear in mind.
 */
-static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
-				 enum get_ksm_page_flags flags)
+static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
+				   enum ksm_get_folio_flags flags)
 {
-	struct page *page;
+	struct folio *folio;
 	void *expected_mapping;
 	unsigned long kpfn;
 
 	expected_mapping = (void *)((unsigned long)stable_node |
-					PAGE_MAPPING_KSM);
+					FOLIO_MAPPING_KSM);
 again:
 	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
-	page = pfn_to_page(kpfn);
-	if (READ_ONCE(page->mapping) != expected_mapping)
+	folio = pfn_folio(kpfn);
+	if (READ_ONCE(folio->mapping) != expected_mapping)
 		goto stale;
 
 	/*
@@ -757,45 +968,46 @@ again:
 	 * in folio_migrate_mapping(), it might still be our page,
	 * in which case it's essential to keep the node.
	 */
-	while (!get_page_unless_zero(page)) {
+	while (!folio_try_get(folio)) {
 		/*
-		 * Another check for page->mapping != expected_mapping would
-		 * work here too. We have chosen the !PageSwapCache test to
-		 * optimize the common case, when the page is or is about to
-		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
-		 * in the ref_freeze section of __remove_mapping(); but Anon
-		 * page->mapping reset to NULL later, in free_pages_prepare().
+		 * Another check for folio->mapping != expected_mapping
+		 * would work here too. We have chosen to test the
+		 * swapcache flag to optimize the common case, when the
+		 * folio is or is about to be freed: the swapcache flag
+		 * is cleared (under spin_lock_irq) in the ref_freeze
+		 * section of __remove_mapping(); but anon folio->mapping
+		 * is reset to NULL later, in free_pages_prepare().
		 */
-		if (!PageSwapCache(page))
+		if (!folio_test_swapcache(folio))
 			goto stale;
 		cpu_relax();
 	}
 
-	if (READ_ONCE(page->mapping) != expected_mapping) {
-		put_page(page);
+	if (READ_ONCE(folio->mapping) != expected_mapping) {
+		folio_put(folio);
 		goto stale;
 	}
 
-	if (flags == GET_KSM_PAGE_TRYLOCK) {
-		if (!trylock_page(page)) {
-			put_page(page);
+	if (flags == KSM_GET_FOLIO_TRYLOCK) {
+		if (!folio_trylock(folio)) {
+			folio_put(folio);
 			return ERR_PTR(-EBUSY);
 		}
-	} else if (flags == GET_KSM_PAGE_LOCK)
-		lock_page(page);
+	} else if (flags == KSM_GET_FOLIO_LOCK)
+		folio_lock(folio);
 
-	if (flags != GET_KSM_PAGE_NOLOCK) {
-		if (READ_ONCE(page->mapping) != expected_mapping) {
-			unlock_page(page);
-			put_page(page);
+	if (flags != KSM_GET_FOLIO_NOLOCK) {
+		if (READ_ONCE(folio->mapping) != expected_mapping) {
+			folio_unlock(folio);
+			folio_put(folio);
 			goto stale;
 		}
 	}
-	return page;
+	return folio;
 
 stale:
 	/*
-	 * We come here from above when page->mapping or !PageSwapCache
+	 * We come here from above when folio->mapping or the swapcache flag
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
	 * before checking whether node->kpfn has been changed.
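The lock-free revalidation in ksm_get_folio() follows a common pattern: read the identifying field, take a reference only if the object is not already being freed, then re-read the field to be sure the object was not repurposed in between. A rough userspace analogue using C11 atomics (purely illustrative; struct object and try_get_if_still_ours() are invented stand-ins for folio->mapping and folio_try_get(), with none of the swapcache or migration subtleties):

#include <stdatomic.h>
#include <stdbool.h>

struct object {
	_Atomic(void *) mapping;	/* stands in for folio->mapping */
	atomic_int refcount;		/* 0 means "being freed" */
};

static bool try_get_if_still_ours(struct object *obj, void *expected)
{
	if (atomic_load(&obj->mapping) != expected)
		return false;		/* stale before we even pinned it */

	/* Like folio_try_get(): refuse to resurrect a zero refcount. */
	int ref = atomic_load(&obj->refcount);
	do {
		if (ref == 0)
			return false;
	} while (!atomic_compare_exchange_weak(&obj->refcount, &ref, ref + 1));

	/* The object may have been repurposed while we raced: re-check. */
	if (atomic_load(&obj->mapping) != expected) {
		atomic_fetch_sub(&obj->refcount, 1);
		return false;
	}
	return true;
}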
@@ -815,16 +1027,16 @@ static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
 {
 	if (rmap_item->address & STABLE_FLAG) {
 		struct ksm_stable_node *stable_node;
-		struct page *page;
+		struct folio *folio;
 
 		stable_node = rmap_item->head;
-		page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
-		if (!page)
+		folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
+		if (!folio)
 			goto out;
 
 		hlist_del(&rmap_item->hlist);
-		unlock_page(page);
-		put_page(page);
+		folio_unlock(folio);
+		folio_put(folio);
 
 		if (!hlist_empty(&stable_node->hlist))
 			ksm_pages_sharing--;
@@ -871,51 +1083,17 @@ static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
 	}
 }
 
-/*
- * Though it's very tempting to unmerge rmap_items from stable tree rather
- * than check every pte of a given vma, the locking doesn't quite work for
- * that - an rmap_item is assigned to the stable tree after inserting ksm
- * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
- * rmap_items from parent to child at fork time (so as not to waste time
- * if exit comes before the next scan reaches it).
- *
- * Similarly, although we'd like to remove rmap_items (so updating counts
- * and freeing memory) when unmerging an area, it's easier to leave that
- * to the next pass of ksmd - consider, for example, how ksmd might be
- * in cmp_and_merge_page on one of the rmap_items we would be removing.
- */
-static int unmerge_ksm_pages(struct vm_area_struct *vma,
-			     unsigned long start, unsigned long end, bool lock_vma)
-{
-	unsigned long addr;
-	int err = 0;
-
-	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
-		if (ksm_test_exit(vma->vm_mm))
-			break;
-		if (signal_pending(current))
-			err = -ERESTARTSYS;
-		else
-			err = break_ksm(vma, addr, lock_vma);
-	}
-	return err;
-}
-
-static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
+static inline
+struct ksm_stable_node *folio_stable_node(const struct folio *folio)
 {
 	return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
 }
 
-static inline struct ksm_stable_node *page_stable_node(struct page *page)
+static inline void folio_set_stable_node(struct folio *folio,
+					 struct ksm_stable_node *stable_node)
 {
-	return folio_stable_node(page_folio(page));
-}
-
-static inline void set_page_stable_node(struct page *page,
-					struct ksm_stable_node *stable_node)
-{
-	VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page);
-	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
+	VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio);
+	folio->mapping = (void *)((unsigned long)stable_node | FOLIO_MAPPING_KSM);
 }
 
 #ifdef CONFIG_SYSFS
@@ -924,13 +1102,13 @@ static inline void set_page_stable_node(struct page *page,
 */
 static int remove_stable_node(struct ksm_stable_node *stable_node)
 {
-	struct page *page;
+	struct folio *folio;
 	int err;
 
-	page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
-	if (!page) {
+	folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
+	if (!folio) {
 		/*
-		 * get_ksm_page did remove_node_from_stable_tree itself.
+		 * ksm_get_folio did remove_node_from_stable_tree itself.
		 */
 		return 0;
 	}
@@ -941,22 +1119,22 @@ static int remove_stable_node(struct ksm_stable_node *stable_node)
 	 * merge_across_nodes/max_page_sharing be switched.
	 */
 	err = -EBUSY;
-	if (!page_mapped(page)) {
+	if (!folio_mapped(folio)) {
 		/*
-		 * The stable node did not yet appear stale to get_ksm_page(),
-		 * since that allows for an unmapped ksm page to be recognized
+		 * The stable node did not yet appear stale to ksm_get_folio(),
+		 * since that allows for an unmapped ksm folio to be recognized
		 * right up until it is freed; but the node is safe to remove.
-		 * This page might be in an LRU cache waiting to be freed,
-		 * or it might be PageSwapCache (perhaps under writeback),
+		 * This folio might be in an LRU cache waiting to be freed,
+		 * or it might be in the swapcache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
-		set_page_stable_node(page, NULL);
+		folio_set_stable_node(folio, NULL);
 		remove_node_from_stable_tree(stable_node);
 		err = 0;
 	}
 
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 	return err;
 }
 
@@ -1042,8 +1220,7 @@ static int unmerge_and_remove_all_rmap_items(void)
 		for_each_vma(vmi, vma) {
 			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
 				continue;
-			err = unmerge_ksm_pages(vma,
-						vma->vm_start, vma->vm_end, false);
+			err = break_ksm(vma, vma->vm_start, vma->vm_end, false);
 			if (err)
 				goto error;
 		}
@@ -1062,8 +1239,8 @@ mm_exiting:
 			spin_unlock(&ksm_mmlist_lock);
 
 			mm_slot_free(mm_slot_cache, mm_slot);
-			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
-			clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
+			mm_flags_clear(MMF_VM_MERGEABLE, mm);
+			mm_flags_clear(MMF_VM_MERGE_ANY, mm);
 			mmdrop(mm);
 		} else
 			spin_unlock(&ksm_mmlist_lock);
@@ -1086,29 +1263,30 @@ error:
 static u32 calc_checksum(struct page *page)
 {
 	u32 checksum;
-	void *addr = kmap_atomic(page);
+	void *addr = kmap_local_page(page);
 	checksum = xxhash(addr, PAGE_SIZE, 0);
-	kunmap_atomic(addr);
+	kunmap_local(addr);
 	return checksum;
 }
 
-static int write_protect_page(struct vm_area_struct *vma, struct page *page,
+static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
 			      pte_t *orig_pte)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);
 	int swapped;
 	int err = -EFAULT;
 	struct mmu_notifier_range range;
 	bool anon_exclusive;
 	pte_t entry;
 
-	pvmw.address = page_address_in_vma(page, vma);
+	if (WARN_ON_ONCE(folio_test_large(folio)))
+		return err;
+
+	pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma);
 	if (pvmw.address == -EFAULT)
 		goto out;
 
-	BUG_ON(PageTransCompound(page));
-
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
 				pvmw.address + PAGE_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
@@ -1118,12 +1296,19 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
 		goto out_unlock;
 
-	anon_exclusive = PageAnonExclusive(page);
 	entry = ptep_get(pvmw.pte);
+	/*
+	 * Handle PFN swap PTEs, such as device-exclusive ones, that actually
+	 * map pages: give up just like the next folio_walk would.
	 */
+	if (unlikely(!pte_present(entry)))
+		goto out_unlock;
+
+	anon_exclusive = PageAnonExclusive(&folio->page);
 	if (pte_write(entry) || pte_dirty(entry) ||
 	    anon_exclusive || mm_tlb_flush_pending(mm)) {
-		swapped = PageSwapCache(page);
-		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+		swapped = folio_test_swapcache(folio);
+		flush_cache_page(vma, pvmw.address, folio_pfn(folio));
 		/*
		 * Ok this is tricky, when get_user_pages_fast() run it doesn't
		 * take any lock, therefore the check that we are going to make
@@ -1143,25 +1328,26 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
-		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
+		if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) {
 			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
 			goto out_unlock;
 		}
 
-		/* See page_try_share_anon_rmap(): clear PTE first. */
-		if (anon_exclusive && page_try_share_anon_rmap(page)) {
+		/* See folio_try_share_anon_rmap_pte(): clear PTE first. */
+		if (anon_exclusive &&
+		    folio_try_share_anon_rmap_pte(folio, &folio->page)) {
 			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
 			goto out_unlock;
 		}
 
 		if (pte_dirty(entry))
-			set_page_dirty(page);
+			folio_mark_dirty(folio);
 		entry = pte_mkclean(entry);
 
 		if (pte_write(entry))
 			entry = pte_wrprotect(entry);
 
-		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
+		set_pte_at(mm, pvmw.address, pvmw.pte, entry);
 	}
 	*orig_pte = entry;
 	err = 0;
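The pin check above is pure reference bookkeeping: one reference per PTE mapping, one for the swap cache if present, plus the one the scanner itself holds; any surplus implies a concurrent user such as O_DIRECT that could still write into the page behind KSM's back. A toy model of just that decision (invented types, not kernel API):

#include <stdbool.h>

struct page_refs {
	int mapcount;	/* references from PTE mappings */
	int swapcache;	/* 1 if the swap cache also holds a reference */
	int refcount;	/* total references actually taken */
};

static bool safe_to_write_protect(const struct page_refs *p)
{
	/* mapcount + swapcache + our own temporary reference */
	return p->mapcount + 1 + p->swapcache == p->refcount;
}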
@@ -1186,8 +1372,9 @@ out:
 static int replace_page(struct vm_area_struct *vma, struct page *page,
 			struct page *kpage, pte_t orig_pte)
 {
+	struct folio *kfolio = page_folio(kpage);
 	struct mm_struct *mm = vma->vm_mm;
-	struct folio *folio;
+	struct folio *folio = page_folio(page);
 	pmd_t *pmd;
 	pmd_t pmde;
 	pte_t *ptep;
@@ -1197,7 +1384,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	int err = -EFAULT;
 	struct mmu_notifier_range range;
 
-	addr = page_address_in_vma(page, vma);
+	addr = page_address_in_vma(folio, page, vma);
 	if (addr == -EFAULT)
 		goto out;
 
@@ -1225,15 +1412,16 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 		goto out_mn;
 	}
 	VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
-	VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage);
+	VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
+			kfolio);
 
 	/*
	 * No need to check ksm_use_zero_pages here: we can only have a
	 * zero_page here if ksm_use_zero_pages was enabled already.
	 */
 	if (!is_zero_pfn(page_to_pfn(kpage))) {
-		get_page(kpage);
-		page_add_anon_rmap(kpage, vma, addr, RMAP_NONE);
+		folio_get(kfolio);
+		folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
 		newpte = mk_pte(kpage, vma->vm_page_prot);
 	} else {
 		/*
@@ -1242,8 +1430,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
		 * the dirty bit in zero page's PTE is set.
		 */
 		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
-		ksm_zero_pages++;
-		mm->ksm_zero_pages++;
+		ksm_map_zero_page(mm);
 		/*
		 * We're replacing an anonymous page with a zero page, which is
		 * not anonymous. We need to do proper accounting otherwise we
@@ -1261,10 +1448,9 @@
	 * See Documentation/mm/mmu_notifier.rst
	 */
 	ptep_clear_flush(vma, addr, ptep);
-	set_pte_at_notify(mm, addr, ptep, newpte);
+	set_pte_at(mm, addr, ptep, newpte);
 
-	folio = page_folio(page);
-	page_remove_rmap(page, vma, false);
+	folio_remove_rmap_pte(folio, page, vma);
 	if (!folio_mapped(folio))
 		folio_free_swap(folio);
 	folio_put(folio);
@@ -1281,7 +1467,7 @@ out:
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
- * @kpage: the PageKsm page that we want to map instead of page,
+ * @kpage: the KSM page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
@@ -1289,28 +1475,29 @@ out:
 static int try_to_merge_one_page(struct vm_area_struct *vma,
 				 struct page *page, struct page *kpage)
 {
+	struct folio *folio = page_folio(page);
 	pte_t orig_pte = __pte(0);
 	int err = -EFAULT;
 
 	if (page == kpage)			/* ksm page forked */
 		return 0;
 
-	if (!PageAnon(page))
+	if (!folio_test_anon(folio))
 		goto out;
 
 	/*
-	 * We need the page lock to read a stable PageSwapCache in
-	 * write_protect_page(). We use trylock_page() instead of
-	 * lock_page() because we don't want to wait here - we
-	 * prefer to continue scanning and merging different pages,
-	 * then come back to this page when it is unlocked.
+	 * We need the folio lock to read a stable swapcache flag in
+	 * write_protect_page(). We trylock because we don't want to wait
+	 * here - we prefer to continue scanning and merging different
+	 * pages, then come back to this page when it is unlocked.
	 */
-	if (!trylock_page(page))
+	if (!folio_trylock(folio))
 		goto out;
 
-	if (PageTransCompound(page)) {
+	if (folio_test_large(folio)) {
 		if (split_huge_page(page))
 			goto out_unlock;
+		folio = page_folio(page);
 	}
 
 	/*
@@ -1319,33 +1506,71 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
	 * ptes are necessarily already write-protected. But in either
	 * case, we need to lock and check page_count is not raised.
	 */
-	if (write_protect_page(vma, page, &orig_pte) == 0) {
+	if (write_protect_page(vma, folio, &orig_pte) == 0) {
 		if (!kpage) {
 			/*
-			 * While we hold page lock, upgrade page from
-			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
+			 * While we hold folio lock, upgrade folio from
+			 * anon to a NULL stable_node with the KSM flag set:
			 * stable_tree_insert() will update stable_node.
			 */
-			set_page_stable_node(page, NULL);
-			mark_page_accessed(page);
+			folio_set_stable_node(folio, NULL);
+			folio_mark_accessed(folio);
 			/*
-			 * Page reclaim just frees a clean page with no dirty
+			 * Page reclaim just frees a clean folio with no dirty
			 * ptes: make sure that the ksm page would be swapped.
			 */
-			if (!PageDirty(page))
-				SetPageDirty(page);
+			if (!folio_test_dirty(folio))
+				folio_mark_dirty(folio);
 			err = 0;
 		} else if (pages_identical(page, kpage))
 			err = replace_page(vma, page, kpage, orig_pte);
 	}
 
 out_unlock:
-	unlock_page(page);
+	folio_unlock(folio);
 out:
 	return err;
 }
 
 /*
+ * This function returns 0 if the pages were merged or if they are
+ * no longer merging candidates (e.g., VMA stale), -EFAULT otherwise.
+ */
+static int try_to_merge_with_zero_page(struct ksm_rmap_item *rmap_item,
+				       struct page *page)
+{
+	struct mm_struct *mm = rmap_item->mm;
+	int err = -EFAULT;
+
+	/*
+	 * Same checksum as an empty page. We attempt to merge it with the
+	 * appropriate zero page if the user enabled this via sysfs.
+	 */
+	if (ksm_use_zero_pages && (rmap_item->oldchecksum == zero_checksum)) {
+		struct vm_area_struct *vma;
+
+		mmap_read_lock(mm);
+		vma = find_mergeable_vma(mm, rmap_item->address);
+		if (vma) {
+			err = try_to_merge_one_page(vma, page,
+						    ZERO_PAGE(rmap_item->address));
+			trace_ksm_merge_one_page(
+					page_to_pfn(ZERO_PAGE(rmap_item->address)),
+					rmap_item, mm, err);
+		} else {
+			/*
+			 * If the vma is out of date, we do not need to
+			 * continue.
			 */
+			err = 0;
+		}
+		mmap_read_unlock(mm);
+	}
+
+	return err;
+}
+
+/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
@@ -1390,7 +1615,7 @@ out:
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
-static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
+static struct folio *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
 					   struct page *page,
 					   struct ksm_rmap_item *tree_rmap_item,
 					   struct page *tree_page)
@@ -1408,7 +1633,7 @@ static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
 		if (err)
 			break_cow(rmap_item);
 	}
-	return err ? NULL : page;
+	return err ? NULL : page_folio(page);
 }
 
 static __always_inline
@@ -1431,15 +1656,14 @@ bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
 	return __is_page_sharing_candidate(stable_node, 0);
 }
 
-static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
-				    struct ksm_stable_node **_stable_node,
-				    struct rb_root *root,
-				    bool prune_stale_stable_nodes)
+static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
+				     struct ksm_stable_node **_stable_node,
+				     struct rb_root *root,
+				     bool prune_stale_stable_nodes)
 {
 	struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
 	struct hlist_node *hlist_safe;
-	struct page *_tree_page, *tree_page = NULL;
-	int nr = 0;
+	struct folio *folio, *tree_folio = NULL;
 	int found_rmap_hlist_len;
 
 	if (!prune_stale_stable_nodes ||
@@ -1457,42 +1681,35 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
		 * We must walk all stable_node_dup to prune the stale
		 * stable nodes during lookup.
		 *
-		 * get_ksm_page can drop the nodes from the
+		 * ksm_get_folio can drop the nodes from the
		 * stable_node->hlist if they point to freed pages
		 * (that's why we do a _safe walk). The "dup"
		 * stable_node parameter itself will be freed from
		 * under us if it returns NULL.
		 */
-		_tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
-		if (!_tree_page)
+		folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK);
+		if (!folio)
+			continue;
+		/* Pick the best candidate if possible. */
+		if (!found || (is_page_sharing_candidate(dup) &&
+		    (!is_page_sharing_candidate(found) ||
+		     dup->rmap_hlist_len > found_rmap_hlist_len))) {
+			if (found)
+				folio_put(tree_folio);
+			found = dup;
+			found_rmap_hlist_len = found->rmap_hlist_len;
+			tree_folio = folio;
+			/* skip put_page for found candidate */
+			if (!prune_stale_stable_nodes &&
+			    is_page_sharing_candidate(found))
+				break;
 			continue;
-		nr += 1;
-		if (is_page_sharing_candidate(dup)) {
-			if (!found ||
-			    dup->rmap_hlist_len > found_rmap_hlist_len) {
-				if (found)
-					put_page(tree_page);
-				found = dup;
-				found_rmap_hlist_len = found->rmap_hlist_len;
-				tree_page = _tree_page;
-
-				/* skip put_page for found dup */
-				if (!prune_stale_stable_nodes)
-					break;
-				continue;
-			}
 		}
-		put_page(_tree_page);
+		folio_put(folio);
 	}
 
 	if (found) {
-		/*
-		 * nr is counting all dups in the chain only if
-		 * prune_stale_stable_nodes is true, otherwise we may
-		 * break the loop at nr == 1 even if there are
-		 * multiple entries.
-		 */
-		if (prune_stale_stable_nodes && nr == 1) {
+		if (hlist_is_singular_node(&found->hlist_dup, &stable_node->hlist)) {
 			/*
			 * If there's not just one entry it would
			 * corrupt memory, better BUG_ON. In KSM
@@ -1544,27 +1761,17 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
 			hlist_add_head(&found->hlist_dup,
 				       &stable_node->hlist);
 		}
+	} else {
+		/* Its hlist must be empty if none was found. */
+		free_stable_node_chain(stable_node, root);
 	}
 
 	*_stable_node_dup = found;
-	return tree_page;
-}
-
-static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,
-					struct rb_root *root)
-{
-	if (!is_stable_node_chain(stable_node))
-		return stable_node;
-	if (hlist_empty(&stable_node->hlist)) {
-		free_stable_node_chain(stable_node, root);
-		return NULL;
-	}
-	return hlist_entry(stable_node->hlist.first,
-			   typeof(*stable_node), hlist_dup);
+	return tree_folio;
 }
 
 /*
- * Like for get_ksm_page, this function can free the *_stable_node and
+ * Like for ksm_get_folio, this function can free the *_stable_node and
 * *_stable_node_dup if the returned tree_page is NULL.
 *
 * It can also free and overwrite *_stable_node with the found
@@ -1577,46 +1784,33 @@ static struct ksm_stable_node *stable_node_dup_any(struct ksm_stabl
 * function and will be overwritten in all cases, the caller doesn't
 * need to initialize it.
 */
-static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
-					struct ksm_stable_node **_stable_node,
-					struct rb_root *root,
-					bool prune_stale_stable_nodes)
+static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
+					 struct ksm_stable_node **_stable_node,
+					 struct rb_root *root,
+					 bool prune_stale_stable_nodes)
 {
 	struct ksm_stable_node *stable_node = *_stable_node;
+
 	if (!is_stable_node_chain(stable_node)) {
-		if (is_page_sharing_candidate(stable_node)) {
-			*_stable_node_dup = stable_node;
-			return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
-		}
-		/*
-		 * _stable_node_dup set to NULL means the stable_node
-		 * reached the ksm_max_page_sharing limit.
-		 */
-		*_stable_node_dup = NULL;
-		return NULL;
+		*_stable_node_dup = stable_node;
+		return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK);
 	}
 	return stable_node_dup(_stable_node_dup, _stable_node, root,
 			       prune_stale_stable_nodes);
 }
 
-static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d,
-						struct ksm_stable_node **s_n,
-						struct rb_root *root)
+static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d,
						 struct ksm_stable_node **s_n,
						 struct rb_root *root)
 {
 	return __stable_node_chain(s_n_d, s_n, root, true);
 }
 
-static __always_inline struct page *chain(struct ksm_stable_node **s_n_d,
-					  struct ksm_stable_node *s_n,
-					  struct rb_root *root)
+static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d,
					   struct ksm_stable_node **s_n,
					   struct rb_root *root)
 {
-	struct ksm_stable_node *old_stable_node = s_n;
-	struct page *tree_page;
-
-	tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
-	/* not pruning dups so s_n cannot have changed */
-	VM_BUG_ON(s_n != old_stable_node);
-	return tree_page;
+	return __stable_node_chain(s_n_d, s_n, root, false);
 }
 
 /*
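The stable and unstable trees touched by the next hunks are ordinary red-black trees keyed by the full byte content of each page and compared with memcmp_pages(), so identical content always converges on the same node. Stripped of the rb_node plumbing, the lookup is a plain binary search on memcmp order, as in this illustrative stand-alone C sketch (struct node and tree_search() are invented for the example):

#include <string.h>

#define PAGE_SZ 4096

struct node {
	struct node *left, *right;
	unsigned char content[PAGE_SZ];
};

static struct node *tree_search(struct node *root, const unsigned char *page)
{
	while (root) {
		int ret = memcmp(page, root->content, PAGE_SZ);

		if (ret < 0)
			root = root->left;
		else if (ret > 0)
			root = root->right;
		else
			return root;	/* identical content found */
	}
	return NULL;
}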
@@ -1626,79 +1820,43 @@ static __always_inline struct page *chain(struct ksm_stable_node **s_n_d,
 * with identical content to the page that we are scanning right now.
 *
 * This function returns the stable tree node of identical content if found,
- * NULL otherwise.
+ * -EBUSY if the stable node's page is being migrated, NULL otherwise.
 */
-static struct page *stable_tree_search(struct page *page)
+static struct folio *stable_tree_search(struct page *page)
 {
 	int nid;
 	struct rb_root *root;
 	struct rb_node **new;
 	struct rb_node *parent;
-	struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
+	struct ksm_stable_node *stable_node, *stable_node_dup;
 	struct ksm_stable_node *page_node;
+	struct folio *folio;
 
-	page_node = page_stable_node(page);
+	folio = page_folio(page);
+	page_node = folio_stable_node(folio);
 	if (page_node && page_node->head != &migrate_nodes) {
 		/* ksm page forked */
-		get_page(page);
-		return page;
+		folio_get(folio);
+		return folio;
 	}
 
-	nid = get_kpfn_nid(page_to_pfn(page));
+	nid = get_kpfn_nid(folio_pfn(folio));
 	root = root_stable_tree + nid;
again:
 	new = &root->rb_node;
 	parent = NULL;
 
 	while (*new) {
-		struct page *tree_page;
+		struct folio *tree_folio;
 		int ret;
 
 		cond_resched();
 		stable_node = rb_entry(*new, struct ksm_stable_node, node);
-		stable_node_any = NULL;
-		tree_page = chain_prune(&stable_node_dup, &stable_node, root);
-		/*
-		 * NOTE: stable_node may have been freed by
-		 * chain_prune() if the returned stable_node_dup is
-		 * not NULL. stable_node_dup may have been inserted in
-		 * the rbtree instead as a regular stable_node (in
-		 * order to collapse the stable_node chain if a single
-		 * stable_node dup was found in it). In such case the
-		 * stable_node is overwritten by the callee to point
-		 * to the stable_node_dup that was collapsed in the
-		 * stable rbtree and stable_node will be equal to
-		 * stable_node_dup like if the chain never existed.
-		 */
-		if (!stable_node_dup) {
-			/*
-			 * Either all stable_node dups were full in
-			 * this stable_node chain, or this chain was
-			 * empty and should be rb_erased.
-			 */
-			stable_node_any = stable_node_dup_any(stable_node,
-							      root);
-			if (!stable_node_any) {
-				/* rb_erase just run */
-				goto again;
-			}
-			/*
-			 * Take any of the stable_node dups page of
-			 * this stable_node chain to let the tree walk
-			 * continue. All KSM pages belonging to the
-			 * stable_node dups in a stable_node chain
-			 * have the same content and they're
-			 * write protected at all times. Any will work
-			 * fine to continue the walk.
-			 */
-			tree_page = get_ksm_page(stable_node_any,
-						 GET_KSM_PAGE_NOLOCK);
-		}
-		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
-		if (!tree_page) {
+		tree_folio = chain_prune(&stable_node_dup, &stable_node, root);
+		if (!tree_folio) {
 			/*
			 * If we walked over a stale stable_node,
-			 * get_ksm_page() will call rb_erase() and it
+			 * ksm_get_folio() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
@@ -1708,8 +1866,8 @@ again:
 			goto again;
 		}
 
-		ret = memcmp_pages(page, tree_page);
-		put_page(tree_page);
+		ret = memcmp_pages(page, &tree_folio->page);
+		folio_put(tree_folio);
 
 		parent = *new;
 		if (ret < 0)
@@ -1720,16 +1878,19 @@ again:
 			if (page_node) {
 				VM_BUG_ON(page_node->head != &migrate_nodes);
 				/*
-				 * Test if the migrated page should be merged
-				 * into a stable node dup. If the mapcount is
-				 * 1 we can migrate it with another KSM page
-				 * without adding it to the chain.
+				 * If the mapcount of our migrated KSM folio is
+				 * at most 1, we can merge it with another
+				 * KSM folio where we know that we have space
+				 * for one more mapping without exceeding the
+				 * ksm_max_page_sharing limit: see
+				 * chain_prune(). This way, we can avoid adding
+				 * this stable node to the chain.
				 */
-				if (page_mapcount(page) > 1)
+				if (folio_mapcount(folio) > 1)
 					goto chain_append;
 			}
 
-			if (!stable_node_dup) {
+			if (!is_page_sharing_candidate(stable_node_dup)) {
 				/*
				 * If the stable_node is a chain and
				 * we got a payload match in memcmp
@@ -1752,26 +1913,26 @@ again:
			 * It would be more elegant to return stable_node
			 * than kpage, but that involves more changes.
			 */
-			tree_page = get_ksm_page(stable_node_dup,
-						 GET_KSM_PAGE_TRYLOCK);
+			tree_folio = ksm_get_folio(stable_node_dup,
+						   KSM_GET_FOLIO_TRYLOCK);
 
-			if (PTR_ERR(tree_page) == -EBUSY)
+			if (PTR_ERR(tree_folio) == -EBUSY)
 				return ERR_PTR(-EBUSY);
 
-			if (unlikely(!tree_page))
+			if (unlikely(!tree_folio))
 				/*
				 * The tree may have been rebalanced,
				 * so re-evaluate parent and new.
				 */
 				goto again;
-			unlock_page(tree_page);
+			folio_unlock(tree_folio);
 
 			if (get_kpfn_nid(stable_node_dup->kpfn) !=
 			    NUMA(stable_node_dup->nid)) {
-				put_page(tree_page);
+				folio_put(tree_folio);
 				goto replace;
 			}
-			return tree_page;
+			return tree_folio;
 		}
 	}
 
@@ -1784,8 +1945,8 @@ again:
 	rb_insert_color(&page_node->node, root);
out:
 	if (is_page_sharing_candidate(page_node)) {
-		get_page(page);
-		return page;
+		folio_get(folio);
+		return folio;
 	} else
 		return NULL;
 
@@ -1810,12 +1971,12 @@ replace:
 					&page_node->node, root);
 			if (is_page_sharing_candidate(page_node))
-				get_page(page);
+				folio_get(folio);
 			else
-				page = NULL;
+				folio = NULL;
 		} else {
 			rb_erase(&stable_node_dup->node, root);
-			page = NULL;
+			folio = NULL;
 		}
 	} else {
 		VM_BUG_ON(!is_stable_node_chain(stable_node));
@@ -1826,21 +1987,18 @@ replace:
 			DO_NUMA(page_node->nid = nid);
 			stable_node_chain_add_dup(page_node, stable_node);
 			if (is_page_sharing_candidate(page_node))
-				get_page(page);
+				folio_get(folio);
 			else
-				page = NULL;
+				folio = NULL;
 		} else {
-			page = NULL;
+			folio = NULL;
 		}
 	}
 	stable_node_dup->head = &migrate_nodes;
 	list_add(&stable_node_dup->list, stable_node_dup->head);
-	return page;
+	return folio;
 
chain_append:
-	/* stable_node_dup could be null if it reached the limit */
-	if (!stable_node_dup)
-		stable_node_dup = stable_node_any;
 	/*
	 * If stable_node was a chain and chain_prune collapsed it,
	 * stable_node has been updated to be the new regular
@@ -1878,17 +2036,17 @@ chain_append:
 * This function returns the stable tree node just allocated on success,
 * NULL otherwise.
 */
-static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
+static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
 {
 	int nid;
 	unsigned long kpfn;
 	struct rb_root *root;
 	struct rb_node **new;
 	struct rb_node *parent;
-	struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
+	struct ksm_stable_node *stable_node, *stable_node_dup;
 	bool need_chain = false;
 
-	kpfn = page_to_pfn(kpage);
+	kpfn = folio_pfn(kfolio);
 	nid = get_kpfn_nid(kpfn);
 	root = root_stable_tree + nid;
again:
@@ -1896,42 +2054,16 @@ again:
 	new = &root->rb_node;
 
 	while (*new) {
-		struct page *tree_page;
+		struct folio *tree_folio;
 		int ret;
 
 		cond_resched();
 		stable_node = rb_entry(*new, struct ksm_stable_node, node);
-		stable_node_any = NULL;
-		tree_page = chain(&stable_node_dup, stable_node, root);
-		if (!stable_node_dup) {
-			/*
-			 * Either all stable_node dups were full in
-			 * this stable_node chain, or this chain was
-			 * empty and should be rb_erased.
-			 */
-			stable_node_any = stable_node_dup_any(stable_node,
-							      root);
-			if (!stable_node_any) {
-				/* rb_erase just run */
-				goto again;
-			}
-			/*
-			 * Take any of the stable_node dups page of
-			 * this stable_node chain to let the tree walk
-			 * continue. All KSM pages belonging to the
-			 * stable_node dups in a stable_node chain
-			 * have the same content and they're
-			 * write protected at all times. Any will work
-			 * fine to continue the walk.
-			 */
-			tree_page = get_ksm_page(stable_node_any,
-						 GET_KSM_PAGE_NOLOCK);
-		}
-		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
-		if (!tree_page) {
+		tree_folio = chain(&stable_node_dup, &stable_node, root);
+		if (!tree_folio) {
 			/*
			 * If we walked over a stale stable_node,
-			 * get_ksm_page() will call rb_erase() and it
+			 * ksm_get_folio() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
@@ -1941,8 +2073,8 @@ again:
 			goto again;
 		}
 
-		ret = memcmp_pages(kpage, tree_page);
-		put_page(tree_page);
+		ret = memcmp_pages(&kfolio->page, &tree_folio->page);
+		folio_put(tree_folio);
 
 		parent = *new;
 		if (ret < 0)
@@ -1961,7 +2093,6 @@ again:
 
 	INIT_HLIST_HEAD(&stable_node_dup->hlist);
 	stable_node_dup->kpfn = kpfn;
-	set_page_stable_node(kpage, stable_node_dup);
 	stable_node_dup->rmap_hlist_len = 0;
 	DO_NUMA(stable_node_dup->nid = nid);
 	if (!need_chain) {
@@ -1980,6 +2111,8 @@ again:
 		stable_node_chain_add_dup(stable_node_dup, stable_node);
 	}
 
+	folio_set_stable_node(kfolio, stable_node_dup);
+
 	return stable_node_dup;
 }
 
@@ -2114,16 +2247,16 @@ static void stable_tree_append(struct ksm_rmap_item *rmap_item,
 */
 static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
 {
-	struct mm_struct *mm = rmap_item->mm;
+	struct folio *folio = page_folio(page);
 	struct ksm_rmap_item *tree_rmap_item;
 	struct page *tree_page = NULL;
 	struct ksm_stable_node *stable_node;
-	struct page *kpage;
+	struct folio *kfolio;
 	unsigned int checksum;
 	int err;
 	bool max_page_sharing_bypass = false;
 
-	stable_node = page_stable_node(page);
+	stable_node = folio_stable_node(folio);
 	if (stable_node) {
 		if (stable_node->head != &migrate_nodes &&
 		    get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
@@ -2141,84 +2274,59 @@
		 */
 		if (!is_page_sharing_candidate(stable_node))
 			max_page_sharing_bypass = true;
+	} else {
+		remove_rmap_item_from_tree(rmap_item);
+
+		/*
+		 * If the hash value of the page has changed from the last time
+		 * we calculated it, this page is changing frequently: therefore we
+		 * don't want to insert it in the unstable tree, and we don't want
+		 * to waste our time searching for something identical to it there.
+		 */
+		checksum = calc_checksum(page);
+		if (rmap_item->oldchecksum != checksum) {
+			rmap_item->oldchecksum = checksum;
+			return;
+		}
+
+		if (!try_to_merge_with_zero_page(rmap_item, page))
+			return;
 	}
 
-	/* We first start with searching the page inside the stable tree */
-	kpage = stable_tree_search(page);
-	if (kpage == page && rmap_item->head == stable_node) {
-		put_page(kpage);
+	/* Start by searching for the folio in the stable tree */
+	kfolio = stable_tree_search(page);
+	if (kfolio == folio && rmap_item->head == stable_node) {
+		folio_put(kfolio);
 		return;
 	}
 
 	remove_rmap_item_from_tree(rmap_item);
 
-	if (kpage) {
-		if (PTR_ERR(kpage) == -EBUSY)
+	if (kfolio) {
+		if (kfolio == ERR_PTR(-EBUSY))
 			return;
 
-		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
+		err = try_to_merge_with_ksm_page(rmap_item, page, &kfolio->page);
 		if (!err) {
 			/*
			 * The page was successfully merged:
			 * add its rmap_item to the stable tree.
			 */
-			lock_page(kpage);
-			stable_tree_append(rmap_item, page_stable_node(kpage),
+			folio_lock(kfolio);
+			stable_tree_append(rmap_item, folio_stable_node(kfolio),
 					   max_page_sharing_bypass);
-			unlock_page(kpage);
+			folio_unlock(kfolio);
 		}
-		put_page(kpage);
+		folio_put(kfolio);
 		return;
 	}
 
-	/*
-	 * If the hash value of the page has changed from the last time
-	 * we calculated it, this page is changing frequently: therefore we
-	 * don't want to insert it in the unstable tree, and we don't want
-	 * to waste our time searching for something identical to it there.
-	 */
-	checksum = calc_checksum(page);
-	if (rmap_item->oldchecksum != checksum) {
-		rmap_item->oldchecksum = checksum;
-		return;
-	}
-
-	/*
-	 * Same checksum as an empty page. We attempt to merge it with the
-	 * appropriate zero page if the user enabled this via sysfs.
-	 */
-	if (ksm_use_zero_pages && (checksum == zero_checksum)) {
-		struct vm_area_struct *vma;
-
-		mmap_read_lock(mm);
-		vma = find_mergeable_vma(mm, rmap_item->address);
-		if (vma) {
-			err = try_to_merge_one_page(vma, page,
-					ZERO_PAGE(rmap_item->address));
-			trace_ksm_merge_one_page(
-				page_to_pfn(ZERO_PAGE(rmap_item->address)),
-				rmap_item, mm, err);
-		} else {
-			/*
-			 * If the vma is out of date, we do not need to
-			 * continue.
-			 */
-			err = 0;
-		}
-		mmap_read_unlock(mm);
-		/*
-		 * In case of failure, the page was not really empty, so we
-		 * need to continue. Otherwise we're done.
-		 */
-		if (!err)
-			return;
-	}
 	tree_rmap_item =
 		unstable_tree_search_insert(rmap_item, page, &tree_page);
 	if (tree_rmap_item) {
 		bool split;
 
-		kpage = try_to_merge_two_pages(rmap_item, page,
+		kfolio = try_to_merge_two_pages(rmap_item, page,
 						tree_rmap_item, tree_page);
 		/*
		 * If both pages we tried to merge belong to the same compound
@@ -2233,20 +2341,20 @@
 		split = PageTransCompound(page)
 			&& compound_head(page) == compound_head(tree_page);
 		put_page(tree_page);
-		if (kpage) {
+		if (kfolio) {
 			/*
			 * The pages were successfully merged: insert new
			 * node in the stable tree and add both rmap_items.
			 */
-			lock_page(kpage);
-			stable_node = stable_tree_insert(kpage);
+			folio_lock(kfolio);
+			stable_node = stable_tree_insert(kfolio);
 			if (stable_node) {
 				stable_tree_append(tree_rmap_item, stable_node,
 						   false);
 				stable_tree_append(rmap_item, stable_node,
 						   false);
 			}
-			unlock_page(kpage);
+			folio_unlock(kfolio);
 
 			/*
			 * If we fail to insert the page into the stable tree,
@@ -2268,10 +2376,11 @@
			 * the page is locked, it is better to skip it and
			 * perhaps try again later.
			 */
-			if (!trylock_page(page))
+			if (!folio_trylock(folio))
 				return;
 			split_huge_page(page);
-			unlock_page(page);
+			folio = page_folio(page);
+			folio_unlock(folio);
 		}
 	}
 }
@@ -2305,6 +2414,163 @@ static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
 	return rmap_item;
 }
 
+/*
+ * Calculate skip age for the ksm page age. The age determines how often
+ * de-duplicating has already been tried unsuccessfully. If the age is
+ * smaller, the scanning of this page is skipped for less scans.
+ *
+ * @age: rmap_item age of page
+ */
+static unsigned int skip_age(rmap_age_t age)
+{
+	if (age <= 3)
+		return 1;
+	if (age <= 5)
+		return 2;
+	if (age <= 8)
+		return 4;
+
+	return 8;
+}
+
+/*
+ * Determines if a page should be skipped for the current scan.
+ *
+ * @folio: folio containing the page to check
+ * @rmap_item: associated rmap_item of page
+ */
+static bool should_skip_rmap_item(struct folio *folio,
+				  struct ksm_rmap_item *rmap_item)
+{
+	rmap_age_t age;
+
+	if (!ksm_smart_scan)
+		return false;
+
+	/*
+	 * Never skip pages that are already KSM; pages cmp_and_merge_page()
+	 * will essentially ignore them, but we still have to process them
+	 * properly.
+	 */
+	if (folio_test_ksm(folio))
+		return false;
+
+	age = rmap_item->age;
+	if (age != U8_MAX)
+		rmap_item->age++;
+
+	/*
+	 * Smaller ages are not skipped, they need to get a chance to go
+	 * through the different phases of the KSM merging.
+	 */
+	if (age < 3)
+		return false;
+
+	/*
+	 * Are we still allowed to skip? If not, then don't skip it
+	 * and determine how much more often we are allowed to skip next.
+	 */
+	if (!rmap_item->remaining_skips) {
+		rmap_item->remaining_skips = skip_age(age);
+		return false;
+	}
+
+	/* Skip this page */
+	ksm_pages_skipped++;
+	rmap_item->remaining_skips--;
+	remove_rmap_item_from_tree(rmap_item);
+	return true;
+}
+
+struct ksm_next_page_arg {
+	struct folio *folio;
+	struct page *page;
+	unsigned long addr;
+};
+
+static int ksm_next_page_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned long end,
+				   struct mm_walk *walk)
+{
+	struct ksm_next_page_arg *private = walk->private;
+	struct vm_area_struct *vma = walk->vma;
+	pte_t *start_ptep = NULL, *ptep, pte;
+	struct mm_struct *mm = walk->mm;
+	struct folio *folio;
+	struct page *page;
+	spinlock_t *ptl;
+	pmd_t pmd;
+
+	if (ksm_test_exit(mm))
+		return 0;
+
+	cond_resched();
+
+	pmd = pmdp_get_lockless(pmdp);
+	if (!pmd_present(pmd))
+		return 0;
+
+	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && pmd_leaf(pmd)) {
+		ptl = pmd_lock(mm, pmdp);
+		pmd = pmdp_get(pmdp);
+
+		if (!pmd_present(pmd)) {
+			goto not_found_unlock;
+		} else if (pmd_leaf(pmd)) {
+			page = vm_normal_page_pmd(vma, addr, pmd);
+			if (!page)
+				goto not_found_unlock;
+			folio = page_folio(page);
+
+			if (folio_is_zone_device(folio) || !folio_test_anon(folio))
+				goto not_found_unlock;
+
+			page += ((addr & (PMD_SIZE - 1)) >> PAGE_SHIFT);
+			goto found_unlock;
+		}
+		spin_unlock(ptl);
+	}
+
+	start_ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+	if (!start_ptep)
+		return 0;
+
+	for (ptep = start_ptep; addr < end; ptep++, addr += PAGE_SIZE) {
+		pte = ptep_get(ptep);
+
+		if (!pte_present(pte))
+			continue;
+
+		page = vm_normal_page(vma, addr, pte);
+		if (!page)
+			continue;
+		folio = page_folio(page);
+
+		if (folio_is_zone_device(folio) || !folio_test_anon(folio))
+			continue;
+		goto found_unlock;
+	}
+
+not_found_unlock:
+	spin_unlock(ptl);
+	if (start_ptep)
+		pte_unmap(start_ptep);
+	return 0;
+found_unlock:
+	folio_get(folio);
+	spin_unlock(ptl);
+	if (start_ptep)
+		pte_unmap(start_ptep);
+	private->page = page;
+	private->folio = folio;
+	private->addr = addr;
+	return 1;
+}
+
+static struct mm_walk_ops ksm_next_page_ops = {
+	.pmd_entry = ksm_next_page_pmd_entry,
+	.walk_lock = PGWALK_RDLOCK,
+};
+
 static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
 {
 	struct mm_struct *mm;
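skip_age() and should_skip_rmap_item() together form an increasing back-off for pages that repeatedly fail to merge. This stand-alone simulation (plain userspace C mirroring the two functions just added, with an invented driver loop) prints which of 30 consecutive scans would actually process a page that never becomes mergeable:

#include <stdio.h>

typedef unsigned char rmap_age_t;

static unsigned int skip_age(rmap_age_t age)
{
	if (age <= 3)
		return 1;
	if (age <= 5)
		return 2;
	if (age <= 8)
		return 4;
	return 8;
}

int main(void)
{
	rmap_age_t item_age = 0;
	unsigned int remaining_skips = 0;

	for (int scan = 1; scan <= 30; scan++) {
		rmap_age_t age = item_age;

		if (item_age != 255)
			item_age++;
		if (age < 3 || !remaining_skips) {
			if (age >= 3)
				remaining_skips = skip_age(age);
			printf("scan %2d: processed (age %u)\n", scan, age);
		} else {
			remaining_skips--;
			printf("scan %2d: skipped\n", scan);
		}
	}
	return 0;
}

The effect is that a stubborn page is looked at on every early scan, then every 2nd or 3rd scan, and eventually only once per 9 scans, which is where the ksm_pages_skipped counter accumulates.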
ksm_next_page_arg.folio; + tmp_page = ksm_next_page_arg.page; + ksm_scan.address = ksm_next_page_arg.addr; + } else { + VM_WARN_ON_ONCE(found < 0); + ksm_scan.address = vma->vm_end - PAGE_SIZE; } - if (is_zone_device_page(*page)) - goto next_page; - if (PageAnon(*page)) { - flush_anon_page(vma, *page, ksm_scan.address); - flush_dcache_page(*page); + + if (tmp_page) { + flush_anon_page(vma, tmp_page, ksm_scan.address); + flush_dcache_page(tmp_page); rmap_item = get_next_rmap_item(mm_slot, ksm_scan.rmap_list, ksm_scan.address); if (rmap_item) { ksm_scan.rmap_list = &rmap_item->rmap_list; + + if (should_skip_rmap_item(folio, rmap_item)) { + folio_put(folio); + goto next_page; + } + ksm_scan.address += PAGE_SIZE; - } else - put_page(*page); + *page = tmp_page; + } else { + folio_put(folio); + } mmap_read_unlock(mm); return rmap_item; } next_page: - put_page(*page); ksm_scan.address += PAGE_SIZE; cond_resched(); } @@ -2452,8 +2739,14 @@ no_vmas: spin_unlock(&ksm_mmlist_lock); mm_slot_free(mm_slot_cache, mm_slot); - clear_bit(MMF_VM_MERGEABLE, &mm->flags); - clear_bit(MMF_VM_MERGE_ANY, &mm->flags); + /* + * Only clear MMF_VM_MERGEABLE. We must not clear + * MMF_VM_MERGE_ANY, because for those MMF_VM_MERGE_ANY process, + * perhaps their mm_struct has just been added to ksm_mm_slot + * list, and its process has not yet officially started running + * or has not yet performed mmap/brk to allocate anonymous VMAS. + */ + mm_flags_clear(MMF_VM_MERGEABLE, mm); mmap_read_unlock(mm); mmdrop(mm); } else { @@ -2473,6 +2766,8 @@ no_vmas: if (mm_slot != &ksm_mm_head) goto next_mm; + advisor_stop_scan(); + trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items); ksm_scan.seqnr++; return NULL; @@ -2486,18 +2781,16 @@ static void ksm_do_scan(unsigned int scan_npages) { struct ksm_rmap_item *rmap_item; struct page *page; - unsigned int npages = scan_npages; - while (npages-- && likely(!freezing(current))) { + while (scan_npages-- && likely(!freezing(current))) { cond_resched(); rmap_item = scan_get_next_rmap_item(&page); if (!rmap_item) return; cmp_and_merge_page(page, rmap_item); put_page(page); + ksm_pages_scanned++; } - - ksm_pages_scanned += scan_npages - npages; } static int ksmd_should_run(void) @@ -2519,11 +2812,9 @@ static int ksm_scan_thread(void *nothing) ksm_do_scan(ksm_thread_pages_to_scan); mutex_unlock(&ksm_thread_mutex); - try_to_freeze(); - if (ksmd_should_run()) { sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs); - wait_event_interruptible_timeout(ksm_iter_wait, + wait_event_freezable_timeout(ksm_iter_wait, sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs), msecs_to_jiffies(sleep_ms)); } else { @@ -2534,14 +2825,17 @@ static int ksm_scan_thread(void *nothing) return 0; } -static void __ksm_add_vma(struct vm_area_struct *vma) +static bool __ksm_should_add_vma(const struct file *file, vm_flags_t vm_flags) { - unsigned long vm_flags = vma->vm_flags; - if (vm_flags & VM_MERGEABLE) - return; + return false; + + return ksm_compatible(file, vm_flags); +} - if (vma_ksm_compatible(vma)) +static void __ksm_add_vma(struct vm_area_struct *vma) +{ + if (__ksm_should_add_vma(vma->vm_file, vma->vm_flags)) vm_flags_set(vma, VM_MERGEABLE); } @@ -2553,7 +2847,7 @@ static int __ksm_del_vma(struct vm_area_struct *vma) return 0; if (vma->anon_vma) { - err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true); + err = break_ksm(vma, vma->vm_start, vma->vm_end, true); if (err) return err; } @@ -2562,16 +2856,30 @@ static int __ksm_del_vma(struct vm_area_struct *vma) return 0; } /** - * ksm_add_vma - Mark vma as 
mergeable if compatible
+ * ksm_vma_flags - Update VMA flags to mark as mergeable if compatible
  *
- * @vma: Pointer to vma
+ * @mm: Proposed VMA's mm_struct
+ * @file: Proposed VMA's file-backed mapping, if any.
+ * @vm_flags: Proposed VMA's flags.
+ *
+ * Returns: @vm_flags possibly updated to mark mergeable.
  */
-void ksm_add_vma(struct vm_area_struct *vma)
+vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
+			 vm_flags_t vm_flags)
 {
-	struct mm_struct *mm = vma->vm_mm;
+	if (mm_flags_test(MMF_VM_MERGE_ANY, mm) &&
+	    __ksm_should_add_vma(file, vm_flags)) {
+		vm_flags |= VM_MERGEABLE;
+		/*
+		 * Generally, the flags here always include MMF_VM_MERGEABLE.
+		 * However, in rare cases, this flag may be cleared by ksmd who
+		 * scans a cycle without finding any mergeable vma.
+		 */
+		if (unlikely(!mm_flags_test(MMF_VM_MERGEABLE, mm)))
+			__ksm_enter(mm);
+	}
 
-	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
-		__ksm_add_vma(vma);
+	return vm_flags;
 }
 
 static void ksm_add_vmas(struct mm_struct *mm)
@@ -2609,16 +2917,16 @@ int ksm_enable_merge_any(struct mm_struct *mm)
 {
 	int err;
 
-	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+	if (mm_flags_test(MMF_VM_MERGE_ANY, mm))
 		return 0;
 
-	if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
+	if (!mm_flags_test(MMF_VM_MERGEABLE, mm)) {
 		err = __ksm_enter(mm);
 		if (err)
 			return err;
 	}
 
-	set_bit(MMF_VM_MERGE_ANY, &mm->flags);
+	mm_flags_set(MMF_VM_MERGE_ANY, mm);
 	ksm_add_vmas(mm);
 
 	return 0;
@@ -2640,7 +2948,7 @@ int ksm_disable_merge_any(struct mm_struct *mm)
 {
 	int err;
 
-	if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+	if (!mm_flags_test(MMF_VM_MERGE_ANY, mm))
 		return 0;
 
 	err = ksm_del_vmas(mm);
@@ -2649,7 +2957,7 @@ int ksm_disable_merge_any(struct mm_struct *mm)
 		return err;
 	}
 
-	clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
+	mm_flags_clear(MMF_VM_MERGE_ANY, mm);
 	return 0;
 }
 
@@ -2657,15 +2965,15 @@ int ksm_disable(struct mm_struct *mm)
 {
 	mmap_assert_write_locked(mm);
 
-	if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
+	if (!mm_flags_test(MMF_VM_MERGEABLE, mm))
 		return 0;
-	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+	if (mm_flags_test(MMF_VM_MERGE_ANY, mm))
 		return ksm_disable_merge_any(mm);
 	return ksm_del_vmas(mm);
 }
 
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
-		unsigned long end, int advice, unsigned long *vm_flags)
+		unsigned long end, int advice, vm_flags_t *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int err;
@@ -2677,7 +2985,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 	if (!vma_ksm_compatible(vma))
 		return 0;
 
-	if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
+	if (!mm_flags_test(MMF_VM_MERGEABLE, mm)) {
 		err = __ksm_enter(mm);
 		if (err)
 			return err;
@@ -2691,7 +2999,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		return 0;		/* just ignore the advice */
 
 	if (vma->anon_vma) {
-		err = unmerge_ksm_pages(vma, start, end, true);
+		err = break_ksm(vma, start, end, true);
 		if (err)
 			return err;
 	}
@@ -2737,7 +3045,7 @@ int __ksm_enter(struct mm_struct *mm)
 		list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
 	spin_unlock(&ksm_mmlist_lock);
 
-	set_bit(MMF_VM_MERGEABLE, &mm->flags);
+	mm_flags_set(MMF_VM_MERGEABLE, mm);
 	mmgrab(mm);
 
 	if (needs_wakeup)
@@ -2749,7 +3057,7 @@ int __ksm_enter(struct mm_struct *mm)
 
 void __ksm_exit(struct mm_struct *mm)
 {
-	struct ksm_mm_slot *mm_slot;
+	struct ksm_mm_slot *mm_slot = NULL;
 	struct mm_slot *slot;
 	int easy_to_free = 0;
 
@@ -2764,23 +3072,26 @@ void __ksm_exit(struct mm_struct *mm)
 
 	spin_lock(&ksm_mmlist_lock);
 	slot = mm_slot_lookup(mm_slots_hash, mm);
+	if (!slot)
+
goto unlock; mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); - if (mm_slot && ksm_scan.mm_slot != mm_slot) { - if (!mm_slot->rmap_list) { - hash_del(&slot->hash); - list_del(&slot->mm_node); - easy_to_free = 1; - } else { - list_move(&slot->mm_node, - &ksm_scan.mm_slot->slot.mm_node); - } + if (ksm_scan.mm_slot == mm_slot) + goto unlock; + if (!mm_slot->rmap_list) { + hash_del(&slot->hash); + list_del(&slot->mm_node); + easy_to_free = 1; + } else { + list_move(&slot->mm_node, + &ksm_scan.mm_slot->slot.mm_node); } +unlock: spin_unlock(&ksm_mmlist_lock); if (easy_to_free) { mm_slot_free(mm_slot_cache, mm_slot); - clear_bit(MMF_VM_MERGE_ANY, &mm->flags); - clear_bit(MMF_VM_MERGEABLE, &mm->flags); + mm_flags_clear(MMF_VM_MERGE_ANY, mm); + mm_flags_clear(MMF_VM_MERGEABLE, mm); mmdrop(mm); } else if (mm_slot) { mmap_write_lock(mm); @@ -2790,49 +3101,52 @@ void __ksm_exit(struct mm_struct *mm) trace_ksm_exit(mm); } -struct page *ksm_might_need_to_copy(struct page *page, - struct vm_area_struct *vma, unsigned long address) +struct folio *ksm_might_need_to_copy(struct folio *folio, + struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = page_folio(page); + struct page *page = folio_page(folio, 0); struct anon_vma *anon_vma = folio_anon_vma(folio); - struct page *new_page; + struct folio *new_folio; + + if (folio_test_large(folio)) + return folio; - if (PageKsm(page)) { - if (page_stable_node(page) && + if (folio_test_ksm(folio)) { + if (folio_stable_node(folio) && !(ksm_run & KSM_RUN_UNMERGE)) - return page; /* no need to copy it */ + return folio; /* no need to copy it */ } else if (!anon_vma) { - return page; /* no need to copy it */ - } else if (page->index == linear_page_index(vma, address) && + return folio; /* no need to copy it */ + } else if (folio->index == linear_page_index(vma, addr) && anon_vma->root == vma->anon_vma->root) { - return page; /* still no need to copy it */ + return folio; /* still no need to copy it */ } if (PageHWPoison(page)) return ERR_PTR(-EHWPOISON); - if (!PageUptodate(page)) - return page; /* let do_swap_page report the error */ - - new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); - if (new_page && - mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) { - put_page(new_page); - new_page = NULL; - } - if (new_page) { - if (copy_mc_user_highpage(new_page, page, address, vma)) { - put_page(new_page); - memory_failure_queue(page_to_pfn(page), 0); + if (!folio_test_uptodate(folio)) + return folio; /* let do_swap_page report the error */ + + new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr); + if (new_folio && + mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) { + folio_put(new_folio); + new_folio = NULL; + } + if (new_folio) { + if (copy_mc_user_highpage(folio_page(new_folio, 0), page, + addr, vma)) { + folio_put(new_folio); return ERR_PTR(-EHWPOISON); } - SetPageDirty(new_page); - __SetPageUptodate(new_page); - __SetPageLocked(new_page); + folio_set_dirty(new_folio); + __folio_mark_uptodate(new_folio); + __folio_set_locked(new_folio); #ifdef CONFIG_SWAP count_vm_event(KSM_SWPIN_COPY); #endif } - return new_page; + return new_folio; } void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) @@ -2909,12 +3223,11 @@ again: /* * Collect processes when the error hit an ksm page. 
*/ -void collect_procs_ksm(struct page *page, struct list_head *to_kill, - int force_early) +void collect_procs_ksm(const struct folio *folio, const struct page *page, + struct list_head *to_kill, int force_early) { struct ksm_stable_node *stable_node; struct ksm_rmap_item *rmap_item; - struct folio *folio = page_folio(page); struct vm_area_struct *vma; struct task_struct *tsk; @@ -2925,7 +3238,7 @@ void collect_procs_ksm(struct page *page, struct list_head *to_kill, struct anon_vma *av = rmap_item->anon_vma; anon_vma_lock_read(av); - read_lock(&tasklist_lock); + rcu_read_lock(); for_each_process(tsk) { struct anon_vma_chain *vmac; unsigned long addr; @@ -2944,7 +3257,7 @@ void collect_procs_ksm(struct page *page, struct list_head *to_kill, } } } - read_unlock(&tasklist_lock); + rcu_read_unlock(); anon_vma_unlock_read(av); } } @@ -2966,11 +3279,11 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio) /* * newfolio->mapping was set in advance; now we need smp_wmb() * to make sure that the new stable_node->kpfn is visible - * to get_ksm_page() before it can see that folio->mapping - * has gone stale (or that folio_test_swapcache has been cleared). + * to ksm_get_folio() before it can see that folio->mapping + * has gone stale (or that the swapcache flag has been cleared). */ smp_wmb(); - set_page_stable_node(&folio->page, NULL); + folio_set_stable_node(folio, NULL); } } #endif /* CONFIG_MIGRATION */ @@ -2993,7 +3306,7 @@ static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node, if (stable_node->kpfn >= start_pfn && stable_node->kpfn < end_pfn) { /* - * Don't get_ksm_page, page has already gone: + * Don't ksm_get_folio, page has already gone: * which is why we keep kpfn instead of page* */ remove_node_from_stable_tree(stable_node); @@ -3068,7 +3381,7 @@ static int ksm_memory_callback(struct notifier_block *self, * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() * and remove_all_stable_nodes() while memory is going offline: * it is unsafe for them to touch the stable tree at this time. - * But unmerge_ksm_pages(), rmap lookups and other entry points + * But break_ksm(), rmap lookups and other entry points * which do not need the ksm_thread_mutex are all safe. */ mutex_lock(&ksm_thread_mutex); @@ -3081,7 +3394,7 @@ static int ksm_memory_callback(struct notifier_block *self, * Most of the work is done by page migration; but there might * be a few stable_nodes left over, still pointing to struct * pages which have been offlined: prune those from the tree, - * otherwise get_ksm_page() might later try to access a + * otherwise ksm_get_folio() might later try to access a * non-existent struct page. */ ksm_check_stable_tree(mn->start_pfn, @@ -3105,9 +3418,28 @@ static void wait_while_offlining(void) #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_PROC_FS +/* + * The process is mergeable only if any VMA is currently + * applicable to KSM. + * + * The mmap lock must be held in read mode. 
+ */ +bool ksm_process_mergeable(struct mm_struct *mm) +{ + struct vm_area_struct *vma; + + mmap_assert_locked(mm); + VMA_ITERATOR(vmi, mm, 0); + for_each_vma(vmi, vma) + if (vma->vm_flags & VM_MERGEABLE) + return true; + + return false; +} + long ksm_process_profit(struct mm_struct *mm) { - return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE - + return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE - mm->ksm_rmap_items * sizeof(struct ksm_rmap_item); } #endif /* CONFIG_PROC_FS */ @@ -3159,6 +3491,9 @@ static ssize_t pages_to_scan_store(struct kobject *kobj, unsigned int nr_pages; int err; + if (ksm_advisor != KSM_ADVISOR_NONE) + return -EINVAL; + err = kstrtouint(buf, 10, &nr_pages); if (err) return -EINVAL; @@ -3383,10 +3718,17 @@ static ssize_t pages_volatile_show(struct kobject *kobj, } KSM_ATTR_RO(pages_volatile); +static ssize_t pages_skipped_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%lu\n", ksm_pages_skipped); +} +KSM_ATTR_RO(pages_skipped); + static ssize_t ksm_zero_pages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return sysfs_emit(buf, "%ld\n", ksm_zero_pages); + return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages)); } KSM_ATTR_RO(ksm_zero_pages); @@ -3395,7 +3737,7 @@ static ssize_t general_profit_show(struct kobject *kobj, { long general_profit; - general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE - + general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE - ksm_rmap_items * sizeof(struct ksm_rmap_item); return sysfs_emit(buf, "%ld\n", general_profit); @@ -3449,6 +3791,152 @@ static ssize_t full_scans_show(struct kobject *kobj, } KSM_ATTR_RO(full_scans); +static ssize_t smart_scan_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%u\n", ksm_smart_scan); +} + +static ssize_t smart_scan_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + bool value; + + err = kstrtobool(buf, &value); + if (err) + return -EINVAL; + + ksm_smart_scan = value; + return count; +} +KSM_ATTR(smart_scan); + +static ssize_t advisor_mode_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + const char *output; + + if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) + output = "none [scan-time]"; + else + output = "[none] scan-time"; + + return sysfs_emit(buf, "%s\n", output); +} + +static ssize_t advisor_mode_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, + size_t count) +{ + enum ksm_advisor_type curr_advisor = ksm_advisor; + + if (sysfs_streq("scan-time", buf)) + ksm_advisor = KSM_ADVISOR_SCAN_TIME; + else if (sysfs_streq("none", buf)) + ksm_advisor = KSM_ADVISOR_NONE; + else + return -EINVAL; + + /* Set advisor default values */ + if (curr_advisor != ksm_advisor) + set_advisor_defaults(); + + return count; +} +KSM_ATTR(advisor_mode); + +static ssize_t advisor_max_cpu_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu); +} + +static ssize_t advisor_max_cpu_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long value; + + err = kstrtoul(buf, 10, &value); + if (err) + return -EINVAL; + + ksm_advisor_max_cpu = value; + return count; +} +KSM_ATTR(advisor_max_cpu); + +static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj, + struct 
kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan); +} + +static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long value; + + err = kstrtoul(buf, 10, &value); + if (err) + return -EINVAL; + + ksm_advisor_min_pages_to_scan = value; + return count; +} +KSM_ATTR(advisor_min_pages_to_scan); + +static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan); +} + +static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long value; + + err = kstrtoul(buf, 10, &value); + if (err) + return -EINVAL; + + ksm_advisor_max_pages_to_scan = value; + return count; +} +KSM_ATTR(advisor_max_pages_to_scan); + +static ssize_t advisor_target_scan_time_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time); +} + +static ssize_t advisor_target_scan_time_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long value; + + err = kstrtoul(buf, 10, &value); + if (err) + return -EINVAL; + if (value < 1) + return -EINVAL; + + ksm_advisor_target_scan_time = value; + return count; +} +KSM_ATTR(advisor_target_scan_time); + static struct attribute *ksm_attrs[] = { &sleep_millisecs_attr.attr, &pages_to_scan_attr.attr, @@ -3458,6 +3946,7 @@ static struct attribute *ksm_attrs[] = { &pages_sharing_attr.attr, &pages_unshared_attr.attr, &pages_volatile_attr.attr, + &pages_skipped_attr.attr, &ksm_zero_pages_attr.attr, &full_scans_attr.attr, #ifdef CONFIG_NUMA @@ -3469,6 +3958,12 @@ static struct attribute *ksm_attrs[] = { &stable_node_chains_prune_millisecs_attr.attr, &use_zero_pages_attr.attr, &general_profit_attr.attr, + &smart_scan_attr.attr, + &advisor_mode_attr.attr, + &advisor_max_cpu_attr.attr, + &advisor_min_pages_to_scan_attr.attr, + &advisor_max_pages_to_scan_attr.attr, + &advisor_target_scan_time_attr.attr, NULL, }; |
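
[Editor's usage sketch, not part of the patch.] The advisor attributes above are registered with KSM_ATTR(), so they surface under KSM's existing sysfs directory, /sys/kernel/mm/ksm/. A minimal userspace helper (the file name and ksm_write() helper are hypothetical; the values written are illustrative, not recommendations) that bounds the advisor and enables it:

/*
 * advisor_tune.c - hypothetical admin helper; run as root on a kernel
 * carrying this patch. Writes the advisor knobs added above.
 */
#include <stdio.h>
#include <stdlib.h>

static int ksm_write(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	/* Attributes registered via KSM_ATTR() appear under this directory. */
	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", attr);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	if (fputs(val, f) == EOF) {
		perror(path);
		fclose(f);
		return -1;
	}
	/* An out-of-range value surfaces as an error when the write flushes. */
	if (fclose(f) == EOF) {
		perror(path);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* Set bounds first, then switch the mode (accepted: "none", "scan-time"). */
	if (ksm_write("advisor_max_cpu", "50") ||
	    ksm_write("advisor_min_pages_to_scan", "500") ||
	    ksm_write("advisor_max_pages_to_scan", "30000") ||
	    ksm_write("advisor_target_scan_time", "200") ||
	    ksm_write("advisor_mode", "scan-time"))
		return EXIT_FAILURE;

	/*
	 * While an advisor is active, pages_to_scan is advisor-controlled:
	 * pages_to_scan_store() above rejects direct writes with -EINVAL.
	 */
	return EXIT_SUCCESS;
}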

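
[Editor's illustration.] To make the smart-scan backoff concrete, here is a self-contained userspace model of skip_age() and should_skip_rmap_item() from earlier in this diff. It is a sketch under stated simplifications: the ksm_smart_scan toggle, the folio_test_ksm() check, the ksm_pages_skipped counter, and remove_rmap_item_from_tree() are omitted, and rmap_age_t is modeled as uint8_t to mirror the kernel's u8 typedef.

/* smart_scan_model.c - userspace model of KSM's smart-scan skipping. */
#include <stdint.h>
#include <stdio.h>

typedef uint8_t rmap_age_t;

/* Same staircase as the kernel's skip_age(). */
static unsigned int skip_age(rmap_age_t age)
{
	if (age <= 3)
		return 1;
	if (age <= 5)
		return 2;
	if (age <= 8)
		return 4;
	return 8;
}

struct rmap_item {
	rmap_age_t age;
	rmap_age_t remaining_skips;
};

/* Returns 1 if this scan cycle skips the page, 0 if it is scanned. */
static int should_skip(struct rmap_item *it)
{
	rmap_age_t age = it->age;

	if (age != UINT8_MAX)
		it->age++;
	/* Young pages always get scanned, as in the kernel's age < 3 check. */
	if (age < 3)
		return 0;
	/* Out of skips: scan now and arm the next, longer skip run. */
	if (!it->remaining_skips) {
		it->remaining_skips = skip_age(age);
		return 0;
	}
	it->remaining_skips--;
	return 1;
}

int main(void)
{
	struct rmap_item it = { 0 };
	unsigned int scans = 0, skips = 0;

	/* A page that never merges: count real scans over 100 cycles. */
	for (int cycle = 0; cycle < 100; cycle++) {
		if (should_skip(&it))
			skips++;
		else
			scans++;
	}
	/* Once age exceeds 8, the page settles at roughly 1 scan per 9 cycles. */
	printf("scanned %u times, skipped %u times\n", scans, skips);
	return 0;
}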