Diffstat (limited to 'mm/ksm.c')
-rw-r--r-- | mm/ksm.c | 805
1 file changed, 359 insertions(+), 446 deletions(-)
@@ -20,7 +20,6 @@ #include <linux/mman.h> #include <linux/sched.h> #include <linux/sched/mm.h> -#include <linux/sched/coredump.h> #include <linux/sched/cputime.h> #include <linux/rwsem.h> #include <linux/pagemap.h> @@ -296,7 +295,7 @@ static bool ksm_use_zero_pages __read_mostly; static bool ksm_smart_scan = true; /* The number of zero pages which is placed by KSM */ -unsigned long ksm_zero_pages; +atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0); /* The number of pages that have been skipped due to "smart scanning" */ static unsigned long ksm_pages_skipped; @@ -488,21 +487,17 @@ static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait); static DEFINE_MUTEX(ksm_thread_mutex); static DEFINE_SPINLOCK(ksm_mmlist_lock); -#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ - sizeof(struct __struct), __alignof__(struct __struct),\ - (__flags), NULL) - static int __init ksm_slab_init(void) { - rmap_item_cache = KSM_KMEM_CACHE(ksm_rmap_item, 0); + rmap_item_cache = KMEM_CACHE(ksm_rmap_item, 0); if (!rmap_item_cache) goto out; - stable_node_cache = KSM_KMEM_CACHE(ksm_stable_node, 0); + stable_node_cache = KMEM_CACHE(ksm_stable_node, 0); if (!stable_node_cache) goto out_free1; - mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0); + mm_slot_cache = KMEM_CACHE(ksm_mm_slot, 0); if (!mm_slot_cache) goto out_free2; @@ -612,47 +607,6 @@ static inline bool ksm_test_exit(struct mm_struct *mm) return atomic_read(&mm->mm_users) == 0; } -static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, - struct mm_walk *walk) -{ - struct page *page = NULL; - spinlock_t *ptl; - pte_t *pte; - pte_t ptent; - int ret; - - pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); - if (!pte) - return 0; - ptent = ptep_get(pte); - if (pte_present(ptent)) { - page = vm_normal_page(walk->vma, addr, ptent); - } else if (!pte_none(ptent)) { - swp_entry_t entry = pte_to_swp_entry(ptent); - - /* - * As KSM pages remain KSM pages until freed, no need to wait - * here for migration to end. - */ - if (is_migration_entry(entry)) - page = pfn_swap_entry_to_page(entry); - } - /* return 1 if the page is an normal ksm page or KSM-placed zero page */ - ret = (page && PageKsm(page)) || is_ksm_zero_pte(ptent); - pte_unmap_unlock(pte, ptl); - return ret; -} - -static const struct mm_walk_ops break_ksm_ops = { - .pmd_entry = break_ksm_pmd_entry, - .walk_lock = PGWALK_RDLOCK, -}; - -static const struct mm_walk_ops break_ksm_lock_vma_ops = { - .pmd_entry = break_ksm_pmd_entry, - .walk_lock = PGWALK_WRLOCK, -}; - /* * We use break_ksm to break COW on a ksm page by triggering unsharing, * such that the ksm page will get replaced by an exclusive anonymous page. @@ -669,16 +623,26 @@ static const struct mm_walk_ops break_ksm_lock_vma_ops = { static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma) { vm_fault_t ret = 0; - const struct mm_walk_ops *ops = lock_vma ? - &break_ksm_lock_vma_ops : &break_ksm_ops; + + if (lock_vma) + vma_start_write(vma); do { - int ksm_page; + bool ksm_page = false; + struct folio_walk fw; + struct folio *folio; cond_resched(); - ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL); - if (WARN_ON_ONCE(ksm_page < 0)) - return ksm_page; + folio = folio_walk_start(&fw, vma, addr, + FW_MIGRATION | FW_ZEROPAGE); + if (folio) { + /* Small folio implies FW_LEVEL_PTE. 
*/ + if (!folio_test_large(folio) && + (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte))) + ksm_page = true; + folio_walk_end(&fw, vma); + } + if (!ksm_page) return 0; ret = handle_mm_fault(vma, addr, @@ -692,7 +656,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_v * * VM_FAULT_SIGBUS could occur if we race with truncation of the * backing file, which also invalidates anonymous pages: that's - * okay, that truncation will have unmapped the PageKsm for us. + * okay, that truncation will have unmapped the KSM page for us. * * VM_FAULT_OOM: at the time of writing (late July 2009), setting * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the @@ -717,7 +681,7 @@ static bool vma_ksm_compatible(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_HUGETLB | - VM_MIXEDMAP)) + VM_MIXEDMAP| VM_DROPPABLE)) return false; /* just ignore the advice */ if (vma_is_dax(vma)) @@ -771,26 +735,28 @@ static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item) struct mm_struct *mm = rmap_item->mm; unsigned long addr = rmap_item->address; struct vm_area_struct *vma; - struct page *page; + struct page *page = NULL; + struct folio_walk fw; + struct folio *folio; mmap_read_lock(mm); vma = find_mergeable_vma(mm, addr); if (!vma) goto out; - page = follow_page(vma, addr, FOLL_GET); - if (IS_ERR_OR_NULL(page)) - goto out; - if (is_zone_device_page(page)) - goto out_putpage; - if (PageAnon(page)) { + folio = folio_walk_start(&fw, vma, addr, 0); + if (folio) { + if (!folio_is_zone_device(folio) && + folio_test_anon(folio)) { + folio_get(folio); + page = fw.page; + } + folio_walk_end(&fw, vma); + } +out: + if (page) { flush_anon_page(vma, page, addr); flush_dcache_page(page); - } else { -out_putpage: - put_page(page); -out: - page = NULL; } mmap_read_unlock(mm); return page; @@ -890,14 +856,14 @@ static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node) free_stable_node(stable_node); } -enum get_ksm_page_flags { - GET_KSM_PAGE_NOLOCK, - GET_KSM_PAGE_LOCK, - GET_KSM_PAGE_TRYLOCK +enum ksm_get_folio_flags { + KSM_GET_FOLIO_NOLOCK, + KSM_GET_FOLIO_LOCK, + KSM_GET_FOLIO_TRYLOCK }; /* - * get_ksm_page: checks if the page indicated by the stable node + * ksm_get_folio: checks if the page indicated by the stable node * is still its ksm page, despite having held no reference to it. * In which case we can trust the content of the page, and it * returns the gotten page; but if the page has now been zapped, @@ -915,10 +881,10 @@ enum get_ksm_page_flags { * a page to put something that might look like our key in page->mapping. * is on its way to being freed; but it is an anomaly to bear in mind. */ -static struct page *get_ksm_page(struct ksm_stable_node *stable_node, - enum get_ksm_page_flags flags) +static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node, + enum ksm_get_folio_flags flags) { - struct page *page; + struct folio *folio; void *expected_mapping; unsigned long kpfn; @@ -926,8 +892,8 @@ static struct page *get_ksm_page(struct ksm_stable_node *stable_node, PAGE_MAPPING_KSM); again: kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */ - page = pfn_to_page(kpfn); - if (READ_ONCE(page->mapping) != expected_mapping) + folio = pfn_folio(kpfn); + if (READ_ONCE(folio->mapping) != expected_mapping) goto stale; /* @@ -940,45 +906,46 @@ again: * in folio_migrate_mapping(), it might still be our page, * in which case it's essential to keep the node. 
*/ - while (!get_page_unless_zero(page)) { + while (!folio_try_get(folio)) { /* - * Another check for page->mapping != expected_mapping would - * work here too. We have chosen the !PageSwapCache test to - * optimize the common case, when the page is or is about to - * be freed: PageSwapCache is cleared (under spin_lock_irq) - * in the ref_freeze section of __remove_mapping(); but Anon - * page->mapping reset to NULL later, in free_pages_prepare(). + * Another check for folio->mapping != expected_mapping + * would work here too. We have chosen to test the + * swapcache flag to optimize the common case, when the + * folio is or is about to be freed: the swapcache flag + * is cleared (under spin_lock_irq) in the ref_freeze + * section of __remove_mapping(); but anon folio->mapping + * is reset to NULL later, in free_pages_prepare(). */ - if (!PageSwapCache(page)) + if (!folio_test_swapcache(folio)) goto stale; cpu_relax(); } - if (READ_ONCE(page->mapping) != expected_mapping) { - put_page(page); + if (READ_ONCE(folio->mapping) != expected_mapping) { + folio_put(folio); goto stale; } - if (flags == GET_KSM_PAGE_TRYLOCK) { - if (!trylock_page(page)) { - put_page(page); + if (flags == KSM_GET_FOLIO_TRYLOCK) { + if (!folio_trylock(folio)) { + folio_put(folio); return ERR_PTR(-EBUSY); } - } else if (flags == GET_KSM_PAGE_LOCK) - lock_page(page); + } else if (flags == KSM_GET_FOLIO_LOCK) + folio_lock(folio); - if (flags != GET_KSM_PAGE_NOLOCK) { - if (READ_ONCE(page->mapping) != expected_mapping) { - unlock_page(page); - put_page(page); + if (flags != KSM_GET_FOLIO_NOLOCK) { + if (READ_ONCE(folio->mapping) != expected_mapping) { + folio_unlock(folio); + folio_put(folio); goto stale; } } - return page; + return folio; stale: /* - * We come here from above when page->mapping or !PageSwapCache + * We come here from above when folio->mapping or the swapcache flag * suggests that the node is stale; but it might be under migration. * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(), * before checking whether node->kpfn has been changed. @@ -998,16 +965,16 @@ static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item) { if (rmap_item->address & STABLE_FLAG) { struct ksm_stable_node *stable_node; - struct page *page; + struct folio *folio; stable_node = rmap_item->head; - page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); - if (!page) + folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK); + if (!folio) goto out; hlist_del(&rmap_item->hlist); - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); if (!hlist_empty(&stable_node->hlist)) ksm_pages_sharing--; @@ -1084,7 +1051,8 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma, return err; } -static inline struct ksm_stable_node *folio_stable_node(struct folio *folio) +static inline +struct ksm_stable_node *folio_stable_node(const struct folio *folio) { return folio_test_ksm(folio) ? 
folio_raw_mapping(folio) : NULL; } @@ -1094,11 +1062,11 @@ static inline struct ksm_stable_node *page_stable_node(struct page *page) return folio_stable_node(page_folio(page)); } -static inline void set_page_stable_node(struct page *page, - struct ksm_stable_node *stable_node) +static inline void folio_set_stable_node(struct folio *folio, + struct ksm_stable_node *stable_node) { - VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page); - page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); + VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio); + folio->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); } #ifdef CONFIG_SYSFS @@ -1107,13 +1075,13 @@ static inline void set_page_stable_node(struct page *page, */ static int remove_stable_node(struct ksm_stable_node *stable_node) { - struct page *page; + struct folio *folio; int err; - page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); - if (!page) { + folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK); + if (!folio) { /* - * get_ksm_page did remove_node_from_stable_tree itself. + * ksm_get_folio did remove_node_from_stable_tree itself. */ return 0; } @@ -1124,22 +1092,22 @@ static int remove_stable_node(struct ksm_stable_node *stable_node) * merge_across_nodes/max_page_sharing be switched. */ err = -EBUSY; - if (!page_mapped(page)) { + if (!folio_mapped(folio)) { /* - * The stable node did not yet appear stale to get_ksm_page(), - * since that allows for an unmapped ksm page to be recognized + * The stable node did not yet appear stale to ksm_get_folio(), + * since that allows for an unmapped ksm folio to be recognized * right up until it is freed; but the node is safe to remove. - * This page might be in an LRU cache waiting to be freed, - * or it might be PageSwapCache (perhaps under writeback), + * This folio might be in an LRU cache waiting to be freed, + * or it might be in the swapcache (perhaps under writeback), * or it might have been removed from swapcache a moment ago. 
*/ - set_page_stable_node(page, NULL); + folio_set_stable_node(folio, NULL); remove_node_from_stable_tree(stable_node); err = 0; } - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); return err; } @@ -1275,23 +1243,24 @@ static u32 calc_checksum(struct page *page) return checksum; } -static int write_protect_page(struct vm_area_struct *vma, struct page *page, +static int write_protect_page(struct vm_area_struct *vma, struct folio *folio, pte_t *orig_pte) { struct mm_struct *mm = vma->vm_mm; - DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0); + DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0); int swapped; int err = -EFAULT; struct mmu_notifier_range range; bool anon_exclusive; pte_t entry; - pvmw.address = page_address_in_vma(page, vma); + if (WARN_ON_ONCE(folio_test_large(folio))) + return err; + + pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma); if (pvmw.address == -EFAULT) goto out; - BUG_ON(PageTransCompound(page)); - mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address, pvmw.address + PAGE_SIZE); mmu_notifier_invalidate_range_start(&range); @@ -1301,12 +1270,12 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) goto out_unlock; - anon_exclusive = PageAnonExclusive(page); + anon_exclusive = PageAnonExclusive(&folio->page); entry = ptep_get(pvmw.pte); if (pte_write(entry) || pte_dirty(entry) || anon_exclusive || mm_tlb_flush_pending(mm)) { - swapped = PageSwapCache(page); - flush_cache_page(vma, pvmw.address, page_to_pfn(page)); + swapped = folio_test_swapcache(folio); + flush_cache_page(vma, pvmw.address, folio_pfn(folio)); /* * Ok this is tricky, when get_user_pages_fast() run it doesn't * take any lock, therefore the check that we are going to make @@ -1326,26 +1295,26 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, * Check that no O_DIRECT or similar I/O is in progress on the * page */ - if (page_mapcount(page) + 1 + swapped != page_count(page)) { + if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) { set_pte_at(mm, pvmw.address, pvmw.pte, entry); goto out_unlock; } /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ if (anon_exclusive && - folio_try_share_anon_rmap_pte(page_folio(page), page)) { + folio_try_share_anon_rmap_pte(folio, &folio->page)) { set_pte_at(mm, pvmw.address, pvmw.pte, entry); goto out_unlock; } if (pte_dirty(entry)) - set_page_dirty(page); + folio_mark_dirty(folio); entry = pte_mkclean(entry); if (pte_write(entry)) entry = pte_wrprotect(entry); - set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); + set_pte_at(mm, pvmw.address, pvmw.pte, entry); } *orig_pte = entry; err = 0; @@ -1372,7 +1341,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, { struct folio *kfolio = page_folio(kpage); struct mm_struct *mm = vma->vm_mm; - struct folio *folio; + struct folio *folio = page_folio(page); pmd_t *pmd; pmd_t pmde; pte_t *ptep; @@ -1382,7 +1351,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, int err = -EFAULT; struct mmu_notifier_range range; - addr = page_address_in_vma(page, vma); + addr = page_address_in_vma(folio, page, vma); if (addr == -EFAULT) goto out; @@ -1428,8 +1397,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, * the dirty bit in zero page's PTE is set. 
*/ newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot))); - ksm_zero_pages++; - mm->ksm_zero_pages++; + ksm_map_zero_page(mm); /* * We're replacing an anonymous page with a zero page, which is * not anonymous. We need to do proper accounting otherwise we @@ -1447,9 +1415,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, * See Documentation/mm/mmu_notifier.rst */ ptep_clear_flush(vma, addr, ptep); - set_pte_at_notify(mm, addr, ptep, newpte); + set_pte_at(mm, addr, ptep, newpte); - folio = page_folio(page); folio_remove_rmap_pte(folio, page, vma); if (!folio_mapped(folio)) folio_free_swap(folio); @@ -1467,7 +1434,7 @@ out: * try_to_merge_one_page - take two pages and merge them into one * @vma: the vma that holds the pte pointing to page * @page: the PageAnon page that we want to replace with kpage - * @kpage: the PageKsm page that we want to map instead of page, + * @kpage: the KSM page that we want to map instead of page, * or NULL the first time when we want to use page as kpage. * * This function returns 0 if the pages were merged, -EFAULT otherwise. @@ -1475,28 +1442,29 @@ out: static int try_to_merge_one_page(struct vm_area_struct *vma, struct page *page, struct page *kpage) { + struct folio *folio = page_folio(page); pte_t orig_pte = __pte(0); int err = -EFAULT; if (page == kpage) /* ksm page forked */ return 0; - if (!PageAnon(page)) + if (!folio_test_anon(folio)) goto out; /* - * We need the page lock to read a stable PageSwapCache in - * write_protect_page(). We use trylock_page() instead of - * lock_page() because we don't want to wait here - we - * prefer to continue scanning and merging different pages, - * then come back to this page when it is unlocked. + * We need the folio lock to read a stable swapcache flag in + * write_protect_page(). We trylock because we don't want to wait + * here - we prefer to continue scanning and merging different + * pages, then come back to this page when it is unlocked. */ - if (!trylock_page(page)) + if (!folio_trylock(folio)) goto out; - if (PageTransCompound(page)) { + if (folio_test_large(folio)) { if (split_huge_page(page)) goto out_unlock; + folio = page_folio(page); } /* @@ -1505,33 +1473,71 @@ static int try_to_merge_one_page(struct vm_area_struct *vma, * ptes are necessarily already write-protected. But in either * case, we need to lock and check page_count is not raised. */ - if (write_protect_page(vma, page, &orig_pte) == 0) { + if (write_protect_page(vma, folio, &orig_pte) == 0) { if (!kpage) { /* - * While we hold page lock, upgrade page from - * PageAnon+anon_vma to PageKsm+NULL stable_node: + * While we hold folio lock, upgrade folio from + * anon to a NULL stable_node with the KSM flag set: * stable_tree_insert() will update stable_node. */ - set_page_stable_node(page, NULL); - mark_page_accessed(page); + folio_set_stable_node(folio, NULL); + folio_mark_accessed(folio); /* - * Page reclaim just frees a clean page with no dirty + * Page reclaim just frees a clean folio with no dirty * ptes: make sure that the ksm page would be swapped. */ - if (!PageDirty(page)) - SetPageDirty(page); + if (!folio_test_dirty(folio)) + folio_mark_dirty(folio); err = 0; } else if (pages_identical(page, kpage)) err = replace_page(vma, page, kpage, orig_pte); } out_unlock: - unlock_page(page); + folio_unlock(folio); out: return err; } /* + * This function returns 0 if the pages were merged or if they are + * no longer merging candidates (e.g., VMA stale), -EFAULT otherwise. 
+ */ +static int try_to_merge_with_zero_page(struct ksm_rmap_item *rmap_item, + struct page *page) +{ + struct mm_struct *mm = rmap_item->mm; + int err = -EFAULT; + + /* + * Same checksum as an empty page. We attempt to merge it with the + * appropriate zero page if the user enabled this via sysfs. + */ + if (ksm_use_zero_pages && (rmap_item->oldchecksum == zero_checksum)) { + struct vm_area_struct *vma; + + mmap_read_lock(mm); + vma = find_mergeable_vma(mm, rmap_item->address); + if (vma) { + err = try_to_merge_one_page(vma, page, + ZERO_PAGE(rmap_item->address)); + trace_ksm_merge_one_page( + page_to_pfn(ZERO_PAGE(rmap_item->address)), + rmap_item, mm, err); + } else { + /* + * If the vma is out of date, we do not need to + * continue. + */ + err = 0; + } + mmap_read_unlock(mm); + } + + return err; +} + +/* * try_to_merge_with_ksm_page - like try_to_merge_two_pages, * but no new kernel page is allocated: kpage must already be a ksm page. * @@ -1576,7 +1582,7 @@ out: * Note that this function upgrades page to ksm page: if one of the pages * is already a ksm page, try_to_merge_with_ksm_page should be used. */ -static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item, +static struct folio *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item, struct page *page, struct ksm_rmap_item *tree_rmap_item, struct page *tree_page) @@ -1594,7 +1600,7 @@ static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item, if (err) break_cow(rmap_item); } - return err ? NULL : page; + return err ? NULL : page_folio(page); } static __always_inline @@ -1617,15 +1623,14 @@ bool is_page_sharing_candidate(struct ksm_stable_node *stable_node) return __is_page_sharing_candidate(stable_node, 0); } -static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup, - struct ksm_stable_node **_stable_node, - struct rb_root *root, - bool prune_stale_stable_nodes) +static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup, + struct ksm_stable_node **_stable_node, + struct rb_root *root, + bool prune_stale_stable_nodes) { struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node; struct hlist_node *hlist_safe; - struct page *_tree_page, *tree_page = NULL; - int nr = 0; + struct folio *folio, *tree_folio = NULL; int found_rmap_hlist_len; if (!prune_stale_stable_nodes || @@ -1643,42 +1648,35 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup, * We must walk all stable_node_dup to prune the stale * stable nodes during lookup. * - * get_ksm_page can drop the nodes from the + * ksm_get_folio can drop the nodes from the * stable_node->hlist if they point to freed pages * (that's why we do a _safe walk). The "dup" * stable_node parameter itself will be freed from * under us if it returns NULL. */ - _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK); - if (!_tree_page) + folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK); + if (!folio) + continue; + /* Pick the best candidate if possible. 
*/ + if (!found || (is_page_sharing_candidate(dup) && + (!is_page_sharing_candidate(found) || + dup->rmap_hlist_len > found_rmap_hlist_len))) { + if (found) + folio_put(tree_folio); + found = dup; + found_rmap_hlist_len = found->rmap_hlist_len; + tree_folio = folio; + /* skip put_page for found candidate */ + if (!prune_stale_stable_nodes && + is_page_sharing_candidate(found)) + break; continue; - nr += 1; - if (is_page_sharing_candidate(dup)) { - if (!found || - dup->rmap_hlist_len > found_rmap_hlist_len) { - if (found) - put_page(tree_page); - found = dup; - found_rmap_hlist_len = found->rmap_hlist_len; - tree_page = _tree_page; - - /* skip put_page for found dup */ - if (!prune_stale_stable_nodes) - break; - continue; - } } - put_page(_tree_page); + folio_put(folio); } if (found) { - /* - * nr is counting all dups in the chain only if - * prune_stale_stable_nodes is true, otherwise we may - * break the loop at nr == 1 even if there are - * multiple entries. - */ - if (prune_stale_stable_nodes && nr == 1) { + if (hlist_is_singular_node(&found->hlist_dup, &stable_node->hlist)) { /* * If there's not just one entry it would * corrupt memory, better BUG_ON. In KSM @@ -1730,27 +1728,17 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup, hlist_add_head(&found->hlist_dup, &stable_node->hlist); } + } else { + /* Its hlist must be empty if no one found. */ + free_stable_node_chain(stable_node, root); } *_stable_node_dup = found; - return tree_page; -} - -static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node, - struct rb_root *root) -{ - if (!is_stable_node_chain(stable_node)) - return stable_node; - if (hlist_empty(&stable_node->hlist)) { - free_stable_node_chain(stable_node, root); - return NULL; - } - return hlist_entry(stable_node->hlist.first, - typeof(*stable_node), hlist_dup); + return tree_folio; } /* - * Like for get_ksm_page, this function can free the *_stable_node and + * Like for ksm_get_folio, this function can free the *_stable_node and * *_stable_node_dup if the returned tree_page is NULL. * * It can also free and overwrite *_stable_node with the found @@ -1763,46 +1751,33 @@ static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stabl * function and will be overwritten in all cases, the caller doesn't * need to initialize it. */ -static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup, - struct ksm_stable_node **_stable_node, - struct rb_root *root, - bool prune_stale_stable_nodes) +static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup, + struct ksm_stable_node **_stable_node, + struct rb_root *root, + bool prune_stale_stable_nodes) { struct ksm_stable_node *stable_node = *_stable_node; + if (!is_stable_node_chain(stable_node)) { - if (is_page_sharing_candidate(stable_node)) { - *_stable_node_dup = stable_node; - return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK); - } - /* - * _stable_node_dup set to NULL means the stable_node - * reached the ksm_max_page_sharing limit. 
- */ - *_stable_node_dup = NULL; - return NULL; + *_stable_node_dup = stable_node; + return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK); } return stable_node_dup(_stable_node_dup, _stable_node, root, prune_stale_stable_nodes); } -static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d, - struct ksm_stable_node **s_n, - struct rb_root *root) +static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d, + struct ksm_stable_node **s_n, + struct rb_root *root) { return __stable_node_chain(s_n_d, s_n, root, true); } -static __always_inline struct page *chain(struct ksm_stable_node **s_n_d, - struct ksm_stable_node *s_n, - struct rb_root *root) +static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d, + struct ksm_stable_node **s_n, + struct rb_root *root) { - struct ksm_stable_node *old_stable_node = s_n; - struct page *tree_page; - - tree_page = __stable_node_chain(s_n_d, &s_n, root, false); - /* not pruning dups so s_n cannot have changed */ - VM_BUG_ON(s_n != old_stable_node); - return tree_page; + return __stable_node_chain(s_n_d, s_n, root, false); } /* @@ -1812,79 +1787,43 @@ static __always_inline struct page *chain(struct ksm_stable_node **s_n_d, * with identical content to the page that we are scanning right now. * * This function returns the stable tree node of identical content if found, - * NULL otherwise. + * -EBUSY if the stable node's page is being migrated, NULL otherwise. */ -static struct page *stable_tree_search(struct page *page) +static struct folio *stable_tree_search(struct page *page) { int nid; struct rb_root *root; struct rb_node **new; struct rb_node *parent; - struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; + struct ksm_stable_node *stable_node, *stable_node_dup; struct ksm_stable_node *page_node; + struct folio *folio; - page_node = page_stable_node(page); + folio = page_folio(page); + page_node = folio_stable_node(folio); if (page_node && page_node->head != &migrate_nodes) { /* ksm page forked */ - get_page(page); - return page; + folio_get(folio); + return folio; } - nid = get_kpfn_nid(page_to_pfn(page)); + nid = get_kpfn_nid(folio_pfn(folio)); root = root_stable_tree + nid; again: new = &root->rb_node; parent = NULL; while (*new) { - struct page *tree_page; + struct folio *tree_folio; int ret; cond_resched(); stable_node = rb_entry(*new, struct ksm_stable_node, node); - stable_node_any = NULL; - tree_page = chain_prune(&stable_node_dup, &stable_node, root); - /* - * NOTE: stable_node may have been freed by - * chain_prune() if the returned stable_node_dup is - * not NULL. stable_node_dup may have been inserted in - * the rbtree instead as a regular stable_node (in - * order to collapse the stable_node chain if a single - * stable_node dup was found in it). In such case the - * stable_node is overwritten by the callee to point - * to the stable_node_dup that was collapsed in the - * stable rbtree and stable_node will be equal to - * stable_node_dup like if the chain never existed. - */ - if (!stable_node_dup) { - /* - * Either all stable_node dups were full in - * this stable_node chain, or this chain was - * empty and should be rb_erased. - */ - stable_node_any = stable_node_dup_any(stable_node, - root); - if (!stable_node_any) { - /* rb_erase just run */ - goto again; - } - /* - * Take any of the stable_node dups page of - * this stable_node chain to let the tree walk - * continue. 
All KSM pages belonging to the - * stable_node dups in a stable_node chain - * have the same content and they're - * write protected at all times. Any will work - * fine to continue the walk. - */ - tree_page = get_ksm_page(stable_node_any, - GET_KSM_PAGE_NOLOCK); - } - VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); - if (!tree_page) { + tree_folio = chain_prune(&stable_node_dup, &stable_node, root); + if (!tree_folio) { /* * If we walked over a stale stable_node, - * get_ksm_page() will call rb_erase() and it + * ksm_get_folio() will call rb_erase() and it * may rebalance the tree from under us. So * restart the search from scratch. Returning * NULL would be safe too, but we'd generate @@ -1894,8 +1833,8 @@ again: goto again; } - ret = memcmp_pages(page, tree_page); - put_page(tree_page); + ret = memcmp_pages(page, &tree_folio->page); + folio_put(tree_folio); parent = *new; if (ret < 0) @@ -1906,16 +1845,19 @@ again: if (page_node) { VM_BUG_ON(page_node->head != &migrate_nodes); /* - * Test if the migrated page should be merged - * into a stable node dup. If the mapcount is - * 1 we can migrate it with another KSM page - * without adding it to the chain. + * If the mapcount of our migrated KSM folio is + * at most 1, we can merge it with another + * KSM folio where we know that we have space + * for one more mapping without exceeding the + * ksm_max_page_sharing limit: see + * chain_prune(). This way, we can avoid adding + * this stable node to the chain. */ - if (page_mapcount(page) > 1) + if (folio_mapcount(folio) > 1) goto chain_append; } - if (!stable_node_dup) { + if (!is_page_sharing_candidate(stable_node_dup)) { /* * If the stable_node is a chain and * we got a payload match in memcmp @@ -1938,26 +1880,26 @@ again: * It would be more elegant to return stable_node * than kpage, but that involves more changes. */ - tree_page = get_ksm_page(stable_node_dup, - GET_KSM_PAGE_TRYLOCK); + tree_folio = ksm_get_folio(stable_node_dup, + KSM_GET_FOLIO_TRYLOCK); - if (PTR_ERR(tree_page) == -EBUSY) + if (PTR_ERR(tree_folio) == -EBUSY) return ERR_PTR(-EBUSY); - if (unlikely(!tree_page)) + if (unlikely(!tree_folio)) /* * The tree may have been rebalanced, * so re-evaluate parent and new. 
*/ goto again; - unlock_page(tree_page); + folio_unlock(tree_folio); if (get_kpfn_nid(stable_node_dup->kpfn) != NUMA(stable_node_dup->nid)) { - put_page(tree_page); + folio_put(tree_folio); goto replace; } - return tree_page; + return tree_folio; } } @@ -1970,8 +1912,8 @@ again: rb_insert_color(&page_node->node, root); out: if (is_page_sharing_candidate(page_node)) { - get_page(page); - return page; + folio_get(folio); + return folio; } else return NULL; @@ -1996,12 +1938,12 @@ replace: &page_node->node, root); if (is_page_sharing_candidate(page_node)) - get_page(page); + folio_get(folio); else - page = NULL; + folio = NULL; } else { rb_erase(&stable_node_dup->node, root); - page = NULL; + folio = NULL; } } else { VM_BUG_ON(!is_stable_node_chain(stable_node)); @@ -2012,21 +1954,18 @@ replace: DO_NUMA(page_node->nid = nid); stable_node_chain_add_dup(page_node, stable_node); if (is_page_sharing_candidate(page_node)) - get_page(page); + folio_get(folio); else - page = NULL; + folio = NULL; } else { - page = NULL; + folio = NULL; } } stable_node_dup->head = &migrate_nodes; list_add(&stable_node_dup->list, stable_node_dup->head); - return page; + return folio; chain_append: - /* stable_node_dup could be null if it reached the limit */ - if (!stable_node_dup) - stable_node_dup = stable_node_any; /* * If stable_node was a chain and chain_prune collapsed it, * stable_node has been updated to be the new regular @@ -2064,17 +2003,17 @@ chain_append: * This function returns the stable tree node just allocated on success, * NULL otherwise. */ -static struct ksm_stable_node *stable_tree_insert(struct page *kpage) +static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio) { int nid; unsigned long kpfn; struct rb_root *root; struct rb_node **new; struct rb_node *parent; - struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; + struct ksm_stable_node *stable_node, *stable_node_dup; bool need_chain = false; - kpfn = page_to_pfn(kpage); + kpfn = folio_pfn(kfolio); nid = get_kpfn_nid(kpfn); root = root_stable_tree + nid; again: @@ -2082,42 +2021,16 @@ again: new = &root->rb_node; while (*new) { - struct page *tree_page; + struct folio *tree_folio; int ret; cond_resched(); stable_node = rb_entry(*new, struct ksm_stable_node, node); - stable_node_any = NULL; - tree_page = chain(&stable_node_dup, stable_node, root); - if (!stable_node_dup) { - /* - * Either all stable_node dups were full in - * this stable_node chain, or this chain was - * empty and should be rb_erased. - */ - stable_node_any = stable_node_dup_any(stable_node, - root); - if (!stable_node_any) { - /* rb_erase just run */ - goto again; - } - /* - * Take any of the stable_node dups page of - * this stable_node chain to let the tree walk - * continue. All KSM pages belonging to the - * stable_node dups in a stable_node chain - * have the same content and they're - * write protected at all times. Any will work - * fine to continue the walk. - */ - tree_page = get_ksm_page(stable_node_any, - GET_KSM_PAGE_NOLOCK); - } - VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); - if (!tree_page) { + tree_folio = chain(&stable_node_dup, &stable_node, root); + if (!tree_folio) { /* * If we walked over a stale stable_node, - * get_ksm_page() will call rb_erase() and it + * ksm_get_folio() will call rb_erase() and it * may rebalance the tree from under us. So * restart the search from scratch. 
Returning * NULL would be safe too, but we'd generate @@ -2127,8 +2040,8 @@ again: goto again; } - ret = memcmp_pages(kpage, tree_page); - put_page(tree_page); + ret = memcmp_pages(&kfolio->page, &tree_folio->page); + folio_put(tree_folio); parent = *new; if (ret < 0) @@ -2147,7 +2060,6 @@ again: INIT_HLIST_HEAD(&stable_node_dup->hlist); stable_node_dup->kpfn = kpfn; - set_page_stable_node(kpage, stable_node_dup); stable_node_dup->rmap_hlist_len = 0; DO_NUMA(stable_node_dup->nid = nid); if (!need_chain) { @@ -2166,6 +2078,8 @@ again: stable_node_chain_add_dup(stable_node_dup, stable_node); } + folio_set_stable_node(kfolio, stable_node_dup); + return stable_node_dup; } @@ -2300,11 +2214,10 @@ static void stable_tree_append(struct ksm_rmap_item *rmap_item, */ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item) { - struct mm_struct *mm = rmap_item->mm; struct ksm_rmap_item *tree_rmap_item; struct page *tree_page = NULL; struct ksm_stable_node *stable_node; - struct page *kpage; + struct folio *kfolio; unsigned int checksum; int err; bool max_page_sharing_bypass = false; @@ -2327,84 +2240,59 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite */ if (!is_page_sharing_candidate(stable_node)) max_page_sharing_bypass = true; + } else { + remove_rmap_item_from_tree(rmap_item); + + /* + * If the hash value of the page has changed from the last time + * we calculated it, this page is changing frequently: therefore we + * don't want to insert it in the unstable tree, and we don't want + * to waste our time searching for something identical to it there. + */ + checksum = calc_checksum(page); + if (rmap_item->oldchecksum != checksum) { + rmap_item->oldchecksum = checksum; + return; + } + + if (!try_to_merge_with_zero_page(rmap_item, page)) + return; } - /* We first start with searching the page inside the stable tree */ - kpage = stable_tree_search(page); - if (kpage == page && rmap_item->head == stable_node) { - put_page(kpage); + /* Start by searching for the folio in the stable tree */ + kfolio = stable_tree_search(page); + if (&kfolio->page == page && rmap_item->head == stable_node) { + folio_put(kfolio); return; } remove_rmap_item_from_tree(rmap_item); - if (kpage) { - if (PTR_ERR(kpage) == -EBUSY) + if (kfolio) { + if (kfolio == ERR_PTR(-EBUSY)) return; - err = try_to_merge_with_ksm_page(rmap_item, page, kpage); + err = try_to_merge_with_ksm_page(rmap_item, page, &kfolio->page); if (!err) { /* * The page was successfully merged: * add its rmap_item to the stable tree. */ - lock_page(kpage); - stable_tree_append(rmap_item, page_stable_node(kpage), + folio_lock(kfolio); + stable_tree_append(rmap_item, folio_stable_node(kfolio), max_page_sharing_bypass); - unlock_page(kpage); + folio_unlock(kfolio); } - put_page(kpage); + folio_put(kfolio); return; } - /* - * If the hash value of the page has changed from the last time - * we calculated it, this page is changing frequently: therefore we - * don't want to insert it in the unstable tree, and we don't want - * to waste our time searching for something identical to it there. - */ - checksum = calc_checksum(page); - if (rmap_item->oldchecksum != checksum) { - rmap_item->oldchecksum = checksum; - return; - } - - /* - * Same checksum as an empty page. We attempt to merge it with the - * appropriate zero page if the user enabled this via sysfs. 
- */ - if (ksm_use_zero_pages && (checksum == zero_checksum)) { - struct vm_area_struct *vma; - - mmap_read_lock(mm); - vma = find_mergeable_vma(mm, rmap_item->address); - if (vma) { - err = try_to_merge_one_page(vma, page, - ZERO_PAGE(rmap_item->address)); - trace_ksm_merge_one_page( - page_to_pfn(ZERO_PAGE(rmap_item->address)), - rmap_item, mm, err); - } else { - /* - * If the vma is out of date, we do not need to - * continue. - */ - err = 0; - } - mmap_read_unlock(mm); - /* - * In case of failure, the page was not really empty, so we - * need to continue. Otherwise we're done. - */ - if (!err) - return; - } tree_rmap_item = unstable_tree_search_insert(rmap_item, page, &tree_page); if (tree_rmap_item) { bool split; - kpage = try_to_merge_two_pages(rmap_item, page, + kfolio = try_to_merge_two_pages(rmap_item, page, tree_rmap_item, tree_page); /* * If both pages we tried to merge belong to the same compound @@ -2419,20 +2307,20 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite split = PageTransCompound(page) && compound_head(page) == compound_head(tree_page); put_page(tree_page); - if (kpage) { + if (kfolio) { /* * The pages were successfully merged: insert new * node in the stable tree and add both rmap_items. */ - lock_page(kpage); - stable_node = stable_tree_insert(kpage); + folio_lock(kfolio); + stable_node = stable_tree_insert(kfolio); if (stable_node) { stable_tree_append(tree_rmap_item, stable_node, false); stable_tree_append(rmap_item, stable_node, false); } - unlock_page(kpage); + folio_unlock(kfolio); /* * If we fail to insert the page into the stable tree, @@ -2513,10 +2401,10 @@ static unsigned int skip_age(rmap_age_t age) /* * Determines if a page should be skipped for the current scan. * - * @page: page to check + * @folio: folio containing the page to check * @rmap_item: associated rmap_item of page */ -static bool should_skip_rmap_item(struct page *page, +static bool should_skip_rmap_item(struct folio *folio, struct ksm_rmap_item *rmap_item) { rmap_age_t age; @@ -2529,7 +2417,7 @@ static bool should_skip_rmap_item(struct page *page, * will essentially ignore them, but we still have to process them * properly. 
*/ - if (PageKsm(page)) + if (folio_test_ksm(folio)) return false; age = rmap_item->age; @@ -2597,14 +2485,14 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) */ if (!ksm_merge_across_nodes) { struct ksm_stable_node *stable_node, *next; - struct page *page; + struct folio *folio; list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { - page = get_ksm_page(stable_node, - GET_KSM_PAGE_NOLOCK); - if (page) - put_page(page); + folio = ksm_get_folio(stable_node, + KSM_GET_FOLIO_NOLOCK); + if (folio) + folio_put(folio); cond_resched(); } } @@ -2646,36 +2534,46 @@ next_mm: ksm_scan.address = vma->vm_end; while (ksm_scan.address < vma->vm_end) { + struct page *tmp_page = NULL; + struct folio_walk fw; + struct folio *folio; + if (ksm_test_exit(mm)) break; - *page = follow_page(vma, ksm_scan.address, FOLL_GET); - if (IS_ERR_OR_NULL(*page)) { - ksm_scan.address += PAGE_SIZE; - cond_resched(); - continue; + + folio = folio_walk_start(&fw, vma, ksm_scan.address, 0); + if (folio) { + if (!folio_is_zone_device(folio) && + folio_test_anon(folio)) { + folio_get(folio); + tmp_page = fw.page; + } + folio_walk_end(&fw, vma); } - if (is_zone_device_page(*page)) - goto next_page; - if (PageAnon(*page)) { - flush_anon_page(vma, *page, ksm_scan.address); - flush_dcache_page(*page); + + if (tmp_page) { + flush_anon_page(vma, tmp_page, ksm_scan.address); + flush_dcache_page(tmp_page); rmap_item = get_next_rmap_item(mm_slot, ksm_scan.rmap_list, ksm_scan.address); if (rmap_item) { ksm_scan.rmap_list = &rmap_item->rmap_list; - if (should_skip_rmap_item(*page, rmap_item)) + if (should_skip_rmap_item(folio, rmap_item)) { + folio_put(folio); goto next_page; + } ksm_scan.address += PAGE_SIZE; - } else - put_page(*page); + *page = tmp_page; + } else { + folio_put(folio); + } mmap_read_unlock(mm); return rmap_item; } next_page: - put_page(*page); ksm_scan.address += PAGE_SIZE; cond_resched(); } @@ -2747,18 +2645,16 @@ static void ksm_do_scan(unsigned int scan_npages) { struct ksm_rmap_item *rmap_item; struct page *page; - unsigned int npages = scan_npages; - while (npages-- && likely(!freezing(current))) { + while (scan_npages-- && likely(!freezing(current))) { cond_resched(); rmap_item = scan_get_next_rmap_item(&page); if (!rmap_item) return; cmp_and_merge_page(page, rmap_item); put_page(page); + ksm_pages_scanned++; } - - ksm_pages_scanned += scan_npages - npages; } static int ksmd_should_run(void) @@ -3074,7 +2970,7 @@ struct folio *ksm_might_need_to_copy(struct folio *folio, if (!folio_test_uptodate(folio)) return folio; /* let do_swap_page report the error */ - new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false); + new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr); if (new_folio && mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) { folio_put(new_folio); @@ -3084,7 +2980,6 @@ struct folio *ksm_might_need_to_copy(struct folio *folio, if (copy_mc_user_highpage(folio_page(new_folio, 0), page, addr, vma)) { folio_put(new_folio); - memory_failure_queue(folio_pfn(folio), 0); return ERR_PTR(-EHWPOISON); } folio_set_dirty(new_folio); @@ -3172,12 +3067,11 @@ again: /* * Collect processes when the error hit an ksm page. 
*/ -void collect_procs_ksm(struct page *page, struct list_head *to_kill, - int force_early) +void collect_procs_ksm(const struct folio *folio, const struct page *page, + struct list_head *to_kill, int force_early) { struct ksm_stable_node *stable_node; struct ksm_rmap_item *rmap_item; - struct folio *folio = page_folio(page); struct vm_area_struct *vma; struct task_struct *tsk; @@ -3229,11 +3123,11 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio) /* * newfolio->mapping was set in advance; now we need smp_wmb() * to make sure that the new stable_node->kpfn is visible - * to get_ksm_page() before it can see that folio->mapping - * has gone stale (or that folio_test_swapcache has been cleared). + * to ksm_get_folio() before it can see that folio->mapping + * has gone stale (or that the swapcache flag has been cleared). */ smp_wmb(); - set_page_stable_node(&folio->page, NULL); + folio_set_stable_node(folio, NULL); } } #endif /* CONFIG_MIGRATION */ @@ -3256,7 +3150,7 @@ static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node, if (stable_node->kpfn >= start_pfn && stable_node->kpfn < end_pfn) { /* - * Don't get_ksm_page, page has already gone: + * Don't ksm_get_folio, page has already gone: * which is why we keep kpfn instead of page* */ remove_node_from_stable_tree(stable_node); @@ -3344,7 +3238,7 @@ static int ksm_memory_callback(struct notifier_block *self, * Most of the work is done by page migration; but there might * be a few stable_nodes left over, still pointing to struct * pages which have been offlined: prune those from the tree, - * otherwise get_ksm_page() might later try to access a + * otherwise ksm_get_folio() might later try to access a * non-existent struct page. */ ksm_check_stable_tree(mn->start_pfn, @@ -3368,9 +3262,28 @@ static void wait_while_offlining(void) #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_PROC_FS +/* + * The process is mergeable only if any VMA is currently + * applicable to KSM. + * + * The mmap lock must be held in read mode. + */ +bool ksm_process_mergeable(struct mm_struct *mm) +{ + struct vm_area_struct *vma; + + mmap_assert_locked(mm); + VMA_ITERATOR(vmi, mm, 0); + for_each_vma(vmi, vma) + if (vma->vm_flags & VM_MERGEABLE) + return true; + + return false; +} + long ksm_process_profit(struct mm_struct *mm) { - return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE - + return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE - mm->ksm_rmap_items * sizeof(struct ksm_rmap_item); } #endif /* CONFIG_PROC_FS */ @@ -3659,7 +3572,7 @@ KSM_ATTR_RO(pages_skipped); static ssize_t ksm_zero_pages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return sysfs_emit(buf, "%ld\n", ksm_zero_pages); + return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages)); } KSM_ATTR_RO(ksm_zero_pages); @@ -3668,7 +3581,7 @@ static ssize_t general_profit_show(struct kobject *kobj, { long general_profit; - general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE - + general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE - ksm_rmap_items * sizeof(struct ksm_rmap_item); return sysfs_emit(buf, "%ld\n", general_profit); |
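
The slab-init hunk drops the file-local KSM_KMEM_CACHE() wrapper in favour of the generic KMEM_CACHE() macro from <linux/slab.h>, which derives the cache name, object size and alignment from the struct type itself. As a sketch, KMEM_CACHE(ksm_rmap_item, 0) expands to approximately:

	/* approximate expansion of KMEM_CACHE(ksm_rmap_item, 0) */
	kmem_cache_create("ksm_rmap_item",
			  sizeof(struct ksm_rmap_item),
			  __alignof__(struct ksm_rmap_item),
			  0, NULL);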
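
The recurring conversion in this diff replaces follow_page(vma, addr, FOLL_GET) and the break_ksm page-walk ops with the folio_walk API. A minimal sketch of the lookup pattern used by the get_mergeable_page() and scan-loop hunks is below; it assumes the caller holds the mmap lock in read mode and is an illustration, not the exact mm/ksm.c code.

	#include <linux/mm.h>
	#include <linux/pagewalk.h>	/* struct folio_walk, folio_walk_start/end */

	static struct page *ksm_lookup_anon_page(struct vm_area_struct *vma,
						 unsigned long addr)
	{
		struct folio_walk fw;
		struct folio *folio;
		struct page *page = NULL;

		folio = folio_walk_start(&fw, vma, addr, 0);
		if (folio) {
			/* KSM only merges ordinary anonymous folios. */
			if (!folio_is_zone_device(folio) && folio_test_anon(folio)) {
				folio_get(folio);	/* hold our own reference */
				page = fw.page;		/* the exact page mapped at addr */
			}
			/* releases the page-table lock taken by folio_walk_start() */
			folio_walk_end(&fw, vma);
		}
		return page;
	}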
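
The zero-page accounting hunks convert ksm_zero_pages from a plain unsigned long to an atomic_long_t and replace the open-coded "ksm_zero_pages++; mm->ksm_zero_pages++;" in replace_page() with ksm_map_zero_page(mm), while the sysfs and /proc readers switch to atomic_long_read() and mm_ksm_zero_pages(). Those two helpers are defined outside this file-limited diff; assuming the per-mm counter was converted to atomic_long_t as well, they presumably look roughly like the sketch below (the real definitions, likely in include/linux/ksm.h, may differ).

	/* Assumed shape of the helpers; not taken from this diff. */
	static inline void ksm_map_zero_page(struct mm_struct *mm)
	{
		atomic_long_inc(&ksm_zero_pages);
		atomic_long_inc(&mm->ksm_zero_pages);	/* assumes atomic_long_t */
	}

	static inline long mm_ksm_zero_pages(struct mm_struct *mm)
	{
		return atomic_long_read(&mm->ksm_zero_pages);
	}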