Diffstat (limited to 'mm/swap_state.c')
 -rw-r--r--  mm/swap_state.c | 1031
 1 file changed, 736 insertions(+), 295 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c index f24ab0dff554..5f97c6ae70a2 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/swap_state.c * @@ -9,431 +10,871 @@ #include <linux/mm.h> #include <linux/gfp.h> #include <linux/kernel_stat.h> +#include <linux/mempolicy.h> #include <linux/swap.h> -#include <linux/swapops.h> +#include <linux/leafops.h> #include <linux/init.h> #include <linux/pagemap.h> +#include <linux/pagevec.h> #include <linux/backing-dev.h> #include <linux/blkdev.h> -#include <linux/pagevec.h> #include <linux/migrate.h> -#include <linux/page_cgroup.h> - -#include <asm/pgtable.h> +#include <linux/vmalloc.h> +#include <linux/huge_mm.h> +#include <linux/shmem_fs.h> +#include "internal.h" +#include "swap_table.h" +#include "swap.h" /* * swapper_space is a fiction, retained to simplify the path through - * vmscan's shrink_page_list. + * vmscan's shrink_folio_list. */ static const struct address_space_operations swap_aops = { - .writepage = swap_writepage, - .set_page_dirty = swap_set_page_dirty, - .migratepage = migrate_page, + .dirty_folio = noop_dirty_folio, +#ifdef CONFIG_MIGRATION + .migrate_folio = migrate_folio, +#endif }; -static struct backing_dev_info swap_backing_dev_info = { - .name = "swap", - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, +/* Set swap_space as read only as swap cache is handled by swap table */ +struct address_space swap_space __ro_after_init = { + .a_ops = &swap_aops, }; -struct address_space swapper_spaces[MAX_SWAPFILES] = { - [0 ... MAX_SWAPFILES - 1] = { - .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), - .a_ops = &swap_aops, - .backing_dev_info = &swap_backing_dev_info, - } -}; +static bool enable_vma_readahead __read_mostly = true; -#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0) +#define SWAP_RA_ORDER_CEILING 5 -static struct { - unsigned long add_total; - unsigned long del_total; - unsigned long find_success; - unsigned long find_total; -} swap_cache_info; +#define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2) +#define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1) +#define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK +#define SWAP_RA_WIN_MASK (~PAGE_MASK & ~SWAP_RA_HITS_MASK) -unsigned long total_swapcache_pages(void) -{ - int i; - unsigned long ret = 0; +#define SWAP_RA_HITS(v) ((v) & SWAP_RA_HITS_MASK) +#define SWAP_RA_WIN(v) (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT) +#define SWAP_RA_ADDR(v) ((v) & PAGE_MASK) - for (i = 0; i < MAX_SWAPFILES; i++) - ret += swapper_spaces[i].nrpages; - return ret; -} +#define SWAP_RA_VAL(addr, win, hits) \ + (((addr) & PAGE_MASK) | \ + (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) | \ + ((hits) & SWAP_RA_HITS_MASK)) + +/* Initial readahead hits is 4 to start up with a small window */ +#define GET_SWAP_RA_VAL(vma) \ + (atomic_long_read(&(vma)->swap_readahead_info) ? 
: 4) + +static atomic_t swapin_readahead_hits = ATOMIC_INIT(4); void show_swap_cache_info(void) { printk("%lu pages in swap cache\n", total_swapcache_pages()); - printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n", - swap_cache_info.add_total, swap_cache_info.del_total, - swap_cache_info.find_success, swap_cache_info.find_total); - printk("Free swap = %ldkB\n", - get_nr_swap_pages() << (PAGE_SHIFT - 10)); - printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10)); + printk("Free swap = %ldkB\n", K(get_nr_swap_pages())); + printk("Total swap = %lukB\n", K(total_swap_pages)); } -/* - * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space, - * but sets SwapCache flag and private instead of mapping and index. +/** + * swap_cache_get_folio - Looks up a folio in the swap cache. + * @entry: swap entry used for the lookup. + * + * A found folio will be returned unlocked and with its refcount increased. + * + * Context: Caller must ensure @entry is valid and protect the swap device + * with reference count or locks. + * Return: Returns the found folio on success, NULL otherwise. The caller + * must lock nd check if the folio still matches the swap entry before + * use (e.g., folio_matches_swap_entry). */ -int __add_to_swap_cache(struct page *page, swp_entry_t entry) +struct folio *swap_cache_get_folio(swp_entry_t entry) { - int error; - struct address_space *address_space; - - VM_BUG_ON(!PageLocked(page)); - VM_BUG_ON(PageSwapCache(page)); - VM_BUG_ON(!PageSwapBacked(page)); - - page_cache_get(page); - SetPageSwapCache(page); - set_page_private(page, entry.val); - - address_space = swap_address_space(entry); - spin_lock_irq(&address_space->tree_lock); - error = radix_tree_insert(&address_space->page_tree, - entry.val, page); - if (likely(!error)) { - address_space->nrpages++; - __inc_zone_page_state(page, NR_FILE_PAGES); - INC_CACHE_INFO(add_total); - } - spin_unlock_irq(&address_space->tree_lock); - - if (unlikely(error)) { - /* - * Only the context which have set SWAP_HAS_CACHE flag - * would call add_to_swap_cache(). - * So add_to_swap_cache() doesn't returns -EEXIST. - */ - VM_BUG_ON(error == -EEXIST); - set_page_private(page, 0UL); - ClearPageSwapCache(page); - page_cache_release(page); + unsigned long swp_tb; + struct folio *folio; + + for (;;) { + swp_tb = swap_table_get(__swap_entry_to_cluster(entry), + swp_cluster_offset(entry)); + if (!swp_tb_is_folio(swp_tb)) + return NULL; + folio = swp_tb_to_folio(swp_tb); + if (likely(folio_try_get(folio))) + return folio; } - return error; + return NULL; } - -int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) +/** + * swap_cache_get_shadow - Looks up a shadow in the swap cache. + * @entry: swap entry used for the lookup. + * + * Context: Caller must ensure @entry is valid and protect the swap device + * with reference count or locks. + * Return: Returns either NULL or an XA_VALUE (shadow). + */ +void *swap_cache_get_shadow(swp_entry_t entry) { - int error; + unsigned long swp_tb; - error = radix_tree_preload(gfp_mask); - if (!error) { - error = __add_to_swap_cache(page, entry); - radix_tree_preload_end(); - } - return error; + swp_tb = swap_table_get(__swap_entry_to_cluster(entry), + swp_cluster_offset(entry)); + if (swp_tb_is_shadow(swp_tb)) + return swp_tb_to_shadow(swp_tb); + return NULL; } -/* - * This must be called only on pages that have - * been verified to be in the swap cache. +/** + * swap_cache_add_folio - Add a folio into the swap cache. 
+ * @folio: The folio to be added. + * @entry: The swap entry corresponding to the folio. + * @gfp: gfp_mask for XArray node allocation. + * @shadowp: If a shadow is found, return the shadow. + * + * Context: Caller must ensure @entry is valid and protect the swap device + * with reference count or locks. + * The caller also needs to update the corresponding swap_map slots with + * SWAP_HAS_CACHE bit to avoid race or conflict. */ -void __delete_from_swap_cache(struct page *page) +void swap_cache_add_folio(struct folio *folio, swp_entry_t entry, void **shadowp) { - swp_entry_t entry; - struct address_space *address_space; - - VM_BUG_ON(!PageLocked(page)); - VM_BUG_ON(!PageSwapCache(page)); - VM_BUG_ON(PageWriteback(page)); - - entry.val = page_private(page); - address_space = swap_address_space(entry); - radix_tree_delete(&address_space->page_tree, page_private(page)); - set_page_private(page, 0); - ClearPageSwapCache(page); - address_space->nrpages--; - __dec_zone_page_state(page, NR_FILE_PAGES); - INC_CACHE_INFO(del_total); + void *shadow = NULL; + unsigned long old_tb, new_tb; + struct swap_cluster_info *ci; + unsigned int ci_start, ci_off, ci_end; + unsigned long nr_pages = folio_nr_pages(folio); + + VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio); + VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio); + VM_WARN_ON_ONCE_FOLIO(!folio_test_swapbacked(folio), folio); + + new_tb = folio_to_swp_tb(folio); + ci_start = swp_cluster_offset(entry); + ci_end = ci_start + nr_pages; + ci_off = ci_start; + ci = swap_cluster_lock(__swap_entry_to_info(entry), swp_offset(entry)); + do { + old_tb = __swap_table_xchg(ci, ci_off, new_tb); + WARN_ON_ONCE(swp_tb_is_folio(old_tb)); + if (swp_tb_is_shadow(old_tb)) + shadow = swp_tb_to_shadow(old_tb); + } while (++ci_off < ci_end); + + folio_ref_add(folio, nr_pages); + folio_set_swapcache(folio); + folio->swap = entry; + swap_cluster_unlock(ci); + + node_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages); + lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr_pages); + + if (shadowp) + *shadowp = shadow; } /** - * add_to_swap - allocate swap space for a page - * @page: page we want to move to swap + * __swap_cache_del_folio - Removes a folio from the swap cache. + * @ci: The locked swap cluster. + * @folio: The folio. + * @entry: The first swap entry that the folio corresponds to. + * @shadow: shadow value to be filled in the swap cache. + * + * Removes a folio from the swap cache and fills a shadow in place. + * This won't put the folio's refcount. The caller has to do that. * - * Allocate swap space for the page and add the page to the - * swap cache. Caller needs to hold the page lock. + * Context: Caller must ensure the folio is locked and in the swap cache + * using the index of @entry, and lock the cluster that holds the entries. 
*/ -int add_to_swap(struct page *page, struct list_head *list) +void __swap_cache_del_folio(struct swap_cluster_info *ci, struct folio *folio, + swp_entry_t entry, void *shadow) { - swp_entry_t entry; - int err; + unsigned long old_tb, new_tb; + unsigned int ci_start, ci_off, ci_end; + unsigned long nr_pages = folio_nr_pages(folio); + + VM_WARN_ON_ONCE(__swap_entry_to_cluster(entry) != ci); + VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio); + VM_WARN_ON_ONCE_FOLIO(!folio_test_swapcache(folio), folio); + VM_WARN_ON_ONCE_FOLIO(folio_test_writeback(folio), folio); + + new_tb = shadow_swp_to_tb(shadow); + ci_start = swp_cluster_offset(entry); + ci_end = ci_start + nr_pages; + ci_off = ci_start; + do { + /* If shadow is NULL, we sets an empty shadow */ + old_tb = __swap_table_xchg(ci, ci_off, new_tb); + WARN_ON_ONCE(!swp_tb_is_folio(old_tb) || + swp_tb_to_folio(old_tb) != folio); + } while (++ci_off < ci_end); + + folio->swap.val = 0; + folio_clear_swapcache(folio); + node_stat_mod_folio(folio, NR_FILE_PAGES, -nr_pages); + lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr_pages); +} - VM_BUG_ON(!PageLocked(page)); - VM_BUG_ON(!PageUptodate(page)); +/** + * swap_cache_del_folio - Removes a folio from the swap cache. + * @folio: The folio. + * + * Same as __swap_cache_del_folio, but handles lock and refcount. The + * caller must ensure the folio is either clean or has a swap count + * equal to zero, or it may cause data loss. + * + * Context: Caller must ensure the folio is locked and in the swap cache. + */ +void swap_cache_del_folio(struct folio *folio) +{ + struct swap_cluster_info *ci; + swp_entry_t entry = folio->swap; - entry = get_swap_page(); - if (!entry.val) - return 0; + ci = swap_cluster_lock(__swap_entry_to_info(entry), swp_offset(entry)); + __swap_cache_del_folio(ci, folio, entry, NULL); + swap_cluster_unlock(ci); - if (unlikely(PageTransHuge(page))) - if (unlikely(split_huge_page_to_list(page, list))) { - swapcache_free(entry, NULL); - return 0; - } + put_swap_folio(folio, entry); + folio_ref_sub(folio, folio_nr_pages(folio)); +} + +/** + * __swap_cache_replace_folio - Replace a folio in the swap cache. + * @ci: The locked swap cluster. + * @old: The old folio to be replaced. + * @new: The new folio. + * + * Replace an existing folio in the swap cache with a new folio. The + * caller is responsible for setting up the new folio's flag and swap + * entries. Replacement will take the new folio's swap entry value as + * the starting offset to override all slots covered by the new folio. + * + * Context: Caller must ensure both folios are locked, and lock the + * cluster that holds the old folio to be replaced. + */ +void __swap_cache_replace_folio(struct swap_cluster_info *ci, + struct folio *old, struct folio *new) +{ + swp_entry_t entry = new->swap; + unsigned long nr_pages = folio_nr_pages(new); + unsigned int ci_off = swp_cluster_offset(entry); + unsigned int ci_end = ci_off + nr_pages; + unsigned long old_tb, new_tb; + + VM_WARN_ON_ONCE(!folio_test_swapcache(old) || !folio_test_swapcache(new)); + VM_WARN_ON_ONCE(!folio_test_locked(old) || !folio_test_locked(new)); + VM_WARN_ON_ONCE(!entry.val); + + /* Swap cache still stores N entries instead of a high-order entry */ + new_tb = folio_to_swp_tb(new); + do { + old_tb = __swap_table_xchg(ci, ci_off, new_tb); + WARN_ON_ONCE(!swp_tb_is_folio(old_tb) || swp_tb_to_folio(old_tb) != old); + } while (++ci_off < ci_end); /* - * Radix-tree node allocations from PF_MEMALLOC contexts could - * completely exhaust the page allocator. 
__GFP_NOMEMALLOC - * stops emergency reserves from being allocated. - * - * TODO: this could cause a theoretical memory reclaim - * deadlock in the swap out path. - */ - /* - * Add it to the swap cache and mark it dirty + * If the old folio is partially replaced (e.g., splitting a large + * folio, the old folio is shrunk, and new split sub folios replace + * the shrunk part), ensure the new folio doesn't overlap it. */ - err = add_to_swap_cache(page, entry, - __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN); - - if (!err) { /* Success */ - SetPageDirty(page); - return 1; - } else { /* -ENOMEM radix-tree allocation failure */ - /* - * add_to_swap_cache() doesn't return -EEXIST, so we can safely - * clear SWAP_HAS_CACHE flag. - */ - swapcache_free(entry, NULL); - return 0; + if (IS_ENABLED(CONFIG_DEBUG_VM) && + folio_order(old) != folio_order(new)) { + ci_off = swp_cluster_offset(old->swap); + ci_end = ci_off + folio_nr_pages(old); + while (ci_off++ < ci_end) + WARN_ON_ONCE(swp_tb_to_folio(__swap_table_get(ci, ci_off)) != old); } } -/* - * This must be called only on pages that have - * been verified to be in the swap cache and locked. - * It will never put the page into the free list, - * the caller has a reference on the page. +/** + * swap_cache_clear_shadow - Clears a set of shadows in the swap cache. + * @entry: The starting index entry. + * @nr_ents: How many slots need to be cleared. + * + * Context: Caller must ensure the range is valid, all in one single cluster, + * not occupied by any folio, and lock the cluster. */ -void delete_from_swap_cache(struct page *page) +void __swap_cache_clear_shadow(swp_entry_t entry, int nr_ents) { - swp_entry_t entry; - struct address_space *address_space; - - entry.val = page_private(page); + struct swap_cluster_info *ci = __swap_entry_to_cluster(entry); + unsigned int ci_off = swp_cluster_offset(entry), ci_end; + unsigned long old; - address_space = swap_address_space(entry); - spin_lock_irq(&address_space->tree_lock); - __delete_from_swap_cache(page); - spin_unlock_irq(&address_space->tree_lock); - - swapcache_free(entry, page); - page_cache_release(page); + ci_end = ci_off + nr_ents; + do { + old = __swap_table_xchg(ci, ci_off, null_to_swp_tb()); + WARN_ON_ONCE(swp_tb_is_folio(old)); + } while (++ci_off < ci_end); } -/* - * If we are the only user, then try to free up the swap cache. - * - * Its ok to check for PageSwapCache without the page lock +/* + * If we are the only user, then try to free up the swap cache. + * + * Its ok to check the swapcache flag without the folio lock * here because we are going to recheck again inside - * try_to_free_swap() _with_ the lock. + * folio_free_swap() _with_ the lock. * - Marcelo */ -static inline void free_swap_cache(struct page *page) +void free_swap_cache(struct folio *folio) { - if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) { - try_to_free_swap(page); - unlock_page(page); + if (folio_test_swapcache(folio) && !folio_mapped(folio) && + folio_trylock(folio)) { + folio_free_swap(folio); + folio_unlock(folio); } } -/* - * Perform a free_page(), also freeing any swap cache associated with - * this page if it is the last user of the page. +/* + * Freeing a folio and also freeing any swap cache associated with + * this folio if it is the last user. 
*/ -void free_page_and_swap_cache(struct page *page) +void free_folio_and_swap_cache(struct folio *folio) { - free_swap_cache(page); - page_cache_release(page); + free_swap_cache(folio); + if (!is_huge_zero_folio(folio)) + folio_put(folio); } /* * Passed an array of pages, drop them all from swapcache and then release * them. They are removed from the LRU and freed if this is their last use. */ -void free_pages_and_swap_cache(struct page **pages, int nr) +void free_pages_and_swap_cache(struct encoded_page **pages, int nr) { - struct page **pagep = pages; + struct folio_batch folios; + unsigned int refs[PAGEVEC_SIZE]; - lru_add_drain(); - while (nr) { - int todo = min(nr, PAGEVEC_SIZE); - int i; - - for (i = 0; i < todo; i++) - free_swap_cache(pagep[i]); - release_pages(pagep, todo, 0); - pagep += todo; - nr -= todo; + folio_batch_init(&folios); + for (int i = 0; i < nr; i++) { + struct folio *folio = page_folio(encoded_page_ptr(pages[i])); + + free_swap_cache(folio); + refs[folios.nr] = 1; + if (unlikely(encoded_page_flags(pages[i]) & + ENCODED_PAGE_BIT_NR_PAGES_NEXT)) + refs[folios.nr] = encoded_nr_pages(pages[++i]); + + if (folio_batch_add(&folios, folio) == 0) + folios_put_refs(&folios, refs); } + if (folios.nr) + folios_put_refs(&folios, refs); } -/* - * Lookup a swap entry in the swap cache. A found page will be returned - * unlocked and with its refcount incremented - we rely on the kernel - * lock getting page table operations atomic even if we drop the page - * lock before returning. - */ -struct page * lookup_swap_cache(swp_entry_t entry) +static inline bool swap_use_vma_readahead(void) { - struct page *page; + return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap); +} - page = find_get_page(swap_address_space(entry), entry.val); +/** + * swap_update_readahead - Update the readahead statistics of VMA or globally. + * @folio: the swap cache folio that just got hit. + * @vma: the VMA that should be updated, could be NULL for global update. + * @addr: the addr that triggered the swapin, ignored if @vma is NULL. + */ +void swap_update_readahead(struct folio *folio, struct vm_area_struct *vma, + unsigned long addr) +{ + bool readahead, vma_ra = swap_use_vma_readahead(); - if (page) - INC_CACHE_INFO(find_success); + /* + * At the moment, we don't support PG_readahead for anon THP + * so let's bail out rather than confusing the readahead stat. + */ + if (unlikely(folio_test_large(folio))) + return; + + readahead = folio_test_clear_readahead(folio); + if (vma && vma_ra) { + unsigned long ra_val; + int win, hits; + + ra_val = GET_SWAP_RA_VAL(vma); + win = SWAP_RA_WIN(ra_val); + hits = SWAP_RA_HITS(ra_val); + if (readahead) + hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX); + atomic_long_set(&vma->swap_readahead_info, + SWAP_RA_VAL(addr, win, hits)); + } - INC_CACHE_INFO(find_total); - return page; + if (readahead) { + count_vm_event(SWAP_RA_HIT); + if (!vma || !vma_ra) + atomic_inc(&swapin_readahead_hits); + } } -/* - * Locate a page of swap in physical memory, reserving swap cache space - * and reading the disk if it is not already cached. - * A failure return means that either the page allocation failed or that - * the swap entry is no longer in use. 
- */ -struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, - struct vm_area_struct *vma, unsigned long addr) +struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, + struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated, + bool skip_if_exists) { - struct page *found_page, *new_page = NULL; - int err; + struct swap_info_struct *si = __swap_entry_to_info(entry); + struct folio *folio; + struct folio *new_folio = NULL; + struct folio *result = NULL; + void *shadow = NULL; + + *new_page_allocated = false; + for (;;) { + int err; - do { /* - * First check the swap cache. Since this is normally - * called after lookup_swap_cache() failed, re-calling - * that would confuse statistics. + * Check the swap cache first, if a cached folio is found, + * return it unlocked. The caller will lock and check it. */ - found_page = find_get_page(swap_address_space(entry), - entry.val); - if (found_page) - break; + folio = swap_cache_get_folio(entry); + if (folio) + goto got_folio; /* - * Get a new page to read into from swap. + * Just skip read ahead for unused swap slot. */ - if (!new_page) { - new_page = alloc_page_vma(gfp_mask, vma, addr); - if (!new_page) - break; /* Out of memory */ - } + if (!swap_entry_swapped(si, entry)) + goto put_and_return; /* - * call radix_tree_preload() while we can wait. + * Get a new folio to read into from swap. Allocate it now if + * new_folio not exist, before marking swap_map SWAP_HAS_CACHE, + * when -EEXIST will cause any racers to loop around until we + * add it to cache. */ - err = radix_tree_preload(gfp_mask & GFP_KERNEL); - if (err) - break; + if (!new_folio) { + new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id()); + if (!new_folio) + goto put_and_return; + } /* * Swap entry may have been freed since our caller observed it. */ - err = swapcache_prepare(entry); - if (err == -EEXIST) { - radix_tree_preload_end(); - /* - * We might race against get_swap_page() and stumble - * across a SWAP_HAS_CACHE swap_map entry whose page - * has not been brought into the swapcache yet, while - * the other end is scheduled away waiting on discard - * I/O completion at scan_swap_map(). - * - * In order to avoid turning this transitory state - * into a permanent loop around this -EEXIST case - * if !CONFIG_PREEMPT and the I/O completion happens - * to be waiting on the CPU waitqueue where we are now - * busy looping, we just conditionally invoke the - * scheduler here, if there are some more important - * tasks to run. - */ - cond_resched(); - continue; - } - if (err) { /* swp entry is obsolete ? */ - radix_tree_preload_end(); + err = swapcache_prepare(entry, 1); + if (!err) break; - } + else if (err != -EEXIST) + goto put_and_return; - /* May fail (-ENOMEM) if radix-tree node allocation failed. */ - __set_page_locked(new_page); - SetPageSwapBacked(new_page); - err = __add_to_swap_cache(new_page, entry); - if (likely(!err)) { - radix_tree_preload_end(); - /* - * Initiate read into locked page and return. - */ - lru_cache_add_anon(new_page); - swap_readpage(new_page); - return new_page; - } - radix_tree_preload_end(); - ClearPageSwapBacked(new_page); - __clear_page_locked(new_page); /* - * add_to_swap_cache() doesn't return -EEXIST, so we can safely - * clear SWAP_HAS_CACHE flag. + * Protect against a recursive call to __read_swap_cache_async() + * on the same entry waiting forever here because SWAP_HAS_CACHE + * is set but the folio is not the swap cache yet. 
This can + * happen today if mem_cgroup_swapin_charge_folio() below + * triggers reclaim through zswap, which may call + * __read_swap_cache_async() in the writeback path. + */ + if (skip_if_exists) + goto put_and_return; + + /* + * We might race against __swap_cache_del_folio(), and + * stumble across a swap_map entry whose SWAP_HAS_CACHE + * has not yet been cleared. Or race against another + * __read_swap_cache_async(), which has set SWAP_HAS_CACHE + * in swap_map, but not yet added its folio to swap cache. + */ + schedule_timeout_uninterruptible(1); + } + + /* + * The swap entry is ours to swap in. Prepare the new folio. + */ + __folio_set_locked(new_folio); + __folio_set_swapbacked(new_folio); + + if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry)) + goto fail_unlock; + + swap_cache_add_folio(new_folio, entry, &shadow); + memcg1_swapin(entry, 1); + + if (shadow) + workingset_refault(new_folio, shadow); + + /* Caller will initiate read into locked new_folio */ + folio_add_lru(new_folio); + *new_page_allocated = true; + folio = new_folio; +got_folio: + result = folio; + goto put_and_return; + +fail_unlock: + put_swap_folio(new_folio, entry); + folio_unlock(new_folio); +put_and_return: + if (!(*new_page_allocated) && new_folio) + folio_put(new_folio); + return result; +} + +/* + * Locate a page of swap in physical memory, reserving swap cache space + * and reading the disk if it is not already cached. + * A failure return means that either the page allocation failed or that + * the swap entry is no longer in use. + */ +struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, + struct vm_area_struct *vma, unsigned long addr, + struct swap_iocb **plug) +{ + struct swap_info_struct *si; + bool page_allocated; + struct mempolicy *mpol; + pgoff_t ilx; + struct folio *folio; + + si = get_swap_device(entry); + if (!si) + return NULL; + + mpol = get_vma_policy(vma, addr, 0, &ilx); + folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, + &page_allocated, false); + mpol_cond_put(mpol); + + if (page_allocated) + swap_read_folio(folio, plug); + + put_swap_device(si); + return folio; +} + +static unsigned int __swapin_nr_pages(unsigned long prev_offset, + unsigned long offset, + int hits, + int max_pages, + int prev_win) +{ + unsigned int pages, last_ra; + + /* + * This heuristic has been found to work well on both sequential and + * random loads, swapping to hard disk or to SSD: please don't ask + * what the "+ 2" means, it just happens to work well, that's all. + */ + pages = hits + 2; + if (pages == 2) { + /* + * We can have no readahead hits to judge by: but must not get + * stuck here forever, so check for an adjacent offset instead + * (and don't even bother to check whether swap type is same). 
*/ - swapcache_free(entry, NULL); - } while (err != -ENOMEM); + if (offset != prev_offset + 1 && offset != prev_offset - 1) + pages = 1; + } else { + unsigned int roundup = 4; + while (roundup < pages) + roundup <<= 1; + pages = roundup; + } + + if (pages > max_pages) + pages = max_pages; + + /* Don't shrink readahead too fast */ + last_ra = prev_win / 2; + if (pages < last_ra) + pages = last_ra; + + return pages; +} + +static unsigned long swapin_nr_pages(unsigned long offset) +{ + static unsigned long prev_offset; + unsigned int hits, pages, max_pages; + static atomic_t last_readahead_pages; + + max_pages = 1 << READ_ONCE(page_cluster); + if (max_pages <= 1) + return 1; + + hits = atomic_xchg(&swapin_readahead_hits, 0); + pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits, + max_pages, + atomic_read(&last_readahead_pages)); + if (!hits) + WRITE_ONCE(prev_offset, offset); + atomic_set(&last_readahead_pages, pages); - if (new_page) - page_cache_release(new_page); - return found_page; + return pages; } /** - * swapin_readahead - swap in pages in hope we need them soon + * swap_cluster_readahead - swap in pages in hope we need them soon * @entry: swap entry of this memory * @gfp_mask: memory allocation flags - * @vma: user vma this address belongs to - * @addr: target address for mempolicy + * @mpol: NUMA memory allocation policy to be applied + * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE * - * Returns the struct page for entry and addr, after queueing swapin. + * Returns the struct folio for entry and addr, after queueing swapin. * * Primitive swap readahead code. We simply read an aligned block of * (1 << page_cluster) entries in the swap area. This method is chosen * because it doesn't cost us any seek time. We also make sure to queue * the 'original' request together with the readahead ones... * - * This has been extended to use the NUMA policies from the mm triggering - * the readahead. - * - * Caller must hold down_read on the vma->vm_mm if vma is not NULL. + * Note: it is intentional that the same NUMA policy and interleave index + * are used for every page of the readahead: neighbouring pages on swap + * are fairly likely to have been swapped out from the same node. */ -struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, - struct vm_area_struct *vma, unsigned long addr) +struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, + struct mempolicy *mpol, pgoff_t ilx) { - struct page *page; - unsigned long offset = swp_offset(entry); + struct folio *folio; + unsigned long entry_offset = swp_offset(entry); + unsigned long offset = entry_offset; unsigned long start_offset, end_offset; - unsigned long mask = (1UL << page_cluster) - 1; + unsigned long mask; + struct swap_info_struct *si = __swap_entry_to_info(entry); struct blk_plug plug; + struct swap_iocb *splug = NULL; + bool page_allocated; + + mask = swapin_nr_pages(offset) - 1; + if (!mask) + goto skip; /* Read a page_cluster sized and aligned cluster around offset. */ start_offset = offset & ~mask; end_offset = offset | mask; if (!start_offset) /* First page is swap header. 
*/ start_offset++; + if (end_offset >= si->max) + end_offset = si->max - 1; blk_start_plug(&plug); for (offset = start_offset; offset <= end_offset ; offset++) { /* Ok, do the async read-ahead now */ - page = read_swap_cache_async(swp_entry(swp_type(entry), offset), - gfp_mask, vma, addr); - if (!page) + folio = __read_swap_cache_async( + swp_entry(swp_type(entry), offset), + gfp_mask, mpol, ilx, &page_allocated, false); + if (!folio) continue; - page_cache_release(page); + if (page_allocated) { + swap_read_folio(folio, &splug); + if (offset != entry_offset) { + folio_set_readahead(folio); + count_vm_event(SWAP_RA); + } + } + folio_put(folio); } blk_finish_plug(&plug); - + swap_read_unplug(splug); lru_add_drain(); /* Push any new pages onto the LRU now */ - return read_swap_cache_async(entry, gfp_mask, vma, addr); +skip: + /* The page was likely read above, so no need for plugging here */ + folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, + &page_allocated, false); + if (unlikely(page_allocated)) + swap_read_folio(folio, NULL); + return folio; +} + +static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start, + unsigned long *end) +{ + struct vm_area_struct *vma = vmf->vma; + unsigned long ra_val; + unsigned long faddr, prev_faddr, left, right; + unsigned int max_win, hits, prev_win, win; + + max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING); + if (max_win == 1) + return 1; + + faddr = vmf->address; + ra_val = GET_SWAP_RA_VAL(vma); + prev_faddr = SWAP_RA_ADDR(ra_val); + prev_win = SWAP_RA_WIN(ra_val); + hits = SWAP_RA_HITS(ra_val); + win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits, + max_win, prev_win); + atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0)); + if (win == 1) + return 1; + + if (faddr == prev_faddr + PAGE_SIZE) + left = faddr; + else if (prev_faddr == faddr + PAGE_SIZE) + left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE; + else + left = faddr - (((win - 1) / 2) << PAGE_SHIFT); + right = left + (win << PAGE_SHIFT); + if ((long)left < 0) + left = 0; + *start = max3(left, vma->vm_start, faddr & PMD_MASK); + *end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE); + + return win; +} + +/** + * swap_vma_readahead - swap in pages in hope we need them soon + * @targ_entry: swap entry of the targeted memory + * @gfp_mask: memory allocation flags + * @mpol: NUMA memory allocation policy to be applied + * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE + * @vmf: fault information + * + * Returns the struct folio for entry and addr, after queueing swapin. + * + * Primitive swap readahead code. We simply read in a few pages whose + * virtual addresses are around the fault address in the same vma. + * + * Caller must hold read mmap_lock if vmf->vma is not NULL. 
+ * + */ +static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, + struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf) +{ + struct blk_plug plug; + struct swap_iocb *splug = NULL; + struct folio *folio; + pte_t *pte = NULL, pentry; + int win; + unsigned long start, end, addr; + pgoff_t ilx; + bool page_allocated; + + win = swap_vma_ra_win(vmf, &start, &end); + if (win == 1) + goto skip; + + ilx = targ_ilx - PFN_DOWN(vmf->address - start); + + blk_start_plug(&plug); + for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) { + struct swap_info_struct *si = NULL; + softleaf_t entry; + + if (!pte++) { + pte = pte_offset_map(vmf->pmd, addr); + if (!pte) + break; + } + pentry = ptep_get_lockless(pte); + entry = softleaf_from_pte(pentry); + + if (!softleaf_is_swap(entry)) + continue; + pte_unmap(pte); + pte = NULL; + /* + * Readahead entry may come from a device that we are not + * holding a reference to, try to grab a reference, or skip. + */ + if (swp_type(entry) != swp_type(targ_entry)) { + si = get_swap_device(entry); + if (!si) + continue; + } + folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, + &page_allocated, false); + if (si) + put_swap_device(si); + if (!folio) + continue; + if (page_allocated) { + swap_read_folio(folio, &splug); + if (addr != vmf->address) { + folio_set_readahead(folio); + count_vm_event(SWAP_RA); + } + } + folio_put(folio); + } + if (pte) + pte_unmap(pte); + blk_finish_plug(&plug); + swap_read_unplug(splug); + lru_add_drain(); +skip: + /* The folio was likely read above, so no need for plugging here */ + folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx, + &page_allocated, false); + if (unlikely(page_allocated)) + swap_read_folio(folio, NULL); + return folio; +} + +/** + * swapin_readahead - swap in pages in hope we need them soon + * @entry: swap entry of this memory + * @gfp_mask: memory allocation flags + * @vmf: fault information + * + * Returns the struct folio for entry and addr, after queueing swapin. + * + * It's a main entry function for swap readahead. By the configuration, + * it will read ahead blocks by cluster-based(ie, physical disk based) + * or vma-based(ie, virtual address based on faulty address) readahead. + */ +struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, + struct vm_fault *vmf) +{ + struct mempolicy *mpol; + pgoff_t ilx; + struct folio *folio; + + mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx); + folio = swap_use_vma_readahead() ? 
+ swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) : + swap_cluster_readahead(entry, gfp_mask, mpol, ilx); + mpol_cond_put(mpol); + + return folio; +} + +#ifdef CONFIG_SYSFS +static ssize_t vma_ra_enabled_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%s\n", str_true_false(enable_vma_readahead)); +} +static ssize_t vma_ra_enabled_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + ssize_t ret; + + ret = kstrtobool(buf, &enable_vma_readahead); + if (ret) + return ret; + + return count; +} +static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled); + +static struct attribute *swap_attrs[] = { + &vma_ra_enabled_attr.attr, + NULL, +}; + +static const struct attribute_group swap_attr_group = { + .attrs = swap_attrs, +}; + +static int __init swap_init(void) +{ + int err; + struct kobject *swap_kobj; + + swap_kobj = kobject_create_and_add("swap", mm_kobj); + if (!swap_kobj) { + pr_err("failed to create swap kobject\n"); + return -ENOMEM; + } + err = sysfs_create_group(swap_kobj, &swap_attr_group); + if (err) { + pr_err("failed to register swap group\n"); + goto delete_obj; + } + /* Swap cache writeback is LRU based, no tags for it */ + mapping_set_no_writeback_tags(&swap_space); + return 0; + +delete_obj: + kobject_put(swap_kobj); + return err; } +subsys_initcall(swap_init); +#endif |
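
A minimal caller-side sketch (not part of the patch above) of how the new swap_cache_get_folio() lookup is meant to be used, going by its kerneldoc in this diff: the folio comes back unlocked with an elevated refcount, so the caller locks it and re-checks it against the swap entry (the kerneldoc names folio_matches_swap_entry() for this); folio_lock()/folio_unlock()/folio_put() are the standard folio helpers.

	/* Look up @entry in the swap cache; NULL means not cached. */
	struct folio *folio = swap_cache_get_folio(entry);

	if (folio) {
		folio_lock(folio);
		/* The folio may have been removed or replaced while unlocked. */
		if (!folio_matches_swap_entry(folio, entry)) {
			folio_unlock(folio);
			folio_put(folio);
			folio = NULL;	/* fall back to swapin / retry */
		}
	}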
