Diffstat (limited to 'mm/page_io.c')
-rw-r--r--	mm/page_io.c	709
1 file changed, 507 insertions(+), 202 deletions(-)
diff --git a/mm/page_io.c b/mm/page_io.c
index ba05b64e5d8d..3c342db77ce3 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  linux/mm/page_io.c
  *
@@ -17,119 +18,60 @@
 #include <linux/swap.h>
 #include <linux/bio.h>
 #include <linux/swapops.h>
-#include <linux/buffer_head.h>
 #include <linux/writeback.h>
-#include <linux/frontswap.h>
-#include <linux/aio.h>
 #include <linux/blkdev.h>
-#include <asm/pgtable.h>
-
-static struct bio *get_swap_bio(gfp_t gfp_flags,
-				struct page *page, bio_end_io_t end_io)
-{
-	struct bio *bio;
-
-	bio = bio_alloc(gfp_flags, 1);
-	if (bio) {
-		bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
-		bio->bi_sector <<= PAGE_SHIFT - 9;
-		bio->bi_io_vec[0].bv_page = page;
-		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
-		bio->bi_io_vec[0].bv_offset = 0;
-		bio->bi_vcnt = 1;
-		bio->bi_size = PAGE_SIZE;
-		bio->bi_end_io = end_io;
-	}
-	return bio;
-}
-
-void end_swap_bio_write(struct bio *bio, int err)
+#include <linux/psi.h>
+#include <linux/uio.h>
+#include <linux/sched/task.h>
+#include <linux/delayacct.h>
+#include <linux/zswap.h>
+#include "swap.h"
+
+static void __end_swap_bio_write(struct bio *bio)
 {
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct page *page = bio->bi_io_vec[0].bv_page;
+	struct folio *folio = bio_first_folio_all(bio);
 
-	if (!uptodate) {
-		SetPageError(page);
+	if (bio->bi_status) {
 		/*
 		 * We failed to write the page out to swap-space.
 		 * Re-dirty the page in order to avoid it being reclaimed.
 		 * Also print a dire warning that things will go BAD (tm)
 		 * very quickly.
 		 *
-		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
+		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
-		set_page_dirty(page);
-		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
-				imajor(bio->bi_bdev->bd_inode),
-				iminor(bio->bi_bdev->bd_inode),
-				(unsigned long long)bio->bi_sector);
-		ClearPageReclaim(page);
+		folio_mark_dirty(folio);
+		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
+				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
+				     (unsigned long long)bio->bi_iter.bi_sector);
+		folio_clear_reclaim(folio);
 	}
-	end_page_writeback(page);
-	bio_put(bio);
+	folio_end_writeback(folio);
 }
 
-void end_swap_bio_read(struct bio *bio, int err)
+static void end_swap_bio_write(struct bio *bio)
 {
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct page *page = bio->bi_io_vec[0].bv_page;
-
-	if (!uptodate) {
-		SetPageError(page);
-		ClearPageUptodate(page);
-		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
-				imajor(bio->bi_bdev->bd_inode),
-				iminor(bio->bi_bdev->bd_inode),
-				(unsigned long long)bio->bi_sector);
-		goto out;
-	}
+	__end_swap_bio_write(bio);
+	bio_put(bio);
+}
 
-	SetPageUptodate(page);
+static void __end_swap_bio_read(struct bio *bio)
+{
+	struct folio *folio = bio_first_folio_all(bio);
 
-	/*
-	 * There is no guarantee that the page is in swap cache - the software
-	 * suspend code (at least) uses end_swap_bio_read() against a non-
-	 * swapcache page.  So we must check PG_swapcache before proceeding with
-	 * this optimization.
-	 */
-	if (likely(PageSwapCache(page))) {
-		struct swap_info_struct *sis;
-
-		sis = page_swap_info(page);
-		if (sis->flags & SWP_BLKDEV) {
-			/*
-			 * The swap subsystem performs lazy swap slot freeing,
-			 * expecting that the page will be swapped out again.
-			 * So we can avoid an unnecessary write if the page
-			 * isn't redirtied.
-			 * This is good for real swap storage because we can
-			 * reduce unnecessary I/O and enhance wear-leveling
-			 * if an SSD is used as the swap device.
-			 * But if in-memory swap device (eg zram) is used,
-			 * this causes a duplicated copy between uncompressed
-			 * data in VM-owned memory and compressed data in
-			 * zram-owned memory.  So let's free zram-owned memory
-			 * and make the VM-owned decompressed page *dirty*,
-			 * so the page should be swapped out somewhere again if
-			 * we again wish to reclaim it.
-			 */
-			struct gendisk *disk = sis->bdev->bd_disk;
-			if (disk->fops->swap_slot_free_notify) {
-				swp_entry_t entry;
-				unsigned long offset;
-
-				entry.val = page_private(page);
-				offset = swp_offset(entry);
-
-				SetPageDirty(page);
-				disk->fops->swap_slot_free_notify(sis->bdev,
-						offset);
-			}
-		}
+	if (bio->bi_status) {
+		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
+				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
+				     (unsigned long long)bio->bi_iter.bi_sector);
+	} else {
+		folio_mark_uptodate(folio);
 	}
+	folio_unlock(folio);
+}
 
-out:
-	unlock_page(page);
+static void end_swap_bio_read(struct bio *bio)
+{
+	__end_swap_bio_read(bio);
 	bio_put(bio);
 }
 
@@ -153,7 +95,7 @@ int generic_swapfile_activate(struct swap_info_struct *sis,
 	blocks_per_page = PAGE_SIZE >> blkbits;
 
 	/*
-	 * Map all the blocks into the extent list.  This code doesn't try
+	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
 	probe_block = 0;
@@ -164,8 +106,11 @@ int generic_swapfile_activate(struct swap_info_struct *sis,
 		unsigned block_in_page;
 		sector_t first_block;
 
-		first_block = bmap(inode, probe_block);
-		if (first_block == 0)
+		cond_resched();
+
+		first_block = probe_block;
+		ret = bmap(inode, &first_block);
+		if (ret || !first_block)
 			goto bad_bmap;
 
 		/*
@@ -180,9 +125,11 @@ int generic_swapfile_activate(struct swap_info_struct *sis,
 				block_in_page++) {
 			sector_t block;
 
-			block = bmap(inode, probe_block + block_in_page);
-			if (block == 0)
+			block = probe_block + block_in_page;
+			ret = bmap(inode, &block);
+			if (ret || !block)
 				goto bad_bmap;
+
 			if (block != first_block + block_in_page) {
 				/* Discontiguity */
 				probe_block++;
@@ -216,149 +163,507 @@ reprobe:
 		page_no = 1;	/* force Empty message */
 	sis->max = page_no;
 	sis->pages = page_no - 1;
-	sis->highest_bit = page_no - 1;
 out:
 	return ret;
 bad_bmap:
-	printk(KERN_ERR "swapon: swapfile has holes\n");
+	pr_err("swapon: swapfile has holes\n");
 	ret = -EINVAL;
 	goto out;
 }
 
+static bool is_folio_zero_filled(struct folio *folio)
+{
+	unsigned int pos, last_pos;
+	unsigned long *data;
+	unsigned int i;
+
+	last_pos = PAGE_SIZE / sizeof(*data) - 1;
+	for (i = 0; i < folio_nr_pages(folio); i++) {
+		data = kmap_local_folio(folio, i * PAGE_SIZE);
+		/*
+		 * Check last word first, in case the page is zero-filled at
+		 * the start and has non-zero data at the end, which is common
+		 * in real-world workloads.
+		 */
+		if (data[last_pos]) {
+			kunmap_local(data);
+			return false;
+		}
+		for (pos = 0; pos < last_pos; pos++) {
+			if (data[pos]) {
+				kunmap_local(data);
+				return false;
+			}
+		}
+		kunmap_local(data);
+	}
+
+	return true;
+}
+
+static void swap_zeromap_folio_set(struct folio *folio)
+{
+	struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);
+	struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
+	int nr_pages = folio_nr_pages(folio);
+	swp_entry_t entry;
+	unsigned int i;
+
+	for (i = 0; i < folio_nr_pages(folio); i++) {
+		entry = page_swap_entry(folio_page(folio, i));
+		set_bit(swp_offset(entry), sis->zeromap);
+	}
+
+	count_vm_events(SWPOUT_ZERO, nr_pages);
+	if (objcg) {
+		count_objcg_events(objcg, SWPOUT_ZERO, nr_pages);
+		obj_cgroup_put(objcg);
+	}
+}
+
+static void swap_zeromap_folio_clear(struct folio *folio)
+{
+	struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
+	swp_entry_t entry;
+	unsigned int i;
+
+	for (i = 0; i < folio_nr_pages(folio); i++) {
+		entry = page_swap_entry(folio_page(folio, i));
+		clear_bit(swp_offset(entry), sis->zeromap);
+	}
+}
+
 /*
  * We may have stale swap cache pages in memory: notice
  * them here and get rid of the unnecessary final write.
  */
-int swap_writepage(struct page *page, struct writeback_control *wbc)
+int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug)
 {
 	int ret = 0;
 
-	if (try_to_free_swap(page)) {
-		unlock_page(page);
-		goto out;
+	if (folio_free_swap(folio))
+		goto out_unlock;
+
+	/*
+	 * Arch code may have to preserve more data than just the page
+	 * contents, e.g. memory tags.
+	 */
+	ret = arch_prepare_to_swap(folio);
+	if (ret) {
+		folio_mark_dirty(folio);
+		goto out_unlock;
 	}
-	if (frontswap_store(page) == 0) {
-		set_page_writeback(page);
-		unlock_page(page);
-		end_page_writeback(page);
-		goto out;
+
+	/*
+	 * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages.
+	 * The bits in zeromap are protected by the locked swapcache folio
+	 * and atomic updates are used to protect against read-modify-write
+	 * corruption due to other zero swap entries seeing concurrent updates.
+	 */
+	if (is_folio_zero_filled(folio)) {
+		swap_zeromap_folio_set(folio);
+		goto out_unlock;
 	}
-	ret = __swap_writepage(page, wbc, end_swap_bio_write);
-out:
+
+	/*
+	 * Clear bits this folio occupies in the zeromap to prevent zero data
+	 * being read in from any previous zero writes that occupied the same
+	 * swap entries.
+	 */
+	swap_zeromap_folio_clear(folio);
+
+	if (zswap_store(folio)) {
+		count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT);
+		goto out_unlock;
+	}
+	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
+		folio_mark_dirty(folio);
+		return AOP_WRITEPAGE_ACTIVATE;
+	}
+
+	__swap_writepage(folio, swap_plug);
+	return 0;
+out_unlock:
+	folio_unlock(folio);
 	return ret;
 }
 
-int __swap_writepage(struct page *page, struct writeback_control *wbc,
-	void (*end_write_func)(struct bio *, int))
+static inline void count_swpout_vm_event(struct folio *folio)
 {
-	struct bio *bio;
-	int ret = 0, rw = WRITE;
-	struct swap_info_struct *sis = page_swap_info(page);
-
-	if (sis->flags & SWP_FILE) {
-		struct kiocb kiocb;
-		struct file *swap_file = sis->swap_file;
-		struct address_space *mapping = swap_file->f_mapping;
-		struct iovec iov = {
-			.iov_base = kmap(page),
-			.iov_len  = PAGE_SIZE,
-		};
-
-		init_sync_kiocb(&kiocb, swap_file);
-		kiocb.ki_pos = page_file_offset(page);
-		kiocb.ki_left = PAGE_SIZE;
-		kiocb.ki_nbytes = PAGE_SIZE;
-
-		set_page_writeback(page);
-		unlock_page(page);
-		ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
-						&kiocb, &iov,
-						kiocb.ki_pos, 1);
-		kunmap(page);
-		if (ret == PAGE_SIZE) {
-			count_vm_event(PSWPOUT);
-			ret = 0;
-		} else {
-			/*
-			 * In the case of swap-over-nfs, this can be a
-			 * temporary failure if the system has limited
-			 * memory for allocating transmit buffers.
-			 * Mark the page dirty and avoid
-			 * rotate_reclaimable_page but rate-limit the
-			 * messages but do not flag PageError like
-			 * the normal direct-to-bio case as it could
-			 * be temporary.
-			 */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	if (unlikely(folio_test_pmd_mappable(folio))) {
+		count_memcg_folio_events(folio, THP_SWPOUT, 1);
+		count_vm_event(THP_SWPOUT);
+	}
+#endif
+	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
+	count_memcg_folio_events(folio, PSWPOUT, folio_nr_pages(folio));
+	count_vm_events(PSWPOUT, folio_nr_pages(folio));
+}
+
+#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
+static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
+{
+	struct cgroup_subsys_state *css;
+	struct mem_cgroup *memcg;
+
+	memcg = folio_memcg(folio);
+	if (!memcg)
+		return;
+
+	rcu_read_lock();
+	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
+	bio_associate_blkg_from_css(bio, css);
+	rcu_read_unlock();
+}
+#else
+#define bio_associate_blkg_from_page(bio, folio)	do { } while (0)
+#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
+
+struct swap_iocb {
+	struct kiocb		iocb;
+	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
+	int			pages;
+	int			len;
+};
+static mempool_t *sio_pool;
+
+int sio_pool_init(void)
+{
+	if (!sio_pool) {
+		mempool_t *pool = mempool_create_kmalloc_pool(
+			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
+		if (cmpxchg(&sio_pool, NULL, pool))
+			mempool_destroy(pool);
+	}
+	if (!sio_pool)
+		return -ENOMEM;
+	return 0;
+}
+
+static void sio_write_complete(struct kiocb *iocb, long ret)
+{
+	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
+	struct page *page = sio->bvec[0].bv_page;
+	int p;
+
+	if (ret != sio->len) {
+		/*
+		 * In the case of swap-over-nfs, this can be a
+		 * temporary failure if the system has limited
+		 * memory for allocating transmit buffers.
+		 * Mark the page dirty and avoid
+		 * folio_rotate_reclaimable but rate-limit the
+		 * messages.
+		 */
+		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
+				   ret, swap_dev_pos(page_swap_entry(page)));
+		for (p = 0; p < sio->pages; p++) {
+			page = sio->bvec[p].bv_page;
 			set_page_dirty(page);
 			ClearPageReclaim(page);
-			pr_err_ratelimited("Write error on dio swapfile (%Lu)\n",
-				page_file_offset(page));
 		}
-		end_page_writeback(page);
-		return ret;
 	}
 
-	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
-	if (bio == NULL) {
-		set_page_dirty(page);
-		unlock_page(page);
-		ret = -ENOMEM;
-		goto out;
+	for (p = 0; p < sio->pages; p++)
+		end_page_writeback(sio->bvec[p].bv_page);
+
+	mempool_free(sio, sio_pool);
+}
+
+static void swap_writepage_fs(struct folio *folio, struct swap_iocb **swap_plug)
+{
+	struct swap_iocb *sio = swap_plug ? *swap_plug : NULL;
+	struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
+	struct file *swap_file = sis->swap_file;
+	loff_t pos = swap_dev_pos(folio->swap);
+
+	count_swpout_vm_event(folio);
+	folio_start_writeback(folio);
+	folio_unlock(folio);
+	if (sio) {
+		if (sio->iocb.ki_filp != swap_file ||
+		    sio->iocb.ki_pos + sio->len != pos) {
+			swap_write_unplug(sio);
+			sio = NULL;
+		}
 	}
-	if (wbc->sync_mode == WB_SYNC_ALL)
-		rw |= REQ_SYNC;
-	count_vm_event(PSWPOUT);
-	set_page_writeback(page);
-	unlock_page(page);
-	submit_bio(rw, bio);
-out:
-	return ret;
+	if (!sio) {
+		sio = mempool_alloc(sio_pool, GFP_NOIO);
+		init_sync_kiocb(&sio->iocb, swap_file);
+		sio->iocb.ki_complete = sio_write_complete;
+		sio->iocb.ki_pos = pos;
+		sio->pages = 0;
+		sio->len = 0;
+	}
+	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+	sio->len += folio_size(folio);
+	sio->pages += 1;
+	if (sio->pages == ARRAY_SIZE(sio->bvec) || !swap_plug) {
+		swap_write_unplug(sio);
+		sio = NULL;
+	}
+	if (swap_plug)
+		*swap_plug = sio;
+}
+
+static void swap_writepage_bdev_sync(struct folio *folio,
+		struct swap_info_struct *sis)
+{
+	struct bio_vec bv;
+	struct bio bio;
+
+	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_WRITE | REQ_SWAP);
+	bio.bi_iter.bi_sector = swap_folio_sector(folio);
+	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
+
+	bio_associate_blkg_from_page(&bio, folio);
+	count_swpout_vm_event(folio);
+
+	folio_start_writeback(folio);
+	folio_unlock(folio);
+
+	submit_bio_wait(&bio);
+	__end_swap_bio_write(&bio);
 }
 
-int swap_readpage(struct page *page)
+static void swap_writepage_bdev_async(struct folio *folio,
+		struct swap_info_struct *sis)
 {
 	struct bio *bio;
-	int ret = 0;
-	struct swap_info_struct *sis = page_swap_info(page);
-
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(PageUptodate(page));
-	if (frontswap_load(page) == 0) {
-		SetPageUptodate(page);
-		unlock_page(page);
-		goto out;
+
+	bio = bio_alloc(sis->bdev, 1, REQ_OP_WRITE | REQ_SWAP, GFP_NOIO);
+	bio->bi_iter.bi_sector = swap_folio_sector(folio);
+	bio->bi_end_io = end_swap_bio_write;
+	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
+
+	bio_associate_blkg_from_page(bio, folio);
+	count_swpout_vm_event(folio);
+	folio_start_writeback(folio);
+	folio_unlock(folio);
+	submit_bio(bio);
+}
+
+void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug)
+{
+	struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
+
+	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
+	/*
+	 * ->flags can be updated non-atomically (scan_swap_map_slots),
+	 * but that will never affect SWP_FS_OPS, so the data_race
+	 * is safe.
+	 */
+	if (data_race(sis->flags & SWP_FS_OPS))
+		swap_writepage_fs(folio, swap_plug);
+	/*
+	 * ->flags can be updated non-atomically (scan_swap_map_slots),
+	 * but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
+	 * is safe.
+	 */
+	else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
+		swap_writepage_bdev_sync(folio, sis);
+	else
+		swap_writepage_bdev_async(folio, sis);
+}
+
+void swap_write_unplug(struct swap_iocb *sio)
+{
+	struct iov_iter from;
+	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
+	int ret;
+
+	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
+	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
+	if (ret != -EIOCBQUEUED)
+		sio_write_complete(&sio->iocb, ret);
+}
+
+static void sio_read_complete(struct kiocb *iocb, long ret)
+{
+	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
+	int p;
+
+	if (ret == sio->len) {
+		for (p = 0; p < sio->pages; p++) {
+			struct folio *folio = page_folio(sio->bvec[p].bv_page);
+
+			count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
+			count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
+			folio_mark_uptodate(folio);
+			folio_unlock(folio);
+		}
+		count_vm_events(PSWPIN, sio->pages);
+	} else {
+		for (p = 0; p < sio->pages; p++) {
+			struct folio *folio = page_folio(sio->bvec[p].bv_page);
+
+			folio_unlock(folio);
+		}
+		pr_alert_ratelimited("Read-error on swap-device\n");
 	}
+	mempool_free(sio, sio_pool);
+}
 
-	if (sis->flags & SWP_FILE) {
-		struct file *swap_file = sis->swap_file;
-		struct address_space *mapping = swap_file->f_mapping;
+static bool swap_read_folio_zeromap(struct folio *folio)
+{
+	int nr_pages = folio_nr_pages(folio);
+	struct obj_cgroup *objcg;
+	bool is_zeromap;
 
-		ret = mapping->a_ops->readpage(swap_file, page);
-		if (!ret)
-			count_vm_event(PSWPIN);
-		return ret;
+	/*
+	 * Swapping in a large folio that is partially in the zeromap is not
+	 * currently handled. Return true without marking the folio uptodate so
+	 * that an IO error is emitted (e.g. do_swap_page() will sigbus).
+	 */
+	if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
+			&is_zeromap) != nr_pages))
+		return true;
+
+	if (!is_zeromap)
+		return false;
+
+	objcg = get_obj_cgroup_from_folio(folio);
+	count_vm_events(SWPIN_ZERO, nr_pages);
+	if (objcg) {
+		count_objcg_events(objcg, SWPIN_ZERO, nr_pages);
+		obj_cgroup_put(objcg);
 	}
 
-	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
-	if (bio == NULL) {
-		unlock_page(page);
-		ret = -ENOMEM;
-		goto out;
+	folio_zero_range(folio, 0, folio_size(folio));
+	folio_mark_uptodate(folio);
+	return true;
+}
+
+static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
+{
+	struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
+	struct swap_iocb *sio = NULL;
+	loff_t pos = swap_dev_pos(folio->swap);
+
+	if (plug)
+		sio = *plug;
+	if (sio) {
+		if (sio->iocb.ki_filp != sis->swap_file ||
+		    sio->iocb.ki_pos + sio->len != pos) {
+			swap_read_unplug(sio);
+			sio = NULL;
+		}
 	}
-	count_vm_event(PSWPIN);
-	submit_bio(READ, bio);
-out:
-	return ret;
+	if (!sio) {
+		sio = mempool_alloc(sio_pool, GFP_KERNEL);
+		init_sync_kiocb(&sio->iocb, sis->swap_file);
+		sio->iocb.ki_pos = pos;
+		sio->iocb.ki_complete = sio_read_complete;
+		sio->pages = 0;
+		sio->len = 0;
+	}
+	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+	sio->len += folio_size(folio);
+	sio->pages += 1;
+	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
+		swap_read_unplug(sio);
+		sio = NULL;
+	}
+	if (plug)
+		*plug = sio;
+}
+
+static void swap_read_folio_bdev_sync(struct folio *folio,
+		struct swap_info_struct *sis)
+{
+	struct bio_vec bv;
+	struct bio bio;
+
+	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
+	bio.bi_iter.bi_sector = swap_folio_sector(folio);
+	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
+	/*
+	 * Keep this task valid during swap readpage because the oom killer may
+	 * attempt to access it in the page fault retry time check.
+	 */
+	get_task_struct(current);
+	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
+	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
+	count_vm_events(PSWPIN, folio_nr_pages(folio));
+	submit_bio_wait(&bio);
+	__end_swap_bio_read(&bio);
+	put_task_struct(current);
+}
+
+static void swap_read_folio_bdev_async(struct folio *folio,
+		struct swap_info_struct *sis)
+{
+	struct bio *bio;
+
+	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
+	bio->bi_iter.bi_sector = swap_folio_sector(folio);
+	bio->bi_end_io = end_swap_bio_read;
+	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
+	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
+	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
+	count_vm_events(PSWPIN, folio_nr_pages(folio));
+	submit_bio(bio);
 }
 
-int swap_set_page_dirty(struct page *page)
+void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
 {
-	struct swap_info_struct *sis = page_swap_info(page);
+	struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
+	bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO;
+	bool workingset = folio_test_workingset(folio);
+	unsigned long pflags;
+	bool in_thrashing;
+
+	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);
+
+	/*
+	 * Count submission time as memory stall and delay. When the device
+	 * is congested, or the submitting cgroup IO-throttled, submission
+	 * can be a significant part of overall IO time.
+	 */
+	if (workingset) {
+		delayacct_thrashing_start(&in_thrashing);
+		psi_memstall_enter(&pflags);
+	}
+	delayacct_swapin_start();
+
+	if (swap_read_folio_zeromap(folio)) {
+		folio_unlock(folio);
+		goto finish;
+	}
 
-	if (sis->flags & SWP_FILE) {
-		struct address_space *mapping = sis->swap_file->f_mapping;
-		return mapping->a_ops->set_page_dirty(page);
+	if (zswap_load(folio) != -ENOENT)
+		goto finish;
+
+	/* We have to read from slower devices. Increase zswap protection. */
+	zswap_folio_swapin(folio);
+
+	if (data_race(sis->flags & SWP_FS_OPS)) {
+		swap_read_folio_fs(folio, plug);
+	} else if (synchronous) {
+		swap_read_folio_bdev_sync(folio, sis);
 	} else {
-		return __set_page_dirty_no_writeback(page);
+		swap_read_folio_bdev_async(folio, sis);
 	}
+
+finish:
+	if (workingset) {
+		delayacct_thrashing_end(&in_thrashing);
+		psi_memstall_leave(&pflags);
+	}
+	delayacct_swapin_end();
+}
+
+void __swap_read_unplug(struct swap_iocb *sio)
+{
+	struct iov_iter from;
+	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
+	int ret;
+
+	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
+	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
+	if (ret != -EIOCBQUEUED)
+		sio_read_complete(&sio->iocb, ret);
 }
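
Editor's note: the zeromap addition above is the heart of the new swap-out
fast path. A folio whose every word is zero is never written to the device:
swap_writeout() only sets one bit per swap slot in sis->zeromap, and
swap_read_folio_zeromap() later satisfies the read by zero-filling the folio.
The standalone C sketch below models that round trip in userspace. It is an
illustration only, not kernel code: PAGE_SIZE, the slot count, and the helper
names are invented here, and the kernel's versions use atomic
set_bit()/clear_bit() serialised by the locked swapcache folio.

	#include <limits.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <string.h>

	#define PAGE_SIZE 4096UL	/* assumed page size for the sketch */
	#define NSLOTS    1024		/* assumed number of swap slots */

	/* One bit per swap slot; set when the slot's contents are all zero. */
	static unsigned long zeromap[NSLOTS / (sizeof(unsigned long) * CHAR_BIT)];

	static void zeromap_set(size_t slot, bool val)
	{
		size_t idx = slot / (sizeof(unsigned long) * CHAR_BIT);
		unsigned long bit = 1UL << (slot % (sizeof(unsigned long) * CHAR_BIT));

		if (val)
			zeromap[idx] |= bit;	/* kernel: atomic set_bit() */
		else
			zeromap[idx] &= ~bit;	/* kernel: atomic clear_bit() */
	}

	static bool zeromap_test(size_t slot)
	{
		size_t idx = slot / (sizeof(unsigned long) * CHAR_BIT);
		unsigned long bit = 1UL << (slot % (sizeof(unsigned long) * CHAR_BIT));

		return zeromap[idx] & bit;
	}

	/* Mirrors the shape of is_folio_zero_filled(): test the last word first. */
	static bool page_is_zero_filled(const void *page)
	{
		const unsigned long *data = page;
		size_t last = PAGE_SIZE / sizeof(*data) - 1;
		size_t pos;

		if (data[last])			/* common case: the tail is dirty */
			return false;
		for (pos = 0; pos < last; pos++)
			if (data[pos])
				return false;
		return true;
	}

	/* Swap-out: returns true when no device I/O is needed. */
	static bool swap_out(const void *page, size_t slot)
	{
		if (page_is_zero_filled(page)) {
			zeromap_set(slot, true);
			return true;
		}
		zeromap_set(slot, false);	/* a stale zero bit must not survive */
		return false;			/* caller submits the real write */
	}

	/* Swap-in: returns true when the read was satisfied from the bitmap. */
	static bool swap_in(void *page, size_t slot)
	{
		if (zeromap_test(slot)) {
			memset(page, 0, PAGE_SIZE);
			return true;
		}
		return false;			/* caller reads from the device */
	}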
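
A second note, on the swap_iocb plumbing for SWP_FS_OPS swapfiles:
swap_writepage_fs() and swap_read_folio_fs() accumulate up to SWAP_CLUSTER_MAX
pages in one swap_iocb and submit them as a single vectored swap_rw() call,
flushing early whenever the next page is not file-contiguous with the batch
(the "ki_pos + len != pos" check) or the caller passed no plug. The compact,
compilable sketch below models only that batching policy; BATCH_MAX stands in
for SWAP_CLUSTER_MAX and printf() stands in for the real vectored submission,
so all names here are illustrative.

	#include <stdio.h>
	#include <stddef.h>

	#define BATCH_MAX 32		/* stands in for SWAP_CLUSTER_MAX */
	#define PAGE_SIZE 4096UL

	struct plug {
		long long pos;		/* file offset of the first queued page */
		size_t len;		/* bytes queued so far */
		int pages;		/* queued page count */
	};

	/* Submit everything queued so far as one vectored request. */
	static void unplug(struct plug *p)
	{
		if (p->pages)
			printf("submit %d page(s) at offset %lld\n",
			       p->pages, p->pos);
		p->pages = 0;
		p->len = 0;
	}

	/*
	 * Queue one page at file offset pos, flushing first if it does not
	 * extend the current batch contiguously, and again once the bvec
	 * array would overflow.
	 */
	static void plug_page(struct plug *p, long long pos)
	{
		if (p->pages && p->pos + (long long)p->len != pos)
			unplug(p);
		if (!p->pages)
			p->pos = pos;
		p->len += PAGE_SIZE;
		p->pages++;
		if (p->pages == BATCH_MAX)
			unplug(p);
	}

	int main(void)
	{
		struct plug p = { 0 };

		plug_page(&p, 0);	/* three contiguous pages ... */
		plug_page(&p, 4096);
		plug_page(&p, 8192);
		plug_page(&p, 65536);	/* gap: flushes the first three */
		unplug(&p);		/* the caller's final swap_write_unplug() */
		return 0;
	}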
