path: root/mm/page_io.c
author    NeilBrown <neilb@suse.de>  2022-05-09 18:20:48 -0700
committer akpm <akpm@linux-foundation.org>  2022-05-09 18:20:48 -0700
commit    e1209d3a7a67c281260ba9989621060ba7328b8c (patch)
tree      267b9e527bf7d26db3078fd32ce60447b8e45108 /mm/page_io.c
parent    d791ea676b66489ef6dabd04cd655f5c77426e40 (diff)
mm: introduce ->swap_rw and use it for reads from SWP_FS_OPS swap-space
swap currently uses ->readpage to read swap pages.  This can only request one page at a time from the filesystem, which is not the most efficient.

swap uses ->direct_IO for writes; while this is adequate, it is an inappropriate overloading.  ->direct_IO may need to handle allocating space for holes or other details that are not relevant for swap.

So this patch introduces a new address_space operation: ->swap_rw.  In this patch it is used for reads, and a subsequent patch will switch writes to use it.

No filesystem yet supports ->swap_rw, but that is not a problem because no filesystem actually works with filesystem-based swap.  Only two filesystems set SWP_FS_OPS:
- cifs sets the flag, but ->direct_IO always fails so swap cannot work.
- nfs sets the flag, but ->direct_IO calls generic_write_checks(), which has failed on swap files for several releases.

To ensure that a NULL ->swap_rw isn't called, the ->swap_activate() implementations for both NFS and cifs are changed to fail if ->swap_rw is not set.  This can be removed if/when the function is added.

Future patches will restore swap-over-NFS functionality.

To submit an async read with ->swap_rw() we need to allocate a structure to hold the kiocb and other details.  swap_readpage() cannot handle transient failure, so we create a mempool to provide the structures.

Link: https://lkml.kernel.org/r/164859778125.29473.13430559328221330589.stgit@noble.brown
Signed-off-by: NeilBrown <neilb@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: David Howells <dhowells@redhat.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
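For illustration only, not part of this patch: since no filesystem implements ->swap_rw yet, the sketch below shows roughly what a later implementation might look like, assuming a hypothetical filesystem "myfs" that simply delegates to its own direct-I/O helpers.  The names myfs_direct_read()/myfs_direct_write() are placeholders, not real kernel APIs.

#include <linux/fs.h>
#include <linux/uio.h>

/* Placeholder direct-I/O helpers, assumed to exist in "myfs". */
static ssize_t myfs_direct_read(struct kiocb *iocb, struct iov_iter *iter);
static ssize_t myfs_direct_write(struct kiocb *iocb, struct iov_iter *iter);

/*
 * Hypothetical ->swap_rw: hand the PAGE_SIZE iov_iter built by the swap
 * code straight to the filesystem's direct-I/O path.  Return 0 on
 * synchronous success, -EIOCBQUEUED if the kiocb was queued (the
 * ->ki_complete callback then reports the result), or a negative errno.
 */
static int myfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t ret;

	if (iov_iter_rw(iter) == READ)
		ret = myfs_direct_read(iocb, iter);
	else
		ret = myfs_direct_write(iocb, iter);

	/* Negative values (errors or -EIOCBQUEUED) propagate unchanged. */
	return ret < 0 ? ret : 0;
}

static const struct address_space_operations myfs_aops = {
	/* ... other operations ... */
	.swap_rw	= myfs_swap_rw,
};

With such an implementation wired up, the ->swap_activate() checks that this series adds to NFS and cifs (which reject swapon while ->swap_rw is NULL) would start to pass.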
Diffstat (limited to 'mm/page_io.c')
-rw-r--r--  mm/page_io.c  68
1 file changed, 62 insertions(+), 6 deletions(-)
diff --git a/mm/page_io.c b/mm/page_io.c
index 77fa0fb263f4..2724d6389104 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -235,6 +235,25 @@ static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
#define bio_associate_blkg_from_page(bio, page) do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
+struct swap_iocb {
+ struct kiocb iocb;
+ struct bio_vec bvec;
+};
+static mempool_t *sio_pool;
+
+int sio_pool_init(void)
+{
+ if (!sio_pool) {
+ mempool_t *pool = mempool_create_kmalloc_pool(
+ SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
+ if (cmpxchg(&sio_pool, NULL, pool))
+ mempool_destroy(pool);
+ }
+ if (!sio_pool)
+ return -ENOMEM;
+ return 0;
+}
+
int __swap_writepage(struct page *page, struct writeback_control *wbc,
bio_end_io_t end_write_func)
{
@@ -306,6 +325,48 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
return 0;
}
+static void sio_read_complete(struct kiocb *iocb, long ret)
+{
+ struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
+ struct page *page = sio->bvec.bv_page;
+
+ if (ret != 0 && ret != PAGE_SIZE) {
+ SetPageError(page);
+ ClearPageUptodate(page);
+ pr_alert_ratelimited("Read-error on swap-device\n");
+ } else {
+ SetPageUptodate(page);
+ count_vm_event(PSWPIN);
+ }
+ unlock_page(page);
+ mempool_free(sio, sio_pool);
+}
+
+static int swap_readpage_fs(struct page *page)
+{
+ struct swap_info_struct *sis = page_swap_info(page);
+ struct file *swap_file = sis->swap_file;
+ struct address_space *mapping = swap_file->f_mapping;
+ struct iov_iter from;
+ struct swap_iocb *sio;
+ loff_t pos = page_file_offset(page);
+ int ret;
+
+ sio = mempool_alloc(sio_pool, GFP_KERNEL);
+ init_sync_kiocb(&sio->iocb, swap_file);
+ sio->iocb.ki_pos = pos;
+ sio->iocb.ki_complete = sio_read_complete;
+ sio->bvec.bv_page = page;
+ sio->bvec.bv_len = PAGE_SIZE;
+ sio->bvec.bv_offset = 0;
+
+ iov_iter_bvec(&from, READ, &sio->bvec, 1, PAGE_SIZE);
+ ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
+ if (ret != -EIOCBQUEUED)
+ sio_read_complete(&sio->iocb, ret);
+ return ret;
+}
+
int swap_readpage(struct page *page, bool synchronous)
{
struct bio *bio;
@@ -334,12 +395,7 @@ int swap_readpage(struct page *page, bool synchronous)
}
if (data_race(sis->flags & SWP_FS_OPS)) {
- struct file *swap_file = sis->swap_file;
- struct address_space *mapping = swap_file->f_mapping;
-
- ret = mapping->a_ops->readpage(swap_file, page);
- if (!ret)
- count_vm_event(PSWPIN);
+ ret = swap_readpage_fs(page);
goto out;
}