author     Christoph Hellwig <hch@lst.de>    2023-01-25 14:34:36 +0100
committer  Andrew Morton <akpm@linux-foundation.org>    2023-02-02 22:33:34 -0800
commit     3222d8c2a7f888bf38b845b125e9470b12108a4d (patch)
tree       c6a764721e2ae8444f8c4d4a2d784226b7c19d9e /mm/page_io.c
parent     05cda97ecb7046f4192a921741aae33b300dd628 (diff)
block: remove ->rw_page
The ->rw_page method is a special-purpose bypass of the usual bio handling path that is limited to single-page, synchronous reads and writes, which causes a lot of extra code in the drivers, callers and the block layer.

The only remaining user is the MM swap code.  Switch that swap code to simply submit a single-vec on-stack bio and synchronously wait on it, based on a newly added QUEUE_FLAG_SYNCHRONOUS flag set by the drivers that currently implement ->rw_page instead.  While this touches one extra cache line and executes extra code, it simplifies the block layer and drivers and ensures that all features are properly supported by all drivers; e.g. right now ->rw_page bypasses cgroup writeback entirely.

[akpm@linux-foundation.org: fix comment typo, per Dan]
Link: https://lkml.kernel.org/r/20230125133436.447864-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Keith Busch <kbusch@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
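As background for how drivers opt into the new path (a hedged sketch, not part of this patch): a driver that previously provided ->rw_page would now mark its request queue as synchronous, roughly as below.  mydrv_mark_synchronous() and "disk" are hypothetical stand-ins for driver code such as brd or zram.

    /*
     * Illustrative sketch only, not from this patch: a driver that used to
     * implement ->rw_page marks its request queue as synchronous instead.
     */
    #include <linux/blkdev.h>

    static void mydrv_mark_synchronous(struct gendisk *disk)
    {
    	blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
    }

swapon should then see the synchronous queue and set SWP_SYNCHRONOUS_IO on the swap device, which is the flag the hunks below test before taking the on-stack bio paths.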
Diffstat (limited to 'mm/page_io.c')
-rw-r--r--  mm/page_io.c  53
1 file changed, 32 insertions(+), 21 deletions(-)
diff --git a/mm/page_io.c b/mm/page_io.c
index 0a1a3b831344..a805117f7fd7 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -27,7 +27,7 @@
#include <linux/delayacct.h>
#include "swap.h"
-static void end_swap_bio_write(struct bio *bio)
+static void __end_swap_bio_write(struct bio *bio)
{
struct page *page = bio_first_page_all(bio);
@@ -48,6 +48,11 @@ static void end_swap_bio_write(struct bio *bio)
ClearPageReclaim(page);
}
end_page_writeback(page);
+}
+
+static void end_swap_bio_write(struct bio *bio)
+{
+ __end_swap_bio_write(bio);
bio_put(bio);
}
@@ -326,15 +331,31 @@ static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
*wbc->swap_plug = sio;
}
-static void swap_writepage_bdev(struct page *page,
+static void swap_writepage_bdev_sync(struct page *page,
struct writeback_control *wbc, struct swap_info_struct *sis)
{
- struct bio *bio;
+ struct bio_vec bv;
+ struct bio bio;
- if (!bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc)) {
- count_swpout_vm_event(page);
- return;
- }
+ bio_init(&bio, sis->bdev, &bv, 1,
+ REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
+ bio.bi_iter.bi_sector = swap_page_sector(page);
+ bio_add_page(&bio, page, thp_size(page), 0);
+
+ bio_associate_blkg_from_page(&bio, page);
+ count_swpout_vm_event(page);
+
+ set_page_writeback(page);
+ unlock_page(page);
+
+ submit_bio_wait(&bio);
+ __end_swap_bio_write(&bio);
+}
+
+static void swap_writepage_bdev_async(struct page *page,
+ struct writeback_control *wbc, struct swap_info_struct *sis)
+{
+ struct bio *bio;
bio = bio_alloc(sis->bdev, 1,
REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
@@ -362,8 +383,10 @@ void __swap_writepage(struct page *page, struct writeback_control *wbc)
*/
if (data_race(sis->flags & SWP_FS_OPS))
swap_writepage_fs(page, wbc);
+ else if (sis->flags & SWP_SYNCHRONOUS_IO)
+ swap_writepage_bdev_sync(page, wbc, sis);
else
- swap_writepage_bdev(page, wbc, sis);
+ swap_writepage_bdev_async(page, wbc, sis);
}
void swap_write_unplug(struct swap_iocb *sio)
@@ -447,12 +470,6 @@ static void swap_readpage_bdev_sync(struct page *page,
struct bio_vec bv;
struct bio bio;
- if ((sis->flags & SWP_SYNCHRONOUS_IO) &&
- !bdev_read_page(sis->bdev, swap_page_sector(page), page)) {
- count_vm_event(PSWPIN);
- return;
- }
-
bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = swap_page_sector(page);
bio_add_page(&bio, page, thp_size(page), 0);
@@ -472,12 +489,6 @@ static void swap_readpage_bdev_async(struct page *page,
{
struct bio *bio;
- if ((sis->flags & SWP_SYNCHRONOUS_IO) &&
- !bdev_read_page(sis->bdev, swap_page_sector(page), page)) {
- count_vm_event(PSWPIN);
- return;
- }
-
bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
bio->bi_iter.bi_sector = swap_page_sector(page);
bio->bi_end_io = end_swap_bio_read;
@@ -513,7 +524,7 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
unlock_page(page);
} else if (data_race(sis->flags & SWP_FS_OPS)) {
swap_readpage_fs(page, plug);
- } else if (synchronous) {
+ } else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
swap_readpage_bdev_sync(page, sis);
} else {
swap_readpage_bdev_async(page, sis);