author	Christoph Hellwig <hch@lst.de>	2023-08-05 07:55:37 +0200
committer	Jens Axboe <axboe@kernel.dk>	2023-08-05 16:13:15 -0600
commit	95848dcb9d676738411a8ff70a9704039f1b3982 (patch)
tree	685aae478a33f338677ec4e06c073173d1273c4b /drivers/block/zram
parent	a592ab6171bd943ce3b988bd271c79fe275a7bec (diff)
zram: take device and not only bvec offset into account
Commit af8b04c63708 ("zram: simplify bvec iteration in __zram_make_request")
changed the bio iteration in zram to rely on the implicit capping to page
boundaries in bio_for_each_segment. But it failed to account for the fact
that zram cares not only about the page alignment of the bio payload, but
also about the page alignment into the device. For buffered I/O and swap
the two are the same, but for direct I/O or kernel-internal I/O such as XFS
log buffer writes they can differ.

Fix this by open coding bio_for_each_segment and limiting the bvec len so
that it never crosses a page alignment boundary in the device, in addition
to the payload boundary already taken care of by bio_iter_iovec.

Cc: stable@vger.kernel.org
Fixes: af8b04c63708 ("zram: simplify bvec iteration in __zram_make_request")
Reported-by: Dusty Mabe <dusty@dustymabe.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Link: https://lore.kernel.org/r/20230805055537.147835-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
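The clamp the patch adds depends on the offset into the device, derived from
the bio's starting sector, not on how the payload happens to be laid out in
memory. A minimal userspace sketch of that arithmetic, not part of the
commit, with a 4 KiB page, 512-byte sectors and the sample sector value
chosen purely for illustration:

	#include <stdio.h>

	#define SECTOR_SHIFT		9
	#define PAGE_SIZE		4096u
	#define SECTORS_PER_PAGE_SHIFT	(12 - SECTOR_SHIFT)
	#define SECTORS_PER_PAGE	(1u << SECTORS_PER_PAGE_SHIFT)

	int main(void)
	{
		/* a direct I/O write: 4 KiB, page-aligned payload aimed at
		 * device sector 9, i.e. 512 bytes into the second zram page */
		unsigned long long bi_sector = 9;
		unsigned int bv_len = PAGE_SIZE;	/* page aligned in memory */

		unsigned int index  = bi_sector >> SECTORS_PER_PAGE_SHIFT;
		unsigned int offset = (bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

		/* without the clamp this chunk would span zram pages index and
		 * index + 1, which the per-page zram_bvec_* helpers cannot handle */
		unsigned int len = bv_len < PAGE_SIZE - offset ? bv_len : PAGE_SIZE - offset;

		printf("index=%u offset=%u len=%u (remaining %u bytes belong to page %u)\n",
		       index, offset, len, bv_len - len, index + 1);
		return 0;
	}

It prints index=1 offset=512 len=3584 (remaining 512 bytes belong to page 2):
the payload is page aligned in memory, yet the device offset still forces the
chunk to be split at the page boundary, which is what bio_for_each_segment
alone does not guarantee.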
Diffstat (limited to 'drivers/block/zram')
-rw-r--r--	drivers/block/zram/zram_drv.c	32
1 file changed, 20 insertions, 12 deletions
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 5676e6dd5b16..06673c6ca255 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1870,15 +1870,16 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
 
 static void zram_bio_read(struct zram *zram, struct bio *bio)
 {
-	struct bvec_iter iter;
-	struct bio_vec bv;
-	unsigned long start_time;
+	unsigned long start_time = bio_start_io_acct(bio);
+	struct bvec_iter iter = bio->bi_iter;
 
-	start_time = bio_start_io_acct(bio);
-	bio_for_each_segment(bv, bio, iter) {
+	do {
 		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
 		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
 				SECTOR_SHIFT;
+		struct bio_vec bv = bio_iter_iovec(bio, iter);
+
+		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
 
 		if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
 			atomic64_inc(&zram->stats.failed_reads);
@@ -1890,22 +1891,26 @@ static void zram_bio_read(struct zram *zram, struct bio *bio)
 		zram_slot_lock(zram, index);
 		zram_accessed(zram, index);
 		zram_slot_unlock(zram, index);
-	}
+
+		bio_advance_iter_single(bio, &iter, bv.bv_len);
+	} while (iter.bi_size);
+
 	bio_end_io_acct(bio, start_time);
 	bio_endio(bio);
 }
 
 static void zram_bio_write(struct zram *zram, struct bio *bio)
 {
-	struct bvec_iter iter;
-	struct bio_vec bv;
-	unsigned long start_time;
+	unsigned long start_time = bio_start_io_acct(bio);
+	struct bvec_iter iter = bio->bi_iter;
 
-	start_time = bio_start_io_acct(bio);
-	bio_for_each_segment(bv, bio, iter) {
+	do {
 		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
 		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
 				SECTOR_SHIFT;
+		struct bio_vec bv = bio_iter_iovec(bio, iter);
+
+		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
 
 		if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
 			atomic64_inc(&zram->stats.failed_writes);
@@ -1916,7 +1921,10 @@ static void zram_bio_write(struct zram *zram, struct bio *bio)
 		zram_slot_lock(zram, index);
 		zram_accessed(zram, index);
 		zram_slot_unlock(zram, index);
-	}
+
+		bio_advance_iter_single(bio, &iter, bv.bv_len);
+	} while (iter.bi_size);
+
 	bio_end_io_acct(bio, start_time);
 	bio_endio(bio);
 }
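
For readability, this is zram_bio_read() as it reads with the patch applied,
reassembled from the hunks above; the error-handling lines that fall between
the two read-side hunks are elided, and zram_bio_write() differs only in
calling zram_bvec_write() and bumping the failed_writes counter:

	static void zram_bio_read(struct zram *zram, struct bio *bio)
	{
		unsigned long start_time = bio_start_io_acct(bio);
		struct bvec_iter iter = bio->bi_iter;

		do {
			u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
			u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
					SECTOR_SHIFT;
			struct bio_vec bv = bio_iter_iovec(bio, iter);

			/* never let one chunk cross a page boundary in the device */
			bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);

			if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
				atomic64_inc(&zram->stats.failed_reads);
				/* ... error handling elided (not shown in the diff) ... */
			}

			zram_slot_lock(zram, index);
			zram_accessed(zram, index);
			zram_slot_unlock(zram, index);

			/* advance by the possibly clamped length so the next round
			 * starts exactly at the device page boundary */
			bio_advance_iter_single(bio, &iter, bv.bv_len);
		} while (iter.bi_size);

		bio_end_io_acct(bio, start_time);
		bio_endio(bio);
	}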