author	Hou Tao <houtao1@huawei.com>	2025-01-20 16:29:49 +0800
committer	Mikulas Patocka <mpatocka@redhat.com>	2025-01-21 22:02:12 +0100
commit	9fdbbdbbc92b1474a87b89f8b964892a63734492 (patch)
tree	25859d3b04f1539a2164a8a926d62b5ed412a335
parent	2f8c28d0d97313edc36d62cbd505019f36111fd5 (diff)
dm-crypt: don't update io->sector after kcryptd_crypt_write_io_submit()
The updates of io->sector are leftovers from when dm-crypt allocated
pages for partial write requests. However, since commit cf2f1abfbd0db
("dm crypt: don't allocate pages for a partial request"), there are no
partial requests anymore.

After the introduction of the write request rb-tree, the updates of
io->sector may interfere with the insertion procedure, because the
->sector of write requests that have already been added to the rb-tree
may be changed during the insertion of a new write request.

Fix it by removing these buggy updates of io->sector. Considering that
these updates only affect the write request rb-tree, the commit which
introduced the write request rb-tree is used for the fixes tag.

Fixes: b3c5fd305249 ("dm crypt: sort writes")
Cc: stable@vger.kernel.org
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
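For illustration, the following is a minimal userspace sketch of the
invariant the fix protects: once a request is linked into a tree that is
ordered by its sector, that key must not change afterwards. This is not
kernel code; demo_io, tree_insert and check_order are hypothetical
stand-ins for struct dm_crypt_io and the write_tree insertion done in
kcryptd_crypt_write_io_submit().

#include <stdio.h>

struct demo_io {
	unsigned long sector;          /* ordering key, like io->sector */
	struct demo_io *left, *right;  /* stand-in for struct rb_node */
};

/* Sorted insert keyed by ->sector, mirroring the rb-tree walk. */
static void tree_insert(struct demo_io **root, struct demo_io *io)
{
	while (*root) {
		if (io->sector < (*root)->sector)
			root = &(*root)->left;
		else
			root = &(*root)->right;
	}
	*root = io;
}

/* In-order walk: sectors must come out in non-decreasing order. */
static void check_order(struct demo_io *n, unsigned long *prev)
{
	if (!n)
		return;
	check_order(n->left, prev);
	if (n->sector < *prev)
		printf("ordering broken: %lu after %lu\n", n->sector, *prev);
	*prev = n->sector;
	check_order(n->right, prev);
}

int main(void)
{
	struct demo_io a = { .sector = 100 }, b = { .sector = 200 };
	struct demo_io *root = NULL;
	unsigned long prev = 0;

	tree_insert(&root, &a);
	tree_insert(&root, &b);

	/* The bug: mutating the key of an already-inserted node,
	 * analogous to "io->sector = sector" after the submit. */
	a.sector += 300;

	check_order(root, &prev);      /* reports the violated invariant */
	return 0;
}

After the fix, io->sector simply keeps the request's starting sector,
which is exactly the key the write rb-tree was sorted by when the
request was inserted.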
-rw-r--r--	drivers/md/dm-crypt.c	14
1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8a4c24d4ee02..605859ef01b5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2090,7 +2090,6 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
 	struct crypt_config *cc = io->cc;
 	struct convert_context *ctx = &io->ctx;
 	int crypt_finished;
-	sector_t sector = io->sector;
 	blk_status_t r;
 
 	wait_for_completion(&ctx->restart);
@@ -2107,10 +2106,8 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
 	}
 
 	/* Encryption was already finished, submit io now */
-	if (crypt_finished) {
+	if (crypt_finished)
 		kcryptd_crypt_write_io_submit(io, 0);
-		io->sector = sector;
-	}
 
 	crypt_dec_pending(io);
 }
@@ -2121,14 +2118,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	struct convert_context *ctx = &io->ctx;
 	struct bio *clone;
 	int crypt_finished;
-	sector_t sector = io->sector;
 	blk_status_t r;
 
 	/*
	 * Prevent io from disappearing until this function completes.
	 */
 	crypt_inc_pending(io);
-	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
+	crypt_convert_init(cc, ctx, NULL, io->base_bio, io->sector);
 
 	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
 	if (unlikely(!clone)) {
@@ -2145,8 +2141,6 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		io->ctx.iter_in = clone->bi_iter;
 	}
 
-	sector += bio_sectors(clone);
-
 	crypt_inc_pending(io);
 	r = crypt_convert(cc, ctx,
 		test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
@@ -2170,10 +2164,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	}
 
 	/* Encryption was already finished, submit io now */
-	if (crypt_finished) {
+	if (crypt_finished)
 		kcryptd_crypt_write_io_submit(io, 0);
-		io->sector = sector;
-	}
 
 dec:
 	crypt_dec_pending(io);