Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--    drivers/md/dm-crypt.c    64
1 file changed, 48 insertions(+), 16 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9dfdb63220d7..5ef43231fe77 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -253,17 +253,35 @@ MODULE_PARM_DESC(max_read_size, "Maximum size of a read request");
static unsigned int max_write_size = 0;
module_param(max_write_size, uint, 0644);
MODULE_PARM_DESC(max_write_size, "Maximum size of a write request");
-static unsigned get_max_request_size(struct crypt_config *cc, bool wrt)
+
+static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio)
{
+ struct crypt_config *cc = ti->private;
unsigned val, sector_align;
- val = !wrt ? READ_ONCE(max_read_size) : READ_ONCE(max_write_size);
- if (likely(!val))
- val = !wrt ? DM_CRYPT_DEFAULT_MAX_READ_SIZE : DM_CRYPT_DEFAULT_MAX_WRITE_SIZE;
- if (wrt || cc->used_tag_size) {
- if (unlikely(val > BIO_MAX_VECS << PAGE_SHIFT))
- val = BIO_MAX_VECS << PAGE_SHIFT;
- }
- sector_align = max(bdev_logical_block_size(cc->dev->bdev), (unsigned)cc->sector_size);
+ bool wrt = op_is_write(bio_op(bio));
+
+ if (wrt) {
+ /*
+ * For zoned devices, splitting write operations creates the
+ * risk of deadlocking queue freeze operations with zone write
+ * plugging BIO work when the remainder of a split BIO is
+ * issued. So always allow the entire BIO to proceed.
+ */
+ if (ti->emulate_zone_append)
+ return bio_sectors(bio);
+
+ val = min_not_zero(READ_ONCE(max_write_size),
+ DM_CRYPT_DEFAULT_MAX_WRITE_SIZE);
+ } else {
+ val = min_not_zero(READ_ONCE(max_read_size),
+ DM_CRYPT_DEFAULT_MAX_READ_SIZE);
+ }
+
+ if (wrt || cc->used_tag_size)
+ val = min(val, BIO_MAX_VECS << PAGE_SHIFT);
+
+ sector_align = max(bdev_logical_block_size(cc->dev->bdev),
+ (unsigned)cc->sector_size);
val = round_down(val, sector_align);
if (unlikely(!val))
val = sector_align;
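
For the non-zoned path, the reworked helper boils down to three steps: take the module parameter unless it is unset (min_not_zero), cap writes and tag-carrying reads at BIO_MAX_VECS worth of pages, and round the result down to the sector alignment. The sketch below is a minimal, standalone model of that arithmetic; the constants, the helper and the hard-coded alignment are placeholders for illustration, not the kernel definitions.

/* Standalone model of the new limit calculation (illustration only). */
#include <stdio.h>

#define SECTOR_SHIFT       9
#define PAGE_SHIFT         12       /* assumes 4 KiB pages */
#define BIO_MAX_VECS       256      /* assumed value, taken from current kernels */
#define DEFAULT_MAX_WRITE  131072   /* placeholder for DM_CRYPT_DEFAULT_MAX_WRITE_SIZE */

/* Same semantics as the kernel's min_not_zero(): a zero value means "unset". */
static unsigned int min_not_zero_u(unsigned int a, unsigned int b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	unsigned int max_write_size = 0;   /* module parameter, 0 = use the default */
	unsigned int sector_align = 4096;  /* stands in for max(logical block size, cc->sector_size) */
	unsigned int val;

	val = min_not_zero_u(max_write_size, DEFAULT_MAX_WRITE);

	/* Writes (and reads carrying integrity tags) are capped so the buffer
	 * allocation never needs more than BIO_MAX_VECS pages. */
	if (val > (BIO_MAX_VECS << PAGE_SHIFT))
		val = BIO_MAX_VECS << PAGE_SHIFT;

	/* Round down to the sector alignment, but never below one unit of it. */
	val -= val % sector_align;
	if (!val)
		val = sector_align;

	/* The kernel helper returns sectors, as its name says. */
	printf("%u bytes -> %u sectors\n", val, val >> SECTOR_SHIFT);
	return 0;
}
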
@@ -517,7 +535,10 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
{
struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
- struct md5_state md5state;
+ union {
+ struct md5_state md5state;
+ u8 state[CRYPTO_MD5_STATESIZE];
+ } u;
__le32 buf[4];
int i, r;
@@ -548,13 +569,13 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
return r;
/* No MD5 padding here */
- r = crypto_shash_export(desc, &md5state);
+ r = crypto_shash_export(desc, &u.md5state);
if (r)
return r;
for (i = 0; i < MD5_HASH_WORDS; i++)
- __cpu_to_le32s(&md5state.hash[i]);
- memcpy(iv, &md5state.hash, cc->iv_size);
+ __cpu_to_le32s(&u.md5state.hash[i]);
+ memcpy(iv, &u.md5state.hash, cc->iv_size);
return 0;
}
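
The union above sizes the on-stack export buffer to the larger of struct md5_state and CRYPTO_MD5_STATESIZE, so crypto_shash_export() cannot overrun it, while u.md5state keeps typed access to the hash words for the byte swapping and the memcpy. A small standalone sketch of the pattern, with made-up sizes standing in for the kernel types:

/* Union-as-export-buffer pattern (made-up sizes, not the kernel layout). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define EXPORT_STATESIZE 56              /* stand-in for CRYPTO_MD5_STATESIZE */

struct hash_state {                      /* stand-in for struct md5_state */
	uint32_t hash[4];
	uint64_t byte_count;
};

int main(void)
{
	union {
		struct hash_state md5state;       /* typed view of the digest words */
		uint8_t state[EXPORT_STATESIZE];  /* worst-case export size */
	} u;

	/* The union is as large as its largest member, so an export of up to
	 * EXPORT_STATESIZE bytes cannot overflow the buffer, whichever of the
	 * two definitions grows, and the typed member still works for the
	 * per-word handling of the digest. */
	memset(u.state, 0, sizeof(u));
	printf("sizeof(u) = %zu, sizeof(struct hash_state) = %zu\n",
	       sizeof(u), sizeof(struct hash_state));
	return 0;
}
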
@@ -1189,11 +1210,11 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
return -EINVAL;
}
- if (bi->tuple_size < cc->used_tag_size) {
+ if (bi->metadata_size < cc->used_tag_size) {
ti->error = "Integrity profile tag size mismatch.";
return -EINVAL;
}
- cc->tuple_size = bi->tuple_size;
+ cc->tuple_size = bi->metadata_size;
if (1 << bi->interval_exp != cc->sector_size) {
ti->error = "Integrity profile sector size mismatch.";
return -EINVAL;
@@ -3493,7 +3514,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
/*
* Check if bio is too large, split as needed.
*/
- max_sectors = get_max_request_size(cc, bio_data_dir(bio) == WRITE);
+ max_sectors = get_max_request_sectors(ti, bio);
if (unlikely(bio_sectors(bio) > max_sectors))
dm_accept_partial_bio(bio, max_sectors);
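
When a BIO exceeds the limit, dm_accept_partial_bio() makes the target process only the first max_sectors; DM core resubmits the remainder, which passes through crypt_map() and this check again. A conceptual model of that loop, with hypothetical sizes:

/* Conceptual model of repeated partial acceptance (hypothetical sizes). */
#include <stdio.h>

int main(void)
{
	unsigned int remaining = 5000;    /* incoming BIO size, in sectors */
	unsigned int max_sectors = 2048;  /* limit returned by the helper */

	/* Each pass accepts at most max_sectors; the remainder is resubmitted
	 * and goes through the map function (and this check) again. */
	while (remaining) {
		unsigned int chunk = remaining < max_sectors ? remaining : max_sectors;
		printf("map %u sectors, %u left\n", chunk, remaining - chunk);
		remaining -= chunk;
	}
	return 0;
}
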
@@ -3730,6 +3751,17 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
max_t(unsigned int, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
limits->dma_alignment = limits->logical_block_size - 1;
+
+ /*
+ * For zoned dm-crypt targets, there will be no internal splitting of
+ * write BIOs to avoid exceeding BIO_MAX_VECS vectors per BIO. But
+ * without respecting this limit, crypt_alloc_buffer() will trigger a
+ * BUG(). Avoid this by forcing DM core to split write BIOs to this
+ * limit.
+ */
+ if (ti->emulate_zone_append)
+ limits->max_hw_sectors = min(limits->max_hw_sectors,
+ BIO_MAX_VECS << PAGE_SECTORS_SHIFT);
}
static struct target_type crypt_target = {
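
For scale: with 4 KiB pages and 512-byte sectors, PAGE_SECTORS_SHIFT is 3, so the new max_hw_sectors cap of BIO_MAX_VECS << PAGE_SECTORS_SHIFT is the same 1 MiB that BIO_MAX_VECS << PAGE_SHIFT expresses in bytes inside get_max_request_sectors(). A short standalone check of that arithmetic (BIO_MAX_VECS = 256 is an assumption based on current kernels):

/* Worked example of the write-size cap, assuming 4 KiB pages, 512 B sectors. */
#include <stdio.h>

int main(void)
{
	unsigned int bio_max_vecs = 256;                /* assumed BIO_MAX_VECS */
	unsigned int page_shift = 12, sector_shift = 9;
	unsigned int page_sectors_shift = page_shift - sector_shift;   /* 3 */

	unsigned int cap_sectors = bio_max_vecs << page_sectors_shift; /* 2048 sectors */
	unsigned int cap_bytes   = bio_max_vecs << page_shift;         /* 1 MiB */

	printf("%u sectors == %u bytes\n", cap_sectors, cap_bytes);
	return 0;
}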