Diffstat (limited to 'drivers/md/dm-crypt.c')
| -rw-r--r-- | drivers/md/dm-crypt.c | 685 |
1 file changed, 399 insertions, 286 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 2653516bcdef..5ef43231fe77 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2003 Jana Saout <jana@saout.de> * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> @@ -16,6 +17,7 @@ #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/blk-integrity.h> +#include <linux/crc32.h> #include <linux/mempool.h> #include <linux/slab.h> #include <linux/crypto.h> @@ -27,13 +29,13 @@ #include <linux/rbtree.h> #include <linux/ctype.h> #include <asm/page.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <crypto/hash.h> #include <crypto/md5.h> -#include <crypto/algapi.h> #include <crypto/skcipher.h> #include <crypto/aead.h> #include <crypto/authenc.h> +#include <crypto/utils.h> #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */ #include <linux/key-type.h> #include <keys/user-type.h> @@ -46,21 +48,26 @@ #define DM_MSG_PREFIX "crypt" +static DEFINE_IDA(workqueue_ida); + /* * context holding the current state of a multi-part conversion */ struct convert_context { struct completion restart; struct bio *bio_in; - struct bio *bio_out; struct bvec_iter iter_in; + struct bio *bio_out; struct bvec_iter iter_out; - u64 cc_sector; atomic_t cc_pending; + unsigned int tag_offset; + u64 cc_sector; union { struct skcipher_request *req; struct aead_request *req_aead; } r; + bool aead_recheck; + bool aead_failed; }; @@ -71,9 +78,9 @@ struct dm_crypt_io { struct crypt_config *cc; struct bio *base_bio; u8 *integrity_metadata; - bool integrity_metadata_from_pool; + bool integrity_metadata_from_pool:1; + struct work_struct work; - struct tasklet_struct tasklet; struct convert_context ctx; @@ -81,6 +88,8 @@ struct dm_crypt_io { blk_status_t error; sector_t sector; + struct bvec_iter saved_bi_iter; + struct rb_node rb_node; } CRYPTO_MINALIGN_ATTR; @@ -117,7 +126,6 @@ struct iv_lmk_private { #define TCW_WHITENING_SIZE 16 struct iv_tcw_private { - struct crypto_shash *crc32_tfm; u8 *iv_seed; u8 *whitening; }; @@ -132,14 +140,15 @@ struct iv_elephant_private { * and encrypts / decrypts at the same time. 
*/ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, - DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, - DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE, - DM_CRYPT_WRITE_INLINE }; + DM_CRYPT_SAME_CPU, DM_CRYPT_HIGH_PRIORITY, + DM_CRYPT_NO_OFFLOAD, DM_CRYPT_NO_READ_WORKQUEUE, + DM_CRYPT_NO_WRITE_WORKQUEUE, DM_CRYPT_WRITE_INLINE }; enum cipher_flags { CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */ CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */ CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */ + CRYPT_KEY_MAC_SIZE_SET, /* The integrity_key_size option was used */ }; /* @@ -171,14 +180,15 @@ struct crypt_config { } iv_gen_private; u64 iv_offset; unsigned int iv_size; - unsigned short int sector_size; + unsigned short sector_size; unsigned char sector_shift; union { struct crypto_skcipher **tfms; struct crypto_aead **tfms_aead; } cipher_tfm; - unsigned tfms_count; + unsigned int tfms_count; + int workqueue_id; unsigned long cipher_flags; /* @@ -206,13 +216,14 @@ struct crypt_config { unsigned int integrity_tag_size; unsigned int integrity_iv_size; - unsigned int on_disk_tag_size; + unsigned int used_tag_size; + unsigned int tuple_size; /* * pool for per bio private data, crypto requests, * encryption requeusts/buffer pages and integrity tags */ - unsigned tag_pool_max_sectors; + unsigned int tag_pool_max_sectors; mempool_t tag_pool; mempool_t req_pool; mempool_t page_pool; @@ -221,7 +232,7 @@ struct crypt_config { struct mutex bio_alloc_lock; u8 *authenc_key; /* space for keys in authenc() format (if used) */ - u8 key[]; + u8 key[] __counted_by(key_size); }; #define MIN_IOS 64 @@ -229,10 +240,53 @@ struct crypt_config { #define POOL_ENTRY_SIZE 512 static DEFINE_SPINLOCK(dm_crypt_clients_lock); -static unsigned dm_crypt_clients_n = 0; +static unsigned int dm_crypt_clients_n; static volatile unsigned long dm_crypt_pages_per_client; #define DM_CRYPT_MEMORY_PERCENT 2 #define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16) +#define DM_CRYPT_DEFAULT_MAX_READ_SIZE 131072 +#define DM_CRYPT_DEFAULT_MAX_WRITE_SIZE 131072 + +static unsigned int max_read_size = 0; +module_param(max_read_size, uint, 0644); +MODULE_PARM_DESC(max_read_size, "Maximum size of a read request"); +static unsigned int max_write_size = 0; +module_param(max_write_size, uint, 0644); +MODULE_PARM_DESC(max_write_size, "Maximum size of a write request"); + +static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio) +{ + struct crypt_config *cc = ti->private; + unsigned val, sector_align; + bool wrt = op_is_write(bio_op(bio)); + + if (wrt) { + /* + * For zoned devices, splitting write operations creates the + * risk of deadlocking queue freeze operations with zone write + * plugging BIO work when the reminder of a split BIO is + * issued. So always allow the entire BIO to proceed. 
+ */ + if (ti->emulate_zone_append) + return bio_sectors(bio); + + val = min_not_zero(READ_ONCE(max_write_size), + DM_CRYPT_DEFAULT_MAX_WRITE_SIZE); + } else { + val = min_not_zero(READ_ONCE(max_read_size), + DM_CRYPT_DEFAULT_MAX_READ_SIZE); + } + + if (wrt || cc->used_tag_size) + val = min(val, BIO_MAX_VECS << PAGE_SHIFT); + + sector_align = max(bdev_logical_block_size(cc->dev->bdev), + (unsigned)cc->sector_size); + val = round_down(val, sector_align); + if (unlikely(!val)) + val = sector_align; + return val >> SECTOR_SHIFT; +} static void crypt_endio(struct bio *clone); static void kcryptd_queue_crypt(struct dm_crypt_io *io); @@ -354,7 +408,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { - unsigned bs; + unsigned int bs; int log; if (crypt_integrity_aead(cc)) @@ -363,9 +417,10 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, bs = crypto_skcipher_blocksize(any_tfm(cc)); log = ilog2(bs); - /* we need to calculate how far we must shift the sector count - * to get the cipher block count, we use this shift in _gen */ - + /* + * We need to calculate how far we must shift the sector count + * to get the cipher block count, we use this shift in _gen. + */ if (1 << log != bs) { ti->error = "cypher blocksize is not a power of 2"; return -EINVAL; @@ -480,7 +535,10 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, { struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; SHASH_DESC_ON_STACK(desc, lmk->hash_tfm); - struct md5_state md5state; + union { + struct md5_state md5state; + u8 state[CRYPTO_MD5_STATESIZE]; + } u; __le32 buf[4]; int i, r; @@ -511,13 +569,13 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, return r; /* No MD5 padding here */ - r = crypto_shash_export(desc, &md5state); + r = crypto_shash_export(desc, &u.md5state); if (r) return r; for (i = 0; i < MD5_HASH_WORDS; i++) - __cpu_to_le32s(&md5state.hash[i]); - memcpy(iv, &md5state.hash, cc->iv_size); + __cpu_to_le32s(&u.md5state.hash[i]); + memcpy(iv, &u.md5state.hash, cc->iv_size); return 0; } @@ -531,9 +589,9 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { sg = crypt_get_sg_data(cc, dmreq->sg_in); - src = kmap_atomic(sg_page(sg)); + src = kmap_local_page(sg_page(sg)); r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset); - kunmap_atomic(src); + kunmap_local(src); } else memset(iv, 0, cc->iv_size); @@ -551,14 +609,14 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, return 0; sg = crypt_get_sg_data(cc, dmreq->sg_out); - dst = kmap_atomic(sg_page(sg)); + dst = kmap_local_page(sg_page(sg)); r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset); /* Tweak the first block of plaintext sector */ if (!r) crypto_xor(dst + sg->offset, iv, cc->iv_size); - kunmap_atomic(dst); + kunmap_local(dst); return r; } @@ -570,10 +628,6 @@ static void crypt_iv_tcw_dtr(struct crypt_config *cc) tcw->iv_seed = NULL; kfree_sensitive(tcw->whitening); tcw->whitening = NULL; - - if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm)) - crypto_free_shash(tcw->crc32_tfm); - tcw->crc32_tfm = NULL; } static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti, @@ -591,13 +645,6 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti, return -EINVAL; } - tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, - CRYPTO_ALG_ALLOCATES_MEMORY); - if (IS_ERR(tcw->crc32_tfm)) { - ti->error = "Error 
initializing CRC32 in TCW"; - return PTR_ERR(tcw->crc32_tfm); - } - tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL); tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL); if (!tcw->iv_seed || !tcw->whitening) { @@ -631,42 +678,28 @@ static int crypt_iv_tcw_wipe(struct crypt_config *cc) return 0; } -static int crypt_iv_tcw_whitening(struct crypt_config *cc, - struct dm_crypt_request *dmreq, - u8 *data) +static void crypt_iv_tcw_whitening(struct crypt_config *cc, + struct dm_crypt_request *dmreq, u8 *data) { struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; __le64 sector = cpu_to_le64(dmreq->iv_sector); u8 buf[TCW_WHITENING_SIZE]; - SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm); - int i, r; + int i; /* xor whitening with sector number */ crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8); crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8); /* calculate crc32 for every 32bit part and xor it */ - desc->tfm = tcw->crc32_tfm; - for (i = 0; i < 4; i++) { - r = crypto_shash_init(desc); - if (r) - goto out; - r = crypto_shash_update(desc, &buf[i * 4], 4); - if (r) - goto out; - r = crypto_shash_final(desc, &buf[i * 4]); - if (r) - goto out; - } + for (i = 0; i < 4; i++) + put_unaligned_le32(crc32(0, &buf[i * 4], 4), &buf[i * 4]); crypto_xor(&buf[0], &buf[12], 4); crypto_xor(&buf[4], &buf[8], 4); /* apply whitening (8 bytes) to whole sector */ for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++) crypto_xor(data + i * 8, buf, 8); -out: memzero_explicit(buf, sizeof(buf)); - return r; } static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, @@ -676,14 +709,13 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; __le64 sector = cpu_to_le64(dmreq->iv_sector); u8 *src; - int r = 0; /* Remove whitening from ciphertext */ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { sg = crypt_get_sg_data(cc, dmreq->sg_in); - src = kmap_atomic(sg_page(sg)); - r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset); - kunmap_atomic(src); + src = kmap_local_page(sg_page(sg)); + crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset); + kunmap_local(src); } /* Calculate IV */ @@ -692,7 +724,7 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector, cc->iv_size - 8); - return r; + return 0; } static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, @@ -700,18 +732,17 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, { struct scatterlist *sg; u8 *dst; - int r; if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) return 0; /* Apply whitening on ciphertext */ sg = crypt_get_sg_data(cc, dmreq->sg_out); - dst = kmap_atomic(sg_page(sg)); - r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset); - kunmap_atomic(dst); + dst = kmap_local_page(sg_page(sg)); + crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset); + kunmap_local(dst); - return r; + return 0; } static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv, @@ -731,8 +762,7 @@ static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti, } if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) { - ti->error = "Block size of EBOIV cipher does " - "not match IV size of block cipher"; + ti->error = "Block size of EBOIV cipher does not match IV size of block cipher"; return -EINVAL; } @@ -742,16 +772,24 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { - u8 buf[MAX_CIPHER_BLOCKSIZE] 
__aligned(__alignof__(__le64)); + struct crypto_skcipher *tfm = any_tfm(cc); struct skcipher_request *req; struct scatterlist src, dst; DECLARE_CRYPTO_WAIT(wait); + unsigned int reqsize; int err; + u8 *buf; + + reqsize = sizeof(*req) + crypto_skcipher_reqsize(tfm); + reqsize = ALIGN(reqsize, __alignof__(__le64)); - req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO); + req = kmalloc(reqsize + cc->iv_size, GFP_NOIO); if (!req) return -ENOMEM; + skcipher_request_set_tfm(req, tfm); + + buf = (u8 *)req + reqsize; memset(buf, 0, cc->iv_size); *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size); @@ -760,7 +798,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf); skcipher_request_set_callback(req, 0, crypto_req_done, &wait); err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); - skcipher_request_free(req); + kfree_sensitive(req); return err; } @@ -974,35 +1012,35 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d goto out; sg = crypt_get_sg_data(cc, dmreq->sg_out); - data = kmap_atomic(sg_page(sg)); + data = kmap_local_page(sg_page(sg)); data_offset = data + sg->offset; /* Cannot modify original bio, copy to sg_out and apply Elephant to it */ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { sg2 = crypt_get_sg_data(cc, dmreq->sg_in); - data2 = kmap_atomic(sg_page(sg2)); + data2 = kmap_local_page(sg_page(sg2)); memcpy(data_offset, data2 + sg2->offset, cc->sector_size); - kunmap_atomic(data2); + kunmap_local(data2); } if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { - diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32)); - diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); - diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); - diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32)); + diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32)); + diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32)); + diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32)); + diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32)); } for (i = 0; i < (cc->sector_size / 32); i++) crypto_xor(data_offset + i * 32, ks, 32); if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { - diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32)); - diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); - diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); - diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32)); + diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32)); + diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32)); + diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32)); + diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32)); } - kunmap_atomic(data); + kunmap_local(data); out: kfree_sensitive(ks); kfree_sensitive(es); @@ -1141,17 +1179,16 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) unsigned int tag_len; int ret; - if (!bio_sectors(bio) || !io->cc->on_disk_tag_size) + if (!bio_sectors(bio) || !io->cc->tuple_size) return 0; bip = bio_integrity_alloc(bio, GFP_NOIO, 1); if (IS_ERR(bip)) return PTR_ERR(bip); - tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift); + tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift); - bip->bip_iter.bi_size = 
tag_len; - bip->bip_iter.bi_sector = io->cc->start + io->sector; + bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata), tag_len, offset_in_page(io->integrity_metadata)); @@ -1167,24 +1204,24 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk); struct mapped_device *md = dm_table_get_md(ti->table); - /* From now we require underlying device with our integrity profile */ - if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) { + /* We require an underlying device with non-PI metadata */ + if (!bi || bi->csum_type != BLK_INTEGRITY_CSUM_NONE) { ti->error = "Integrity profile not supported."; return -EINVAL; } - if (bi->tag_size != cc->on_disk_tag_size || - bi->tuple_size != cc->on_disk_tag_size) { + if (bi->metadata_size < cc->used_tag_size) { ti->error = "Integrity profile tag size mismatch."; return -EINVAL; } + cc->tuple_size = bi->metadata_size; if (1 << bi->interval_exp != cc->sector_size) { ti->error = "Integrity profile sector size mismatch."; return -EINVAL; } if (crypt_integrity_aead(cc)) { - cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size; + cc->integrity_tag_size = cc->used_tag_size - cc->integrity_iv_size; DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md), cc->integrity_tag_size, cc->integrity_iv_size); @@ -1196,7 +1233,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md), cc->integrity_iv_size); - if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) { + if ((cc->integrity_tag_size + cc->integrity_iv_size) > cc->tuple_size) { ti->error = "Not enough space for integrity tag in the profile."; return -EINVAL; } @@ -1220,6 +1257,7 @@ static void crypt_convert_init(struct crypt_config *cc, if (bio_out) ctx->iter_out = bio_out->bi_iter; ctx->cc_sector = sector + cc->iv_offset; + ctx->tag_offset = 0; init_completion(&ctx->restart); } @@ -1255,6 +1293,7 @@ static __le64 *org_sector_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq) { u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size; + return (__le64 *) ptr; } @@ -1263,7 +1302,8 @@ static unsigned int *org_tag_of_dmreq(struct crypt_config *cc, { u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size + sizeof(uint64_t); - return (unsigned int*)ptr; + + return (unsigned int *)ptr; } static void *tag_from_dmreq(struct crypt_config *cc, @@ -1273,7 +1313,7 @@ static void *tag_from_dmreq(struct crypt_config *cc, struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) * - cc->on_disk_tag_size]; + cc->tuple_size]; } static void *iv_tag_from_dmreq(struct crypt_config *cc, @@ -1354,9 +1394,9 @@ static int crypt_convert_block_aead(struct crypt_config *cc, aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, cc->sector_size, iv); r = crypto_aead_encrypt(req); - if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size) + if (cc->integrity_tag_size + cc->integrity_iv_size != cc->tuple_size) memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0, - cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size)); + cc->tuple_size - (cc->integrity_tag_size + cc->integrity_iv_size)); } else { aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, cc->sector_size + 
cc->integrity_tag_size, iv); @@ -1366,10 +1406,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc, if (r == -EBADMSG) { sector_t s = le64_to_cpu(*sector); - DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", - ctx->bio_in->bi_bdev, s); - dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", - ctx->bio_in, s, 0); + ctx->aead_failed = true; + if (ctx->aead_recheck) { + DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", + ctx->bio_in->bi_bdev, s); + dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", + ctx->bio_in, s, 0); + } } if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) @@ -1458,13 +1501,12 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc, return r; } -static void kcryptd_async_done(struct crypto_async_request *async_req, - int error); +static void kcryptd_async_done(void *async_req, int error); static int crypt_alloc_req_skcipher(struct crypt_config *cc, struct convert_context *ctx) { - unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); + unsigned int key_index = ctx->cc_sector & (cc->tfms_count - 1); if (!ctx->r.req) { ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO); @@ -1548,7 +1590,6 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_ static blk_status_t crypt_convert(struct crypt_config *cc, struct convert_context *ctx, bool atomic, bool reset_pending) { - unsigned int tag_offset = 0; unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT; int r; @@ -1571,9 +1612,9 @@ static blk_status_t crypt_convert(struct crypt_config *cc, atomic_inc(&ctx->cc_pending); if (crypt_integrity_aead(cc)) - r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset); + r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, ctx->tag_offset); else - r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset); + r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, ctx->tag_offset); switch (r) { /* @@ -1593,8 +1634,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc, * exit and continue processing in a workqueue */ ctx->r.req = NULL; + ctx->tag_offset++; ctx->cc_sector += sector_step; - tag_offset++; return BLK_STS_DEV_RESOURCE; } } else { @@ -1608,8 +1649,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc, */ case -EINPROGRESS: ctx->r.req = NULL; + ctx->tag_offset++; ctx->cc_sector += sector_step; - tag_offset++; continue; /* * The request was already processed (synchronously). @@ -1617,7 +1658,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc, case 0: atomic_dec(&ctx->cc_pending); ctx->cc_sector += sector_step; - tag_offset++; + ctx->tag_offset++; if (!atomic) cond_resched(); continue; @@ -1643,8 +1684,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone); /* * Generate a new unfragmented bio with the given size - * This should never violate the device limitations (but only because - * max_segment_size is being constrained to PAGE_SIZE). + * This should never violate the device limitations (but if it did then block + * core should split the bio as needed). * * This function may be called concurrently. If we allocate from the mempool * concurrently, there is a possibility of deadlock. For example, if we have @@ -1657,15 +1698,18 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone); * In order to not degrade performance with excessive locking, we try * non-blocking allocations without a mutex first but on failure we fallback * to blocking allocations with a mutex. 
+ * + * In order to reduce allocation overhead, we try to allocate compound pages in + * the first pass. If they are not available, we fall back to the mempool. */ -static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) +static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size) { struct crypt_config *cc = io->cc; struct bio *clone; unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; - unsigned i, len, remaining_size; - struct page *page; + unsigned int remaining_size; + unsigned int order = MAX_PAGE_ORDER; retry: if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) @@ -1675,23 +1719,45 @@ retry: GFP_NOIO, &cc->bs); clone->bi_private = io; clone->bi_end_io = crypt_endio; + clone->bi_ioprio = io->base_bio->bi_ioprio; + clone->bi_iter.bi_sector = cc->start + io->sector; remaining_size = size; - for (i = 0; i < nr_iovecs; i++) { - page = mempool_alloc(&cc->page_pool, gfp_mask); - if (!page) { + while (remaining_size) { + struct page *pages; + unsigned size_to_add; + unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT); + order = min(order, remaining_order); + + while (order > 0) { + if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) + + (1 << order) > dm_crypt_pages_per_client)) + goto decrease_order; + pages = alloc_pages(gfp_mask + | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP, + order); + if (likely(pages != NULL)) { + percpu_counter_add(&cc->n_allocated_pages, 1 << order); + goto have_pages; + } +decrease_order: + order--; + } + + pages = mempool_alloc(&cc->page_pool, gfp_mask); + if (!pages) { crypt_free_buffer_pages(cc, clone); bio_put(clone); gfp_mask |= __GFP_DIRECT_RECLAIM; + order = 0; goto retry; } - len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size; - - bio_add_page(clone, page, len, 0); - - remaining_size -= len; +have_pages: + size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size); + __bio_add_page(clone, pages, size_to_add, 0); + remaining_size -= size_to_add; } /* Allocate space for integrity tags */ @@ -1709,12 +1775,18 @@ retry: static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) { - struct bio_vec *bv; - struct bvec_iter_all iter_all; + struct folio_iter fi; - bio_for_each_segment_all(bv, clone, iter_all) { - BUG_ON(!bv->bv_page); - mempool_free(bv->bv_page, &cc->page_pool); + if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */ + bio_for_each_folio_all(fi, clone) { + if (folio_test_large(fi.folio)) { + percpu_counter_sub(&cc->n_allocated_pages, + 1 << folio_order(fi.folio)); + folio_put(fi.folio); + } else { + mempool_free(&fi.folio->page, &cc->page_pool); + } + } } } @@ -1725,6 +1797,8 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, io->base_bio = bio; io->sector = sector; io->error = 0; + io->ctx.aead_recheck = false; + io->ctx.aead_failed = false; io->ctx.r.req = NULL; io->integrity_metadata = NULL; io->integrity_metadata_from_pool = false; @@ -1736,11 +1810,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io) atomic_inc(&io->io_pending); } -static void kcryptd_io_bio_endio(struct work_struct *work) -{ - struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); - bio_endio(io->base_bio); -} +static void kcryptd_queue_read(struct dm_crypt_io *io); /* * One of the bios was finished. 
Check for completion of @@ -1755,6 +1825,15 @@ static void crypt_dec_pending(struct dm_crypt_io *io) if (!atomic_dec_and_test(&io->io_pending)) return; + if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) && + cc->used_tag_size && bio_data_dir(base_bio) == READ) { + io->ctx.aead_recheck = true; + io->ctx.aead_failed = false; + io->error = 0; + kcryptd_queue_read(io); + return; + } + if (io->ctx.r.req) crypt_free_req(cc, io->ctx.r.req, base_bio); @@ -1765,22 +1844,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io) base_bio->bi_status = error; - /* - * If we are running this function from our tasklet, - * we can't call bio_endio() here, because it will call - * clone_endio() from dm.c, which in turn will - * free the current struct dm_crypt_io structure with - * our tasklet. In this case we need to delay bio_endio() - * execution to after the tasklet is done and dequeued. - */ - if (tasklet_trylock(&io->tasklet)) { - tasklet_unlock(&io->tasklet); - bio_endio(base_bio); - return; - } - - INIT_WORK(&io->work, kcryptd_io_bio_endio); - queue_work(cc->io_queue, &io->work); + bio_endio(base_bio); } /* @@ -1804,16 +1868,20 @@ static void crypt_endio(struct bio *clone) { struct dm_crypt_io *io = clone->bi_private; struct crypt_config *cc = io->cc; - unsigned rw = bio_data_dir(clone); - blk_status_t error; + unsigned int rw = bio_data_dir(clone); + blk_status_t error = clone->bi_status; + + if (io->ctx.aead_recheck && !error) { + kcryptd_queue_crypt(io); + return; + } /* * free the processed pages */ - if (rw == WRITE) + if (rw == WRITE || io->ctx.aead_recheck) crypt_free_buffer_pages(cc, clone); - error = clone->bi_status; bio_put(clone); if (rw == READ && !error) { @@ -1834,6 +1902,21 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) struct crypt_config *cc = io->cc; struct bio *clone; + if (io->ctx.aead_recheck) { + if (!(gfp & __GFP_DIRECT_RECLAIM)) + return 1; + crypt_inc_pending(io); + clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); + if (unlikely(!clone)) { + crypt_dec_pending(io); + return 1; + } + crypt_convert_init(cc, &io->ctx, clone, clone, io->sector); + io->saved_bi_iter = clone->bi_iter; + dm_submit_bio_remap(io->base_bio, clone); + return 0; + } + /* * We need the original biovec array in order to decrypt the whole bio * data *afterwards* -- thanks to immutable biovecs we don't need to @@ -1843,13 +1926,13 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs); if (!clone) return 1; + + clone->bi_iter.bi_sector = cc->start + io->sector; clone->bi_private = io; clone->bi_end_io = crypt_endio; crypt_inc_pending(io); - clone->bi_iter.bi_sector = cc->start + io->sector; - if (dm_crypt_integrity_io_alloc(io, clone)) { crypt_dec_pending(io); bio_put(clone); @@ -1913,7 +1996,6 @@ continue_locked: schedule(); - set_current_state(TASK_RUNNING); spin_lock_irq(&cc->write_thread_lock); goto continue_locked; @@ -1933,6 +2015,7 @@ pop_from_list: io = crypt_io_from_node(rb_first(&write_tree)); rb_erase(&io->rb_node, &write_tree); kcryptd_io_write(io); + cond_resched(); } while (!RB_EMPTY_ROOT(&write_tree)); blk_finish_plug(&plug); } @@ -1957,8 +2040,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) /* crypt_convert should have filled the clone bio */ BUG_ON(io->ctx.iter_out.bi_size); - clone->bi_iter.bi_sector = cc->start + io->sector; - if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) || 
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) { dm_submit_bio_remap(io->base_bio, clone); @@ -2010,13 +2091,12 @@ static void kcryptd_crypt_write_continue(struct work_struct *work) struct crypt_config *cc = io->cc; struct convert_context *ctx = &io->ctx; int crypt_finished; - sector_t sector = io->sector; blk_status_t r; wait_for_completion(&ctx->restart); reinit_completion(&ctx->restart); - r = crypt_convert(cc, &io->ctx, true, false); + r = crypt_convert(cc, &io->ctx, false, false); if (r) io->error = r; crypt_finished = atomic_dec_and_test(&ctx->cc_pending); @@ -2027,10 +2107,8 @@ static void kcryptd_crypt_write_continue(struct work_struct *work) } /* Encryption was already finished, submit io now */ - if (crypt_finished) { + if (crypt_finished) kcryptd_crypt_write_io_submit(io, 0); - io->sector = sector; - } crypt_dec_pending(io); } @@ -2041,14 +2119,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) struct convert_context *ctx = &io->ctx; struct bio *clone; int crypt_finished; - sector_t sector = io->sector; blk_status_t r; /* * Prevent io from disappearing until this function completes. */ crypt_inc_pending(io); - crypt_convert_init(cc, ctx, NULL, io->base_bio, sector); + crypt_convert_init(cc, ctx, NULL, io->base_bio, io->sector); clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); if (unlikely(!clone)) { @@ -2059,7 +2136,11 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) io->ctx.bio_out = clone; io->ctx.iter_out = clone->bi_iter; - sector += bio_sectors(clone); + if (crypt_integrity_aead(cc)) { + bio_copy_data(clone, io->base_bio); + io->ctx.bio_in = clone; + io->ctx.iter_in = clone->bi_iter; + } crypt_inc_pending(io); r = crypt_convert(cc, ctx, @@ -2084,10 +2165,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) } /* Encryption was already finished, submit io now */ - if (crypt_finished) { + if (crypt_finished) kcryptd_crypt_write_io_submit(io, 0); - io->sector = sector; - } dec: crypt_dec_pending(io); @@ -2095,6 +2174,14 @@ dec: static void kcryptd_crypt_read_done(struct dm_crypt_io *io) { + if (io->ctx.aead_recheck) { + if (!io->error) { + io->ctx.bio_in->bi_iter = io->saved_bi_iter; + bio_copy_data(io->base_bio, io->ctx.bio_in); + } + crypt_free_buffer_pages(io->cc, io->ctx.bio_in); + bio_put(io->ctx.bio_in); + } crypt_dec_pending(io); } @@ -2107,7 +2194,7 @@ static void kcryptd_crypt_read_continue(struct work_struct *work) wait_for_completion(&io->ctx.restart); reinit_completion(&io->ctx.restart); - r = crypt_convert(cc, &io->ctx, true, false); + r = crypt_convert(cc, &io->ctx, false, false); if (r) io->error = r; @@ -2124,11 +2211,16 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) crypt_inc_pending(io); - crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, - io->sector); + if (io->ctx.aead_recheck) { + r = crypt_convert(cc, &io->ctx, + test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true); + } else { + crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, + io->sector); - r = crypt_convert(cc, &io->ctx, - test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true); + r = crypt_convert(cc, &io->ctx, + test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true); + } /* * Crypto API backlogged the request, because its queue was full * and we're in softirq context, so continue from a workqueue @@ -2147,10 +2239,9 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) crypt_dec_pending(io); } -static void kcryptd_async_done(struct crypto_async_request *async_req, - int 
error) +static void kcryptd_async_done(void *data, int error) { - struct dm_crypt_request *dmreq = async_req->data; + struct dm_crypt_request *dmreq = data; struct convert_context *ctx = dmreq->ctx; struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); struct crypt_config *cc = io->cc; @@ -2171,10 +2262,13 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, if (error == -EBADMSG) { sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)); - DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", - ctx->bio_in->bi_bdev, s); - dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", - ctx->bio_in, s, 0); + ctx->aead_failed = true; + if (ctx->aead_recheck) { + DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", + ctx->bio_in->bi_bdev, s); + dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", + ctx->bio_in, s, 0); + } io->error = BLK_STS_PROTECTION; } else if (error < 0) io->error = BLK_STS_IOERR; @@ -2211,11 +2305,6 @@ static void kcryptd_crypt(struct work_struct *work) kcryptd_crypt_write_convert(io); } -static void kcryptd_crypt_tasklet(unsigned long work) -{ - kcryptd_crypt((struct work_struct *)work); -} - static void kcryptd_queue_crypt(struct dm_crypt_io *io) { struct crypt_config *cc = io->cc; @@ -2228,13 +2317,13 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io) * it is being executed with irqs disabled. */ if (in_hardirq() || irqs_disabled()) { - tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work); - tasklet_schedule(&io->tasklet); + INIT_WORK(&io->work, kcryptd_crypt); + queue_work(system_bh_wq, &io->work); + return; + } else { + kcryptd_crypt(&io->work); return; } - - kcryptd_crypt(&io->work); - return; } INIT_WORK(&io->work, kcryptd_crypt); @@ -2257,7 +2346,7 @@ static void crypt_free_tfms_aead(struct crypt_config *cc) static void crypt_free_tfms_skcipher(struct crypt_config *cc) { - unsigned i; + unsigned int i; if (!cc->cipher_tfm.tfms) return; @@ -2282,7 +2371,7 @@ static void crypt_free_tfms(struct crypt_config *cc) static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode) { - unsigned i; + unsigned int i; int err; cc->cipher_tfm.tfms = kcalloc(cc->tfms_count, @@ -2340,12 +2429,12 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) return crypt_alloc_tfms_skcipher(cc, ciphermode); } -static unsigned crypt_subkey_size(struct crypt_config *cc) +static unsigned int crypt_subkey_size(struct crypt_config *cc) { return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); } -static unsigned crypt_authenckey_size(struct crypt_config *cc) +static unsigned int crypt_authenckey_size(struct crypt_config *cc) { return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param)); } @@ -2356,7 +2445,7 @@ static unsigned crypt_authenckey_size(struct crypt_config *cc) * This funcion converts cc->key to this special format. 
*/ static void crypt_copy_authenckey(char *p, const void *key, - unsigned enckeylen, unsigned authkeylen) + unsigned int enckeylen, unsigned int authkeylen) { struct crypto_authenc_key_param *param; struct rtattr *rta; @@ -2374,7 +2463,7 @@ static void crypt_copy_authenckey(char *p, const void *key, static int crypt_setkey(struct crypt_config *cc) { - unsigned subkey_size; + unsigned int subkey_size; int err = 0, i, r; /* Ignore extra keys (which are used for IV etc) */ @@ -2487,7 +2576,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string } /* look for next ':' separating key_type from key_description */ - key_desc = strpbrk(key_string, ":"); + key_desc = strchr(key_string, ':'); if (!key_desc || key_desc == key_string || !strlen(key_desc + 1)) return -EINVAL; @@ -2502,7 +2591,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string type = &key_type_encrypted; set_key = set_key_encrypted; } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) && - !strncmp(key_string, "trusted:", key_desc - key_string + 1)) { + !strncmp(key_string, "trusted:", key_desc - key_string + 1)) { type = &key_type_trusted; set_key = set_key_trusted; } else { @@ -2515,35 +2604,31 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string key = request_key(type, key_desc + 1, NULL); if (IS_ERR(key)) { - kfree_sensitive(new_key_string); - return PTR_ERR(key); + ret = PTR_ERR(key); + goto free_new_key_string; } down_read(&key->sem); - ret = set_key(cc, key); - if (ret < 0) { - up_read(&key->sem); - key_put(key); - kfree_sensitive(new_key_string); - return ret; - } - up_read(&key->sem); key_put(key); + if (ret < 0) + goto free_new_key_string; /* clear the flag since following operations may invalidate previously valid key */ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); ret = crypt_setkey(cc); + if (ret) + goto free_new_key_string; - if (!ret) { - set_bit(DM_CRYPT_KEY_VALID, &cc->flags); - kfree_sensitive(cc->key_string); - cc->key_string = new_key_string; - } else - kfree_sensitive(new_key_string); + set_bit(DM_CRYPT_KEY_VALID, &cc->flags); + kfree_sensitive(cc->key_string); + cc->key_string = new_key_string; + return 0; +free_new_key_string: + kfree_sensitive(new_key_string); return ret; } @@ -2702,6 +2787,9 @@ static void crypt_dtr(struct dm_target *ti) if (cc->crypt_queue) destroy_workqueue(cc->crypt_queue); + if (cc->workqueue_id) + ida_free(&workqueue_ida, cc->workqueue_id); + crypt_free_tfms(cc); bioset_exit(&cc->bs); @@ -2826,10 +2914,9 @@ static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api) if (!start || !end || ++start > end) return -EINVAL; - mac_alg = kzalloc(end - start + 1, GFP_KERNEL); + mac_alg = kmemdup_nul(start, end - start, GFP_KERNEL); if (!mac_alg) return -ENOMEM; - strncpy(mac_alg, start, end - start); mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY); kfree(mac_alg); @@ -2837,7 +2924,8 @@ static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api) if (IS_ERR(mac)) return PTR_ERR(mac); - cc->key_mac_size = crypto_ahash_digestsize(mac); + if (!test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags)) + cc->key_mac_size = crypto_ahash_digestsize(mac); crypto_free_ahash(mac); cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL); @@ -2882,7 +2970,7 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key ret = crypt_ctr_auth_cipher(cc, cipher_api); if (ret < 0) { ti->error = "Invalid AEAD cipher spec"; - return -ENOMEM; + return ret; } } @@ 
-3066,7 +3154,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar struct crypt_config *cc = ti->private; struct dm_arg_set as; static const struct dm_arg _args[] = { - {0, 8, "Invalid number of feature args"}, + {0, 9, "Invalid number of feature args"}, }; unsigned int opt_params, val; const char *opt_string, *sval; @@ -3093,6 +3181,8 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar else if (!strcasecmp(opt_string, "same_cpu_crypt")) set_bit(DM_CRYPT_SAME_CPU, &cc->flags); + else if (!strcasecmp(opt_string, "high_priority")) + set_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags); else if (!strcasecmp(opt_string, "submit_from_crypt_cpus")) set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); @@ -3105,11 +3195,11 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar ti->error = "Invalid integrity arguments"; return -EINVAL; } - cc->on_disk_tag_size = val; + cc->used_tag_size = val; sval = strchr(opt_string + strlen("integrity:"), ':') + 1; if (!strcasecmp(sval, "aead")) { set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags); - } else if (strcasecmp(sval, "none")) { + } else if (strcasecmp(sval, "none")) { ti->error = "Unknown integrity profile"; return -EINVAL; } @@ -3117,6 +3207,13 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar cc->cipher_auth = kstrdup(sval, GFP_KERNEL); if (!cc->cipher_auth) return -ENOMEM; + } else if (sscanf(opt_string, "integrity_key_size:%u%c", &val, &dummy) == 1) { + if (!val) { + ti->error = "Invalid integrity_key_size argument"; + return -EINVAL; + } + cc->key_mac_size = val; + set_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags); } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) { if (cc->sector_size < (1 << SECTOR_SHIFT) || cc->sector_size > 4096 || @@ -3162,8 +3259,9 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct crypt_config *cc; const char *devname = dm_table_device_name(ti->table); - int key_size; + int key_size, wq_id; unsigned int align_mask; + unsigned int common_wq_flags; unsigned long long tmpll; int ret; size_t iv_size_padding, additional_req_size; @@ -3250,7 +3348,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->per_bio_data_size = ti->per_io_data_size = ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size, - ARCH_KMALLOC_MINALIGN); + ARCH_DMA_MINALIGN); ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc); if (ret) { @@ -3316,12 +3414,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (ret) goto bad; - cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size; + cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->tuple_size; if (!cc->tag_pool_max_sectors) cc->tag_pool_max_sectors = 1; ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS, - cc->tag_pool_max_sectors * cc->on_disk_tag_size); + cc->tag_pool_max_sectors * cc->tuple_size); if (ret) { ti->error = "Cannot allocate integrity tags mempool"; goto bad; @@ -3330,20 +3428,38 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->tag_pool_max_sectors <<= cc->sector_shift; } + wq_id = ida_alloc_min(&workqueue_ida, 1, GFP_KERNEL); + if (wq_id < 0) { + ti->error = "Couldn't get workqueue id"; + ret = wq_id; + goto bad; + } + cc->workqueue_id = wq_id; + ret = -ENOMEM; - cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname); + common_wq_flags = 
WQ_MEM_RECLAIM | WQ_SYSFS; + if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags)) + common_wq_flags |= WQ_HIGHPRI; + + cc->io_queue = alloc_workqueue("kcryptd_io-%s-%d", common_wq_flags, 1, devname, wq_id); if (!cc->io_queue) { ti->error = "Couldn't create kcryptd io queue"; goto bad; } - if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) - cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, - 1, devname); - else - cc->crypt_queue = alloc_workqueue("kcryptd/%s", - WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, - num_online_cpus(), devname); + if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) { + cc->crypt_queue = alloc_workqueue("kcryptd-%s-%d", + common_wq_flags | WQ_CPU_INTENSIVE, + 1, devname, wq_id); + } else { + /* + * While crypt_queue is certainly CPU intensive, the use of + * WQ_CPU_INTENSIVE is meaningless with WQ_UNBOUND. + */ + cc->crypt_queue = alloc_workqueue("kcryptd-%s-%d", + common_wq_flags | WQ_UNBOUND, + num_online_cpus(), devname, wq_id); + } if (!cc->crypt_queue) { ti->error = "Couldn't create kcryptd queue"; goto bad; @@ -3359,6 +3475,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->error = "Couldn't spawn write thread"; goto bad; } + if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags)) + set_user_nice(cc->write_thread, MIN_NICE); ti->num_flush_bios = 1; ti->limit_swap_bios = true; @@ -3377,6 +3495,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) { struct dm_crypt_io *io; struct crypt_config *cc = ti->private; + unsigned max_sectors; /* * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues. @@ -3395,9 +3514,9 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) /* * Check if bio is too large, split as needed. */ - if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) && - (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size)) - dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT)); + max_sectors = get_max_request_sectors(ti, bio); + if (unlikely(bio_sectors(bio) > max_sectors)) + dm_accept_partial_bio(bio, max_sectors); /* * Ensure that bio is a multiple of internal sector encryption size @@ -3412,12 +3531,15 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) io = dm_per_bio_data(bio, cc->per_bio_data_size); crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); - if (cc->on_disk_tag_size) { - unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift); + if (cc->tuple_size) { + unsigned int tag_len = cc->tuple_size * (bio_sectors(bio) >> cc->sector_shift); + + if (unlikely(tag_len > KMALLOC_MAX_SIZE)) + io->integrity_metadata = NULL; + else + io->integrity_metadata = kmalloc(tag_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); - if (unlikely(tag_len > KMALLOC_MAX_SIZE) || - unlikely(!(io->integrity_metadata = kmalloc(tag_len, - GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) { + if (unlikely(!io->integrity_metadata)) { if (bio_sectors(bio) > cc->tag_pool_max_sectors) dm_accept_partial_bio(bio, cc->tag_pool_max_sectors); io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO); @@ -3441,14 +3563,14 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) static char hex2asc(unsigned char c) { - return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27); + return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27); } static void crypt_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) + unsigned int status_flags, 
char *result, unsigned int maxlen) { struct crypt_config *cc = ti->private; - unsigned i, sz = 0; + unsigned int i, sz = 0; int num_feature_args = 0; switch (type) { @@ -3476,31 +3598,36 @@ static void crypt_status(struct dm_target *ti, status_type_t type, num_feature_args += !!ti->num_discard_bios; num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags); + num_feature_args += test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags); num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags); num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); + num_feature_args += !!cc->used_tag_size; num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT); num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); - if (cc->on_disk_tag_size) - num_feature_args++; + num_feature_args += test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags); if (num_feature_args) { DMEMIT(" %d", num_feature_args); if (ti->num_discard_bios) DMEMIT(" allow_discards"); if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) DMEMIT(" same_cpu_crypt"); + if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags)) + DMEMIT(" high_priority"); if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) DMEMIT(" submit_from_crypt_cpus"); if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) DMEMIT(" no_read_workqueue"); if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) DMEMIT(" no_write_workqueue"); - if (cc->on_disk_tag_size) - DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth); + if (cc->used_tag_size) + DMEMIT(" integrity:%u:%s", cc->used_tag_size, cc->cipher_auth); if (cc->sector_size != (1 << SECTOR_SHIFT)) DMEMIT(" sector_size:%d", cc->sector_size); if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) DMEMIT(" iv_large_sectors"); + if (test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags)) + DMEMIT(" integrity_key_size:%u", cc->key_mac_size); } break; @@ -3508,6 +3635,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type, DMEMIT_TARGET_NAME_VERSION(ti->type); DMEMIT(",allow_discards=%c", ti->num_discard_bios ? 'y' : 'n'); DMEMIT(",same_cpu_crypt=%c", test_bit(DM_CRYPT_SAME_CPU, &cc->flags) ? 'y' : 'n'); + DMEMIT(",high_priority=%c", test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags) ? 'y' : 'n'); DMEMIT(",submit_from_crypt_cpus=%c", test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags) ? 'y' : 'n'); DMEMIT(",no_read_workqueue=%c", test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags) ? @@ -3517,9 +3645,9 @@ static void crypt_status(struct dm_target *ti, status_type_t type, DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ? 
'y' : 'n'); - if (cc->on_disk_tag_size) + if (cc->used_tag_size) DMEMIT(",integrity_tag_size=%u,cipher_auth=%s", - cc->on_disk_tag_size, cc->cipher_auth); + cc->used_tag_size, cc->cipher_auth); if (cc->sector_size != (1 << SECTOR_SHIFT)) DMEMIT(",sector_size=%d", cc->sector_size); if (cc->cipher_string) @@ -3564,8 +3692,8 @@ static void crypt_resume(struct dm_target *ti) * key set <key> * key wipe */ -static int crypt_message(struct dm_target *ti, unsigned argc, char **argv, - char *result, unsigned maxlen) +static int crypt_message(struct dm_target *ti, unsigned int argc, char **argv, + char *result, unsigned int maxlen) { struct crypt_config *cc = ti->private; int key_size, ret = -EINVAL; @@ -3617,25 +3745,28 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct crypt_config *cc = ti->private; - /* - * Unfortunate constraint that is required to avoid the potential - * for exceeding underlying device's max_segments limits -- due to - * crypt_alloc_buffer() possibly allocating pages for the encryption - * bio that are not as physically contiguous as the original bio. - */ - limits->max_segment_size = PAGE_SIZE; - limits->logical_block_size = - max_t(unsigned, limits->logical_block_size, cc->sector_size); + max_t(unsigned int, limits->logical_block_size, cc->sector_size); limits->physical_block_size = - max_t(unsigned, limits->physical_block_size, cc->sector_size); - limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size); + max_t(unsigned int, limits->physical_block_size, cc->sector_size); + limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size); limits->dma_alignment = limits->logical_block_size - 1; + + /* + * For zoned dm-crypt targets, there will be no internal splitting of + * write BIOs to avoid exceeding BIO_MAX_VECS vectors per BIO. But + * without respecting this limit, crypt_alloc_buffer() will trigger a + * BUG(). Avoid this by forcing DM core to split write BIOs to this + * limit. + */ + if (ti->emulate_zone_append) + limits->max_hw_sectors = min(limits->max_hw_sectors, + BIO_MAX_VECS << PAGE_SECTORS_SHIFT); } static struct target_type crypt_target = { .name = "crypt", - .version = {1, 24, 0}, + .version = {1, 28, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, @@ -3650,25 +3781,7 @@ static struct target_type crypt_target = { .iterate_devices = crypt_iterate_devices, .io_hints = crypt_io_hints, }; - -static int __init dm_crypt_init(void) -{ - int r; - - r = dm_register_target(&crypt_target); - if (r < 0) - DMERR("register failed %d", r); - - return r; -} - -static void __exit dm_crypt_exit(void) -{ - dm_unregister_target(&crypt_target); -} - -module_init(dm_crypt_init); -module_exit(dm_crypt_exit); +module_dm(crypt); MODULE_AUTHOR("Jana Saout <jana@saout.de>"); MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); |
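The new get_max_request_sectors() helper caps the size of each read and write request: it takes the max_read_size or max_write_size module parameter (or the 131072-byte built-in default when the parameter is 0), clamps writes and tagged reads to what fits in a single bio, rounds the result down to the larger of the logical block size and the configured sector_size, and converts it to 512-byte sectors. The following is a minimal userspace sketch of that arithmetic only; the PAGE_SHIFT, BIO_MAX_VECS and block-size values are illustrative assumptions, not taken from the patch.

#include <stdio.h>

#define SECTOR_SHIFT 9
#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
#define BIO_MAX_VECS 256		/* assumed value of the kernel constant */
#define DM_CRYPT_DEFAULT_MAX_WRITE_SIZE 131072

static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	unsigned int max_write_size = 0;	/* module parameter left at 0 = use default */
	unsigned int logical_block_size = 512;	/* assumed device logical block size */
	unsigned int sector_size = 4096;	/* dm-crypt sector_size option */
	unsigned int val, sector_align;

	val = min_not_zero(max_write_size, DM_CRYPT_DEFAULT_MAX_WRITE_SIZE);
	/* writes (and reads that carry integrity tags) must fit in one bio */
	if (val > (BIO_MAX_VECS << PAGE_SHIFT))
		val = BIO_MAX_VECS << PAGE_SHIFT;
	sector_align = logical_block_size > sector_size ?
		       logical_block_size : sector_size;
	val -= val % sector_align;		/* round_down(val, sector_align) */
	if (!val)
		val = sector_align;
	printf("max write request: %u bytes = %u sectors\n",
	       val, val >> SECTOR_SHIFT);	/* 131072 bytes = 256 sectors */
	return 0;
}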
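The TCW whitening path above drops the crc32 shash transform and calls the crc32() library routine directly. Below is a standalone sketch of the same per-sector whitening fold, using a bitwise CRC-32 written to match crc32_le() semantics (reflected polynomial 0xEDB88320, caller-supplied seed, no pre/post inversion); the helper names and the all-zero test buffer are illustrative only, not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define TCW_WHITENING_SIZE 16

/* Bitwise CRC-32 with crc32_le() semantics: reflected polynomial 0xEDB88320,
 * caller-supplied seed, no pre/post inversion. */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, unsigned int len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

static void put_le32(uint32_t v, uint8_t *p)
{
	p[0] = v;
	p[1] = v >> 8;
	p[2] = v >> 16;
	p[3] = v >> 24;
}

/* Same fold as crypt_iv_tcw_whitening() after the patch: CRC each 32-bit
 * quarter of the 16-byte buffer in place, then XOR the last two quarters
 * into the first two to form the 8-byte whitening value. */
static void tcw_whitening_fold(uint8_t buf[TCW_WHITENING_SIZE])
{
	int i;

	for (i = 0; i < 4; i++)
		put_le32(crc32_le_bitwise(0, &buf[i * 4], 4), &buf[i * 4]);
	for (i = 0; i < 4; i++)
		buf[i] ^= buf[12 + i];		/* crypto_xor(&buf[0], &buf[12], 4) */
	for (i = 0; i < 4; i++)
		buf[4 + i] ^= buf[8 + i];	/* crypto_xor(&buf[4], &buf[8], 4) */
}

int main(void)
{
	/* In dm-crypt this buffer starts as the whitening key XORed with the
	 * sector number; an all-zero buffer here just exercises the fold. */
	uint8_t buf[TCW_WHITENING_SIZE] = { 0 };
	int i;

	tcw_whitening_fold(buf);
	for (i = 0; i < 8; i++)
		printf("%02x", buf[i]);
	printf("\n");
	return 0;
}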
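crypt_alloc_buffer() now tries to cover the clone bio with high-order (compound) pages before falling back to the order-0 mempool, lowering the order whenever the per-client page budget would be exceeded or an allocation fails. The sketch below walks only the order-selection loop for a hypothetical 160 KiB buffer; the MAX_PAGE_ORDER and PAGE_SHIFT values are assumptions for illustration, and the budget check and mempool fallback are elided.

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
#define PAGE_SIZE (1u << PAGE_SHIFT)
#define MAX_PAGE_ORDER 10		/* assumed value of the kernel constant */

/* Highest set bit of a non-zero word, like the kernel's __fls(). */
static unsigned int fls_of(unsigned int v)
{
	return 31 - __builtin_clz(v);
}

int main(void)
{
	unsigned int remaining_size = 160 * 1024;	/* hypothetical clone bio payload */
	unsigned int order = MAX_PAGE_ORDER;

	while (remaining_size) {
		unsigned int remaining_order =
			fls_of((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
		unsigned int size_to_add;

		if (order > remaining_order)	/* order = min(order, remaining_order) */
			order = remaining_order;
		size_to_add = PAGE_SIZE << order;
		if (size_to_add > remaining_size)
			size_to_add = remaining_size;
		printf("add order-%u allocation: %u bytes\n", order, size_to_add);
		remaining_size -= size_to_add;
	}
	/* prints an order-5 (128 KiB) chunk followed by an order-3 (32 KiB) chunk */
	return 0;
}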
