Diffstat (limited to 'crypto/lrw.c')
-rw-r--r-- | crypto/lrw.c | 547
1 file changed, 287 insertions(+), 260 deletions(-)
diff --git a/crypto/lrw.c b/crypto/lrw.c
index ba42acc4deba..dd403b800513 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* LRW: as defined by Cyril Guyot in
  * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
  *
@@ -5,19 +6,15 @@
  *
  * Based on ecb.c
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  */
 /* This implementation is checked against the test vectors in the above
  * document and by a test vector provided by Ken Buchanan at
- * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
+ * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
  *
  * The test vectors are included in the testing module tcrypt.[ch]
  */
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -27,14 +24,37 @@
 
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
-#include <crypto/lrw.h>
 
-struct priv {
-	struct crypto_cipher *child;
-	struct lrw_table_ctx table;
+#define LRW_BLOCK_SIZE 16
+
+struct lrw_tfm_ctx {
+	struct crypto_skcipher *child;
+
+	/*
+	 * optimizes multiplying a random (non-incrementing, as at the
+	 * start of a new sector) value with key2, we could also have
+	 * used 4k optimization tables or no optimization at all. In the
+	 * latter case we would have to store key2 here
+	 */
+	struct gf128mul_64k *table;
+
+	/*
+	 * stores:
+	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
+	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
+	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
+	 * needed for optimized multiplication of incrementing values
+	 * with key2
+	 */
+	be128 mulinc[128];
+};
+
+struct lrw_request_ctx {
+	be128 t;
+	struct skcipher_request subreq;
 };
 
-static inline void setbit128_bbe(void *b, int bit)
+static inline void lrw_setbit128_bbe(void *b, int bit)
 {
 	__set_bit(bit ^ (0x80 -
 #ifdef __BIG_ENDIAN
@@ -45,11 +65,23 @@ static inline void setbit128_bbe(void *b, int bit)
 			 ), b);
 }
 
-int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
+static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key,
+		      unsigned int keylen)
 {
+	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
+	struct crypto_skcipher *child = ctx->child;
+	int err, bsize = LRW_BLOCK_SIZE;
+	const u8 *tweak = key + keylen - bsize;
 	be128 tmp = { 0 };
 	int i;
 
+	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+					 CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(child, key, keylen - bsize);
+	if (err)
+		return err;
+
 	if (ctx->table)
 		gf128mul_free_64k(ctx->table);
 
@@ -60,343 +92,338 @@ int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
 
 	/* initialize optimization table */
 	for (i = 0; i < 128; i++) {
-		setbit128_bbe(&tmp, i);
+		lrw_setbit128_bbe(&tmp, i);
 		ctx->mulinc[i] = tmp;
 		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(lrw_init_table);
-
-void lrw_free_table(struct lrw_table_ctx *ctx)
-{
-	if (ctx->table)
-		gf128mul_free_64k(ctx->table);
-}
-EXPORT_SYMBOL_GPL(lrw_free_table);
 
-static int setkey(struct crypto_tfm *parent, const u8 *key,
-		  unsigned int keylen)
+/*
+ * Returns the number of trailing '1' bits in the words of the counter, which is
+ * represented by 4 32-bit words, arranged from least to most significant.
+ * At the same time, increments the counter by one.
+ *
+ * For example:
+ *
+ * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
+ * int i = lrw_next_index(&counter);
+ * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
+ */
+static int lrw_next_index(u32 *counter)
 {
-	struct priv *ctx = crypto_tfm_ctx(parent);
-	struct crypto_cipher *child = ctx->child;
-	int err, bsize = LRW_BLOCK_SIZE;
-	const u8 *tweak = key + keylen - bsize;
-
-	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
-				       CRYPTO_TFM_REQ_MASK);
-	err = crypto_cipher_setkey(child, key, keylen - bsize);
-	if (err)
-		return err;
-	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
-				     CRYPTO_TFM_RES_MASK);
-
-	return lrw_init_table(&ctx->table, tweak);
-}
+	int i, res = 0;
 
-struct sinfo {
-	be128 t;
-	struct crypto_tfm *tfm;
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
-};
+	for (i = 0; i < 4; i++) {
+		if (counter[i] + 1 != 0)
+			return res + ffz(counter[i]++);
 
-static inline void inc(be128 *iv)
-{
-	be64_add_cpu(&iv->b, 1);
-	if (!iv->b)
-		be64_add_cpu(&iv->a, 1);
-}
+		counter[i] = 0;
+		res += 32;
+	}
 
-static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
-{
-	be128_xor(dst, &s->t, src);		/* PP <- T xor P */
-	s->fn(s->tfm, dst, dst);		/* CC <- E(Key2,PP) */
-	be128_xor(dst, dst, &s->t);		/* C <- T xor CC */
+	/*
+	 * If we get here, then x == 128 and we are incrementing the counter
+	 * from all ones to all zeros. This means we must return index 127, i.e.
+	 * the one corresponding to key2*{ 1,...,1 }.
+	 */
+	return 127;
 }
 
-/* this returns the number of consequative 1 bits starting
- * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
-static inline int get_index128(be128 *block)
+/*
+ * We compute the tweak masks twice (both before and after the ECB encryption or
+ * decryption) to avoid having to allocate a temporary buffer and/or make
+ * multiple calls to the 'ecb(..)' instance, which usually would be slower than
+ * just doing the lrw_next_index() calls again.
+ */
+static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
 {
-	int x;
-	__be32 *p = (__be32 *) block;
-
-	for (p += 3, x = 0; x < 128; p--, x += 32) {
-		u32 val = be32_to_cpup(p);
-
-		if (!~val)
-			continue;
+	const int bs = LRW_BLOCK_SIZE;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
+	be128 t = rctx->t;
+	struct skcipher_walk w;
+	__be32 *iv;
+	u32 counter[4];
+	int err;
 
-		return x + ffz(val);
+	if (second_pass) {
+		req = &rctx->subreq;
+		/* set to our TFM to enforce correct alignment: */
+		skcipher_request_set_tfm(req, tfm);
 	}
 
-	return x;
-}
-
-static int crypt(struct blkcipher_desc *d,
-		 struct blkcipher_walk *w, struct priv *ctx,
-		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
-{
-	int err;
-	unsigned int avail;
-	const int bs = LRW_BLOCK_SIZE;
-	struct sinfo s = {
-		.tfm = crypto_cipher_tfm(ctx->child),
-		.fn = fn
-	};
-	be128 *iv;
-	u8 *wsrc;
-	u8 *wdst;
-
-	err = blkcipher_walk_virt(d, w);
-	if (!(avail = w->nbytes))
+	err = skcipher_walk_virt(&w, req, false);
+	if (err)
 		return err;
 
-	wsrc = w->src.virt.addr;
-	wdst = w->dst.virt.addr;
-
-	/* calculate first value of T */
-	iv = (be128 *)w->iv;
-	s.t = *iv;
+	iv = (__be32 *)w.iv;
+	counter[0] = be32_to_cpu(iv[3]);
+	counter[1] = be32_to_cpu(iv[2]);
+	counter[2] = be32_to_cpu(iv[1]);
+	counter[3] = be32_to_cpu(iv[0]);
 
-	/* T <- I*Key2 */
-	gf128mul_64k_bbe(&s.t, ctx->table.table);
+	while (w.nbytes) {
+		unsigned int avail = w.nbytes;
+		const be128 *wsrc;
+		be128 *wdst;
 
-	goto first;
+		wsrc = w.src.virt.addr;
+		wdst = w.dst.virt.addr;
 
-	for (;;) {
 		do {
+			be128_xor(wdst++, &t, wsrc++);
+
 			/* T <- I*Key2, using the optimization
 			 * discussed in the specification */
-			be128_xor(&s.t, &s.t,
-				  &ctx->table.mulinc[get_index128(iv)]);
-			inc(iv);
-
-first:
-			lrw_round(&s, wdst, wsrc);
-
-			wsrc += bs;
-			wdst += bs;
+			be128_xor(&t, &t,
+				  &ctx->mulinc[lrw_next_index(counter)]);
 		} while ((avail -= bs) >= bs);
 
-		err = blkcipher_walk_done(d, w, avail);
-		if (!(avail = w->nbytes))
-			break;
+		if (second_pass && w.nbytes == w.total) {
+			iv[0] = cpu_to_be32(counter[3]);
+			iv[1] = cpu_to_be32(counter[2]);
+			iv[2] = cpu_to_be32(counter[1]);
+			iv[3] = cpu_to_be32(counter[0]);
+		}
 
-		wsrc = w->src.virt.addr;
-		wdst = w->dst.virt.addr;
+		err = skcipher_walk_done(&w, avail);
 	}
 
 	return err;
 }
 
-static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		   struct scatterlist *src, unsigned int nbytes)
+static int lrw_xor_tweak_pre(struct skcipher_request *req)
 {
-	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk w;
+	return lrw_xor_tweak(req, false);
+}
 
-	blkcipher_walk_init(&w, dst, src, nbytes);
-	return crypt(desc, &w, ctx,
-		     crypto_cipher_alg(ctx->child)->cia_encrypt);
+static int lrw_xor_tweak_post(struct skcipher_request *req)
+{
+	return lrw_xor_tweak(req, true);
 }
 
-static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		   struct scatterlist *src, unsigned int nbytes)
+static void lrw_crypt_done(void *data, int err)
 {
-	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk w;
+	struct skcipher_request *req = data;
+
+	if (!err) {
+		struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
+
+		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+		err = lrw_xor_tweak_post(req);
+	}
 
-	blkcipher_walk_init(&w, dst, src, nbytes);
-	return crypt(desc, &w, ctx,
-		     crypto_cipher_alg(ctx->child)->cia_decrypt);
+	skcipher_request_complete(req, err);
 }
 
-int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
-	      struct scatterlist *ssrc, unsigned int nbytes,
-	      struct lrw_crypt_req *req)
+static void lrw_init_crypt(struct skcipher_request *req)
 {
-	const unsigned int bsize = LRW_BLOCK_SIZE;
-	const unsigned int max_blks = req->tbuflen / bsize;
-	struct lrw_table_ctx *ctx = req->table_ctx;
-	struct blkcipher_walk walk;
-	unsigned int nblocks;
-	be128 *iv, *src, *dst, *t;
-	be128 *t_buf = req->tbuf;
-	int err, i;
-
-	BUG_ON(max_blks < 1);
-
-	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
-
-	err = blkcipher_walk_virt(desc, &walk);
-	nbytes = walk.nbytes;
-	if (!nbytes)
-		return err;
-
-	nblocks = min(walk.nbytes / bsize, max_blks);
-	src = (be128 *)walk.src.virt.addr;
-	dst = (be128 *)walk.dst.virt.addr;
+	const struct lrw_tfm_ctx *ctx =
+		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
+
+	skcipher_request_set_tfm(subreq, ctx->child);
+	skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done,
+				      req);
+	/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
+	skcipher_request_set_crypt(subreq, req->dst, req->dst,
+				   req->cryptlen, req->iv);
 
 	/* calculate first value of T */
-	iv = (be128 *)walk.iv;
-	t_buf[0] = *iv;
+	memcpy(&rctx->t, req->iv, sizeof(rctx->t));
 
 	/* T <- I*Key2 */
-	gf128mul_64k_bbe(&t_buf[0], ctx->table);
+	gf128mul_64k_bbe(&rctx->t, ctx->table);
+}
 
-	i = 0;
-	goto first;
+static int lrw_encrypt(struct skcipher_request *req)
+{
+	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
 
-	for (;;) {
-		do {
-			for (i = 0; i < nblocks; i++) {
-				/* T <- I*Key2, using the optimization
-				 * discussed in the specification */
-				be128_xor(&t_buf[i], t,
-					  &ctx->mulinc[get_index128(iv)]);
-				inc(iv);
-first:
-				t = &t_buf[i];
-
-				/* PP <- T xor P */
-				be128_xor(dst + i, t, src + i);
-			}
-
-			/* CC <- E(Key2,PP) */
-			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
-				      nblocks * bsize);
-
-			/* C <- T xor CC */
-			for (i = 0; i < nblocks; i++)
-				be128_xor(dst + i, dst + i, &t_buf[i]);
-
-			src += nblocks;
-			dst += nblocks;
-			nbytes -= nblocks * bsize;
-			nblocks = min(nbytes / bsize, max_blks);
-		} while (nblocks > 0);
-
-		err = blkcipher_walk_done(desc, &walk, nbytes);
-		nbytes = walk.nbytes;
-		if (!nbytes)
-			break;
-
-		nblocks = min(nbytes / bsize, max_blks);
-		src = (be128 *)walk.src.virt.addr;
-		dst = (be128 *)walk.dst.virt.addr;
-	}
+	lrw_init_crypt(req);
+	return lrw_xor_tweak_pre(req) ?:
+		crypto_skcipher_encrypt(subreq) ?:
+		lrw_xor_tweak_post(req);
+}
 
-	return err;
+static int lrw_decrypt(struct skcipher_request *req)
+{
+	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
+
+	lrw_init_crypt(req);
+	return lrw_xor_tweak_pre(req) ?:
+		crypto_skcipher_decrypt(subreq) ?:
+		lrw_xor_tweak_post(req);
 }
-EXPORT_SYMBOL_GPL(lrw_crypt);
 
-static int init_tfm(struct crypto_tfm *tfm)
+static int lrw_init_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_cipher *cipher;
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-	struct priv *ctx = crypto_tfm_ctx(tfm);
-	u32 *flags = &tfm->crt_flags;
+	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
+	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *cipher;
 
-	cipher = crypto_spawn_cipher(spawn);
+	cipher = crypto_spawn_skcipher(spawn);
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
-	if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
-		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
-		crypto_free_cipher(cipher);
-		return -EINVAL;
-	}
-
 	ctx->child = cipher;
+
+	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
+					 sizeof(struct lrw_request_ctx));
+
 	return 0;
 }
 
-static void exit_tfm(struct crypto_tfm *tfm)
+static void lrw_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct priv *ctx = crypto_tfm_ctx(tfm);
+	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	lrw_free_table(&ctx->table);
-	crypto_free_cipher(ctx->child);
+	if (ctx->table)
+		gf128mul_free_64k(ctx->table);
+	crypto_free_skcipher(ctx->child);
+}
+
+static void lrw_free_instance(struct skcipher_instance *inst)
+{
+	crypto_drop_skcipher(skcipher_instance_ctx(inst));
+	kfree(inst);
 }
 
-static struct crypto_instance *alloc(struct rtattr **tb)
+static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	struct crypto_instance *inst;
-	struct crypto_alg *alg;
+	struct crypto_skcipher_spawn *spawn;
+	struct skcipher_alg_common *alg;
+	struct skcipher_instance *inst;
+	const char *cipher_name;
+	char ecb_name[CRYPTO_MAX_ALG_NAME];
+	u32 mask;
 	int err;
 
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
-	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
-				  CRYPTO_ALG_TYPE_MASK);
-	if (IS_ERR(alg))
-		return ERR_CAST(alg);
+	cipher_name = crypto_attr_alg_name(tb[1]);
+	if (IS_ERR(cipher_name))
+		return PTR_ERR(cipher_name);
 
-	inst = crypto_alloc_instance("lrw", alg);
-	if (IS_ERR(inst))
-		goto out_put_alg;
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = alg->cra_blocksize;
+	spawn = skcipher_instance_ctx(inst);
 
-	if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7;
-	else inst->alg.cra_alignmask = alg->cra_alignmask;
-	inst->alg.cra_type = &crypto_blkcipher_type;
+	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
+				   cipher_name, 0, mask);
+	if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) {
+		err = -ENAMETOOLONG;
+		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
+			goto err_free_inst;
 
-	if (!(alg->cra_blocksize % 4))
-		inst->alg.cra_alignmask |= 3;
-	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
-	inst->alg.cra_blkcipher.min_keysize =
-		alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
-	inst->alg.cra_blkcipher.max_keysize =
-		alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;
+		err = crypto_grab_skcipher(spawn,
+					   skcipher_crypto_instance(inst),
+					   ecb_name, 0, mask);
+	}
 
-	inst->alg.cra_ctxsize = sizeof(struct priv);
+	if (err)
+		goto err_free_inst;
 
-	inst->alg.cra_init = init_tfm;
-	inst->alg.cra_exit = exit_tfm;
+	alg = crypto_spawn_skcipher_alg_common(spawn);
 
-	inst->alg.cra_blkcipher.setkey = setkey;
-	inst->alg.cra_blkcipher.encrypt = encrypt;
-	inst->alg.cra_blkcipher.decrypt = decrypt;
+	err = -EINVAL;
+	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
+		goto err_free_inst;
 
-out_put_alg:
-	crypto_mod_put(alg);
-	return inst;
-}
+	if (alg->ivsize)
+		goto err_free_inst;
 
-static void free(struct crypto_instance *inst)
-{
-	crypto_drop_spawn(crypto_instance_ctx(inst));
-	kfree(inst);
+	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
+				  &alg->base);
+	if (err)
+		goto err_free_inst;
+
+	err = -EINVAL;
+	cipher_name = alg->base.cra_name;
+
+	/* Alas we screwed up the naming so we have to mangle the
+	 * cipher name.
+	 */
+	if (!memcmp(cipher_name, "ecb(", 4)) {
+		int len;
+
+		len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
+		if (len < 2)
+			goto err_free_inst;
+
+		if (ecb_name[len - 1] != ')')
+			goto err_free_inst;
+
+		ecb_name[len - 1] = 0;
+
+		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
+			err = -ENAMETOOLONG;
+			goto err_free_inst;
+		}
+	} else
+		goto err_free_inst;
+
+	inst->alg.base.cra_priority = alg->base.cra_priority;
+	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
+	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
+				       (__alignof__(be128) - 1);
+
+	inst->alg.ivsize = LRW_BLOCK_SIZE;
+	inst->alg.min_keysize = alg->min_keysize + LRW_BLOCK_SIZE;
+	inst->alg.max_keysize = alg->max_keysize + LRW_BLOCK_SIZE;
+
+	inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);
+
+	inst->alg.init = lrw_init_tfm;
+	inst->alg.exit = lrw_exit_tfm;
+
+	inst->alg.setkey = lrw_setkey;
+	inst->alg.encrypt = lrw_encrypt;
+	inst->alg.decrypt = lrw_decrypt;
+
+	inst->free = lrw_free_instance;
+
+	err = skcipher_register_instance(tmpl, inst);
+	if (err) {
+err_free_inst:
+		lrw_free_instance(inst);
+	}
+	return err;
 }
 
-static struct crypto_template crypto_tmpl = {
+static struct crypto_template lrw_tmpl = {
 	.name = "lrw",
-	.alloc = alloc,
-	.free = free,
+	.create = lrw_create,
 	.module = THIS_MODULE,
 };
 
-static int __init crypto_module_init(void)
+static int __init lrw_module_init(void)
 {
-	return crypto_register_template(&crypto_tmpl);
+	return crypto_register_template(&lrw_tmpl);
 }
 
-static void __exit crypto_module_exit(void)
+static void __exit lrw_module_exit(void)
 {
-	crypto_unregister_template(&crypto_tmpl);
+	crypto_unregister_template(&lrw_tmpl);
 }
 
-module_init(crypto_module_init);
-module_exit(crypto_module_exit);
+module_init(lrw_module_init);
+module_exit(lrw_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("LRW block cipher mode");
+MODULE_ALIAS_CRYPTO("lrw");
+MODULE_SOFTDEP("pre: ecb");
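Editor's note: lrw_next_index() above returns the number of trailing '1' bits of the 128-bit block index and increments the index at the same time; that count selects which precomputed mulinc[] entry is XORed into the tweak T when stepping to the next block. The following is a minimal userspace sketch of the same counter logic, not kernel code: ffz32() stands in for the kernel's ffz() (implemented here with the GCC/Clang __builtin_ctz), and main() just replays the example from the comment in the patch.

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's ffz(): index of the lowest clear bit */
static int ffz32(uint32_t x)
{
	return __builtin_ctz(~x);
}

/* same logic as lrw_next_index(): count trailing ones, then increment */
static int lrw_next_index(uint32_t *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0) {
			int bit = ffz32(counter[i]);

			counter[i]++;
			return res + bit;
		}
		counter[i] = 0;		/* word was all ones: carry into the next word */
		res += 32;
	}
	/* counter wrapped around from all ones to all zeros */
	return 127;
}

int main(void)
{
	uint32_t counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
	int i = lrw_next_index(counter);

	/* expected, as in the patch comment: i == 33, counter == { 0x0, 0x2, 0x0, 0x0 } */
	printf("i = %d, counter = { %#x, %#x, %#x, %#x }\n",
	       i, counter[0], counter[1], counter[2], counter[3]);
	return 0;
}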

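Editor's note: for reference, here is a hedged userspace sketch of how the resulting "lrw(aes)" instance can be exercised through the AF_ALG socket interface (requires CONFIG_CRYPTO_USER_API_SKCIPHER). The key layout mirrors lrw_setkey(): the last 16 bytes are the tweak key (key2) and the remainder is the AES key; the IV is the 16-byte big-endian index of the first block. The key, plaintext, and block index below are arbitrary demo values, and error handling is omitted for brevity.

#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "lrw(aes)",
	};
	/* 48 bytes: 32-byte AES-256 key (key1) followed by the 16-byte tweak key (key2) */
	unsigned char key[48] = { 0 };
	/* IV = big-endian index of the first block (here: block 1) */
	unsigned char iv[16] = { [15] = 1 };
	unsigned char pt[16] = "0123456789abcde";
	unsigned char ct[16];
	char cbuf[CMSG_SPACE(sizeof(__u32)) +
		  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + sizeof(iv))] = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	struct msghdr msg = {
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
		.msg_iov = &iov,
		.msg_iovlen = 1,
	};
	struct cmsghdr *cmsg;
	struct af_alg_iv *aiv;
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	/* first cmsg: select the operation (encrypt) */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* second cmsg: pass the 16-byte block index as the IV */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + sizeof(iv));
	aiv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	aiv->ivlen = sizeof(iv);
	memcpy(aiv->iv, iv, sizeof(iv));

	sendmsg(opfd, &msg, 0);
	read(opfd, ct, sizeof(ct));

	for (i = 0; i < 16; i++)
		printf("%02x", ct[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}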