Diffstat (limited to 'drivers/crypto/padlock-aes.c')
| -rw-r--r-- | drivers/crypto/padlock-aes.c | 223 |
1 file changed, 98 insertions, 125 deletions
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 633ba945e153..1be549a07a21 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Cryptographic API.
  *
@@ -9,6 +10,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/padlock.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -16,13 +18,14 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/mm.h>
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <linux/slab.h>
 #include <asm/cpu_device_id.h>
 #include <asm/byteorder.h>
 #include <asm/processor.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 /*
  * Number of data blocks actually fetched for each xcrypt insn.
@@ -96,9 +99,9 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
 	return aes_ctx_common(crypto_tfm_ctx(tfm));
 }
 
-static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm)
 {
-	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+	return aes_ctx_common(crypto_skcipher_ctx(tfm));
 }
 
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
@@ -106,14 +109,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
 	const __le32 *key = (const __le32 *)in_key;
-	u32 *flags = &tfm->crt_flags;
 	struct crypto_aes_ctx gen_aes;
 	int cpu;
 
-	if (key_len % 8) {
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	if (key_len % 8)
 		return -EINVAL;
-	}
 
 	/*
 	 * If the hardware is capable of generating the extended key
@@ -144,10 +144,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	ctx->cword.encrypt.keygen = 1;
 	ctx->cword.decrypt.keygen = 1;
 
-	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	if (aes_expandkey(&gen_aes, in_key, key_len))
 		return -EINVAL;
-	}
 
 	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
 	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
@@ -161,6 +159,12 @@ ok:
 	return 0;
 }
 
+static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
+				unsigned int key_len)
+{
+	return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
 /* ====== Encryption/decryption routines ====== */
 
 /* These are the real call to PadLock. */
@@ -183,8 +187,8 @@ static inline void padlock_store_cword(struct cword *cword)
 
 /*
  * While the padlock instructions don't use FP/SSE registers, they
- * generate a spurious DNA fault when cr0.ts is '1'. These instructions
- * should be used only inside the irq_ts_save/restore() context
+ * generate a spurious DNA fault when CR0.TS is '1'. Fortunately,
+ * the kernel doesn't use CR0.TS.
  */
 
 static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
@@ -238,7 +242,7 @@ static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
 	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
 	 * We could avoid some copying here but it's probably not worth it.
 	 */
-	if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
+	if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
 		ecb_crypt_copy(in, out, key, cword, count);
 		return;
 	}
@@ -250,7 +254,7 @@ static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
 			    u8 *iv, struct cword *cword, int count)
 {
 	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
-	if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
+	if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
 		return cbc_crypt_copy(in, out, key, iv, cword, count);
 
 	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
@@ -266,6 +270,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 		return;
 	}
 
+	count -= initial;
+
 	if (initial)
 		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 			      : "+S"(input), "+D"(output)
@@ -273,7 +279,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(count - initial));
+		      : "d"(control_word), "b"(key), "c"(count));
 }
 
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -284,6 +290,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 	if (count < cbc_fetch_blocks)
 		return cbc_crypt(input, output, key, iv, control_word, count);
 
+	count -= initial;
+
 	if (initial)
 		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
 			      : "+S" (input), "+D" (output), "+a" (iv)
@@ -291,31 +299,25 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
 		      : "+S" (input), "+D" (output), "+a" (iv)
-		      : "d" (control_word), "b" (key), "c" (count-initial));
+		      : "d" (control_word), "b" (key), "c" (count));
 
 	return iv;
 }
 
-static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
-	ts_state = irq_ts_save();
 	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
-	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
 
-static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
-	ts_state = irq_ts_save();
 	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
-	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
 
@@ -333,176 +335,147 @@ static struct crypto_alg aes_alg = {
 			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 			.cia_max_keysize	= AES_MAX_KEY_SIZE,
 			.cia_setkey		= aes_set_key,
-			.cia_encrypt		= aes_encrypt,
-			.cia_decrypt		= aes_decrypt,
+			.cia_encrypt		= padlock_aes_encrypt,
+			.cia_decrypt		= padlock_aes_decrypt,
 		}
 	}
 };
 
-static int ecb_aes_encrypt(struct blkcipher_desc *desc,
-			   struct scatterlist *dst, struct scatterlist *src,
-			   unsigned int nbytes)
+static int ecb_aes_encrypt(struct skcipher_request *req)
 {
-	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	ts_state = irq_ts_save();
-	while ((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes) != 0) {
 		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->E, &ctx->cword.encrypt,
 				   nbytes / AES_BLOCK_SIZE);
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
 	}
-	irq_ts_restore(ts_state);
 
 	padlock_store_cword(&ctx->cword.encrypt);
 
 	return err;
 }
 
-static int ecb_aes_decrypt(struct blkcipher_desc *desc,
-			   struct scatterlist *dst, struct scatterlist *src,
-			   unsigned int nbytes)
+static int ecb_aes_decrypt(struct skcipher_request *req)
 {
-	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.decrypt);
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	ts_state = irq_ts_save();
-	while ((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes) != 0) {
 		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->D, &ctx->cword.decrypt,
 				   nbytes / AES_BLOCK_SIZE);
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
 	}
-	irq_ts_restore(ts_state);
 
 	padlock_store_cword(&ctx->cword.encrypt);
 
 	return err;
 }
 
-static struct crypto_alg ecb_aes_alg = {
-	.cra_name		= "ecb(aes)",
-	.cra_driver_name	= "ecb-aes-padlock",
-	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct aes_ctx),
-	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_u			= {
-		.blkcipher = {
-			.min_keysize	= AES_MIN_KEY_SIZE,
-			.max_keysize	= AES_MAX_KEY_SIZE,
-			.setkey		= aes_set_key,
-			.encrypt	= ecb_aes_encrypt,
-			.decrypt	= ecb_aes_decrypt,
-		}
-	}
+static struct skcipher_alg ecb_aes_alg = {
+	.base.cra_name		= "ecb(aes)",
+	.base.cra_driver_name	= "ecb-aes-padlock",
+	.base.cra_priority	= PADLOCK_COMPOSITE_PRIORITY,
+	.base.cra_blocksize	= AES_BLOCK_SIZE,
+	.base.cra_ctxsize	= sizeof(struct aes_ctx),
+	.base.cra_alignmask	= PADLOCK_ALIGNMENT - 1,
+	.base.cra_module	= THIS_MODULE,
+	.min_keysize		= AES_MIN_KEY_SIZE,
+	.max_keysize		= AES_MAX_KEY_SIZE,
+	.setkey			= aes_set_key_skcipher,
+	.encrypt		= ecb_aes_encrypt,
+	.decrypt		= ecb_aes_decrypt,
 };
 
-static int cbc_aes_encrypt(struct blkcipher_desc *desc,
-			   struct scatterlist *dst, struct scatterlist *src,
-			   unsigned int nbytes)
+static int cbc_aes_encrypt(struct skcipher_request *req)
 {
-	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	ts_state = irq_ts_save();
-	while ((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes) != 0) {
 		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
 					    walk.dst.virt.addr, ctx->E,
 					    walk.iv, &ctx->cword.encrypt,
 					    nbytes / AES_BLOCK_SIZE);
 		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
 	}
-	irq_ts_restore(ts_state);
 
 	padlock_store_cword(&ctx->cword.decrypt);
 
 	return err;
 }
 
-static int cbc_aes_decrypt(struct blkcipher_desc *desc,
-			   struct scatterlist *dst, struct scatterlist *src,
-			   unsigned int nbytes)
+static int cbc_aes_decrypt(struct skcipher_request *req)
 {
-	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	ts_state = irq_ts_save();
-	while ((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes) != 0) {
 		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->D, walk.iv, &ctx->cword.decrypt,
 				   nbytes / AES_BLOCK_SIZE);
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
 	}
-	irq_ts_restore(ts_state);
-
-	padlock_store_cword(&ctx->cword.encrypt);
 
 	return err;
 }
 
-static struct crypto_alg cbc_aes_alg = {
-	.cra_name		= "cbc(aes)",
-	.cra_driver_name	= "cbc-aes-padlock",
-	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct aes_ctx),
-	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_u			= {
-		.blkcipher = {
-			.min_keysize	= AES_MIN_KEY_SIZE,
-			.max_keysize	= AES_MAX_KEY_SIZE,
-			.ivsize		= AES_BLOCK_SIZE,
-			.setkey		= aes_set_key,
-			.encrypt	= cbc_aes_encrypt,
-			.decrypt	= cbc_aes_decrypt,
-		}
-	}
+static struct skcipher_alg cbc_aes_alg = {
+	.base.cra_name		= "cbc(aes)",
+	.base.cra_driver_name	= "cbc-aes-padlock",
+	.base.cra_priority	= PADLOCK_COMPOSITE_PRIORITY,
+	.base.cra_blocksize	= AES_BLOCK_SIZE,
+	.base.cra_ctxsize	= sizeof(struct aes_ctx),
+	.base.cra_alignmask	= PADLOCK_ALIGNMENT - 1,
+	.base.cra_module	= THIS_MODULE,
+	.min_keysize		= AES_MIN_KEY_SIZE,
+	.max_keysize		= AES_MAX_KEY_SIZE,
+	.ivsize			= AES_BLOCK_SIZE,
+	.setkey			= aes_set_key_skcipher,
+	.encrypt		= cbc_aes_encrypt,
+	.decrypt		= cbc_aes_decrypt,
 };
 
-static struct x86_cpu_id padlock_cpu_id[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
+static const struct x86_cpu_id padlock_cpu_id[] = {
+	X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);
@@ -515,23 +488,23 @@ static int __init padlock_init(void)
 	if (!x86_match_cpu(padlock_cpu_id))
 		return -ENODEV;
 
-	if (!cpu_has_xcrypt_enabled) {
+	if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
 		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
 		return -ENODEV;
 	}
 
-	if ((ret = crypto_register_alg(&aes_alg)))
+	if ((ret = crypto_register_alg(&aes_alg)) != 0)
 		goto aes_err;
 
-	if ((ret = crypto_register_alg(&ecb_aes_alg)))
+	if ((ret = crypto_register_skcipher(&ecb_aes_alg)) != 0)
 		goto ecb_aes_err;
 
-	if ((ret = crypto_register_alg(&cbc_aes_alg)))
+	if ((ret = crypto_register_skcipher(&cbc_aes_alg)) != 0)
 		goto cbc_aes_err;
 
 	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
-	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+	if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
 		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
 		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
 		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
@@ -541,7 +514,7 @@ out:
 	return ret;
 
 cbc_aes_err:
-	crypto_unregister_alg(&ecb_aes_alg);
+	crypto_unregister_skcipher(&ecb_aes_alg);
 ecb_aes_err:
 	crypto_unregister_alg(&aes_alg);
 aes_err:
@@ -551,8 +524,8 @@ aes_err:
 
 static void __exit padlock_fini(void)
 {
-	crypto_unregister_alg(&cbc_aes_alg);
-	crypto_unregister_alg(&ecb_aes_alg);
+	crypto_unregister_skcipher(&cbc_aes_alg);
+	crypto_unregister_skcipher(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);
 }
 
@@ -563,4 +536,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
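With this conversion, callers reach "ecb-aes-padlock" and "cbc-aes-padlock" through the generic skcipher interface rather than the removed blkcipher one. Below is a minimal sketch of a kernel-side user, assuming a kmalloc()'d buffer whose length is a whole number of AES blocks (as CBC requires). The example_cbc_encrypt() wrapper and its parameters are invented for illustration; the crypto_* and skcipher_* calls are the standard kernel crypto API.

/* Hypothetical caller, shown only to illustrate the skcipher API. */
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_cbc_encrypt(u8 *buf, unsigned int len,
			       const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Resolves to the highest-priority "cbc(aes)" provider,
	 * e.g. cbc-aes-padlock when this driver is loaded. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* Encrypt in place; len must be a multiple of AES_BLOCK_SIZE. */
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* crypto_wait_req() also copes with asynchronous providers. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

Which driver actually backs "cbc(aes)" on a given machine can be checked in /proc/crypto, where each implementation is listed with its driver name and priority (PADLOCK_COMPOSITE_PRIORITY here).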
