Diffstat (limited to 'crypto')
-rw-r--r--  crypto/842.c                   81
-rw-r--r--  crypto/Kconfig                 32
-rw-r--r--  crypto/Makefile                 6
-rw-r--r--  crypto/acompress.c            169
-rw-r--r--  crypto/algboss.c                8
-rw-r--r--  crypto/algif_aead.c             5
-rw-r--r--  crypto/algif_skcipher.c         4
-rw-r--r--  crypto/api.c                   22
-rw-r--r--  crypto/authenc.c                8
-rw-r--r--  crypto/authencesn.c             8
-rw-r--r--  crypto/cbc.c                  269
-rw-r--r--  crypto/ccm.c                    8
-rw-r--r--  crypto/chacha20poly1305.c       8
-rw-r--r--  crypto/cipher.c                 4
-rw-r--r--  crypto/cmac.c                  14
-rw-r--r--  crypto/compress.c               4
-rw-r--r--  crypto/cryptd.c               286
-rw-r--r--  crypto/crypto_engine.c         26
-rw-r--r--  crypto/crypto_user.c           19
-rw-r--r--  crypto/ctr.c                    8
-rw-r--r--  crypto/cts.c                    8
-rw-r--r--  crypto/deflate.c              111
-rw-r--r--  crypto/dh.c                     2
-rw-r--r--  crypto/drbg.c                   1
-rw-r--r--  crypto/gcm.c                   10
-rw-r--r--  crypto/gf128mul.c              59
-rw-r--r--  crypto/internal.h               3
-rw-r--r--  crypto/jitterentropy-kcapi.c    1
-rw-r--r--  crypto/lrw.c                  507
-rw-r--r--  crypto/lz4.c                   91
-rw-r--r--  crypto/lz4hc.c                 92
-rw-r--r--  crypto/lzo.c                   97
-rw-r--r--  crypto/pcbc.c                 201
-rw-r--r--  crypto/poly1305_generic.c      34
-rw-r--r--  crypto/scompress.c            356
-rw-r--r--  crypto/simd.c                 226
-rw-r--r--  crypto/skcipher.c             540
-rw-r--r--  crypto/testmgr.c              318
-rw-r--r--  crypto/testmgr.h               70
-rw-r--r--  crypto/xts.c                  547
40 files changed, 3300 insertions, 963 deletions
diff --git a/crypto/842.c b/crypto/842.c
index 98e387efb8c8..bc26dc942821 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -31,11 +31,46 @@
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/sw842.h>
+#include <crypto/internal/scompress.h>
struct crypto842_ctx {
- char wmem[SW842_MEM_COMPRESS]; /* working memory for compress */
+ void *wmem; /* working memory for compress */
};
+static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
+{
+ void *ctx;
+
+ ctx = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ return ctx;
+}
+
+static int crypto842_init(struct crypto_tfm *tfm)
+{
+ struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->wmem = crypto842_alloc_ctx(NULL);
+ if (IS_ERR(ctx->wmem))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+ kfree(ctx);
+}
+
+static void crypto842_exit(struct crypto_tfm *tfm)
+{
+ struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto842_free_ctx(NULL, ctx->wmem);
+}
+
static int crypto842_compress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
@@ -45,6 +80,13 @@ static int crypto842_compress(struct crypto_tfm *tfm,
return sw842_compress(src, slen, dst, dlen, ctx->wmem);
}
+static int crypto842_scompress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
+{
+ return sw842_compress(src, slen, dst, dlen, ctx);
+}
+
static int crypto842_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
@@ -52,6 +94,13 @@ static int crypto842_decompress(struct crypto_tfm *tfm,
return sw842_decompress(src, slen, dst, dlen);
}
+static int crypto842_sdecompress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
+{
+ return sw842_decompress(src, slen, dst, dlen);
+}
+
static struct crypto_alg alg = {
.cra_name = "842",
.cra_driver_name = "842-generic",
@@ -59,20 +108,48 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct crypto842_ctx),
.cra_module = THIS_MODULE,
+ .cra_init = crypto842_init,
+ .cra_exit = crypto842_exit,
.cra_u = { .compress = {
.coa_compress = crypto842_compress,
.coa_decompress = crypto842_decompress } }
};
+static struct scomp_alg scomp = {
+ .alloc_ctx = crypto842_alloc_ctx,
+ .free_ctx = crypto842_free_ctx,
+ .compress = crypto842_scompress,
+ .decompress = crypto842_sdecompress,
+ .base = {
+ .cra_name = "842",
+ .cra_driver_name = "842-scomp",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ }
+};
+
static int __init crypto842_mod_init(void)
{
- return crypto_register_alg(&alg);
+ int ret;
+
+ ret = crypto_register_alg(&alg);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_scomp(&scomp);
+ if (ret) {
+ crypto_unregister_alg(&alg);
+ return ret;
+ }
+
+ return ret;
}
module_init(crypto842_mod_init);
static void __exit crypto842_mod_exit(void)
{
crypto_unregister_alg(&alg);
+ crypto_unregister_scomp(&scomp);
}
module_exit(crypto842_mod_exit);
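The 842 conversion above sets the pattern this series applies to every software compressor: the legacy compress crypto_alg stays registered, and a new scomp_alg is added whose compress/decompress take the working memory as an explicit per-request context instead of embedding it in the tfm. A minimal sketch of that contract, assuming the crypto_scomp_* inline wrappers from the <crypto/internal/scompress.h> header introduced with this series:

    static int scomp_roundtrip(struct crypto_scomp *tfm,
                               const u8 *src, unsigned int slen,
                               u8 *dst, unsigned int *dlen)
    {
            void *ctx;
            int err;

            /* One SW842_MEM_COMPRESS-sized allocation, reusable
             * across any number of calls on this tfm. */
            ctx = crypto_scomp_alloc_ctx(tfm);
            if (IS_ERR(ctx))
                    return PTR_ERR(ctx);

            err = crypto_scomp_compress(tfm, src, slen, dst, dlen, ctx);

            crypto_scomp_free_ctx(tfm, ctx);
            return err;
    }

Decoupling the scratch memory from the tfm is what lets a single transform serve many concurrent requests.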
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 84d71482bf08..160f08e721cc 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -24,7 +24,7 @@ comment "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
- depends on MODULE_SIG
+ depends on (MODULE_SIG || !MODULES)
help
This options enables the fips boot option which is
required if you want to system to operate in a FIPS 200
@@ -102,6 +102,15 @@ config CRYPTO_KPP
select CRYPTO_ALGAPI
select CRYPTO_KPP2
+config CRYPTO_ACOMP2
+ tristate
+ select CRYPTO_ALGAPI2
+
+config CRYPTO_ACOMP
+ tristate
+ select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
+
config CRYPTO_RSA
tristate "RSA algorithm"
select CRYPTO_AKCIPHER
@@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
select CRYPTO_BLKCIPHER2
select CRYPTO_AKCIPHER2
select CRYPTO_KPP2
+ select CRYPTO_ACOMP2
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
@@ -236,10 +246,14 @@ config CRYPTO_ABLK_HELPER
tristate
select CRYPTO_CRYPTD
+config CRYPTO_SIMD
+ tristate
+ select CRYPTO_CRYPTD
+
config CRYPTO_GLUE_HELPER_X86
tristate
depends on X86
- select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
config CRYPTO_ENGINE
tristate
@@ -437,7 +451,7 @@ config CRYPTO_CRC32C_INTEL
gain performance compared with software implementation.
Module will be crc32c-intel.
-config CRYPT_CRC32C_VPMSUM
+config CRYPTO_CRC32C_VPMSUM
tristate "CRC32c CRC algorithm (powerpc64)"
depends on PPC64 && ALTIVEC
select CRYPTO_HASH
@@ -928,14 +942,13 @@ config CRYPTO_AES_X86_64
config CRYPTO_AES_NI_INTEL
tristate "AES cipher algorithms (AES-NI)"
depends on X86
+ select CRYPTO_AEAD
select CRYPTO_AES_X86_64 if 64BIT
select CRYPTO_AES_586 if !64BIT
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
select CRYPTO_GLUE_HELPER_X86 if 64BIT
- select CRYPTO_LRW
- select CRYPTO_XTS
+ select CRYPTO_SIMD
help
Use Intel AES-NI instructions for AES algorithm.
@@ -1568,6 +1581,7 @@ comment "Compression"
config CRYPTO_DEFLATE
tristate "Deflate compression algorithm"
select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
select ZLIB_INFLATE
select ZLIB_DEFLATE
help
@@ -1579,6 +1593,7 @@ config CRYPTO_DEFLATE
config CRYPTO_LZO
tristate "LZO compression algorithm"
select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
select LZO_COMPRESS
select LZO_DECOMPRESS
help
@@ -1587,6 +1602,7 @@ config CRYPTO_LZO
config CRYPTO_842
tristate "842 compression algorithm"
select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
select 842_COMPRESS
select 842_DECOMPRESS
help
@@ -1595,6 +1611,7 @@ config CRYPTO_842
config CRYPTO_LZ4
tristate "LZ4 compression algorithm"
select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
select LZ4_COMPRESS
select LZ4_DECOMPRESS
help
@@ -1603,6 +1620,7 @@ config CRYPTO_LZ4
config CRYPTO_LZ4HC
tristate "LZ4HC compression algorithm"
select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
select LZ4HC_COMPRESS
select LZ4_DECOMPRESS
help
diff --git a/crypto/Makefile b/crypto/Makefile
index bd6a029094e6..b8f0e3eb0791 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -51,6 +51,10 @@ rsa_generic-y += rsa_helper.o
rsa_generic-y += rsa-pkcs1pad.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
+crypto_acompress-y := acompress.o
+crypto_acompress-y += scompress.o
+obj-$(CONFIG_CRYPTO_ACOMP2) += crypto_acompress.o
+
cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
@@ -139,3 +143,5 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx/
obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
+crypto_simd-y := simd.o
+obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
diff --git a/crypto/acompress.c b/crypto/acompress.c
new file mode 100644
index 000000000000..887783d8e9a9
--- /dev/null
+++ b/crypto/acompress.c
@@ -0,0 +1,169 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/internal/scompress.h>
+#include "internal.h"
+
+static const struct crypto_type crypto_acomp_type;
+
+#ifdef CONFIG_NET
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_acomp racomp;
+
+ strncpy(racomp.type, "acomp", sizeof(racomp.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
+ sizeof(struct crypto_report_acomp), &racomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+#else
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return -ENOSYS;
+}
+#endif
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+ __attribute__ ((unused));
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ seq_puts(m, "type : acomp\n");
+}
+
+static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+ struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+ alg->exit(acomp);
+}
+
+static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+ struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+ if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
+ return crypto_init_scomp_ops_async(tfm);
+
+ acomp->compress = alg->compress;
+ acomp->decompress = alg->decompress;
+ acomp->dst_free = alg->dst_free;
+ acomp->reqsize = alg->reqsize;
+
+ if (alg->exit)
+ acomp->base.exit = crypto_acomp_exit_tfm;
+
+ if (alg->init)
+ return alg->init(acomp);
+
+ return 0;
+}
+
+static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
+{
+ int extsize = crypto_alg_extsize(alg);
+
+ if (alg->cra_type != &crypto_acomp_type)
+ extsize += sizeof(struct crypto_scomp *);
+
+ return extsize;
+}
+
+static const struct crypto_type crypto_acomp_type = {
+ .extsize = crypto_acomp_extsize,
+ .init_tfm = crypto_acomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_acomp_show,
+#endif
+ .report = crypto_acomp_report,
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
+ .type = CRYPTO_ALG_TYPE_ACOMPRESS,
+ .tfmsize = offsetof(struct crypto_acomp, base),
+};
+
+struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
+ u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
+
+struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
+{
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct acomp_req *req;
+
+ req = __acomp_request_alloc(acomp);
+ if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
+ return crypto_acomp_scomp_alloc_ctx(req);
+
+ return req;
+}
+EXPORT_SYMBOL_GPL(acomp_request_alloc);
+
+void acomp_request_free(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+
+ if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
+ crypto_acomp_scomp_free_ctx(req);
+
+ if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
+ acomp->dst_free(req->dst);
+ req->dst = NULL;
+ }
+
+ __acomp_request_free(req);
+}
+EXPORT_SYMBOL_GPL(acomp_request_free);
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ base->cra_type = &crypto_acomp_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
+
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_acomp);
+
+int crypto_unregister_acomp(struct acomp_alg *alg)
+{
+ return crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Asynchronous compression type");
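The new type is consumed through <crypto/acompress.h>. Only crypto_alloc_acomp(), acomp_request_alloc() and acomp_request_free() are defined in this file; crypto_free_acomp(), acomp_request_set_params() and crypto_acomp_compress() in the sketch below are assumed from that header. A minimal caller, assuming a synchronous (scomp-backed) implementation:

    static int compress_once(struct scatterlist *src, unsigned int slen,
                             struct scatterlist *dst, unsigned int dlen)
    {
            struct crypto_acomp *tfm;
            struct acomp_req *req;
            int err;

            tfm = crypto_alloc_acomp("deflate", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            req = acomp_request_alloc(tfm);
            if (!req) {
                    crypto_free_acomp(tfm);
                    return -ENOMEM;
            }

            acomp_request_set_params(req, src, dst, slen, dlen);

            /* A real async caller would set a completion callback and
             * wait for -EINPROGRESS to resolve before freeing. */
            err = crypto_acomp_compress(req);

            acomp_request_free(req);
            crypto_free_acomp(tfm);
            return err;
    }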
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 6e39d9c05b98..ccb85e1798f2 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -247,12 +247,8 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
memcpy(param->alg, alg->cra_name, sizeof(param->alg));
type = alg->cra_flags;
- /* This piece of crap needs to disappear into per-type test hooks. */
- if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
- CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
- ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
- alg->cra_ablkcipher.ivsize))
+ /* Do not test internal algorithms. */
+ if (type & CRYPTO_ALG_INTERNAL)
type |= CRYPTO_ALG_TESTED;
param->type = type;
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 235f54d4f8a9..668ef402c6eb 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -454,12 +454,13 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
used -= ctx->aead_assoclen;
/* take over all tx sgls from ctx */
- areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
+ areq->tsgl = sock_kmalloc(sk,
+ sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
GFP_KERNEL);
if (unlikely(!areq->tsgl))
goto free;
- sg_init_table(areq->tsgl, sgl->cur);
+ sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
for (i = 0; i < sgl->cur; i++)
sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
sgl->sg[i].length, sgl->sg[i].offset);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 1e38aaa8303e..a9e79d8eff87 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -566,8 +566,10 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
* need to expand */
tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
GFP_KERNEL);
- if (!tmp)
+ if (!tmp) {
+ err = -ENOMEM;
goto free;
+ }
sg_init_table(tmp, tx_nents * 2);
for (x = 0; x < tx_nents; x++)
diff --git a/crypto/api.c b/crypto/api.c
index bbc147cb5dec..b16ce1653284 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -211,8 +211,8 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
if (!name)
return ERR_PTR(-ENOENT);
+ type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
- type &= mask;
alg = crypto_alg_lookup(name, type, mask);
if (!alg) {
@@ -310,24 +310,8 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
{
const struct crypto_type *type = tfm->__crt_alg->cra_type;
- if (type) {
- if (tfm->exit)
- tfm->exit(tfm);
- return;
- }
-
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- crypto_exit_cipher_ops(tfm);
- break;
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- crypto_exit_compress_ops(tfm);
- break;
-
- default:
- BUG();
- }
+ if (type && tfm->exit)
+ tfm->exit(tfm);
}
static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
diff --git a/crypto/authenc.c b/crypto/authenc.c
index a7e1ac786c5d..875470b0e026 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -324,7 +324,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(auth))
return PTR_ERR(auth);
- enc = crypto_spawn_skcipher2(&ictx->enc);
+ enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
goto err_free_ahash;
@@ -420,9 +420,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
goto err_free_inst;
crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
- err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_auth;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 121010ac9962..6f8f6b86bfe2 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -342,7 +342,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(auth))
return PTR_ERR(auth);
- enc = crypto_spawn_skcipher2(&ictx->enc);
+ enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
goto err_free_ahash;
@@ -441,9 +441,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
goto err_free_inst;
crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
- err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_auth;
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 780ee27b2d43..68f751a41a84 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -1,7 +1,7 @@
/*
* CBC: Cipher Block Chaining mode
*
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -10,191 +10,78 @@
*
*/
-#include <crypto/algapi.h>
+#include <crypto/cbc.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
#include <linux/slab.h>
struct crypto_cbc_ctx {
struct crypto_cipher *child;
};
-static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
+static int crypto_cbc_setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
- struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent);
+ struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
+ crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
+ crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
return err;
}
-static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_cipher *tfm)
+static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm,
+ const u8 *src, u8 *dst)
{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_encrypt;
- int bsize = crypto_cipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- crypto_xor(iv, src, bsize);
- fn(crypto_cipher_tfm(tfm), dst, iv);
- memcpy(iv, dst, bsize);
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- return nbytes;
-}
-
-static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_cipher *tfm)
-{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_encrypt;
- int bsize = crypto_cipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- crypto_xor(src, iv, bsize);
- fn(crypto_cipher_tfm(tfm), src, src);
- iv = src;
-
- src += bsize;
- } while ((nbytes -= bsize) >= bsize);
+ struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
+ crypto_cipher_encrypt_one(ctx->child, dst, src);
}
-static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_cbc_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
- while ((nbytes = walk.nbytes)) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child);
- else
- nbytes = crypto_cbc_encrypt_segment(desc, &walk, child);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- return err;
+ return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one);
}
-static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_cipher *tfm)
+static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm,
+ const u8 *src, u8 *dst)
{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_decrypt;
- int bsize = crypto_cipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- fn(crypto_cipher_tfm(tfm), dst, src);
- crypto_xor(dst, iv, bsize);
- iv = src;
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
-}
+ struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_cipher *tfm)
-{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_decrypt;
- int bsize = crypto_cipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 last_iv[bsize];
-
- /* Start of the last block. */
- src += nbytes - (nbytes & (bsize - 1)) - bsize;
- memcpy(last_iv, src, bsize);
-
- for (;;) {
- fn(crypto_cipher_tfm(tfm), src, src);
- if ((nbytes -= bsize) < bsize)
- break;
- crypto_xor(src, src - bsize, bsize);
- src -= bsize;
- }
-
- crypto_xor(src, walk->iv, bsize);
- memcpy(walk->iv, last_iv, bsize);
-
- return nbytes;
+ crypto_cipher_decrypt_one(ctx->child, dst, src);
}
-static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_cbc_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_walk walk;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child);
- else
- nbytes = crypto_cbc_decrypt_segment(desc, &walk, child);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while (walk.nbytes) {
+ err = crypto_cbc_decrypt_blocks(&walk, tfm,
+ crypto_cbc_decrypt_one);
+ err = skcipher_walk_done(&walk, err);
}
return err;
}
-static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
+static int crypto_cbc_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+ struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
@@ -205,72 +92,94 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
return 0;
}
-static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_cbc_exit_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
crypto_free_cipher(ctx->child);
}
-static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
+static void crypto_cbc_free(struct skcipher_instance *inst)
+{
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
+ struct skcipher_instance *inst;
+ struct crypto_spawn *spawn;
struct crypto_alg *alg;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
if (err)
- return ERR_PTR(err);
+ return err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
+ err = PTR_ERR(alg);
if (IS_ERR(alg))
- return ERR_CAST(alg);
+ goto err_free_inst;
- inst = ERR_PTR(-EINVAL);
- if (!is_power_of_2(alg->cra_blocksize))
- goto out_put_alg;
+ spawn = skcipher_instance_ctx(inst);
+ err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ crypto_mod_put(alg);
+ if (err)
+ goto err_free_inst;
- inst = crypto_alloc_instance("cbc", alg);
- if (IS_ERR(inst))
- goto out_put_alg;
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg);
+ if (err)
+ goto err_drop_spawn;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_blkcipher_type;
+ err = -EINVAL;
+ if (!is_power_of_2(alg->cra_blocksize))
+ goto err_drop_spawn;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
/* We access the data as u32s when xoring. */
- inst->alg.cra_alignmask |= __alignof__(u32) - 1;
+ inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
- inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
- inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
- inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+ inst->alg.ivsize = alg->cra_blocksize;
+ inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
+ inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
- inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
- inst->alg.cra_init = crypto_cbc_init_tfm;
- inst->alg.cra_exit = crypto_cbc_exit_tfm;
+ inst->alg.init = crypto_cbc_init_tfm;
+ inst->alg.exit = crypto_cbc_exit_tfm;
- inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey;
- inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt;
- inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt;
+ inst->alg.setkey = crypto_cbc_setkey;
+ inst->alg.encrypt = crypto_cbc_encrypt;
+ inst->alg.decrypt = crypto_cbc_decrypt;
-out_put_alg:
- crypto_mod_put(alg);
- return inst;
-}
+ inst->free = crypto_cbc_free;
-static void crypto_cbc_free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_spawn(spawn);
+err_free_inst:
kfree(inst);
+ goto out;
}
static struct crypto_template crypto_cbc_tmpl = {
.name = "cbc",
- .alloc = crypto_cbc_alloc,
- .free = crypto_cbc_free,
+ .create = crypto_cbc_create,
.module = THIS_MODULE,
};
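The per-block walkers deleted above are not lost: they move into inline helpers (crypto_cbc_encrypt_walk(), crypto_cbc_decrypt_blocks()) in the new <crypto/cbc.h>, parameterized by the single-block callbacks defined in this file. The chaining itself is unchanged, C[i] = E(K, P[i] xor C[i-1]) with the IV as C[-1]; a sketch mirroring the removed crypto_cbc_encrypt_segment():

    static void cbc_encrypt_segment(struct crypto_skcipher *tfm,
                                    const u8 *src, u8 *dst,
                                    unsigned int nbytes, u8 *iv,
                                    unsigned int bsize)
    {
            do {
                    crypto_xor(iv, src, bsize);           /* PP = P xor prev C */
                    crypto_cbc_encrypt_one(tfm, iv, dst); /* C = E(K, PP)      */
                    memcpy(iv, dst, bsize);               /* chain forward     */
                    src += bsize;
                    dst += bsize;
            } while ((nbytes -= bsize) >= bsize);
    }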
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 006d8575ef5c..26b924d1e582 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -462,7 +462,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- ctr = crypto_spawn_skcipher2(&ictx->ctr);
+ ctr = crypto_spawn_skcipher(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
goto err_free_cipher;
@@ -544,9 +544,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
goto err_free_inst;
crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
- err = crypto_grab_skcipher2(&ictx->ctr, ctr_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_cipher;
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index e899ef51dc8e..db1bc3147bc4 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -532,7 +532,7 @@ static int chachapoly_init(struct crypto_aead *tfm)
if (IS_ERR(poly))
return PTR_ERR(poly);
- chacha = crypto_spawn_skcipher2(&ictx->chacha);
+ chacha = crypto_spawn_skcipher(&ictx->chacha);
if (IS_ERR(chacha)) {
crypto_free_ahash(poly);
return PTR_ERR(chacha);
@@ -625,9 +625,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
goto err_free_inst;
crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
- err = crypto_grab_skcipher2(&ctx->chacha, chacha_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_poly;
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 39541e0e537d..94fa3551476b 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -116,7 +116,3 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm)
return 0;
}
-
-void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
-{
-}
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 7a8bfbd548f6..04080dca8f0c 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -57,7 +57,8 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
unsigned long alignmask = crypto_shash_alignmask(parent);
struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent);
unsigned int bs = crypto_shash_blocksize(parent);
- __be64 *consts = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
+ __be64 *consts = PTR_ALIGN((void *)ctx->ctx,
+ (alignmask | (__alignof__(__be64) - 1)) + 1);
u64 _const[2];
int i, err = 0;
u8 msb_mask, gfmask;
@@ -173,7 +174,8 @@ static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out)
struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *consts = PTR_ALIGN((void *)tctx->ctx, alignmask + 1);
+ u8 *consts = PTR_ALIGN((void *)tctx->ctx,
+ (alignmask | (__alignof__(__be64) - 1)) + 1);
u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
u8 *prev = odds + bs;
unsigned int offset = 0;
@@ -243,6 +245,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
case 8:
break;
default:
+ err = -EINVAL;
goto out_put_alg;
}
@@ -257,7 +260,8 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto out_free_inst;
- alignmask = alg->cra_alignmask | (sizeof(long) - 1);
+ /* We access the data as u32s when xoring. */
+ alignmask = alg->cra_alignmask | (__alignof__(u32) - 1);
inst->alg.base.cra_alignmask = alignmask;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
@@ -269,7 +273,9 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+ alg->cra_blocksize * 2;
inst->alg.base.cra_ctxsize =
- ALIGN(sizeof(struct cmac_tfm_ctx), alignmask + 1)
+ ALIGN(sizeof(struct cmac_tfm_ctx), crypto_tfm_ctx_alignment())
+ + ((alignmask | (__alignof__(__be64) - 1)) &
+ ~(crypto_tfm_ctx_alignment() - 1))
+ alg->cra_blocksize * 2;
inst->alg.base.cra_init = cmac_init_tfm;
diff --git a/crypto/compress.c b/crypto/compress.c
index c33f0763a956..f2d522924a07 100644
--- a/crypto/compress.c
+++ b/crypto/compress.c
@@ -42,7 +42,3 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm)
return 0;
}
-
-void crypto_exit_compress_ops(struct crypto_tfm *tfm)
-{
-}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 0c654e59f215..0508c48a45c4 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -17,9 +17,9 @@
*
*/
-#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
@@ -48,6 +48,11 @@ struct cryptd_instance_ctx {
struct cryptd_queue *queue;
};
+struct skcipherd_instance_ctx {
+ struct crypto_skcipher_spawn spawn;
+ struct cryptd_queue *queue;
+};
+
struct hashd_instance_ctx {
struct crypto_shash_spawn spawn;
struct cryptd_queue *queue;
@@ -67,6 +72,15 @@ struct cryptd_blkcipher_request_ctx {
crypto_completion_t complete;
};
+struct cryptd_skcipher_ctx {
+ atomic_t refcnt;
+ struct crypto_skcipher *child;
+};
+
+struct cryptd_skcipher_request_ctx {
+ crypto_completion_t complete;
+};
+
struct cryptd_hash_ctx {
atomic_t refcnt;
struct crypto_shash *child;
@@ -122,7 +136,6 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
{
int cpu, err;
struct cryptd_cpu_queue *cpu_queue;
- struct crypto_tfm *tfm;
atomic_t *refcnt;
bool may_backlog;
@@ -141,7 +154,6 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
if (!atomic_read(refcnt))
goto out_put_cpu;
- tfm = request->tfm;
atomic_inc(refcnt);
out_put_cpu:
@@ -432,6 +444,216 @@ out_put_alg:
return err;
}
+static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_skcipher *child = ctx->child;
+ int err;
+
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+ int refcnt = atomic_read(&ctx->refcnt);
+
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+
+ if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+ crypto_free_skcipher(tfm);
+}
+
+static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
+ int err)
+{
+ struct skcipher_request *req = skcipher_request_cast(base);
+ struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *child = ctx->child;
+ SKCIPHER_REQUEST_ON_STACK(subreq, child);
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+
+ req->base.complete = rctx->complete;
+
+out:
+ cryptd_skcipher_complete(req, err);
+}
+
+static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
+ int err)
+{
+ struct skcipher_request *req = skcipher_request_cast(base);
+ struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *child = ctx->child;
+ SKCIPHER_REQUEST_ON_STACK(subreq, child);
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+
+ err = crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+
+ req->base.complete = rctx->complete;
+
+out:
+ cryptd_skcipher_complete(req, err);
+}
+
+static int cryptd_skcipher_enqueue(struct skcipher_request *req,
+ crypto_completion_t compl)
+{
+ struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cryptd_queue *queue;
+
+ queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
+ rctx->complete = req->base.complete;
+ req->base.complete = compl;
+
+ return cryptd_enqueue_request(queue, &req->base);
+}
+
+static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
+{
+ return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
+}
+
+static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
+{
+ return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
+}
+
+static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
+ struct crypto_skcipher_spawn *spawn = &ictx->spawn;
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *cipher;
+
+ cipher = crypto_spawn_skcipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ crypto_skcipher_set_reqsize(
+ tfm, sizeof(struct cryptd_skcipher_request_ctx));
+ return 0;
+}
+
+static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->child);
+}
+
+static void cryptd_skcipher_free(struct skcipher_instance *inst)
+{
+ struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ctx->spawn);
+}
+
+static int cryptd_create_skcipher(struct crypto_template *tmpl,
+ struct rtattr **tb,
+ struct cryptd_queue *queue)
+{
+ struct skcipherd_instance_ctx *ctx;
+ struct skcipher_instance *inst;
+ struct skcipher_alg *alg;
+ const char *name;
+ u32 type;
+ u32 mask;
+ int err;
+
+ type = 0;
+ mask = CRYPTO_ALG_ASYNC;
+
+ cryptd_check_internal(tb, &type, &mask);
+
+ name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ ctx = skcipher_instance_ctx(inst);
+ ctx->queue = queue;
+
+ crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
+ err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
+ if (err)
+ goto out_free_inst;
+
+ alg = crypto_spawn_skcipher_alg(&ctx->spawn);
+ err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
+ if (err)
+ goto out_drop_skcipher;
+
+ inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
+
+ inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
+ inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
+ inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
+ inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
+
+ inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
+
+ inst->alg.init = cryptd_skcipher_init_tfm;
+ inst->alg.exit = cryptd_skcipher_exit_tfm;
+
+ inst->alg.setkey = cryptd_skcipher_setkey;
+ inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
+ inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
+
+ inst->free = cryptd_skcipher_free;
+
+ err = skcipher_register_instance(tmpl, inst);
+ if (err) {
+out_drop_skcipher:
+ crypto_drop_skcipher(&ctx->spawn);
+out_free_inst:
+ kfree(inst);
+ }
+ return err;
+}
+
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
@@ -895,7 +1117,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
- return cryptd_create_blkcipher(tmpl, tb, &queue);
+ if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_BLKCIPHER)
+ return cryptd_create_blkcipher(tmpl, tb, &queue);
+
+ return cryptd_create_skcipher(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_DIGEST:
return cryptd_create_hash(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_AEAD:
@@ -985,6 +1211,58 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
+struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
+ u32 type, u32 mask)
+{
+ char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct cryptd_skcipher_ctx *ctx;
+ struct crypto_skcipher *tfm;
+
+ if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+ "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+
+ tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+ crypto_free_skcipher(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctx = crypto_skcipher_ctx(tfm);
+ atomic_set(&ctx->refcnt, 1);
+
+ return container_of(tfm, struct cryptd_skcipher, base);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
+
+struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
+{
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
+
+ return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
+
+bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
+{
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
+
+ return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
+
+void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
+{
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
+
+ if (atomic_dec_and_test(&ctx->refcnt))
+ crypto_free_skcipher(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
+
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
u32 type, u32 mask)
{
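These exports give SIMD glue code an async front end without the legacy ablkcipher helpers. A minimal usage sketch; the "__cbc-aes-internal" driver name is hypothetical, standing in for any CRYPTO_ALG_INTERNAL skcipher implementation:

    struct cryptd_skcipher *ctfm;
    struct crypto_skcipher *child;

    ctfm = cryptd_alloc_skcipher("__cbc-aes-internal",
                                 CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
    if (IS_ERR(ctfm))
            return PTR_ERR(ctfm);

    /* On the fast path (e.g. SIMD registers usable), the inner
     * synchronous tfm can be driven directly: */
    child = cryptd_skcipher_child(ctfm);

    /* Teardown; the refcount keeps the tfm alive until queued
     * requests drain. */
    cryptd_free_skcipher(ctfm);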
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 6989ba0046df..f1bf3418d968 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
/* If another context is idling then defer */
if (engine->idling) {
- kthread_queue_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
goto out;
}
@@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
/* Only do teardown in the thread */
if (!in_kthread) {
- kthread_queue_work(&engine->kworker,
+ kthread_queue_work(engine->kworker,
&engine->pump_requests);
goto out;
}
@@ -189,7 +189,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
ret = ablkcipher_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
- kthread_queue_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
@@ -231,7 +231,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine,
ret = ahash_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
- kthread_queue_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
@@ -284,7 +284,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine,
req->base.complete(&req->base, err);
- kthread_queue_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
@@ -321,7 +321,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
req->base.complete(&req->base, err);
- kthread_queue_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
@@ -345,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine)
engine->running = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
- kthread_queue_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
return 0;
}
@@ -422,11 +422,8 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
spin_lock_init(&engine->queue_lock);
- kthread_init_worker(&engine->kworker);
- engine->kworker_task = kthread_run(kthread_worker_fn,
- &engine->kworker, "%s",
- engine->name);
- if (IS_ERR(engine->kworker_task)) {
+ engine->kworker = kthread_create_worker(0, "%s", engine->name);
+ if (IS_ERR(engine->kworker)) {
dev_err(dev, "failed to create crypto request pump task\n");
return NULL;
}
@@ -434,7 +431,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
if (engine->rt) {
dev_info(dev, "will run requests pump with realtime priority\n");
- sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
+ sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
}
return engine;
@@ -455,8 +452,7 @@ int crypto_engine_exit(struct crypto_engine *engine)
if (ret)
return ret;
- kthread_flush_worker(&engine->kworker);
- kthread_stop(engine->kworker_task);
+ kthread_destroy_worker(engine->kworker);
return 0;
}
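The conversion above replaces the engine's embedded kthread_worker plus separate task pointer with the managed kthread_create_worker() API, so setup and teardown each collapse to one call. The lifecycle, sketched with the standard <linux/kthread.h> interface:

    struct kthread_worker *w;

    w = kthread_create_worker(0, "%s", name);  /* allocates worker + task */
    if (IS_ERR(w))
            return PTR_ERR(w);

    kthread_queue_work(w, &work);   /* all former &engine->kworker call sites */

    kthread_destroy_worker(w);      /* flushes pending work, stops the task */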
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 1c5705481c69..a90404a0c5ff 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -112,6 +112,21 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_acomp racomp;
+
+ strncpy(racomp.type, "acomp", sizeof(racomp.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
+ sizeof(struct crypto_report_acomp), &racomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
@@ -186,7 +201,11 @@ static int crypto_report_one(struct crypto_alg *alg,
goto nla_put_failure;
break;
+ case CRYPTO_ALG_TYPE_ACOMPRESS:
+ if (crypto_report_acomp(skb, alg))
+ goto nla_put_failure;
+ break;
case CRYPTO_ALG_TYPE_AKCIPHER:
if (crypto_report_akcipher(skb, alg))
goto nla_put_failure;
diff --git a/crypto/ctr.c b/crypto/ctr.c
index ff4d21eddb83..a9a7a44f2783 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -312,7 +312,7 @@ static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm)
unsigned long align;
unsigned int reqsize;
- cipher = crypto_spawn_skcipher2(spawn);
+ cipher = crypto_spawn_skcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
@@ -370,9 +370,9 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
spawn = skcipher_instance_ctx(inst);
crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
- err = crypto_grab_skcipher2(spawn, cipher_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(spawn, cipher_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_free_inst;
diff --git a/crypto/cts.c b/crypto/cts.c
index 51976187b2bf..00254d76b21b 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -290,7 +290,7 @@ static int crypto_cts_init_tfm(struct crypto_skcipher *tfm)
unsigned bsize;
unsigned align;
- cipher = crypto_spawn_skcipher2(spawn);
+ cipher = crypto_spawn_skcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
@@ -348,9 +348,9 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = skcipher_instance_ctx(inst);
crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
- err = crypto_grab_skcipher2(spawn, cipher_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(spawn, cipher_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_free_inst;
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 95d8d37c5021..f942cb391890 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -32,6 +32,7 @@
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/net.h>
+#include <crypto/internal/scompress.h>
#define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION
#define DEFLATE_DEF_WINBITS 11
@@ -101,9 +102,8 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx)
vfree(ctx->decomp_stream.workspace);
}
-static int deflate_init(struct crypto_tfm *tfm)
+static int __deflate_init(void *ctx)
{
- struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
ret = deflate_comp_init(ctx);
@@ -116,19 +116,55 @@ out:
return ret;
}
-static void deflate_exit(struct crypto_tfm *tfm)
+static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
+{
+ struct deflate_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ret = __deflate_init(ctx);
+ if (ret) {
+ kfree(ctx);
+ return ERR_PTR(ret);
+ }
+
+ return ctx;
+}
+
+static int deflate_init(struct crypto_tfm *tfm)
{
struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
+ return __deflate_init(ctx);
+}
+
+static void __deflate_exit(void *ctx)
+{
deflate_comp_exit(ctx);
deflate_decomp_exit(ctx);
}
-static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+ __deflate_exit(ctx);
+ kzfree(ctx);
+}
+
+static void deflate_exit(struct crypto_tfm *tfm)
+{
+ struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __deflate_exit(ctx);
+}
+
+static int __deflate_compress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
int ret = 0;
- struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct deflate_ctx *dctx = ctx;
struct z_stream_s *stream = &dctx->comp_stream;
ret = zlib_deflateReset(stream);
@@ -153,12 +189,27 @@ out:
return ret;
}
-static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+
+ return __deflate_compress(src, slen, dst, dlen, dctx);
+}
+
+static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __deflate_compress(src, slen, dst, dlen, ctx);
+}
+
+static int __deflate_decompress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
int ret = 0;
- struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct deflate_ctx *dctx = ctx;
struct z_stream_s *stream = &dctx->decomp_stream;
ret = zlib_inflateReset(stream);
@@ -194,6 +245,21 @@ out:
return ret;
}
+static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+
+ return __deflate_decompress(src, slen, dst, dlen, dctx);
+}
+
+static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __deflate_decompress(src, slen, dst, dlen, ctx);
+}
+
static struct crypto_alg alg = {
.cra_name = "deflate",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
@@ -206,14 +272,39 @@ static struct crypto_alg alg = {
.coa_decompress = deflate_decompress } }
};
+static struct scomp_alg scomp = {
+ .alloc_ctx = deflate_alloc_ctx,
+ .free_ctx = deflate_free_ctx,
+ .compress = deflate_scompress,
+ .decompress = deflate_sdecompress,
+ .base = {
+ .cra_name = "deflate",
+ .cra_driver_name = "deflate-scomp",
+ .cra_module = THIS_MODULE,
+ }
+};
+
static int __init deflate_mod_init(void)
{
- return crypto_register_alg(&alg);
+ int ret;
+
+ ret = crypto_register_alg(&alg);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_scomp(&scomp);
+ if (ret) {
+ crypto_unregister_alg(&alg);
+ return ret;
+ }
+
+ return ret;
}
static void __exit deflate_mod_fini(void)
{
crypto_unregister_alg(&alg);
+ crypto_unregister_scomp(&scomp);
}
module_init(deflate_mod_init);
diff --git a/crypto/dh.c b/crypto/dh.c
index 9d19360e7189..ddcb528ab2cc 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -118,7 +118,7 @@ static int dh_compute_value(struct kpp_request *req)
if (req->src) {
base = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!base) {
- ret = EINVAL;
+ ret = -EINVAL;
goto err_free_val;
}
} else {
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 053035b5c8f8..8a4d98b4adba 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1782,6 +1782,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
memcpy(outbuf, drbg->outscratchpad, cryptlen);
outlen -= cryptlen;
+ outbuf += cryptlen;
}
ret = 0;
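The one-line drbg fix above matters because the CTR output is produced a scratchpad-sized chunk at a time: without advancing outbuf, every chunk after the first would overwrite the start of the caller's buffer. The fixed loop has the usual chunked-copy shape (a sketch, with scratch/scratch_len standing in for the drbg scratchpad fields):

    while (outlen) {
            cryptlen = min_t(u32, scratch_len, outlen);
            /* ... encrypt the next counter block(s) into scratch ... */
            memcpy(outbuf, scratch, cryptlen);
            outlen -= cryptlen;
            outbuf += cryptlen;     /* the advance this hunk adds */
    }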
diff --git a/crypto/gcm.c b/crypto/gcm.c
index f624ac98c94e..b7ad808be3d4 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -575,7 +575,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(ghash))
return PTR_ERR(ghash);
- ctr = crypto_spawn_skcipher2(&ictx->ctr);
+ ctr = crypto_spawn_skcipher(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
goto err_free_hash;
@@ -663,20 +663,20 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
goto err_drop_ghash;
crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
- err = crypto_grab_skcipher2(&ctx->ctr, ctr_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_ghash;
ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
/* We only support 16-byte blocks. */
+ err = -EINVAL;
if (crypto_skcipher_alg_ivsize(ctr) != 16)
goto out_put_ctr;
/* Not a stream cipher? */
- err = -EINVAL;
if (ctr->base.cra_blocksize != 1)
goto out_put_ctr;
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index 5276607c72d0..72015fee533d 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -263,48 +263,6 @@ EXPORT_SYMBOL(gf128mul_bbe);
* t[1][BYTE] contains g*x^8*BYTE
* ..
* t[15][BYTE] contains g*x^120*BYTE */
-struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g)
-{
- struct gf128mul_64k *t;
- int i, j, k;
-
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- goto out;
-
- for (i = 0; i < 16; i++) {
- t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
- if (!t->t[i]) {
- gf128mul_free_64k(t);
- t = NULL;
- goto out;
- }
- }
-
- t->t[0]->t[128] = *g;
- for (j = 64; j > 0; j >>= 1)
- gf128mul_x_lle(&t->t[0]->t[j], &t->t[0]->t[j + j]);
-
- for (i = 0;;) {
- for (j = 2; j < 256; j += j)
- for (k = 1; k < j; ++k)
- be128_xor(&t->t[i]->t[j + k],
- &t->t[i]->t[j], &t->t[i]->t[k]);
-
- if (++i >= 16)
- break;
-
- for (j = 128; j > 0; j >>= 1) {
- t->t[i]->t[j] = t->t[i - 1]->t[j];
- gf128mul_x8_lle(&t->t[i]->t[j]);
- }
- }
-
-out:
- return t;
-}
-EXPORT_SYMBOL(gf128mul_init_64k_lle);
-
struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
{
struct gf128mul_64k *t;
@@ -352,24 +310,11 @@ void gf128mul_free_64k(struct gf128mul_64k *t)
int i;
for (i = 0; i < 16; i++)
- kfree(t->t[i]);
- kfree(t);
+ kzfree(t->t[i]);
+ kzfree(t);
}
EXPORT_SYMBOL(gf128mul_free_64k);
-void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t)
-{
- u8 *ap = (u8 *)a;
- be128 r[1];
- int i;
-
- *r = t->t[0]->t[ap[0]];
- for (i = 1; i < 16; ++i)
- be128_xor(r, r, &t->t[i]->t[ap[i]]);
- *a = *r;
-}
-EXPORT_SYMBOL(gf128mul_64k_lle);
-
void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t)
{
u8 *ap = (u8 *)a;
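In the notation of the table comment kept above, the 64k method evaluates multiplication by the fixed element g as sixteen byte-indexed lookups XORed together:

    a \cdot g = \bigoplus_{i=0}^{15} t_i[a_i], \qquad t_i[b] = (b \cdot x^{8i}) \cdot g

which is what the surviving gf128mul_64k_bbe() computes; the removed _lle functions were the same construction in the little-endian bit ordering, dropped here as they evidently had no remaining in-tree users.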
diff --git a/crypto/internal.h b/crypto/internal.h
index 7eefcdb00227..f07320423191 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -76,9 +76,6 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm);
-void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
-void crypto_exit_compress_ops(struct crypto_tfm *tfm);
-
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
void crypto_larval_kill(struct crypto_alg *alg);
struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index c4938497eedb..787dccca3715 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -39,7 +39,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/fips.h>
#include <linux/time.h>
#include <linux/crypto.h>
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 6f9908a7ebcb..ecd8474018e3 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -17,7 +17,8 @@
*
* The test vectors are included in the testing module tcrypt.[ch] */
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -29,11 +30,30 @@
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>
+#define LRW_BUFFER_SIZE 128u
+
struct priv {
- struct crypto_cipher *child;
+ struct crypto_skcipher *child;
struct lrw_table_ctx table;
};
+struct rctx {
+ be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];
+
+ be128 t;
+
+ be128 *ext;
+
+ struct scatterlist srcbuf[2];
+ struct scatterlist dstbuf[2];
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ unsigned int left;
+
+ struct skcipher_request subreq;
+};
+
static inline void setbit128_bbe(void *b, int bit)
{
__set_bit(bit ^ (0x80 -
@@ -76,32 +96,26 @@ void lrw_free_table(struct lrw_table_ctx *ctx)
}
EXPORT_SYMBOL_GPL(lrw_free_table);
-static int setkey(struct crypto_tfm *parent, const u8 *key,
+static int setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
- struct priv *ctx = crypto_tfm_ctx(parent);
- struct crypto_cipher *child = ctx->child;
+ struct priv *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_skcipher *child = ctx->child;
int err, bsize = LRW_BLOCK_SIZE;
const u8 *tweak = key + keylen - bsize;
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key, keylen - bsize);
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, keylen - bsize);
+ crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
if (err)
return err;
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
return lrw_init_table(&ctx->table, tweak);
}
-struct sinfo {
- be128 t;
- struct crypto_tfm *tfm;
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
-};
-
static inline void inc(be128 *iv)
{
be64_add_cpu(&iv->b, 1);
@@ -109,13 +123,6 @@ static inline void inc(be128 *iv)
be64_add_cpu(&iv->a, 1);
}
-static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
-{
- be128_xor(dst, &s->t, src); /* PP <- T xor P */
- s->fn(s->tfm, dst, dst); /* CC <- E(Key2,PP) */
- be128_xor(dst, dst, &s->t); /* C <- T xor CC */
-}
-
/* this returns the number of consecutive 1 bits starting
* from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
@@ -135,83 +142,263 @@ static inline int get_index128(be128 *block)
return x;
}
-static int crypt(struct blkcipher_desc *d,
- struct blkcipher_walk *w, struct priv *ctx,
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
+static int post_crypt(struct skcipher_request *req)
{
+ struct rctx *rctx = skcipher_request_ctx(req);
+ be128 *buf = rctx->ext ?: rctx->buf;
+ struct skcipher_request *subreq;
+ const int bs = LRW_BLOCK_SIZE;
+ struct skcipher_walk w;
+ struct scatterlist *sg;
+ unsigned offset;
int err;
- unsigned int avail;
+
+ subreq = &rctx->subreq;
+ err = skcipher_walk_virt(&w, subreq, false);
+
+ while (w.nbytes) {
+ unsigned int avail = w.nbytes;
+ be128 *wdst;
+
+ wdst = w.dst.virt.addr;
+
+ do {
+ be128_xor(wdst, buf++, wdst);
+ wdst++;
+ } while ((avail -= bs) >= bs);
+
+ err = skcipher_walk_done(&w, avail);
+ }
+
+ rctx->left -= subreq->cryptlen;
+
+ if (err || !rctx->left)
+ goto out;
+
+ rctx->dst = rctx->dstbuf;
+
+ scatterwalk_done(&w.out, 0, 1);
+ sg = w.out.sg;
+ offset = w.out.offset;
+
+ if (rctx->dst != sg) {
+ rctx->dst[0] = *sg;
+ sg_unmark_end(rctx->dst);
+ scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+ }
+ rctx->dst[0].length -= offset - sg->offset;
+ rctx->dst[0].offset = offset;
+
+out:
+ return err;
+}
+
+static int pre_crypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct priv *ctx = crypto_skcipher_ctx(tfm);
+ be128 *buf = rctx->ext ?: rctx->buf;
+ struct skcipher_request *subreq;
const int bs = LRW_BLOCK_SIZE;
- struct sinfo s = {
- .tfm = crypto_cipher_tfm(ctx->child),
- .fn = fn
- };
+ struct skcipher_walk w;
+ struct scatterlist *sg;
+ unsigned cryptlen;
+ unsigned offset;
be128 *iv;
- u8 *wsrc;
- u8 *wdst;
+ bool more;
+ int err;
- err = blkcipher_walk_virt(d, w);
- if (!(avail = w->nbytes))
- return err;
+ subreq = &rctx->subreq;
+ skcipher_request_set_tfm(subreq, tfm);
- wsrc = w->src.virt.addr;
- wdst = w->dst.virt.addr;
+ cryptlen = subreq->cryptlen;
+ more = rctx->left > cryptlen;
+ if (!more)
+ cryptlen = rctx->left;
- /* calculate first value of T */
- iv = (be128 *)w->iv;
- s.t = *iv;
+ skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
+ cryptlen, req->iv);
- /* T <- I*Key2 */
- gf128mul_64k_bbe(&s.t, ctx->table.table);
+ err = skcipher_walk_virt(&w, subreq, false);
+ iv = w.iv;
- goto first;
+ while (w.nbytes) {
+ unsigned int avail = w.nbytes;
+ be128 *wsrc;
+ be128 *wdst;
+
+ wsrc = w.src.virt.addr;
+ wdst = w.dst.virt.addr;
- for (;;) {
do {
+ *buf++ = rctx->t;
+ be128_xor(wdst++, &rctx->t, wsrc++);
+
/* T <- I*Key2, using the optimization
* discussed in the specification */
- be128_xor(&s.t, &s.t,
+ be128_xor(&rctx->t, &rctx->t,
&ctx->table.mulinc[get_index128(iv)]);
inc(iv);
+ } while ((avail -= bs) >= bs);
-first:
- lrw_round(&s, wdst, wsrc);
+ err = skcipher_walk_done(&w, avail);
+ }
- wsrc += bs;
- wdst += bs;
- } while ((avail -= bs) >= bs);
+ skcipher_request_set_tfm(subreq, ctx->child);
+ skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
+ cryptlen, NULL);
- err = blkcipher_walk_done(d, w, avail);
- if (!(avail = w->nbytes))
- break;
+ if (err || !more)
+ goto out;
+
+ rctx->src = rctx->srcbuf;
+
+ scatterwalk_done(&w.in, 0, 1);
+ sg = w.in.sg;
+ offset = w.in.offset;
+
+ if (rctx->src != sg) {
+ rctx->src[0] = *sg;
+ sg_unmark_end(rctx->src);
+ scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
+ }
+ rctx->src[0].length -= offset - sg->offset;
+ rctx->src[0].offset = offset;
+
+out:
+ return err;
+}
+
+static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+{
+ struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq;
+ gfp_t gfp;
+
+ subreq = &rctx->subreq;
+ skcipher_request_set_callback(subreq, req->base.flags, done, req);
+
+ gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+ rctx->ext = NULL;
+
+ subreq->cryptlen = LRW_BUFFER_SIZE;
+ if (req->cryptlen > LRW_BUFFER_SIZE) {
+ subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
+ rctx->ext = kmalloc(subreq->cryptlen, gfp);
+ }
+
+ rctx->src = req->src;
+ rctx->dst = req->dst;
+ rctx->left = req->cryptlen;
+
+ /* calculate first value of T */
+ memcpy(&rctx->t, req->iv, sizeof(rctx->t));
+
+ /* T <- I*Key2 */
+ gf128mul_64k_bbe(&rctx->t, ctx->table.table);
- wsrc = w->src.virt.addr;
- wdst = w->dst.virt.addr;
+ return 0;
+}
+
+static void exit_crypt(struct skcipher_request *req)
+{
+ struct rctx *rctx = skcipher_request_ctx(req);
+
+ rctx->left = 0;
+
+ kfree(rctx->ext); /* kfree(NULL) is a no-op */
+}
+
+static int do_encrypt(struct skcipher_request *req, int err)
+{
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq;
+
+ subreq = &rctx->subreq;
+
+ while (!err && rctx->left) {
+ err = pre_crypt(req) ?:
+ crypto_skcipher_encrypt(subreq) ?:
+ post_crypt(req);
+
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return err;
}
+ exit_crypt(req);
return err;
}
-static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static void encrypt_done(struct crypto_async_request *areq, int err)
+{
+ struct skcipher_request *req = areq->data;
+ struct skcipher_request *subreq;
+ struct rctx *rctx;
+
+ rctx = skcipher_request_ctx(req);
+ subreq = &rctx->subreq;
+ subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+ err = do_encrypt(req, err ?: post_crypt(req));
+ if (rctx->left)
+ return;
+
+ skcipher_request_complete(req, err);
+}
+
+static int encrypt(struct skcipher_request *req)
+{
+ return do_encrypt(req, init_crypt(req, encrypt_done));
+}
+
+static int do_decrypt(struct skcipher_request *req, int err)
{
- struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk w;
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq;
+
+ subreq = &rctx->subreq;
+
+ while (!err && rctx->left) {
+ err = pre_crypt(req) ?:
+ crypto_skcipher_decrypt(subreq) ?:
+ post_crypt(req);
+
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return err;
+ }
- blkcipher_walk_init(&w, dst, src, nbytes);
- return crypt(desc, &w, ctx,
- crypto_cipher_alg(ctx->child)->cia_encrypt);
+ exit_crypt(req);
+ return err;
}
-static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static void decrypt_done(struct crypto_async_request *areq, int err)
{
- struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk w;
+ struct skcipher_request *req = areq->data;
+ struct skcipher_request *subreq;
+ struct rctx *rctx;
+
+ rctx = skcipher_request_ctx(req);
+ subreq = &rctx->subreq;
+ subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+ err = do_decrypt(req, err ?: post_crypt(req));
+ if (rctx->left)
+ return;
- blkcipher_walk_init(&w, dst, src, nbytes);
- return crypt(desc, &w, ctx,
- crypto_cipher_alg(ctx->child)->cia_decrypt);
+ skcipher_request_complete(req, err);
+}
+
+static int decrypt(struct skcipher_request *req)
+{
+ return do_decrypt(req, init_crypt(req, decrypt_done));
}
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
@@ -293,95 +480,161 @@ first:
}
EXPORT_SYMBOL_GPL(lrw_crypt);
-static int init_tfm(struct crypto_tfm *tfm)
+static int init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_cipher *cipher;
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct priv *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
+ struct priv *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *cipher;
- cipher = crypto_spawn_cipher(spawn);
+ cipher = crypto_spawn_skcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
- *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
- crypto_free_cipher(cipher);
- return -EINVAL;
- }
-
ctx->child = cipher;
+
+ crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
+ sizeof(struct rctx));
+
return 0;
}
-static void exit_tfm(struct crypto_tfm *tfm)
+static void exit_tfm(struct crypto_skcipher *tfm)
{
- struct priv *ctx = crypto_tfm_ctx(tfm);
+ struct priv *ctx = crypto_skcipher_ctx(tfm);
lrw_free_table(&ctx->table);
- crypto_free_cipher(ctx->child);
+ crypto_free_skcipher(ctx->child);
+}
+
+static void free(struct skcipher_instance *inst)
+{
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
}
-static struct crypto_instance *alloc(struct rtattr **tb)
+static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
- struct crypto_alg *alg;
+ struct crypto_skcipher_spawn *spawn;
+ struct skcipher_instance *inst;
+ struct crypto_attr_type *algt;
+ struct skcipher_alg *alg;
+ const char *cipher_name;
+ char ecb_name[CRYPTO_MAX_ALG_NAME];
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
+ return -EINVAL;
+
+ cipher_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(cipher_name))
+ return PTR_ERR(cipher_name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ spawn = skcipher_instance_ctx(inst);
+
+ crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
+ err = crypto_grab_skcipher(spawn, cipher_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
+ if (err == -ENOENT) {
+ err = -ENAMETOOLONG;
+ if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+ cipher_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ err = crypto_grab_skcipher(spawn, ecb_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
+ }
+
if (err)
- return ERR_PTR(err);
+ goto err_free_inst;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
+ alg = crypto_skcipher_spawn_alg(spawn);
- inst = crypto_alloc_instance("lrw", alg);
- if (IS_ERR(inst))
- goto out_put_alg;
+ err = -EINVAL;
+ if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
+ goto err_drop_spawn;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
+ if (crypto_skcipher_alg_ivsize(alg))
+ goto err_drop_spawn;
- if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7;
- else inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_blkcipher_type;
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
+ &alg->base);
+ if (err)
+ goto err_drop_spawn;
- if (!(alg->cra_blocksize % 4))
- inst->alg.cra_alignmask |= 3;
- inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
- inst->alg.cra_blkcipher.min_keysize =
- alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
- inst->alg.cra_blkcipher.max_keysize =
- alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;
+ err = -EINVAL;
+ cipher_name = alg->base.cra_name;
- inst->alg.cra_ctxsize = sizeof(struct priv);
+ /* Alas we screwed up the naming so we have to mangle the
+ * cipher name.
+ */
+ if (!strncmp(cipher_name, "ecb(", 4)) {
+ unsigned len;
- inst->alg.cra_init = init_tfm;
- inst->alg.cra_exit = exit_tfm;
+ len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
+ if (len < 2 || len >= sizeof(ecb_name))
+ goto err_drop_spawn;
- inst->alg.cra_blkcipher.setkey = setkey;
- inst->alg.cra_blkcipher.encrypt = encrypt;
- inst->alg.cra_blkcipher.decrypt = decrypt;
+ if (ecb_name[len - 1] != ')')
+ goto err_drop_spawn;
-out_put_alg:
- crypto_mod_put(alg);
- return inst;
-}
+ ecb_name[len - 1] = 0;
-static void free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_drop_spawn;
+ }
+
+ inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
+ (__alignof__(u64) - 1);
+
+ inst->alg.ivsize = LRW_BLOCK_SIZE;
+ inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
+ LRW_BLOCK_SIZE;
+ inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
+ LRW_BLOCK_SIZE;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct priv);
+
+ inst->alg.init = init_tfm;
+ inst->alg.exit = exit_tfm;
+
+ inst->alg.setkey = setkey;
+ inst->alg.encrypt = encrypt;
+ inst->alg.decrypt = decrypt;
+
+ inst->free = free;
+
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_skcipher(spawn);
+err_free_inst:
kfree(inst);
+ goto out;
}
static struct crypto_template crypto_tmpl = {
.name = "lrw",
- .alloc = alloc,
- .free = free,
+ .create = create,
.module = THIS_MODULE,
};
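do_encrypt() and do_decrypt() above lean on GCC's binary ?: extension: a ?: b evaluates a once and yields it when non-zero, otherwise b. Chaining the three stages this way means the first stage to return an error short-circuits the rest, while a fully successful pass yields 0:

	err = pre_crypt(req) ?:
	      crypto_skcipher_encrypt(subreq) ?:
	      post_crypt(req);	/* first non-zero error wins */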
diff --git a/crypto/lz4.c b/crypto/lz4.c
index aefbceaf3104..99c1b2cc2976 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -23,36 +23,53 @@
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/lz4.h>
+#include <crypto/internal/scompress.h>
struct lz4_ctx {
void *lz4_comp_mem;
};
+static void *lz4_alloc_ctx(struct crypto_scomp *tfm)
+{
+ void *ctx;
+
+ ctx = vmalloc(LZ4_MEM_COMPRESS);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ return ctx;
+}
+
static int lz4_init(struct crypto_tfm *tfm)
{
struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->lz4_comp_mem = vmalloc(LZ4_MEM_COMPRESS);
- if (!ctx->lz4_comp_mem)
+ ctx->lz4_comp_mem = lz4_alloc_ctx(NULL);
+ if (IS_ERR(ctx->lz4_comp_mem))
return -ENOMEM;
return 0;
}
+static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+ vfree(ctx);
+}
+
static void lz4_exit(struct crypto_tfm *tfm)
{
struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
- vfree(ctx->lz4_comp_mem);
+
+ lz4_free_ctx(NULL, ctx->lz4_comp_mem);
}
-static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lz4_compress_crypto(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
- struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
size_t tmp_len = *dlen;
int err;
- err = lz4_compress(src, slen, dst, &tmp_len, ctx->lz4_comp_mem);
+ err = lz4_compress(src, slen, dst, &tmp_len, ctx);
if (err < 0)
return -EINVAL;
@@ -61,8 +78,23 @@ static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
return 0;
}
-static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __lz4_compress_crypto(src, slen, dst, dlen, ctx);
+}
+
+static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem);
+}
+
+static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
int err;
size_t tmp_len = *dlen;
@@ -76,6 +108,20 @@ static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
return err;
}
+static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
+static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst,
+ unsigned int *dlen)
+{
+ return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
static struct crypto_alg alg_lz4 = {
.cra_name = "lz4",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
@@ -89,14 +135,39 @@ static struct crypto_alg alg_lz4 = {
.coa_decompress = lz4_decompress_crypto } }
};
+static struct scomp_alg scomp = {
+ .alloc_ctx = lz4_alloc_ctx,
+ .free_ctx = lz4_free_ctx,
+ .compress = lz4_scompress,
+ .decompress = lz4_sdecompress,
+ .base = {
+ .cra_name = "lz4",
+ .cra_driver_name = "lz4-scomp",
+ .cra_module = THIS_MODULE,
+ }
+};
+
static int __init lz4_mod_init(void)
{
- return crypto_register_alg(&alg_lz4);
+ int ret;
+
+ ret = crypto_register_alg(&alg_lz4);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_scomp(&scomp);
+ if (ret) {
+ crypto_unregister_alg(&alg_lz4);
+ return ret;
+ }
+
+ return ret;
}
static void __exit lz4_mod_fini(void)
{
crypto_unregister_alg(&alg_lz4);
+ crypto_unregister_scomp(&scomp);
}
module_init(lz4_mod_init);
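The init path above registers the same algorithm under both the legacy compress and the new scomp interfaces, and unwinds the first registration if the second fails, so the module never loads half-registered; lz4hc and lzo below repeat the same pattern:

	ret = crypto_register_alg(&alg_lz4);
	if (ret)
		return ret;
	ret = crypto_register_scomp(&scomp);
	if (ret)
		crypto_unregister_alg(&alg_lz4);	/* roll back */
	return ret;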
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index a1d3b5bd3d85..75ffc4a3f786 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -22,37 +22,53 @@
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/lz4.h>
+#include <crypto/internal/scompress.h>
struct lz4hc_ctx {
void *lz4hc_comp_mem;
};
+static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm)
+{
+ void *ctx;
+
+ ctx = vmalloc(LZ4HC_MEM_COMPRESS);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ return ctx;
+}
+
static int lz4hc_init(struct crypto_tfm *tfm)
{
struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->lz4hc_comp_mem = vmalloc(LZ4HC_MEM_COMPRESS);
- if (!ctx->lz4hc_comp_mem)
+ ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL);
+ if (IS_ERR(ctx->lz4hc_comp_mem))
return -ENOMEM;
return 0;
}
+static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+ vfree(ctx);
+}
+
static void lz4hc_exit(struct crypto_tfm *tfm)
{
struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
- vfree(ctx->lz4hc_comp_mem);
+ lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem);
}
-static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
- struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
size_t tmp_len = *dlen;
int err;
- err = lz4hc_compress(src, slen, dst, &tmp_len, ctx->lz4hc_comp_mem);
+ err = lz4hc_compress(src, slen, dst, &tmp_len, ctx);
if (err < 0)
return -EINVAL;
@@ -61,8 +77,25 @@ static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
return 0;
}
-static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx);
+}
+
+static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst,
+ unsigned int *dlen)
+{
+ struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return __lz4hc_compress_crypto(src, slen, dst, dlen,
+ ctx->lz4hc_comp_mem);
+}
+
+static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
int err;
size_t tmp_len = *dlen;
@@ -76,6 +109,20 @@ static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
return err;
}
+static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
+static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst,
+ unsigned int *dlen)
+{
+ return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
static struct crypto_alg alg_lz4hc = {
.cra_name = "lz4hc",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
@@ -89,14 +136,39 @@ static struct crypto_alg alg_lz4hc = {
.coa_decompress = lz4hc_decompress_crypto } }
};
+static struct scomp_alg scomp = {
+ .alloc_ctx = lz4hc_alloc_ctx,
+ .free_ctx = lz4hc_free_ctx,
+ .compress = lz4hc_scompress,
+ .decompress = lz4hc_sdecompress,
+ .base = {
+ .cra_name = "lz4hc",
+ .cra_driver_name = "lz4hc-scomp",
+ .cra_module = THIS_MODULE,
+ }
+};
+
static int __init lz4hc_mod_init(void)
{
- return crypto_register_alg(&alg_lz4hc);
+ int ret;
+
+ ret = crypto_register_alg(&alg_lz4hc);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_scomp(&scomp);
+ if (ret) {
+ crypto_unregister_alg(&alg_lz4hc);
+ return ret;
+ }
+
+ return ret;
}
static void __exit lz4hc_mod_fini(void)
{
crypto_unregister_alg(&alg_lz4hc);
+ crypto_unregister_scomp(&scomp);
}
module_init(lz4hc_mod_init);
diff --git a/crypto/lzo.c b/crypto/lzo.c
index c3f3dd9a28c5..168df784da84 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -22,40 +22,55 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/lzo.h>
+#include <crypto/internal/scompress.h>
struct lzo_ctx {
void *lzo_comp_mem;
};
+static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
+{
+ void *ctx;
+
+ ctx = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN);
+ if (!ctx)
+ ctx = vmalloc(LZO1X_MEM_COMPRESS);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ return ctx;
+}
+
static int lzo_init(struct crypto_tfm *tfm)
{
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS,
- GFP_KERNEL | __GFP_NOWARN);
- if (!ctx->lzo_comp_mem)
- ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
- if (!ctx->lzo_comp_mem)
+ ctx->lzo_comp_mem = lzo_alloc_ctx(NULL);
+ if (IS_ERR(ctx->lzo_comp_mem))
return -ENOMEM;
return 0;
}
+static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+ kvfree(ctx);
+}
+
static void lzo_exit(struct crypto_tfm *tfm)
{
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
- kvfree(ctx->lzo_comp_mem);
+ lzo_free_ctx(NULL, ctx->lzo_comp_mem);
}
-static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lzo_compress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
- struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
- err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem);
+ err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;
@@ -64,8 +79,23 @@ static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
return 0;
}
-static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem);
+}
+
+static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __lzo_compress(src, slen, dst, dlen, ctx);
+}
+
+static int __lzo_decompress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
{
int err;
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
@@ -77,7 +107,19 @@ static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
*dlen = tmp_len;
return 0;
+}
+static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ return __lzo_decompress(src, slen, dst, dlen);
+}
+
+static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __lzo_decompress(src, slen, dst, dlen);
}
static struct crypto_alg alg = {
@@ -88,18 +130,43 @@ static struct crypto_alg alg = {
.cra_init = lzo_init,
.cra_exit = lzo_exit,
.cra_u = { .compress = {
- .coa_compress = lzo_compress,
- .coa_decompress = lzo_decompress } }
+ .coa_compress = lzo_compress,
+ .coa_decompress = lzo_decompress } }
+};
+
+static struct scomp_alg scomp = {
+ .alloc_ctx = lzo_alloc_ctx,
+ .free_ctx = lzo_free_ctx,
+ .compress = lzo_scompress,
+ .decompress = lzo_sdecompress,
+ .base = {
+ .cra_name = "lzo",
+ .cra_driver_name = "lzo-scomp",
+ .cra_module = THIS_MODULE,
+ }
};
static int __init lzo_mod_init(void)
{
- return crypto_register_alg(&alg);
+ int ret;
+
+ ret = crypto_register_alg(&alg);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_scomp(&scomp);
+ if (ret) {
+ crypto_unregister_alg(&alg);
+ return ret;
+ }
+
+ return ret;
}
static void __exit lzo_mod_fini(void)
{
crypto_unregister_alg(&alg);
+ crypto_unregister_scomp(&scomp);
}
module_init(lzo_mod_init);
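lzo_alloc_ctx() above first tries kmalloc() for LZO1X_MEM_COMPRESS (fast, physically contiguous) and falls back to vmalloc() when the slab cannot satisfy the request; __GFP_NOWARN suppresses the allocation-failure warning for the expected fallback case, and the matching kvfree() releases a pointer from either allocator:

	ctx = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN);
	if (!ctx)
		ctx = vmalloc(LZO1X_MEM_COMPRESS);	/* slab failed, go virtual */
	/* ... and on teardown: */
	kvfree(ctx);					/* frees either allocation */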
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index f654965f0933..e4538e07f7ca 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -14,40 +14,37 @@
*
*/
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
#include <linux/slab.h>
struct crypto_pcbc_ctx {
struct crypto_cipher *child;
};
-static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key,
+static int crypto_pcbc_setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
- struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(parent);
+ struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
+ crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
return err;
}
-static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
+static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
+ struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_encrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
@@ -56,7 +53,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
do {
crypto_xor(iv, src, bsize);
- fn(crypto_cipher_tfm(tfm), dst, iv);
+ crypto_cipher_encrypt_one(tfm, dst, iv);
memcpy(iv, dst, bsize);
crypto_xor(iv, src, bsize);
@@ -67,12 +64,10 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
return nbytes;
}
-static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
+static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
+ struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_encrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
@@ -82,7 +77,7 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
do {
memcpy(tmpbuf, src, bsize);
crypto_xor(iv, src, bsize);
- fn(crypto_cipher_tfm(tfm), src, iv);
+ crypto_cipher_encrypt_one(tfm, src, iv);
memcpy(iv, tmpbuf, bsize);
crypto_xor(iv, src, bsize);
@@ -94,38 +89,34 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
return nbytes;
}
-static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_pcbc_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
- nbytes = crypto_pcbc_encrypt_inplace(desc, &walk,
+ nbytes = crypto_pcbc_encrypt_inplace(req, &walk,
child);
else
- nbytes = crypto_pcbc_encrypt_segment(desc, &walk,
+ nbytes = crypto_pcbc_encrypt_segment(req, &walk,
child);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
+static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
+ struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_decrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
@@ -133,7 +124,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
u8 *iv = walk->iv;
do {
- fn(crypto_cipher_tfm(tfm), dst, src);
+ crypto_cipher_decrypt_one(tfm, dst, src);
crypto_xor(dst, iv, bsize);
memcpy(iv, src, bsize);
crypto_xor(iv, dst, bsize);
@@ -147,21 +138,19 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
return nbytes;
}
-static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
+static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
+ struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_decrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
- u8 tmpbuf[bsize];
+ u8 tmpbuf[bsize] __attribute__ ((aligned(__alignof__(u32))));
do {
memcpy(tmpbuf, src, bsize);
- fn(crypto_cipher_tfm(tfm), src, src);
+ crypto_cipher_decrypt_one(tfm, src, src);
crypto_xor(src, iv, bsize);
memcpy(iv, tmpbuf, bsize);
crypto_xor(iv, src, bsize);
@@ -174,37 +163,35 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
return nbytes;
}
-static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_pcbc_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
- nbytes = crypto_pcbc_decrypt_inplace(desc, &walk,
+ nbytes = crypto_pcbc_decrypt_inplace(req, &walk,
child);
else
- nbytes = crypto_pcbc_decrypt_segment(desc, &walk,
+ nbytes = crypto_pcbc_decrypt_segment(req, &walk,
child);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
+static int crypto_pcbc_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+ struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
@@ -215,68 +202,98 @@ static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
return 0;
}
-static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_pcbc_exit_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
crypto_free_cipher(ctx->child);
}
-static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb)
+static void crypto_pcbc_free(struct skcipher_instance *inst)
+{
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
+ struct skcipher_instance *inst;
+ struct crypto_attr_type *algt;
+ struct crypto_spawn *spawn;
struct crypto_alg *alg;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
- if (err)
- return ERR_PTR(err);
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ if (((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) &
+ ~CRYPTO_ALG_INTERNAL)
+ return -EINVAL;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
- CRYPTO_ALG_TYPE_MASK);
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER |
+ (algt->type & CRYPTO_ALG_INTERNAL),
+ CRYPTO_ALG_TYPE_MASK |
+ (algt->mask & CRYPTO_ALG_INTERNAL));
+ err = PTR_ERR(alg);
if (IS_ERR(alg))
- return ERR_CAST(alg);
+ goto err_free_inst;
+
+ spawn = skcipher_instance_ctx(inst);
+ err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ crypto_mod_put(alg);
+ if (err)
+ goto err_free_inst;
- inst = crypto_alloc_instance("pcbc", alg);
- if (IS_ERR(inst))
- goto out_put_alg;
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg);
+ if (err)
+ goto err_drop_spawn;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_blkcipher_type;
+ inst->alg.base.cra_flags = alg->cra_flags & CRYPTO_ALG_INTERNAL;
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
/* We access the data as u32s when xoring. */
- inst->alg.cra_alignmask |= __alignof__(u32) - 1;
+ inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
- inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
- inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
- inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+ inst->alg.ivsize = alg->cra_blocksize;
+ inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
+ inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
- inst->alg.cra_ctxsize = sizeof(struct crypto_pcbc_ctx);
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_pcbc_ctx);
- inst->alg.cra_init = crypto_pcbc_init_tfm;
- inst->alg.cra_exit = crypto_pcbc_exit_tfm;
+ inst->alg.init = crypto_pcbc_init_tfm;
+ inst->alg.exit = crypto_pcbc_exit_tfm;
- inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey;
- inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt;
- inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt;
+ inst->alg.setkey = crypto_pcbc_setkey;
+ inst->alg.encrypt = crypto_pcbc_encrypt;
+ inst->alg.decrypt = crypto_pcbc_decrypt;
-out_put_alg:
- crypto_mod_put(alg);
- return inst;
-}
+ inst->free = crypto_pcbc_free;
-static void crypto_pcbc_free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_spawn(spawn);
+err_free_inst:
kfree(inst);
+ goto out;
}
static struct crypto_template crypto_pcbc_tmpl = {
.name = "pcbc",
- .alloc = crypto_pcbc_alloc,
- .free = crypto_pcbc_free,
+ .create = crypto_pcbc_create,
.module = THIS_MODULE,
};
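For reference, PCBC chains both plaintext and ciphertext into the next block: C_i = E_K(P_i xor IV_i) with IV_{i+1} = P_i xor C_i (decryption mirrors this with D_K). The segment loop above implements exactly that recurrence; one iteration, annotated:

	crypto_xor(iv, src, bsize);			/* iv ^= P_i        */
	crypto_cipher_encrypt_one(tfm, dst, iv);	/* C_i = E_K(iv)    */
	memcpy(iv, dst, bsize);				/* iv  = C_i        */
	crypto_xor(iv, src, bsize);			/* iv  = P_i ^ C_i  */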
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index 2df9835dfbc0..b1c2d57dc734 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -17,6 +17,7 @@
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <asm/unaligned.h>
static inline u64 mlt(u64 a, u64 b)
{
@@ -33,11 +34,6 @@ static inline u32 and(u32 v, u32 mask)
return v & mask;
}
-static inline u32 le32_to_cpuvp(const void *p)
-{
- return le32_to_cpup(p);
-}
-
int crypto_poly1305_init(struct shash_desc *desc)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
@@ -65,19 +61,19 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
{
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
- dctx->r[0] = (le32_to_cpuvp(key + 0) >> 0) & 0x3ffffff;
- dctx->r[1] = (le32_to_cpuvp(key + 3) >> 2) & 0x3ffff03;
- dctx->r[2] = (le32_to_cpuvp(key + 6) >> 4) & 0x3ffc0ff;
- dctx->r[3] = (le32_to_cpuvp(key + 9) >> 6) & 0x3f03fff;
- dctx->r[4] = (le32_to_cpuvp(key + 12) >> 8) & 0x00fffff;
+ dctx->r[0] = (get_unaligned_le32(key + 0) >> 0) & 0x3ffffff;
+ dctx->r[1] = (get_unaligned_le32(key + 3) >> 2) & 0x3ffff03;
+ dctx->r[2] = (get_unaligned_le32(key + 6) >> 4) & 0x3ffc0ff;
+ dctx->r[3] = (get_unaligned_le32(key + 9) >> 6) & 0x3f03fff;
+ dctx->r[4] = (get_unaligned_le32(key + 12) >> 8) & 0x00fffff;
}
static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
{
- dctx->s[0] = le32_to_cpuvp(key + 0);
- dctx->s[1] = le32_to_cpuvp(key + 4);
- dctx->s[2] = le32_to_cpuvp(key + 8);
- dctx->s[3] = le32_to_cpuvp(key + 12);
+ dctx->s[0] = get_unaligned_le32(key + 0);
+ dctx->s[1] = get_unaligned_le32(key + 4);
+ dctx->s[2] = get_unaligned_le32(key + 8);
+ dctx->s[3] = get_unaligned_le32(key + 12);
}
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
@@ -137,11 +133,11 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
while (likely(srclen >= POLY1305_BLOCK_SIZE)) {
/* h += m[i] */
- h0 += (le32_to_cpuvp(src + 0) >> 0) & 0x3ffffff;
- h1 += (le32_to_cpuvp(src + 3) >> 2) & 0x3ffffff;
- h2 += (le32_to_cpuvp(src + 6) >> 4) & 0x3ffffff;
- h3 += (le32_to_cpuvp(src + 9) >> 6) & 0x3ffffff;
- h4 += (le32_to_cpuvp(src + 12) >> 8) | hibit;
+ h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff;
+ h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff;
+ h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff;
+ h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff;
+ h4 += (get_unaligned_le32(src + 12) >> 8) | hibit;
/* h *= r */
d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) +
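The offsets fed to these loads (key + 3, src + 6, and so on) are intentionally misaligned because Poly1305 splits 128-bit values into 26-bit limbs; dereferencing them through le32_to_cpup() is undefined behaviour on strict-alignment architectures, whereas get_unaligned_le32() compiles down to a byte-wise load where the CPU requires one. For example:

	dctx->r[1] = (get_unaligned_le32(key + 3) >> 2) & 0x3ffff03;	/* safe at any offset */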
diff --git a/crypto/scompress.c b/crypto/scompress.c
new file mode 100644
index 000000000000..35e396d154b7
--- /dev/null
+++ b/crypto/scompress.c
@@ -0,0 +1,356 @@
+/*
+ * Synchronous Compression operations
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <linux/vmalloc.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/internal/scompress.h>
+#include "internal.h"
+
+static const struct crypto_type crypto_scomp_type;
+static void * __percpu *scomp_src_scratches;
+static void * __percpu *scomp_dst_scratches;
+static int scomp_scratch_users;
+static DEFINE_MUTEX(scomp_lock);
+
+#ifdef CONFIG_NET
+static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_comp rscomp;
+
+ strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+ sizeof(struct crypto_report_comp), &rscomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+#else
+static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return -ENOSYS;
+}
+#endif
+
+static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
+ __attribute__ ((unused));
+
+static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ seq_puts(m, "type : scomp\n");
+}
+
+static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
+{
+ return 0;
+}
+
+static void crypto_scomp_free_scratches(void * __percpu *scratches)
+{
+ int i;
+
+ if (!scratches)
+ return;
+
+ for_each_possible_cpu(i)
+ vfree(*per_cpu_ptr(scratches, i));
+
+ free_percpu(scratches);
+}
+
+static void * __percpu *crypto_scomp_alloc_scratches(void)
+{
+ void * __percpu *scratches;
+ int i;
+
+ scratches = alloc_percpu(void *);
+ if (!scratches)
+ return NULL;
+
+ for_each_possible_cpu(i) {
+ void *scratch;
+
+ scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+ if (!scratch)
+ goto error;
+ *per_cpu_ptr(scratches, i) = scratch;
+ }
+
+ return scratches;
+
+error:
+ crypto_scomp_free_scratches(scratches);
+ return NULL;
+}
+
+static void crypto_scomp_free_all_scratches(void)
+{
+ if (!--scomp_scratch_users) {
+ crypto_scomp_free_scratches(scomp_src_scratches);
+ crypto_scomp_free_scratches(scomp_dst_scratches);
+ scomp_src_scratches = NULL;
+ scomp_dst_scratches = NULL;
+ }
+}
+
+static int crypto_scomp_alloc_all_scratches(void)
+{
+ if (!scomp_scratch_users++) {
+ scomp_src_scratches = crypto_scomp_alloc_scratches();
+ if (!scomp_src_scratches)
+ return -ENOMEM;
+ scomp_dst_scratches = crypto_scomp_alloc_scratches();
+ if (!scomp_dst_scratches)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void crypto_scomp_sg_free(struct scatterlist *sgl)
+{
+ int i, n;
+ struct page *page;
+
+ if (!sgl)
+ return;
+
+ n = sg_nents(sgl);
+ for_each_sg(sgl, sgl, n, i) {
+ page = sg_page(sgl);
+ if (page)
+ __free_page(page);
+ }
+
+ kfree(sgl);
+}
+
+static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
+{
+ struct scatterlist *sgl;
+ struct page *page;
+ int i, n;
+
+ n = ((size - 1) >> PAGE_SHIFT) + 1;
+
+ sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
+ if (!sgl)
+ return NULL;
+
+ sg_init_table(sgl, n);
+
+ for (i = 0; i < n; i++) {
+ page = alloc_page(gfp);
+ if (!page)
+ goto err;
+ sg_set_page(sgl + i, page, PAGE_SIZE, 0);
+ }
+
+ return sgl;
+
+err:
+ sg_mark_end(sgl + i);
+ crypto_scomp_sg_free(sgl);
+ return NULL;
+}
+
+static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ void **tfm_ctx = acomp_tfm_ctx(tfm);
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void **ctx = acomp_request_ctx(req);
+ const int cpu = get_cpu();
+ u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
+ u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
+ int ret;
+
+ if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (req->dst && !req->dlen) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
+ req->dlen = SCOMP_SCRATCH_SIZE;
+
+ scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
+ if (dir)
+ ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
+ scratch_dst, &req->dlen, *ctx);
+ else
+ ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
+ scratch_dst, &req->dlen, *ctx);
+ if (!ret) {
+ if (!req->dst) {
+ req->dst = crypto_scomp_sg_alloc(req->dlen,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req->dst) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
+ 1);
+ }
+out:
+ put_cpu();
+ return ret;
+}
+
+static int scomp_acomp_compress(struct acomp_req *req)
+{
+ return scomp_acomp_comp_decomp(req, 1);
+}
+
+static int scomp_acomp_decompress(struct acomp_req *req)
+{
+ return scomp_acomp_comp_decomp(req, 0);
+}
+
+static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
+{
+ struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_scomp(*ctx);
+}
+
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *calg = tfm->__crt_alg;
+ struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
+ struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
+ struct crypto_scomp *scomp;
+
+ if (!crypto_mod_get(calg))
+ return -EAGAIN;
+
+ scomp = crypto_create_tfm(calg, &crypto_scomp_type);
+ if (IS_ERR(scomp)) {
+ crypto_mod_put(calg);
+ return PTR_ERR(scomp);
+ }
+
+ *ctx = scomp;
+ tfm->exit = crypto_exit_scomp_ops_async;
+
+ crt->compress = scomp_acomp_compress;
+ crt->decompress = scomp_acomp_decompress;
+ crt->dst_free = crypto_scomp_sg_free;
+ crt->reqsize = sizeof(void *);
+
+ return 0;
+}
+
+struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void *ctx;
+
+ ctx = crypto_scomp_alloc_ctx(scomp);
+ if (IS_ERR(ctx)) {
+ kfree(req);
+ return NULL;
+ }
+
+ *req->__ctx = ctx;
+
+ return req;
+}
+
+void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void *ctx = *req->__ctx;
+
+ if (ctx)
+ crypto_scomp_free_ctx(scomp, ctx);
+}
+
+static const struct crypto_type crypto_scomp_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_scomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_scomp_show,
+#endif
+ .report = crypto_scomp_report,
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
+ .type = CRYPTO_ALG_TYPE_SCOMPRESS,
+ .tfmsize = offsetof(struct crypto_scomp, base),
+};
+
+int crypto_register_scomp(struct scomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+ int ret = -ENOMEM;
+
+ mutex_lock(&scomp_lock);
+ if (crypto_scomp_alloc_all_scratches())
+ goto error;
+
+ base->cra_type = &crypto_scomp_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
+
+ ret = crypto_register_alg(base);
+ if (ret)
+ goto error;
+
+ mutex_unlock(&scomp_lock);
+ return ret;
+
+error:
+ crypto_scomp_free_all_scratches();
+ mutex_unlock(&scomp_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_register_scomp);
+
+int crypto_unregister_scomp(struct scomp_alg *alg)
+{
+ int ret;
+
+ mutex_lock(&scomp_lock);
+ ret = crypto_unregister_alg(&alg->base);
+ crypto_scomp_free_all_scratches();
+ mutex_unlock(&scomp_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Synchronous compression type");
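scomp_acomp_comp_decomp() above pins the caller to one CPU with get_cpu(), giving it exclusive use of that CPU's scratch pair for the whole copy-in / (de)compress / copy-out sequence; the flip side is that scomp callbacks run with preemption disabled and therefore must not sleep. The access pattern, in outline:

	const int cpu = get_cpu();	/* disable preemption, own this CPU's scratches */
	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
	/* copy in, run the synchronous (de)compressor, copy out */
	put_cpu();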
diff --git a/crypto/simd.c b/crypto/simd.c
new file mode 100644
index 000000000000..88203370a62f
--- /dev/null
+++ b/crypto/simd.c
@@ -0,0 +1,226 @@
+/*
+ * Shared crypto simd helpers
+ *
+ * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * Based on aesni-intel_glue.c by:
+ * Copyright (C) 2008, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ */
+
+#include <crypto/cryptd.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <asm/simd.h>
+
+struct simd_skcipher_alg {
+ const char *ialg_name;
+ struct skcipher_alg alg;
+};
+
+struct simd_skcipher_ctx {
+ struct cryptd_skcipher *cryptd_tfm;
+};
+
+static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *child = &ctx->cryptd_tfm->base;
+ int err;
+
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, key_len);
+ crypto_skcipher_set_flags(tfm, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int simd_skcipher_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_request *subreq;
+ struct crypto_skcipher *child;
+
+ subreq = skcipher_request_ctx(req);
+ *subreq = *req;
+
+ if (!may_use_simd() ||
+ (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
+ child = &ctx->cryptd_tfm->base;
+ else
+ child = cryptd_skcipher_child(ctx->cryptd_tfm);
+
+ skcipher_request_set_tfm(subreq, child);
+
+ return crypto_skcipher_encrypt(subreq);
+}
+
+static int simd_skcipher_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_request *subreq;
+ struct crypto_skcipher *child;
+
+ subreq = skcipher_request_ctx(req);
+ *subreq = *req;
+
+ if (!may_use_simd() ||
+ (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
+ child = &ctx->cryptd_tfm->base;
+ else
+ child = cryptd_skcipher_child(ctx->cryptd_tfm);
+
+ skcipher_request_set_tfm(subreq, child);
+
+ return crypto_skcipher_decrypt(subreq);
+}
+
+static void simd_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ cryptd_free_skcipher(ctx->cryptd_tfm);
+}
+
+static int simd_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct cryptd_skcipher *cryptd_tfm;
+ struct simd_skcipher_alg *salg;
+ struct skcipher_alg *alg;
+ unsigned reqsize;
+
+ alg = crypto_skcipher_alg(tfm);
+ salg = container_of(alg, struct simd_skcipher_alg, alg);
+
+ cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name,
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+
+ ctx->cryptd_tfm = cryptd_tfm;
+
+ reqsize = sizeof(struct skcipher_request);
+ reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base);
+
+ crypto_skcipher_set_reqsize(tfm, reqsize);
+
+ return 0;
+}
+
+struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+ const char *drvname,
+ const char *basename)
+{
+ struct simd_skcipher_alg *salg;
+ struct crypto_skcipher *tfm;
+ struct skcipher_alg *ialg;
+ struct skcipher_alg *alg;
+ int err;
+
+ tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ ialg = crypto_skcipher_alg(tfm);
+
+ salg = kzalloc(sizeof(*salg), GFP_KERNEL);
+ if (!salg) {
+ salg = ERR_PTR(-ENOMEM);
+ goto out_put_tfm;
+ }
+
+ salg->ialg_name = basename;
+ alg = &salg->alg;
+
+ err = -ENAMETOOLONG;
+ if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto out_free_salg;
+
+ if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ drvname) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_salg;
+
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_priority = ialg->base.cra_priority;
+ alg->base.cra_blocksize = ialg->base.cra_blocksize;
+ alg->base.cra_alignmask = ialg->base.cra_alignmask;
+ alg->base.cra_module = ialg->base.cra_module;
+ alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx);
+
+ alg->ivsize = ialg->ivsize;
+ alg->chunksize = ialg->chunksize;
+ alg->min_keysize = ialg->min_keysize;
+ alg->max_keysize = ialg->max_keysize;
+
+ alg->init = simd_skcipher_init;
+ alg->exit = simd_skcipher_exit;
+
+ alg->setkey = simd_skcipher_setkey;
+ alg->encrypt = simd_skcipher_encrypt;
+ alg->decrypt = simd_skcipher_decrypt;
+
+ err = crypto_register_skcipher(alg);
+ if (err)
+ goto out_free_salg;
+
+out_put_tfm:
+ crypto_free_skcipher(tfm);
+ return salg;
+
+out_free_salg:
+ kfree(salg);
+ salg = ERR_PTR(err);
+ goto out_put_tfm;
+}
+EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);
+
+struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
+ const char *basename)
+{
+ char drvname[CRYPTO_MAX_ALG_NAME];
+
+ if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
+ CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ return simd_skcipher_create_compat(algname, drvname, basename);
+}
+EXPORT_SYMBOL_GPL(simd_skcipher_create);
+
+void simd_skcipher_free(struct simd_skcipher_alg *salg)
+{
+ crypto_unregister_skcipher(&salg->alg);
+ kfree(salg);
+}
+EXPORT_SYMBOL_GPL(simd_skcipher_free);
+
+MODULE_LICENSE("GPL");
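simd_skcipher_encrypt()/decrypt() above pick the child per request: the request is bounced through cryptd's workqueue when SIMD registers are unusable in the current context, or when running atomically while cryptd already has requests queued (calling the child directly then could complete out of order); otherwise the internal synchronous algorithm is invoked in place. The dispatch, verbatim from the hunk above:

	if (!may_use_simd() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;			/* async via cryptd */
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);	/* direct SIMD call */

This works because simd_skcipher_init() sized the wrapper's request context to hold the larger of the two child requests.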
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index f7d0018dcaee..aca07c643d41 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -14,9 +14,12 @@
*
*/
+#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
@@ -24,6 +27,543 @@
#include "internal.h"
+enum {
+ SKCIPHER_WALK_PHYS = 1 << 0,
+ SKCIPHER_WALK_SLOW = 1 << 1,
+ SKCIPHER_WALK_COPY = 1 << 2,
+ SKCIPHER_WALK_DIFF = 1 << 3,
+ SKCIPHER_WALK_SLEEP = 1 << 4,
+};
+
+struct skcipher_walk_buffer {
+ struct list_head entry;
+ struct scatter_walk dst;
+ unsigned int len;
+ u8 *data;
+ u8 buffer[];
+};
+
+static int skcipher_walk_next(struct skcipher_walk *walk);
+
+static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
+{
+ if (PageHighMem(scatterwalk_page(walk)))
+ kunmap_atomic(vaddr);
+}
+
+static inline void *skcipher_map(struct scatter_walk *walk)
+{
+ struct page *page = scatterwalk_page(walk);
+
+ return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
+ offset_in_page(walk->offset);
+}
+
+static inline void skcipher_map_src(struct skcipher_walk *walk)
+{
+ walk->src.virt.addr = skcipher_map(&walk->in);
+}
+
+static inline void skcipher_map_dst(struct skcipher_walk *walk)
+{
+ walk->dst.virt.addr = skcipher_map(&walk->out);
+}
+
+static inline void skcipher_unmap_src(struct skcipher_walk *walk)
+{
+ skcipher_unmap(&walk->in, walk->src.virt.addr);
+}
+
+static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
+{
+ skcipher_unmap(&walk->out, walk->dst.virt.addr);
+}
+
+static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
+{
+ return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
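+ *
+ * Example: with 4 KiB pages, start == 0x...ff0 and len == 32 would end
+ * at 0x...00f on the next page, so the start of that next page is
+ * returned instead; otherwise start itself is already a usable spot.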
+ */
+static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
+{
+ u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+
+ return max(start, end_page);
+}
+
+static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+ u8 *addr;
+
+ addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
+ addr = skcipher_get_spot(addr, bsize);
+ scatterwalk_copychunks(addr, &walk->out, bsize,
+ (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+ return 0;
+}
+
+int skcipher_walk_done(struct skcipher_walk *walk, int err)
+{
+ unsigned int n = walk->nbytes - err;
+ unsigned int nbytes;
+
+ nbytes = walk->total - n;
+
+ if (unlikely(err < 0)) {
+ nbytes = 0;
+ n = 0;
+ } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
+ SKCIPHER_WALK_SLOW |
+ SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF)))) {
+unmap_src:
+ skcipher_unmap_src(walk);
+ } else if (walk->flags & SKCIPHER_WALK_DIFF) {
+ skcipher_unmap_dst(walk);
+ goto unmap_src;
+ } else if (walk->flags & SKCIPHER_WALK_COPY) {
+ skcipher_map_dst(walk);
+ memcpy(walk->dst.virt.addr, walk->page, n);
+ skcipher_unmap_dst(walk);
+ } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
+ if (WARN_ON(err)) {
+ err = -EINVAL;
+ nbytes = 0;
+ } else
+ n = skcipher_done_slow(walk, n);
+ }
+
+ if (err > 0)
+ err = 0;
+
+ walk->total = nbytes;
+ walk->nbytes = nbytes;
+
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_advance(&walk->out, n);
+ scatterwalk_done(&walk->in, 0, nbytes);
+ scatterwalk_done(&walk->out, 1, nbytes);
+
+ if (nbytes) {
+ crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
+ CRYPTO_TFM_REQ_MAY_SLEEP : 0);
+ return skcipher_walk_next(walk);
+ }
+
+ /* Short-circuit for the common/fast path. */
+ if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+ goto out;
+
+ if (walk->flags & SKCIPHER_WALK_PHYS)
+ goto out;
+
+ if (walk->iv != walk->oiv)
+ memcpy(walk->oiv, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+ free_page((unsigned long)walk->page);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_done);
+
+void skcipher_walk_complete(struct skcipher_walk *walk, int err)
+{
+ struct skcipher_walk_buffer *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
+ u8 *data;
+
+ if (err)
+ goto done;
+
+ data = p->data;
+ if (!data) {
+ data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
+ data = skcipher_get_spot(data, walk->chunksize);
+ }
+
+ scatterwalk_copychunks(data, &p->dst, p->len, 1);
+
+ if (offset_in_page(p->data) + p->len + walk->chunksize >
+ PAGE_SIZE)
+ free_page((unsigned long)p->data);
+
+done:
+ list_del(&p->entry);
+ kfree(p);
+ }
+
+ if (!err && walk->iv != walk->oiv)
+ memcpy(walk->oiv, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+ free_page((unsigned long)walk->page);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_complete);
+
+static void skcipher_queue_write(struct skcipher_walk *walk,
+ struct skcipher_walk_buffer *p)
+{
+ p->dst = walk->out;
+ list_add_tail(&p->entry, &walk->buffers);
+}
+
+static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+ bool phys = walk->flags & SKCIPHER_WALK_PHYS;
+ unsigned alignmask = walk->alignmask;
+ struct skcipher_walk_buffer *p;
+ unsigned a;
+ unsigned n;
+ u8 *buffer;
+ void *v;
+
+ if (!phys) {
+ buffer = walk->buffer ?: walk->page;
+ if (buffer)
+ goto ok;
+ }
+
+ /* Start with the minimum alignment of kmalloc. */
+ a = crypto_tfm_ctx_alignment() - 1;
+ n = bsize;
+
+ if (phys) {
+ /* Calculate the minimum alignment of p->buffer. */
+ a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
+ n += sizeof(*p);
+ }
+
+ /* Minimum size to align p->buffer by alignmask. */
+ n += alignmask & ~a;
+
+ /* Minimum size to ensure p->buffer does not straddle a page. */
+ n += (bsize - 1) & ~(alignmask | a);
+
+ v = kzalloc(n, skcipher_walk_gfp(walk));
+ if (!v)
+ return skcipher_walk_done(walk, -ENOMEM);
+
+ if (phys) {
+ p = v;
+ p->len = bsize;
+ skcipher_queue_write(walk, p);
+ buffer = p->buffer;
+ } else {
+ walk->buffer = v;
+ buffer = v;
+ }
+
+ok:
+ walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
+ walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
+ walk->src.virt.addr = walk->dst.virt.addr;
+
+ scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
+
+ walk->nbytes = bsize;
+ walk->flags |= SKCIPHER_WALK_SLOW;
+
+ return 0;
+}
+
+static int skcipher_next_copy(struct skcipher_walk *walk)
+{
+ struct skcipher_walk_buffer *p;
+ u8 *tmp = walk->page;
+
+ skcipher_map_src(walk);
+ memcpy(tmp, walk->src.virt.addr, walk->nbytes);
+ skcipher_unmap_src(walk);
+
+ walk->src.virt.addr = tmp;
+ walk->dst.virt.addr = tmp;
+
+ if (!(walk->flags & SKCIPHER_WALK_PHYS))
+ return 0;
+
+ p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
+ if (!p)
+ return -ENOMEM;
+
+ p->data = walk->page;
+ p->len = walk->nbytes;
+ skcipher_queue_write(walk, p);
+
+ if (offset_in_page(walk->page) + walk->nbytes + walk->chunksize >
+ PAGE_SIZE)
+ walk->page = NULL;
+ else
+ walk->page += walk->nbytes;
+
+ return 0;
+}
+
+static int skcipher_next_fast(struct skcipher_walk *walk)
+{
+ unsigned long diff;
+
+ walk->src.phys.page = scatterwalk_page(&walk->in);
+ walk->src.phys.offset = offset_in_page(walk->in.offset);
+ walk->dst.phys.page = scatterwalk_page(&walk->out);
+ walk->dst.phys.offset = offset_in_page(walk->out.offset);
+
+ if (walk->flags & SKCIPHER_WALK_PHYS)
+ return 0;
+
+ diff = walk->src.phys.offset - walk->dst.phys.offset;
+ diff |= walk->src.virt.page - walk->dst.virt.page;
+
+ skcipher_map_src(walk);
+ walk->dst.virt.addr = walk->src.virt.addr;
+
+ if (diff) {
+ walk->flags |= SKCIPHER_WALK_DIFF;
+ skcipher_map_dst(walk);
+ }
+
+ return 0;
+}
+
+static int skcipher_walk_next(struct skcipher_walk *walk)
+{
+ unsigned int bsize;
+ unsigned int n;
+ int err;
+
+ walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF);
+
+ n = walk->total;
+ bsize = min(walk->chunksize, max(n, walk->blocksize));
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (unlikely(n < bsize)) {
+ if (unlikely(walk->total < walk->blocksize))
+ return skcipher_walk_done(walk, -EINVAL);
+
+slow_path:
+ err = skcipher_next_slow(walk, bsize);
+ goto set_phys_lowmem;
+ }
+
+ if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
+ if (!walk->page) {
+ gfp_t gfp = skcipher_walk_gfp(walk);
+
+ walk->page = (void *)__get_free_page(gfp);
+ if (!walk->page)
+ goto slow_path;
+ }
+
+ walk->nbytes = min_t(unsigned, n,
+ PAGE_SIZE - offset_in_page(walk->page));
+ walk->flags |= SKCIPHER_WALK_COPY;
+ err = skcipher_next_copy(walk);
+ goto set_phys_lowmem;
+ }
+
+ walk->nbytes = n;
+
+ return skcipher_next_fast(walk);
+
+set_phys_lowmem:
+ if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
+ walk->src.phys.page = virt_to_page(walk->src.virt.addr);
+ walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
+ walk->src.phys.offset &= PAGE_SIZE - 1;
+ walk->dst.phys.offset &= PAGE_SIZE - 1;
+ }
+ return err;
+}
+
+static int skcipher_copy_iv(struct skcipher_walk *walk)
+{
+ unsigned a = crypto_tfm_ctx_alignment() - 1;
+ unsigned alignmask = walk->alignmask;
+ unsigned ivsize = walk->ivsize;
+ unsigned bs = walk->chunksize;
+ unsigned aligned_bs;
+ unsigned size;
+ u8 *iv;
+
+ aligned_bs = ALIGN(bs, alignmask + 1);
+
+ /* Minimum size to align buffer by alignmask. */
+ size = alignmask & ~a;
+
+ if (walk->flags & SKCIPHER_WALK_PHYS)
+ size += ivsize;
+ else {
+ size += aligned_bs + ivsize;
+
+ /* Minimum size to ensure buffer does not straddle a page. */
+ size += (bs - 1) & ~(alignmask | a);
+ }
+
+ walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
+ if (!walk->buffer)
+ return -ENOMEM;
+
+ iv = PTR_ALIGN(walk->buffer, alignmask + 1);
+ iv = skcipher_get_spot(iv, bs) + aligned_bs;
+
+ walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+ return 0;
+}
+
+static int skcipher_walk_first(struct skcipher_walk *walk)
+{
+ walk->nbytes = 0;
+
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
+ if (unlikely(!walk->total))
+ return 0;
+
+ walk->buffer = NULL;
+ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+ int err = skcipher_copy_iv(walk);
+ if (err)
+ return err;
+ }
+
+ walk->page = NULL;
+ walk->nbytes = walk->total;
+
+ return skcipher_walk_next(walk);
+}
+
+static int skcipher_walk_skcipher(struct skcipher_walk *walk,
+ struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+ scatterwalk_start(&walk->in, req->src);
+ scatterwalk_start(&walk->out, req->dst);
+
+ walk->total = req->cryptlen;
+ walk->iv = req->iv;
+ walk->oiv = req->iv;
+
+ walk->flags &= ~SKCIPHER_WALK_SLEEP;
+ walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ SKCIPHER_WALK_SLEEP : 0;
+
+ walk->blocksize = crypto_skcipher_blocksize(tfm);
+ walk->chunksize = crypto_skcipher_chunksize(tfm);
+ walk->ivsize = crypto_skcipher_ivsize(tfm);
+ walk->alignmask = crypto_skcipher_alignmask(tfm);
+
+ return skcipher_walk_first(walk);
+}
+
+int skcipher_walk_virt(struct skcipher_walk *walk,
+ struct skcipher_request *req, bool atomic)
+{
+ int err;
+
+ walk->flags &= ~SKCIPHER_WALK_PHYS;
+
+ err = skcipher_walk_skcipher(walk, req);
+
+ walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_virt);
+
+void skcipher_walk_atomise(struct skcipher_walk *walk)
+{
+ walk->flags &= ~SKCIPHER_WALK_SLEEP;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_atomise);
+
+int skcipher_walk_async(struct skcipher_walk *walk,
+ struct skcipher_request *req)
+{
+ walk->flags |= SKCIPHER_WALK_PHYS;
+
+ INIT_LIST_HEAD(&walk->buffers);
+
+ return skcipher_walk_skcipher(walk, req);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_async);
+
+static int skcipher_walk_aead_common(struct skcipher_walk *walk,
+ struct aead_request *req, bool atomic)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ int err;
+
+ walk->flags &= ~SKCIPHER_WALK_PHYS;
+
+ scatterwalk_start(&walk->in, req->src);
+ scatterwalk_start(&walk->out, req->dst);
+
+ scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
+ scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
+
+ walk->iv = req->iv;
+ walk->oiv = req->iv;
+
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+ walk->flags |= SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags &= ~SKCIPHER_WALK_SLEEP;
+
+ walk->blocksize = crypto_aead_blocksize(tfm);
+ walk->chunksize = crypto_aead_chunksize(tfm);
+ walk->ivsize = crypto_aead_ivsize(tfm);
+ walk->alignmask = crypto_aead_alignmask(tfm);
+
+ err = skcipher_walk_first(walk);
+
+ if (atomic)
+ walk->flags &= ~SKCIPHER_WALK_SLEEP;
+
+ return err;
+}
+
+int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
+ bool atomic)
+{
+ walk->total = req->cryptlen;
+
+ return skcipher_walk_aead_common(walk, req, atomic);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_aead);
+
+int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
+ struct aead_request *req, bool atomic)
+{
+ walk->total = req->cryptlen;
+
+ return skcipher_walk_aead_common(walk, req, atomic);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
+
+int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
+ struct aead_request *req, bool atomic)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ walk->total = req->cryptlen - crypto_aead_authsize(tfm);
+
+ return skcipher_walk_aead_common(walk, req, atomic);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
+
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
if (alg->cra_type == &crypto_blkcipher_type)
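
A driver-side sketch of how the walk API added above is consumed, assuming a simple block cipher; the demo function and the placement of the actual transform are illustrative:

static int demo_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	struct skcipher_walk walk;
	int err;

	/* Map the first run of contiguous src/dst bytes. */
	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		/* A real cipher would transform nbytes - (nbytes % bsize)
		 * bytes from walk.src.virt.addr to walk.dst.virt.addr here.
		 * Passing the unprocessed remainder to skcipher_walk_done()
		 * unmaps this span and maps the next one, or ends the walk.
		 */
		err = skcipher_walk_done(&walk, nbytes % bsize);
	}

	return err;
}
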
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 62dffa0028ac..f616ad74cce7 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -33,6 +33,7 @@
#include <crypto/drbg.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
+#include <crypto/acompress.h>
#include "internal.h"
@@ -62,7 +63,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
*/
#define IDX1 32
#define IDX2 32400
-#define IDX3 1
+#define IDX3 1511
#define IDX4 8193
#define IDX5 22222
#define IDX6 17101
@@ -1442,6 +1443,126 @@ out:
return ret;
}
+static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
+ struct comp_testvec *dtemplate, int ctcount, int dtcount)
+{
+ const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
+ unsigned int i;
+ char *output;
+ int ret;
+ struct scatterlist src, dst;
+ struct acomp_req *req;
+ struct tcrypt_result result;
+
+ output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
+
+ for (i = 0; i < ctcount; i++) {
+ unsigned int dlen = COMP_BUF_SIZE;
+ int ilen = ctemplate[i].inlen;
+
+ memset(output, 0, dlen);
+ init_completion(&result.completion);
+ sg_init_one(&src, ctemplate[i].input, ilen);
+ sg_init_one(&dst, output, dlen);
+
+ req = acomp_request_alloc(tfm);
+ if (!req) {
+ pr_err("alg: acomp: request alloc failed for %s\n",
+ algo);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &result);
+
+ ret = wait_async_op(&result, crypto_acomp_compress(req));
+ if (ret) {
+ pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
+ i + 1, algo, -ret);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (req->dlen != ctemplate[i].outlen) {
+ pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
+ i + 1, algo, req->dlen);
+ ret = -EINVAL;
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (memcmp(output, ctemplate[i].output, req->dlen)) {
+ pr_err("alg: acomp: Compression test %d failed for %s\n",
+ i + 1, algo);
+ hexdump(output, req->dlen);
+ ret = -EINVAL;
+ acomp_request_free(req);
+ goto out;
+ }
+
+ acomp_request_free(req);
+ }
+
+ for (i = 0; i < dtcount; i++) {
+ unsigned int dlen = COMP_BUF_SIZE;
+ int ilen = dtemplate[i].inlen;
+
+ memset(output, 0, dlen);
+ init_completion(&result.completion);
+ sg_init_one(&src, dtemplate[i].input, ilen);
+ sg_init_one(&dst, output, dlen);
+
+ req = acomp_request_alloc(tfm);
+ if (!req) {
+ pr_err("alg: acomp: request alloc failed for %s\n",
+ algo);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &result);
+
+ ret = wait_async_op(&result, crypto_acomp_decompress(req));
+ if (ret) {
+ pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
+ i + 1, algo, -ret);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (req->dlen != dtemplate[i].outlen) {
+ pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
+ i + 1, algo, req->dlen);
+ ret = -EINVAL;
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (memcmp(output, dtemplate[i].output, req->dlen)) {
+ pr_err("alg: acomp: Decompression test %d failed for %s\n",
+ i + 1, algo);
+ hexdump(output, req->dlen);
+ ret = -EINVAL;
+ acomp_request_free(req);
+ goto out;
+ }
+
+ acomp_request_free(req);
+ }
+
+ ret = 0;
+
+out:
+ kfree(output);
+ return ret;
+}
+
static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
unsigned int tcount)
{
@@ -1509,7 +1630,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
struct crypto_aead *tfm;
int err = 0;
- tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_aead(driver, type, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
@@ -1538,7 +1659,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
struct crypto_cipher *tfm;
int err = 0;
- tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_cipher(driver, type, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: cipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1567,7 +1688,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
struct crypto_skcipher *tfm;
int err = 0;
- tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_skcipher(driver, type, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: skcipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1593,22 +1714,38 @@ out:
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
- struct crypto_comp *tfm;
+ struct crypto_comp *comp;
+ struct crypto_acomp *acomp;
int err;
+ u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
+
+ if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
+ acomp = crypto_alloc_acomp(driver, type, mask);
+ if (IS_ERR(acomp)) {
+ pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
+ driver, PTR_ERR(acomp));
+ return PTR_ERR(acomp);
+ }
+ err = test_acomp(acomp, desc->suite.comp.comp.vecs,
+ desc->suite.comp.decomp.vecs,
+ desc->suite.comp.comp.count,
+ desc->suite.comp.decomp.count);
+ crypto_free_acomp(acomp);
+ } else {
+ comp = crypto_alloc_comp(driver, type, mask);
+ if (IS_ERR(comp)) {
+ pr_err("alg: comp: Failed to load transform for %s: %ld\n",
+ driver, PTR_ERR(comp));
+ return PTR_ERR(comp);
+ }
- tfm = crypto_alloc_comp(driver, type, mask);
- if (IS_ERR(tfm)) {
- printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
- "%ld\n", driver, PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-
- err = test_comp(tfm, desc->suite.comp.comp.vecs,
- desc->suite.comp.decomp.vecs,
- desc->suite.comp.comp.count,
- desc->suite.comp.decomp.count);
+ err = test_comp(comp, desc->suite.comp.comp.vecs,
+ desc->suite.comp.decomp.vecs,
+ desc->suite.comp.comp.count,
+ desc->suite.comp.decomp.count);
- crypto_free_comp(tfm);
+ crypto_free_comp(comp);
+ }
return err;
}
@@ -1618,7 +1755,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
struct crypto_ahash *tfm;
int err;
- tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_ahash(driver, type, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
@@ -1646,7 +1783,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
if (err)
goto out;
- tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_shash(driver, type, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
@@ -1688,7 +1825,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
struct crypto_rng *rng;
int err;
- rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ rng = crypto_alloc_rng(driver, type, mask);
if (IS_ERR(rng)) {
printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(rng));
@@ -1715,7 +1852,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
if (!buf)
return -ENOMEM;
- drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ drng = crypto_alloc_rng(driver, type, mask);
if (IS_ERR(drng)) {
printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
"%s\n", driver);
@@ -1909,7 +2046,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
struct crypto_kpp *tfm;
int err = 0;
- tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_kpp(driver, type, mask);
if (IS_ERR(tfm)) {
pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
@@ -2068,7 +2205,7 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
struct crypto_akcipher *tfm;
int err = 0;
- tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_akcipher(driver, type, mask);
if (IS_ERR(tfm)) {
pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
@@ -2091,88 +2228,6 @@ static int alg_test_null(const struct alg_test_desc *desc,
/* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = {
{
- .alg = "__cbc-cast5-avx",
- .test = alg_test_null,
- }, {
- .alg = "__cbc-cast6-avx",
- .test = alg_test_null,
- }, {
- .alg = "__cbc-serpent-avx",
- .test = alg_test_null,
- }, {
- .alg = "__cbc-serpent-avx2",
- .test = alg_test_null,
- }, {
- .alg = "__cbc-serpent-sse2",
- .test = alg_test_null,
- }, {
- .alg = "__cbc-twofish-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-aes-aesni",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "__driver-cbc-camellia-aesni",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-camellia-aesni-avx2",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-cast5-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-cast6-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-serpent-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-serpent-avx2",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-serpent-sse2",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-twofish-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-aes-aesni",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "__driver-ecb-camellia-aesni",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-camellia-aesni-avx2",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-cast5-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-cast6-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-serpent-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-serpent-avx2",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-serpent-sse2",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-twofish-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-gcm-aes-aesni",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "__ghash-pclmulqdqni",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
.alg = "ansi_cprng",
.test = alg_test_cprng,
.suite = {
@@ -2659,55 +2714,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
- .alg = "cryptd(__driver-cbc-aes-aesni)",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "cryptd(__driver-cbc-camellia-aesni)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-cbc-serpent-avx2)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-aes-aesni)",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "cryptd(__driver-ecb-camellia-aesni)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-cast5-avx)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-cast6-avx)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-serpent-avx)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-serpent-avx2)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-serpent-sse2)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-twofish-avx)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-gcm-aes-aesni)",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "cryptd(__ghash-pclmulqdqni)",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
.alg = "ctr(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -3034,10 +3040,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
.test = alg_test_null,
}, {
- .alg = "ecb(__aes-aesni)",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
.alg = "ecb(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
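
test_acomp() above drives the new acompress interface with the completion-based wait used throughout testmgr. Outside the test manager, a one-shot compression looks roughly like the sketch below; error handling is trimmed and the "lz4" algorithm name is just an example:

#include <crypto/acompress.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>

struct demo_result {
	struct completion completion;
	int err;
};

static void demo_complete(struct crypto_async_request *areq, int err)
{
	struct demo_result *res = areq->data;

	if (err == -EINPROGRESS)
		return;		/* left the backlog; keep waiting */
	res->err = err;
	complete(&res->completion);
}

static int demo_compress(const void *in, unsigned int ilen,
			 void *out, unsigned int *olen)
{
	struct demo_result res;
	struct scatterlist src, dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int err;

	tfm = crypto_alloc_acomp("lz4", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	init_completion(&res.completion);
	sg_init_one(&src, in, ilen);
	sg_init_one(&dst, out, *olen);
	acomp_request_set_params(req, &src, &dst, ilen, *olen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   demo_complete, &res);

	err = crypto_acomp_compress(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.completion);
		err = res.err;
	}
	if (!err)
		*olen = req->dlen;

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}
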
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index e64a4ef9d8ca..9b656be7f52f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1334,36 +1334,50 @@ static struct hash_testvec rmd320_tv_template[] = {
}
};
-#define CRCT10DIF_TEST_VECTORS 3
+#define CRCT10DIF_TEST_VECTORS ARRAY_SIZE(crct10dif_tv_template)
static struct hash_testvec crct10dif_tv_template[] = {
{
- .plaintext = "abc",
- .psize = 3,
-#ifdef __LITTLE_ENDIAN
- .digest = "\x3b\x44",
-#else
- .digest = "\x44\x3b",
-#endif
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "123456789012345678901234567890123456789",
- .psize = 79,
-#ifdef __LITTLE_ENDIAN
- .digest = "\x70\x4b",
-#else
- .digest = "\x4b\x70",
-#endif
- }, {
- .plaintext =
- "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
- .psize = 56,
-#ifdef __LITTLE_ENDIAN
- .digest = "\xe3\x9c",
-#else
- .digest = "\x9c\xe3",
-#endif
- .np = 2,
- .tap = { 28, 28 }
+ .plaintext = "abc",
+ .psize = 3,
+ .digest = (u8 *)(u16 []){ 0x443b },
+ }, {
+ .plaintext = "1234567890123456789012345678901234567890"
+ "123456789012345678901234567890123456789",
+ .psize = 79,
+ .digest = (u8 *)(u16 []){ 0x4b70 },
+ .np = 2,
+ .tap = { 63, 16 },
+ }, {
+ .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
+ "ddddddddddddd",
+ .psize = 56,
+ .digest = (u8 *)(u16 []){ 0x9ce3 },
+ .np = 8,
+ .tap = { 1, 2, 28, 7, 6, 5, 4, 3 },
+ }, {
+ .plaintext = "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "123456789012345678901234567890123456789",
+ .psize = 319,
+ .digest = (u8 *)(u16 []){ 0x44c6 },
+ }, {
+ .plaintext = "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "123456789012345678901234567890123456789",
+ .psize = 319,
+ .digest = (u8 *)(u16 []){ 0x44c6 },
+ .np = 4,
+ .tap = { 1, 255, 57, 6 },
}
};
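
Storing the expected digests as native-endian u16 values is what allows the old #ifdef __LITTLE_ENDIAN pairs to go away: the crct10dif drivers write their 16-bit CRC out in CPU byte order. The values can be cross-checked against the generic library helper; a quick sketch:

#include <linux/crc-t10dif.h>
#include <linux/printk.h>

static void demo_crct10dif(void)
{
	/* Expected to print 443b, matching the first vector above. */
	pr_info("crc-t10dif(\"abc\") = %04x\n",
		crc_t10dif((const u8 *)"abc", 3));
}
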
diff --git a/crypto/xts.c b/crypto/xts.c
index 305343f22a02..410a2e299085 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -13,7 +13,8 @@
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -25,140 +26,320 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
+#define XTS_BUFFER_SIZE 128u
+
struct priv {
- struct crypto_cipher *child;
+ struct crypto_skcipher *child;
struct crypto_cipher *tweak;
};
-static int setkey(struct crypto_tfm *parent, const u8 *key,
+struct xts_instance_ctx {
+ struct crypto_skcipher_spawn spawn;
+ char name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct rctx {
+ be128 buf[XTS_BUFFER_SIZE / sizeof(be128)];
+
+ be128 t;
+
+ be128 *ext;
+
+ struct scatterlist srcbuf[2];
+ struct scatterlist dstbuf[2];
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ unsigned int left;
+
+ struct skcipher_request subreq;
+};
+
+static int setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
- struct priv *ctx = crypto_tfm_ctx(parent);
- struct crypto_cipher *child = ctx->tweak;
+ struct priv *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_skcipher *child;
+ struct crypto_cipher *tweak;
int err;
- err = xts_check_key(parent, key, keylen);
+ err = xts_verify_key(parent, key, keylen);
if (err)
return err;
+ keylen /= 2;
+
/* we need two cipher instances: one to compute the initial 'tweak'
* by encrypting the IV (usually the 'plain' iv) and the other
* one to encrypt and decrypt the data */
/* tweak cipher, uses Key2 i.e. the second half of *key */
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
+ tweak = ctx->tweak;
+ crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key + keylen/2, keylen/2);
+ err = crypto_cipher_setkey(tweak, key + keylen, keylen);
+ crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
+ CRYPTO_TFM_RES_MASK);
if (err)
return err;
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
-
+ /* data cipher, uses Key1 i.e. the first half of *key */
child = ctx->child;
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
- /* data cipher, uses Key1 i.e. the first half of *key */
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key, keylen/2);
- if (err)
- return err;
+ return err;
+}
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
+static int post_crypt(struct skcipher_request *req)
+{
+ struct rctx *rctx = skcipher_request_ctx(req);
+ be128 *buf = rctx->ext ?: rctx->buf;
+ struct skcipher_request *subreq;
+ const int bs = XTS_BLOCK_SIZE;
+ struct skcipher_walk w;
+ struct scatterlist *sg;
+ unsigned offset;
+ int err;
- return 0;
-}
+ subreq = &rctx->subreq;
+ err = skcipher_walk_virt(&w, subreq, false);
-struct sinfo {
- be128 *t;
- struct crypto_tfm *tfm;
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
-};
+ while (w.nbytes) {
+ unsigned int avail = w.nbytes;
+ be128 *wdst;
-static inline void xts_round(struct sinfo *s, void *dst, const void *src)
-{
- be128_xor(dst, s->t, src); /* PP <- T xor P */
- s->fn(s->tfm, dst, dst); /* CC <- E(Key1,PP) */
- be128_xor(dst, dst, s->t); /* C <- T xor CC */
+ wdst = w.dst.virt.addr;
+
+ do {
+ be128_xor(wdst, buf++, wdst);
+ wdst++;
+ } while ((avail -= bs) >= bs);
+
+ err = skcipher_walk_done(&w, avail);
+ }
+
+ rctx->left -= subreq->cryptlen;
+
+ if (err || !rctx->left)
+ goto out;
+
+ rctx->dst = rctx->dstbuf;
+
+ scatterwalk_done(&w.out, 0, 1);
+ sg = w.out.sg;
+ offset = w.out.offset;
+
+ if (rctx->dst != sg) {
+ rctx->dst[0] = *sg;
+ sg_unmark_end(rctx->dst);
+ scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+ }
+ rctx->dst[0].length -= offset - sg->offset;
+ rctx->dst[0].offset = offset;
+
+out:
+ return err;
}
-static int crypt(struct blkcipher_desc *d,
- struct blkcipher_walk *w, struct priv *ctx,
- void (*tw)(struct crypto_tfm *, u8 *, const u8 *),
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
+static int pre_crypt(struct skcipher_request *req)
{
- int err;
- unsigned int avail;
+ struct rctx *rctx = skcipher_request_ctx(req);
+ be128 *buf = rctx->ext ?: rctx->buf;
+ struct skcipher_request *subreq;
const int bs = XTS_BLOCK_SIZE;
- struct sinfo s = {
- .tfm = crypto_cipher_tfm(ctx->child),
- .fn = fn
- };
- u8 *wsrc;
- u8 *wdst;
-
- err = blkcipher_walk_virt(d, w);
- if (!w->nbytes)
- return err;
+ struct skcipher_walk w;
+ struct scatterlist *sg;
+ unsigned cryptlen;
+ unsigned offset;
+ bool more;
+ int err;
- s.t = (be128 *)w->iv;
- avail = w->nbytes;
+ subreq = &rctx->subreq;
+ cryptlen = subreq->cryptlen;
- wsrc = w->src.virt.addr;
- wdst = w->dst.virt.addr;
+ more = rctx->left > cryptlen;
+ if (!more)
+ cryptlen = rctx->left;
- /* calculate first value of T */
- tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv);
+ skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
+ cryptlen, NULL);
- goto first;
+ err = skcipher_walk_virt(&w, subreq, false);
- for (;;) {
- do {
- gf128mul_x_ble(s.t, s.t);
+ while (w.nbytes) {
+ unsigned int avail = w.nbytes;
+ be128 *wsrc;
+ be128 *wdst;
-first:
- xts_round(&s, wdst, wsrc);
+ wsrc = w.src.virt.addr;
+ wdst = w.dst.virt.addr;
- wsrc += bs;
- wdst += bs;
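+
+ /* Pre-whitening pass: save each tweak T_i in buf for post_crypt(),
+ * store P_i xor T_i for the ecb() child cipher to process in place,
+ * then step the tweak: T_{i+1} = T_i * x in GF(2^128).
+ */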
+ do {
+ *buf++ = rctx->t;
+ be128_xor(wdst++, &rctx->t, wsrc++);
+ gf128mul_x_ble(&rctx->t, &rctx->t);
} while ((avail -= bs) >= bs);
- err = blkcipher_walk_done(d, w, avail);
- if (!w->nbytes)
- break;
+ err = skcipher_walk_done(&w, avail);
+ }
+
+ skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
+ cryptlen, NULL);
- avail = w->nbytes;
+ if (err || !more)
+ goto out;
- wsrc = w->src.virt.addr;
- wdst = w->dst.virt.addr;
+ rctx->src = rctx->srcbuf;
+
+ scatterwalk_done(&w.in, 0, 1);
+ sg = w.in.sg;
+ offset = w.in.offset;
+
+ if (rctx->src != sg) {
+ rctx->src[0] = *sg;
+ sg_unmark_end(rctx->src);
+ scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
}
+ rctx->src[0].length -= offset - sg->offset;
+ rctx->src[0].offset = offset;
+out:
return err;
}
-static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
- struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk w;
+ struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq;
+ gfp_t gfp;
+
+ subreq = &rctx->subreq;
+ skcipher_request_set_tfm(subreq, ctx->child);
+ skcipher_request_set_callback(subreq, req->base.flags, done, req);
+
+ gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+ rctx->ext = NULL;
+
+ subreq->cryptlen = XTS_BUFFER_SIZE;
+ if (req->cryptlen > XTS_BUFFER_SIZE) {
+ subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
+ rctx->ext = kmalloc(subreq->cryptlen, gfp);
+ }
+
+ rctx->src = req->src;
+ rctx->dst = req->dst;
+ rctx->left = req->cryptlen;
- blkcipher_walk_init(&w, dst, src, nbytes);
- return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt,
- crypto_cipher_alg(ctx->child)->cia_encrypt);
+ /* calculate first value of T */
+ crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
+
+ return 0;
}
-static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static void exit_crypt(struct skcipher_request *req)
{
- struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk w;
+ struct rctx *rctx = skcipher_request_ctx(req);
+
+ rctx->left = 0;
- blkcipher_walk_init(&w, dst, src, nbytes);
- return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt,
- crypto_cipher_alg(ctx->child)->cia_decrypt);
+ if (rctx->ext)
+ kzfree(rctx->ext);
+}
+
+static int do_encrypt(struct skcipher_request *req, int err)
+{
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq;
+
+ subreq = &rctx->subreq;
+
+ while (!err && rctx->left) {
+ err = pre_crypt(req) ?:
+ crypto_skcipher_encrypt(subreq) ?:
+ post_crypt(req);
+
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return err;
+ }
+
+ exit_crypt(req);
+ return err;
+}
+
+static void encrypt_done(struct crypto_async_request *areq, int err)
+{
+ struct skcipher_request *req = areq->data;
+ struct skcipher_request *subreq;
+ struct rctx *rctx;
+
+ rctx = skcipher_request_ctx(req);
+ subreq = &rctx->subreq;
+ subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+ err = do_encrypt(req, err ?: post_crypt(req));
+ if (rctx->left)
+ return;
+
+ skcipher_request_complete(req, err);
+}
+
+static int encrypt(struct skcipher_request *req)
+{
+ return do_encrypt(req, init_crypt(req, encrypt_done));
+}
+
+static int do_decrypt(struct skcipher_request *req, int err)
+{
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq;
+
+ subreq = &rctx->subreq;
+
+ while (!err && rctx->left) {
+ err = pre_crypt(req) ?:
+ crypto_skcipher_decrypt(subreq) ?:
+ post_crypt(req);
+
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return err;
+ }
+
+ exit_crypt(req);
+ return err;
+}
+
+static void decrypt_done(struct crypto_async_request *areq, int err)
+{
+ struct skcipher_request *req = areq->data;
+ struct skcipher_request *subreq;
+ struct rctx *rctx;
+
+ rctx = skcipher_request_ctx(req);
+ subreq = &rctx->subreq;
+ subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+ err = do_decrypt(req, err ?: post_crypt(req));
+ if (rctx->left)
+ return;
+
+ skcipher_request_complete(req, err);
+}
+
+static int decrypt(struct skcipher_request *req)
+{
+ return do_decrypt(req, init_crypt(req, decrypt_done));
}
int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
@@ -233,112 +414,168 @@ first:
}
EXPORT_SYMBOL_GPL(xts_crypt);
-static int init_tfm(struct crypto_tfm *tfm)
+static int init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_cipher *cipher;
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct priv *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
-
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
- *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
- crypto_free_cipher(cipher);
- return -EINVAL;
- }
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
+ struct priv *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *child;
+ struct crypto_cipher *tweak;
- ctx->child = cipher;
+ child = crypto_spawn_skcipher(&ictx->spawn);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher)) {
- crypto_free_cipher(ctx->child);
- return PTR_ERR(cipher);
- }
+ ctx->child = child;
- /* this check isn't really needed, leave it here just in case */
- if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
- crypto_free_cipher(cipher);
- crypto_free_cipher(ctx->child);
- *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
- return -EINVAL;
+ tweak = crypto_alloc_cipher(ictx->name, 0, 0);
+ if (IS_ERR(tweak)) {
+ crypto_free_skcipher(ctx->child);
+ return PTR_ERR(tweak);
}
- ctx->tweak = cipher;
+ ctx->tweak = tweak;
+
+ crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
+ sizeof(struct rctx));
return 0;
}
-static void exit_tfm(struct crypto_tfm *tfm)
+static void exit_tfm(struct crypto_skcipher *tfm)
{
- struct priv *ctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(ctx->child);
+ struct priv *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->child);
crypto_free_cipher(ctx->tweak);
}
-static struct crypto_instance *alloc(struct rtattr **tb)
+static void free(struct skcipher_instance *inst)
+{
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
- struct crypto_alg *alg;
+ struct skcipher_instance *inst;
+ struct crypto_attr_type *algt;
+ struct xts_instance_ctx *ctx;
+ struct skcipher_alg *alg;
+ const char *cipher_name;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
+ return -EINVAL;
+
+ cipher_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(cipher_name))
+ return PTR_ERR(cipher_name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ ctx = skcipher_instance_ctx(inst);
+
+ crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
+ err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
+ if (err == -ENOENT) {
+ err = -ENAMETOOLONG;
+ if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+ cipher_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
+ }
+
if (err)
- return ERR_PTR(err);
+ goto err_free_inst;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
+ alg = crypto_skcipher_spawn_alg(&ctx->spawn);
- inst = crypto_alloc_instance("xts", alg);
- if (IS_ERR(inst))
- goto out_put_alg;
+ err = -EINVAL;
+ if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
+ goto err_drop_spawn;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
+ if (crypto_skcipher_alg_ivsize(alg))
+ goto err_drop_spawn;
- if (alg->cra_alignmask < 7)
- inst->alg.cra_alignmask = 7;
- else
- inst->alg.cra_alignmask = alg->cra_alignmask;
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
+ &alg->base);
+ if (err)
+ goto err_drop_spawn;
- inst->alg.cra_type = &crypto_blkcipher_type;
+ err = -EINVAL;
+ cipher_name = alg->base.cra_name;
- inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
- inst->alg.cra_blkcipher.min_keysize =
- 2 * alg->cra_cipher.cia_min_keysize;
- inst->alg.cra_blkcipher.max_keysize =
- 2 * alg->cra_cipher.cia_max_keysize;
+ /* Alas we screwed up the naming so we have to mangle the
+ * cipher name.
+ */
+ if (!strncmp(cipher_name, "ecb(", 4)) {
+ unsigned len;
- inst->alg.cra_ctxsize = sizeof(struct priv);
+ len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
+ if (len < 2 || len >= sizeof(ctx->name))
+ goto err_drop_spawn;
- inst->alg.cra_init = init_tfm;
- inst->alg.cra_exit = exit_tfm;
+ if (ctx->name[len - 1] != ')')
+ goto err_drop_spawn;
- inst->alg.cra_blkcipher.setkey = setkey;
- inst->alg.cra_blkcipher.encrypt = encrypt;
- inst->alg.cra_blkcipher.decrypt = decrypt;
+ ctx->name[len - 1] = 0;
-out_put_alg:
- crypto_mod_put(alg);
- return inst;
-}
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_drop_spawn;
+ } else
+ goto err_drop_spawn;
-static void free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
+ inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
+ (__alignof__(u64) - 1);
+
+ inst->alg.ivsize = XTS_BLOCK_SIZE;
+ inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
+ inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct priv);
+
+ inst->alg.init = init_tfm;
+ inst->alg.exit = exit_tfm;
+
+ inst->alg.setkey = setkey;
+ inst->alg.encrypt = encrypt;
+ inst->alg.decrypt = decrypt;
+
+ inst->free = free;
+
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_skcipher(&ctx->spawn);
+err_free_inst:
kfree(inst);
+ goto out;
}
static struct crypto_template crypto_tmpl = {
.name = "xts",
- .alloc = alloc,
- .free = free,
+ .create = create,
.module = THIS_MODULE,
};
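
With the template converted, "xts(aes)" is allocated and driven through the regular skcipher API like any other mode; a minimal synchronous sketch (key and data values are illustrative, and the data buffer is kept off the stack because it is passed through a scatterlist):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int demo_xts_encrypt(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	u8 key[32] = {};	/* Key1 || Key2: two 16-byte AES keys */
	u8 iv[16] = {};		/* the "plain" IV, e.g. a sector number */
	u8 *buf = NULL;
	int err;

	/* Mask out async implementations so encrypt returns directly. */
	tfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, 64);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, 64, iv);

	err = crypto_skcipher_encrypt(req);

out:
	skcipher_request_free(req);
	kfree(buf);
	crypto_free_skcipher(tfm);
	return err;
}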