Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig             30
-rw-r--r--  crypto/Makefile             1
-rw-r--r--  crypto/ahash.c             12
-rw-r--r--  crypto/algif_skcipher.c     2
-rw-r--r--  crypto/drbg.c             130
-rw-r--r--  crypto/lz4.c                2
-rw-r--r--  crypto/lz4hc.c              2
-rw-r--r--  crypto/mcryptd.c          705
-rw-r--r--  crypto/sha256_generic.c     3
-rw-r--r--  crypto/sha512_generic.c     3
-rw-r--r--  crypto/testmgr.c          966
-rw-r--r--  crypto/testmgr.h           66
12 files changed, 1339 insertions, 583 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 77daef031db5..87bbc9c1e681 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -158,6 +158,20 @@ config CRYPTO_CRYPTD
converts an arbitrary synchronous software crypto algorithm
into an asynchronous algorithm that executes in a kernel thread.
+config CRYPTO_MCRYPTD
+ tristate "Software async multi-buffer crypto daemon"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_MANAGER
+ select CRYPTO_WORKQUEUE
+ help
+ This is a generic software asynchronous crypto daemon that
+ provides a kernel thread to assist multi-buffer crypto
+ algorithms in submitting and flushing jobs. Multi-buffer
+ crypto algorithms are executed in the context of this kernel
+ thread, and drivers can post their crypto requests
+ asynchronously to be processed by this daemon.
+
config CRYPTO_AUTHENC
tristate "Authenc support"
select CRYPTO_AEAD
@@ -559,6 +573,22 @@ config CRYPTO_SHA1_PPC
This is the powerpc hardware accelerated implementation of the
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
+config CRYPTO_SHA1_MB
+ tristate "SHA1 digest algorithm (x86_64 Multi-Buffer, Experimental)"
+ depends on X86 && 64BIT
+ select CRYPTO_SHA1
+ select CRYPTO_HASH
+ select CRYPTO_MCRYPTD
+ help
+ SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
+ using multi-buffer technique. This algorithm computes on
+ multiple data lanes concurrently with SIMD instructions for
+ better throughput. It should not be enabled by default but
+ used when there is a significant amount of work to keep the
+ data lanes filled to get a performance benefit. If the data
+ lanes remain unfilled, a flush operation will be initiated to
+ process the crypto jobs, adding a slight latency.
+
config CRYPTO_SHA256
tristate "SHA224 and SHA256 digest algorithm"
select CRYPTO_HASH
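
The multi-buffer idea behind CRYPTO_SHA1_MB can be illustrated with a toy model. This is a hypothetical sketch, not part of the patch: jobs accumulate in per-lane slots and are only computed once all lanes have work, while a flush (the role mcryptd plays below) drains partially filled lanes at the cost of a little latency.

    /* Toy sketch of the multi-buffer scheme (illustrative only; the
     * real lane management lives in the SHA1-MB driver, not here). */
    #include <stdio.h>

    #define NUM_LANES 8                /* e.g. one lane per SIMD slot */

    struct mb_job { const char *data; };

    static struct mb_job lanes[NUM_LANES];
    static int used;

    static void process_lanes(int n)
    {
        /* A real implementation hashes all n lanes in one SIMD pass. */
        for (int i = 0; i < n; i++)
            printf("hashing lane %d: %s\n", i, lanes[i].data);
        used = 0;
    }

    /* Submit: only compute once every lane has work queued. */
    static void submit(const char *data)
    {
        lanes[used].data = data;
        if (++used == NUM_LANES)
            process_lanes(NUM_LANES);
    }

    /* Flush: triggered when lanes stay unfilled, trading latency
     * for forward progress. */
    static void flush(void)
    {
        if (used)
            process_lanes(used);
    }

    int main(void)
    {
        submit("job-a");
        submit("job-b");
        flush();        /* lanes were not full: flush drains them */
        return 0;
    }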
diff --git a/crypto/Makefile b/crypto/Makefile
index cfa57b3f5a4d..1445b9100c05 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_CRYPTO_GCM) += gcm.o
obj-$(CONFIG_CRYPTO_CCM) += ccm.o
obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
+obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o
obj-$(CONFIG_CRYPTO_DES) += des_generic.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o
diff --git a/crypto/ahash.c b/crypto/ahash.c
index f2a5d8f656ff..f6a36a52d738 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -131,8 +131,10 @@ int crypto_hash_walk_first(struct ahash_request *req,
{
walk->total = req->nbytes;
- if (!walk->total)
+ if (!walk->total) {
+ walk->entrylen = 0;
return 0;
+ }
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
walk->sg = req->src;
@@ -147,8 +149,10 @@ int crypto_ahash_walk_first(struct ahash_request *req,
{
walk->total = req->nbytes;
- if (!walk->total)
+ if (!walk->total) {
+ walk->entrylen = 0;
return 0;
+ }
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
walk->sg = req->src;
@@ -167,8 +171,10 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
{
walk->total = len;
- if (!walk->total)
+ if (!walk->total) {
+ walk->entrylen = 0;
return 0;
+ }
walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
walk->sg = sg;
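
The ahash.c hunks matter because the hash-walk completion test derives "done" from entrylen and total; with a zero-byte request the old code returned early and left entrylen uninitialized stack garbage. A minimal userspace model of the failure mode (field and function names simplified from the kernel's hash walk):

    /* Simplified model of the bug fixed above: with nbytes == 0 the
     * walk returned before initializing entrylen, so the "last entry"
     * test could read garbage. (Sketch only; the real code is in
     * crypto/ahash.c.) */
    #include <stdio.h>

    struct hash_walk { unsigned int total, entrylen; };

    static int walk_last(const struct hash_walk *w)
    {
        return !(w->entrylen | w->total); /* done only if both zero */
    }

    static int walk_first(struct hash_walk *w, unsigned int nbytes)
    {
        w->total = nbytes;
        if (!w->total) {
            w->entrylen = 0;  /* the fix: don't leave this stale */
            return 0;
        }
        /* ... set up scatterlist walking ... */
        w->entrylen = nbytes;
        return nbytes;
    }

    int main(void)
    {
        struct hash_walk w = { 0, 0xdeadbeef }; /* stale contents */
        walk_first(&w, 0);
        printf("walk_last = %d\n", walk_last(&w)); /* 1 with the fix */
        return 0;
    }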
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index a19c027b29bd..83187f497c7c 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -49,7 +49,7 @@ struct skcipher_ctx {
struct ablkcipher_request req;
};
-#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
+#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
sizeof(struct scatterlist) - 1)
static inline int skcipher_sndbuf(struct sock *sk)
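
Replacing PAGE_SIZE with a literal 4096 pins the scatterlist entry count per list to the same value on every architecture, so the limit no longer balloons on 64K-page systems. Rough arithmetic, assuming illustrative structure sizes (32 bytes each, which is not taken from this patch):

    /* Back-of-envelope check; both sizes below are assumptions. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long sg_size = 32; /* assumed sizeof(struct scatterlist) */
        unsigned long hdr     = 32; /* assumed sizeof(struct skcipher_sg_list) */

        unsigned long ents_4k  = (4096  - hdr) / sg_size - 1;
        unsigned long ents_64k = (65536 - hdr) / sg_size - 1; /* old formula, 64K pages */

        printf("fixed 4096 basis:    %lu entries\n", ents_4k);  /* 126 */
        printf("PAGE_SIZE=64K basis: %lu entries\n", ents_64k); /* 2046 */
        return 0;
    }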
diff --git a/crypto/drbg.c b/crypto/drbg.c
index a53ee099e281..54cfd4820abc 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -117,27 +117,18 @@ static const struct drbg_core drbg_cores[] = {
{
.flags = DRBG_CTR | DRBG_STRENGTH128,
.statelen = 32, /* 256 bits as defined in 10.2.1 */
- .max_addtllen = 35,
- .max_bits = 19,
- .max_req = 48,
.blocklen_bytes = 16,
.cra_name = "ctr_aes128",
.backend_cra_name = "ecb(aes)",
}, {
.flags = DRBG_CTR | DRBG_STRENGTH192,
.statelen = 40, /* 320 bits as defined in 10.2.1 */
- .max_addtllen = 35,
- .max_bits = 19,
- .max_req = 48,
.blocklen_bytes = 16,
.cra_name = "ctr_aes192",
.backend_cra_name = "ecb(aes)",
}, {
.flags = DRBG_CTR | DRBG_STRENGTH256,
.statelen = 48, /* 384 bits as defined in 10.2.1 */
- .max_addtllen = 35,
- .max_bits = 19,
- .max_req = 48,
.blocklen_bytes = 16,
.cra_name = "ctr_aes256",
.backend_cra_name = "ecb(aes)",
@@ -147,36 +138,24 @@ static const struct drbg_core drbg_cores[] = {
{
.flags = DRBG_HASH | DRBG_STRENGTH128,
.statelen = 55, /* 440 bits */
- .max_addtllen = 35,
- .max_bits = 19,
- .max_req = 48,
.blocklen_bytes = 20,
.cra_name = "sha1",
.backend_cra_name = "sha1",
}, {
.flags = DRBG_HASH | DRBG_STRENGTH256,
.statelen = 111, /* 888 bits */
- .max_addtllen = 35,
- .max_bits = 19,
- .max_req = 48,
.blocklen_bytes = 48,
.cra_name = "sha384",
.backend_cra_name = "sha384",
}, {
.flags = DRBG_HASH | DRBG_STRENGTH256,
.statelen = 111, /* 888 bits */
- .max_addtllen = 35,
- .max_bits = 19,
- .max_req = 48,
.blocklen_bytes = 64,
.cra_name = "sha512",
.backend_cra_name = "sha512",
}, {
.flags = DRBG_HASH | DRBG_STRENGTH256,
.statelen = 55, /* 440 bits */
- .max_addtllen = 35,
- .max_bits = 19,
- .max_req = 48,
.blocklen_bytes = 32,
.cra_name = "sha256",
.backend_cra_name = "sha256",
@@ -186,36 +165,24 @@ static const struct drbg_core drbg_cores[] = {
{
.flags = DRBG_HMAC | DRBG_STRENGTH128,
.statelen = 20, /* block length of cipher */
- .max_addtllen = 35,
- .max_bits = 19,
- .max_req = 48,
.blocklen_bytes = 20,
.cra_name = "hmac_sha1",
.backend_cra_name = "hmac(sha1)",
}, {
.flags = DRBG_HMAC | DRBG_STRENGTH256,
.statelen = 48, /* block length of cipher */
- .max_addtllen = 35,
- .max_bits = 19,
- .max_req = 48,
.blocklen_bytes = 48,
.cra_name = "hmac_sha384",
.backend_cra_name = "hmac(sha384)",
}, {
.flags = DRBG_HMAC | DRBG_STRENGTH256,
.statelen = 64, /* block length of cipher */
- .max_addtllen = 35,
- .max_bits = 19,
- .max_req = 48,
.blocklen_bytes = 64,
.cra_name = "hmac_sha512",
.backend_cra_name = "hmac(sha512)",
}, {
.flags = DRBG_HMAC | DRBG_STRENGTH256,
.statelen = 32, /* block length of cipher */
- .max_addtllen = 35,
- .max_bits = 19,
- .max_req = 48,
.blocklen_bytes = 32,
.cra_name = "hmac_sha256",
.backend_cra_name = "hmac(sha256)",
@@ -302,20 +269,19 @@ static bool drbg_fips_continuous_test(struct drbg_state *drbg,
* Convert an integer into a byte representation of this integer.
* The byte representation is big-endian
*
- * @buf buffer holding the converted integer
* @val value to be converted
- * @buflen length of buffer
+ * @buf buffer holding the converted integer -- caller must ensure that
+ * the buffer size is at least 32 bits
*/
#if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR))
-static inline void drbg_int2byte(unsigned char *buf, uint64_t val,
- size_t buflen)
+static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
{
- unsigned char *byte;
- uint64_t i;
+ struct s {
+ __be32 conv;
+ };
+ struct s *conversion = (struct s *) buf;
- byte = buf + (buflen - 1);
- for (i = 0; i < buflen; i++)
- *(byte--) = val >> (i * 8) & 0xff;
+ conversion->conv = cpu_to_be32(val);
}
/*
@@ -483,10 +449,10 @@ static int drbg_ctr_df(struct drbg_state *drbg,
/* 10.4.2 step 2 -- calculate the entire length of all input data */
list_for_each_entry(seed, seedlist, list)
inputlen += seed->len;
- drbg_int2byte(&L_N[0], inputlen, 4);
+ drbg_cpu_to_be32(inputlen, &L_N[0]);
/* 10.4.2 step 3 */
- drbg_int2byte(&L_N[4], bytes_to_return, 4);
+ drbg_cpu_to_be32(bytes_to_return, &L_N[4]);
/* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg));
@@ -517,7 +483,7 @@ static int drbg_ctr_df(struct drbg_state *drbg,
* holds zeros after allocation -- even the increment of i
* is irrelevant as the increment remains within length of i
*/
- drbg_int2byte(iv, i, 4);
+ drbg_cpu_to_be32(i, iv);
/* 10.4.2 step 9.2 -- BCC and concatenation with temp */
ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list);
if (ret)
@@ -729,11 +695,9 @@ static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed,
LIST_HEAD(seedlist);
LIST_HEAD(vdatalist);
- if (!reseed) {
- /* 10.1.2.3 step 2 */
- memset(drbg->C, 0, drbg_statelen(drbg));
+ if (!reseed)
+ /* 10.1.2.3 step 2 -- memset(0) of C is implicit with kzalloc */
memset(drbg->V, 1, drbg_statelen(drbg));
- }
drbg_string_fill(&seed1, drbg->V, drbg_statelen(drbg));
list_add_tail(&seed1.list, &seedlist);
@@ -862,7 +826,7 @@ static int drbg_hash_df(struct drbg_state *drbg,
/* 10.4.1 step 3 */
input[0] = 1;
- drbg_int2byte(&input[1], (outlen * 8), 4);
+ drbg_cpu_to_be32((outlen * 8), &input[1]);
/* 10.4.1 step 4.1 -- concatenation of data for input into hash */
drbg_string_fill(&data, input, 5);
@@ -1023,7 +987,10 @@ static int drbg_hash_generate(struct drbg_state *drbg,
{
int len = 0;
int ret = 0;
- unsigned char req[8];
+ union {
+ unsigned char req[8];
+ __be64 req_int;
+ } u;
unsigned char prefix = DRBG_PREFIX3;
struct drbg_string data1, data2;
LIST_HEAD(datalist);
@@ -1053,8 +1020,8 @@ static int drbg_hash_generate(struct drbg_state *drbg,
drbg->scratchpad, drbg_blocklen(drbg));
drbg_add_buf(drbg->V, drbg_statelen(drbg),
drbg->C, drbg_statelen(drbg));
- drbg_int2byte(req, drbg->reseed_ctr, sizeof(req));
- drbg_add_buf(drbg->V, drbg_statelen(drbg), req, 8);
+ u.req_int = cpu_to_be64(drbg->reseed_ctr);
+ drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8);
out:
memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
@@ -1142,6 +1109,11 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
pr_devel("DRBG: using personalization string\n");
}
+ if (!reseed) {
+ memset(drbg->V, 0, drbg_statelen(drbg));
+ memset(drbg->C, 0, drbg_statelen(drbg));
+ }
+
ret = drbg->d_ops->update(drbg, &seedlist, reseed);
if (ret)
goto out;
@@ -1151,8 +1123,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
drbg->reseed_ctr = 1;
out:
- if (entropy)
- kzfree(entropy);
+ kzfree(entropy);
return ret;
}
@@ -1161,19 +1132,15 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
{
if (!drbg)
return;
- if (drbg->V)
- kzfree(drbg->V);
+ kzfree(drbg->V);
drbg->V = NULL;
- if (drbg->C)
- kzfree(drbg->C);
+ kzfree(drbg->C);
drbg->C = NULL;
- if (drbg->scratchpad)
- kzfree(drbg->scratchpad);
+ kzfree(drbg->scratchpad);
drbg->scratchpad = NULL;
drbg->reseed_ctr = 0;
#ifdef CONFIG_CRYPTO_FIPS
- if (drbg->prev)
- kzfree(drbg->prev);
+ kzfree(drbg->prev);
drbg->prev = NULL;
drbg->fips_primed = false;
#endif
@@ -1188,17 +1155,14 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
int ret = -ENOMEM;
unsigned int sb_size = 0;
- if (!drbg)
- return -EINVAL;
-
- drbg->V = kzalloc(drbg_statelen(drbg), GFP_KERNEL);
+ drbg->V = kmalloc(drbg_statelen(drbg), GFP_KERNEL);
if (!drbg->V)
goto err;
- drbg->C = kzalloc(drbg_statelen(drbg), GFP_KERNEL);
+ drbg->C = kmalloc(drbg_statelen(drbg), GFP_KERNEL);
if (!drbg->C)
goto err;
#ifdef CONFIG_CRYPTO_FIPS
- drbg->prev = kzalloc(drbg_blocklen(drbg), GFP_KERNEL);
+ drbg->prev = kmalloc(drbg_blocklen(drbg), GFP_KERNEL);
if (!drbg->prev)
goto err;
drbg->fips_primed = false;
@@ -1263,15 +1227,6 @@ static int drbg_make_shadow(struct drbg_state *drbg, struct drbg_state **shadow)
int ret = -ENOMEM;
struct drbg_state *tmp = NULL;
- if (!drbg || !drbg->core || !drbg->V || !drbg->C) {
- pr_devel("DRBG: attempt to generate shadow copy for "
- "uninitialized DRBG state rejected\n");
- return -EINVAL;
- }
- /* HMAC does not have a scratchpad */
- if (!(drbg->core->flags & DRBG_HMAC) && NULL == drbg->scratchpad)
- return -EINVAL;
-
tmp = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
@@ -1293,8 +1248,7 @@ static int drbg_make_shadow(struct drbg_state *drbg, struct drbg_state **shadow)
return 0;
err:
- if (tmp)
- kzfree(tmp);
+ kzfree(tmp);
return ret;
}
@@ -1385,11 +1339,9 @@ static int drbg_generate(struct drbg_state *drbg,
shadow->seeded = false;
/* allocate cipher handle */
- if (shadow->d_ops->crypto_init) {
- len = shadow->d_ops->crypto_init(shadow);
- if (len)
- goto err;
- }
+ len = shadow->d_ops->crypto_init(shadow);
+ if (len)
+ goto err;
if (shadow->pr || !shadow->seeded) {
pr_devel("DRBG: reseeding before generation (prediction "
@@ -1471,8 +1423,7 @@ static int drbg_generate(struct drbg_state *drbg,
#endif
err:
- if (shadow->d_ops->crypto_fini)
- shadow->d_ops->crypto_fini(shadow);
+ shadow->d_ops->crypto_fini(shadow);
drbg_restore_shadow(drbg, &shadow);
return len;
}
@@ -1566,11 +1517,10 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
return ret;
ret = -EFAULT;
- if (drbg->d_ops->crypto_init && drbg->d_ops->crypto_init(drbg))
+ if (drbg->d_ops->crypto_init(drbg))
goto err;
ret = drbg_seed(drbg, pers, false);
- if (drbg->d_ops->crypto_fini)
- drbg->d_ops->crypto_fini(drbg);
+ drbg->d_ops->crypto_fini(drbg);
if (ret)
goto err;
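
The drbg_int2byte to cpu_to_be32/cpu_to_be64 rewrite replaces an open-coded serialization loop with the kernel's endianness helpers. A userspace sketch showing the two are equivalent for 32-bit values (htonl stands in for cpu_to_be32 here):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>  /* htonl: userspace stand-in for cpu_to_be32 */

    /* Old approach: manual big-endian serialization, byte by byte. */
    static void int2byte(unsigned char *buf, uint64_t val, size_t buflen)
    {
        unsigned char *byte = buf + (buflen - 1);
        for (size_t i = 0; i < buflen; i++)
            *(byte--) = (val >> (i * 8)) & 0xff;
    }

    int main(void)
    {
        unsigned char a[4], b[4];
        uint32_t v = 0x11223344;

        int2byte(a, v, sizeof(a));
        uint32_t be = htonl(v);         /* new approach: one store */
        memcpy(b, &be, sizeof(b));

        printf("equal: %d\n", !memcmp(a, b, 4));  /* prints 1 */
        return 0;
    }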
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 4586dd15b0d8..34d072b72a73 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -68,7 +68,7 @@ static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
size_t tmp_len = *dlen;
size_t __slen = slen;
- err = lz4_decompress(src, &__slen, dst, tmp_len);
+ err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len);
if (err < 0)
return -EINVAL;
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 151ba31d34e3..9218b3fed5e3 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -68,7 +68,7 @@ static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
size_t tmp_len = *dlen;
size_t __slen = slen;
- err = lz4_decompress(src, &__slen, dst, tmp_len);
+ err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len);
if (err < 0)
return -EINVAL;
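
Both lz4 wrappers switch from lz4_decompress(), which needs the exact decompressed length up front, to lz4_decompress_unknownoutputsize(), which takes the source length by value and returns the produced length through the last argument. The prototypes as implied by the call sites above (reconstructed for reference, not verified against the headers):

    #include <stddef.h>

    /* Old: caller must already know the exact output length. */
    int lz4_decompress(const unsigned char *src, size_t *src_len,
                       unsigned char *dest, size_t actual_dest_len);

    /* New: output length is discovered and written back via dest_len. */
    int lz4_decompress_unknownoutputsize(const unsigned char *src,
                                         size_t src_len,
                                         unsigned char *dest,
                                         size_t *dest_len);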
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
new file mode 100644
index 000000000000..b39fbd530102
--- /dev/null
+++ b/crypto/mcryptd.c
@@ -0,0 +1,705 @@
+/*
+ * Software multibuffer async crypto daemon.
+ *
+ * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * Adapted from the crypto daemon (cryptd).
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/mcryptd.h>
+#include <crypto/crypto_wq.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/hardirq.h>
+
+#define MCRYPTD_MAX_CPU_QLEN 100
+#define MCRYPTD_BATCH 9
+
+static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
+ unsigned int tail);
+
+struct mcryptd_flush_list {
+ struct list_head list;
+ struct mutex lock;
+};
+
+static struct mcryptd_flush_list __percpu *mcryptd_flist;
+
+struct hashd_instance_ctx {
+ struct crypto_shash_spawn spawn;
+ struct mcryptd_queue *queue;
+};
+
+static void mcryptd_queue_worker(struct work_struct *work);
+
+void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
+{
+ struct mcryptd_flush_list *flist;
+
+ if (!cstate->flusher_engaged) {
+ /* put the flusher on the flush list */
+ flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
+ mutex_lock(&flist->lock);
+ list_add_tail(&cstate->flush_list, &flist->list);
+ cstate->flusher_engaged = true;
+ cstate->next_flush = jiffies + delay;
+ queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
+ &cstate->flush, delay);
+ mutex_unlock(&flist->lock);
+ }
+}
+EXPORT_SYMBOL(mcryptd_arm_flusher);
+
+static int mcryptd_init_queue(struct mcryptd_queue *queue,
+ unsigned int max_cpu_qlen)
+{
+ int cpu;
+ struct mcryptd_cpu_queue *cpu_queue;
+
+ queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
+ pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
+ if (!queue->cpu_queue)
+ return -ENOMEM;
+ for_each_possible_cpu(cpu) {
+ cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
+ crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+ INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+ }
+ return 0;
+}
+
+static void mcryptd_fini_queue(struct mcryptd_queue *queue)
+{
+ int cpu;
+ struct mcryptd_cpu_queue *cpu_queue;
+
+ for_each_possible_cpu(cpu) {
+ cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ BUG_ON(cpu_queue->queue.qlen);
+ }
+ free_percpu(queue->cpu_queue);
+}
+
+static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
+ struct crypto_async_request *request,
+ struct mcryptd_hash_request_ctx *rctx)
+{
+ int cpu, err;
+ struct mcryptd_cpu_queue *cpu_queue;
+
+ cpu = get_cpu();
+ cpu_queue = this_cpu_ptr(queue->cpu_queue);
+ rctx->tag.cpu = cpu;
+
+ err = crypto_enqueue_request(&cpu_queue->queue, request);
+ pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
+ cpu, cpu_queue, request);
+ queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+ put_cpu();
+
+ return err;
+}
+
+/*
+ * Try to opportunistically flush the partially completed jobs if the
+ * crypto daemon is the only task running.
+ */
+static void mcryptd_opportunistic_flush(void)
+{
+ struct mcryptd_flush_list *flist;
+ struct mcryptd_alg_cstate *cstate;
+
+ flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
+ while (single_task_running()) {
+ mutex_lock(&flist->lock);
+ if (list_empty(&flist->list)) {
+ mutex_unlock(&flist->lock);
+ return;
+ }
+ cstate = list_entry(flist->list.next,
+ struct mcryptd_alg_cstate, flush_list);
+ if (!cstate->flusher_engaged) {
+ mutex_unlock(&flist->lock);
+ return;
+ }
+ list_del(&cstate->flush_list);
+ cstate->flusher_engaged = false;
+ mutex_unlock(&flist->lock);
+ cstate->alg_state->flusher(cstate);
+ }
+}
+
+/*
+ * Called in workqueue context: do one item of real crypto work (via
+ * req->complete) and reschedule itself if there is more work to do.
+ */
+static void mcryptd_queue_worker(struct work_struct *work)
+{
+ struct mcryptd_cpu_queue *cpu_queue;
+ struct crypto_async_request *req, *backlog;
+ int i;
+
+ /*
+ * Need to loop through more than once for multi-buffer to
+ * be effective.
+ */
+
+ cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
+ i = 0;
+ while (i < MCRYPTD_BATCH || single_task_running()) {
+ /*
+ * preempt_disable/enable is used to prevent
+ * being preempted by mcryptd_enqueue_request()
+ */
+ local_bh_disable();
+ preempt_disable();
+ backlog = crypto_get_backlog(&cpu_queue->queue);
+ req = crypto_dequeue_request(&cpu_queue->queue);
+ preempt_enable();
+ local_bh_enable();
+
+ if (!req) {
+ mcryptd_opportunistic_flush();
+ return;
+ }
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+ req->complete(req, 0);
+ if (!cpu_queue->queue.qlen)
+ return;
+ ++i;
+ }
+ if (cpu_queue->queue.qlen)
+ queue_work(kcrypto_wq, &cpu_queue->work);
+}
+
+void mcryptd_flusher(struct work_struct *__work)
+{
+ struct mcryptd_alg_cstate *alg_cpu_state;
+ struct mcryptd_alg_state *alg_state;
+ struct mcryptd_flush_list *flist;
+ int cpu;
+
+ cpu = smp_processor_id();
+ alg_cpu_state = container_of(to_delayed_work(__work),
+ struct mcryptd_alg_cstate, flush);
+ alg_state = alg_cpu_state->alg_state;
+ if (alg_cpu_state->cpu != cpu)
+ pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
+ cpu, alg_cpu_state->cpu);
+
+ if (alg_cpu_state->flusher_engaged) {
+ flist = per_cpu_ptr(mcryptd_flist, cpu);
+ mutex_lock(&flist->lock);
+ list_del(&alg_cpu_state->flush_list);
+ alg_cpu_state->flusher_engaged = false;
+ mutex_unlock(&flist->lock);
+ alg_state->flusher(alg_cpu_state);
+ }
+}
+EXPORT_SYMBOL_GPL(mcryptd_flusher);
+
+static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+ struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
+
+ return ictx->queue;
+}
+
+static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
+ unsigned int tail)
+{
+ char *p;
+ struct crypto_instance *inst;
+ int err;
+
+ p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ inst = (void *)(p + head);
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_inst;
+
+ memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
+
+ inst->alg.cra_priority = alg->cra_priority + 50;
+ inst->alg.cra_blocksize = alg->cra_blocksize;
+ inst->alg.cra_alignmask = alg->cra_alignmask;
+
+out:
+ return p;
+
+out_free_inst:
+ kfree(p);
+ p = ERR_PTR(err);
+ goto out;
+}
+
+static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+ struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct crypto_shash_spawn *spawn = &ictx->spawn;
+ struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_shash *hash;
+
+ hash = crypto_spawn_shash(spawn);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ ctx->child = hash;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct mcryptd_hash_request_ctx) +
+ crypto_shash_descsize(hash));
+ return 0;
+}
+
+static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(ctx->child);
+}
+
+static int mcryptd_hash_setkey(struct crypto_ahash *parent,
+ const u8 *key, unsigned int keylen)
+{
+ struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
+ struct crypto_shash *child = ctx->child;
+ int err;
+
+ crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_shash_setkey(child, key, keylen);
+ crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int mcryptd_hash_enqueue(struct ahash_request *req,
+ crypto_completion_t complete)
+{
+ int ret;
+
+ struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct mcryptd_queue *queue =
+ mcryptd_get_queue(crypto_ahash_tfm(tfm));
+
+ rctx->complete = req->base.complete;
+ req->base.complete = complete;
+
+ ret = mcryptd_enqueue_request(queue, &req->base, rctx);
+
+ return ret;
+}
+
+static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
+{
+ struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_shash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct shash_desc *desc = &rctx->desc;
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ desc->tfm = child;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = crypto_shash_init(desc);
+
+ req->base.complete = rctx->complete;
+
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static int mcryptd_hash_init_enqueue(struct ahash_request *req)
+{
+ return mcryptd_hash_enqueue(req, mcryptd_hash_init);
+}
+
+static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
+{
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ err = shash_ahash_mcryptd_update(req, &rctx->desc);
+ if (err) {
+ req->base.complete = rctx->complete;
+ goto out;
+ }
+
+ return;
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static int mcryptd_hash_update_enqueue(struct ahash_request *req)
+{
+ return mcryptd_hash_enqueue(req, mcryptd_hash_update);
+}
+
+static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
+{
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ err = shash_ahash_mcryptd_final(req, &rctx->desc);
+ if (err) {
+ req->base.complete = rctx->complete;
+ goto out;
+ }
+
+ return;
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static int mcryptd_hash_final_enqueue(struct ahash_request *req)
+{
+ return mcryptd_hash_enqueue(req, mcryptd_hash_final);
+}
+
+static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
+{
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ err = shash_ahash_mcryptd_finup(req, &rctx->desc);
+
+ if (err) {
+ req->base.complete = rctx->complete;
+ goto out;
+ }
+
+ return;
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
+{
+ return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
+}
+
+static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
+{
+ struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_shash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct shash_desc *desc = &rctx->desc;
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ desc->tfm = child;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */
+
+ err = shash_ahash_mcryptd_digest(req, desc);
+
+ if (err) {
+ req->base.complete = rctx->complete;
+ goto out;
+ }
+
+ return;
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
+{
+ return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
+}
+
+static int mcryptd_hash_export(struct ahash_request *req, void *out)
+{
+ struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ return crypto_shash_export(&rctx->desc, out);
+}
+
+static int mcryptd_hash_import(struct ahash_request *req, const void *in)
+{
+ struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ return crypto_shash_import(&rctx->desc, in);
+}
+
+static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+ struct mcryptd_queue *queue)
+{
+ struct hashd_instance_ctx *ctx;
+ struct ahash_instance *inst;
+ struct shash_alg *salg;
+ struct crypto_alg *alg;
+ int err;
+
+ salg = shash_attr_alg(tb[1], 0, 0);
+ if (IS_ERR(salg))
+ return PTR_ERR(salg);
+
+ alg = &salg->base;
+ pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
+ inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
+ sizeof(*ctx));
+ err = PTR_ERR(inst);
+ if (IS_ERR(inst))
+ goto out_put_alg;
+
+ ctx = ahash_instance_ctx(inst);
+ ctx->queue = queue;
+
+ err = crypto_init_shash_spawn(&ctx->spawn, salg,
+ ahash_crypto_instance(inst));
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+
+ inst->alg.halg.digestsize = salg->digestsize;
+ inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
+
+ inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
+ inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;
+
+ inst->alg.init = mcryptd_hash_init_enqueue;
+ inst->alg.update = mcryptd_hash_update_enqueue;
+ inst->alg.final = mcryptd_hash_final_enqueue;
+ inst->alg.finup = mcryptd_hash_finup_enqueue;
+ inst->alg.export = mcryptd_hash_export;
+ inst->alg.import = mcryptd_hash_import;
+ inst->alg.setkey = mcryptd_hash_setkey;
+ inst->alg.digest = mcryptd_hash_digest_enqueue;
+
+ err = ahash_register_instance(tmpl, inst);
+ if (err) {
+ crypto_drop_shash(&ctx->spawn);
+out_free_inst:
+ kfree(inst);
+ }
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return err;
+}
+
+static struct mcryptd_queue mqueue;
+
+static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_DIGEST:
+ return mcryptd_create_hash(tmpl, tb, &mqueue);
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static void mcryptd_free(struct crypto_instance *inst)
+{
+ struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
+ struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
+
+ switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_drop_shash(&hctx->spawn);
+ kfree(ahash_instance(inst));
+ return;
+ default:
+ crypto_drop_spawn(&ctx->spawn);
+ kfree(inst);
+ }
+}
+
+static struct crypto_template mcryptd_tmpl = {
+ .name = "mcryptd",
+ .create = mcryptd_create,
+ .free = mcryptd_free,
+ .module = THIS_MODULE,
+};
+
+struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
+ u32 type, u32 mask)
+{
+ char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_ahash *tfm;
+
+ if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+ "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+ if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+ crypto_free_ahash(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return __mcryptd_ahash_cast(tfm);
+}
+EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
+
+int shash_ahash_mcryptd_digest(struct ahash_request *req,
+ struct shash_desc *desc)
+{
+ int err;
+
+ err = crypto_shash_init(desc) ?:
+ shash_ahash_mcryptd_finup(req, desc);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);
+
+int shash_ahash_mcryptd_update(struct ahash_request *req,
+ struct shash_desc *desc)
+{
+ struct crypto_shash *tfm = desc->tfm;
+ struct shash_alg *shash = crypto_shash_alg(tfm);
+
+ /* alignment is to be done by multi-buffer crypto algorithm if needed */
+
+ return shash->update(desc, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);
+
+int shash_ahash_mcryptd_finup(struct ahash_request *req,
+ struct shash_desc *desc)
+{
+ struct crypto_shash *tfm = desc->tfm;
+ struct shash_alg *shash = crypto_shash_alg(tfm);
+
+ /* alignment is to be done by multi-buffer crypto algorithm if needed */
+
+ return shash->finup(desc, NULL, 0, req->result);
+}
+EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);
+
+int shash_ahash_mcryptd_final(struct ahash_request *req,
+ struct shash_desc *desc)
+{
+ struct crypto_shash *tfm = desc->tfm;
+ struct shash_alg *shash = crypto_shash_alg(tfm);
+
+ /* alignment is to be done by multi-buffer crypto algorithm if needed */
+
+ return shash->final(desc, req->result);
+}
+EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);
+
+struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
+{
+ struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
+
+ return ctx->child;
+}
+EXPORT_SYMBOL_GPL(mcryptd_ahash_child);
+
+struct shash_desc *mcryptd_shash_desc(struct ahash_request *req)
+{
+ struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ return &rctx->desc;
+}
+EXPORT_SYMBOL_GPL(mcryptd_shash_desc);
+
+void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
+{
+ crypto_free_ahash(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
+
+
+static int __init mcryptd_init(void)
+{
+ int err, cpu;
+ struct mcryptd_flush_list *flist;
+
+ mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
+ for_each_possible_cpu(cpu) {
+ flist = per_cpu_ptr(mcryptd_flist, cpu);
+ INIT_LIST_HEAD(&flist->list);
+ mutex_init(&flist->lock);
+ }
+
+ err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
+ if (err) {
+ free_percpu(mcryptd_flist);
+ return err;
+ }
+
+ err = crypto_register_template(&mcryptd_tmpl);
+ if (err) {
+ mcryptd_fini_queue(&mqueue);
+ free_percpu(mcryptd_flist);
+ }
+
+ return err;
+}
+
+static void __exit mcryptd_exit(void)
+{
+ mcryptd_fini_queue(&mqueue);
+ crypto_unregister_template(&mcryptd_tmpl);
+ free_percpu(mcryptd_flist);
+}
+
+subsys_initcall(mcryptd_init);
+module_exit(mcryptd_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 543366779524..0bb558344699 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
+#include <asm/unaligned.h>
static inline u32 Ch(u32 x, u32 y, u32 z)
{
@@ -42,7 +43,7 @@ static inline u32 Maj(u32 x, u32 y, u32 z)
static inline void LOAD_OP(int I, u32 *W, const u8 *input)
{
- W[I] = __be32_to_cpu( ((__be32*)(input))[I] );
+ W[I] = get_unaligned_be32((__u32 *)input + I);
}
static inline void BLEND_OP(int I, u32 *W)
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 6ed124f3ea0f..6dde57dc511b 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -20,6 +20,7 @@
#include <crypto/sha.h>
#include <linux/percpu.h>
#include <asm/byteorder.h>
+#include <asm/unaligned.h>
static inline u64 Ch(u64 x, u64 y, u64 z)
{
@@ -68,7 +69,7 @@ static const u64 sha512_K[80] = {
static inline void LOAD_OP(int I, u64 *W, const u8 *input)
{
- W[I] = __be64_to_cpu( ((__be64*)(input))[I] );
+ W[I] = get_unaligned_be64((__u64 *)input + I);
}
static inline void BLEND_OP(int I, u64 *W)
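
The LOAD_OP changes in both SHA generics swap a direct __be32/__be64 cast-and-dereference for get_unaligned_be32/64, since the input block need not be naturally aligned and the cast-deref can fault on strict-alignment architectures. A portable userspace equivalent of the 32-bit helper (sketch; the kernel version may compile down to a single load on x86):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* memcpy-based unaligned big-endian load: safe at any alignment. */
    static uint32_t load_be32(const uint8_t *p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof(v));
        return ((v & 0x000000ffu) << 24) |
               ((v & 0x0000ff00u) << 8)  |
               ((v & 0x00ff0000u) >> 8)  |
               ((v & 0xff000000u) >> 24); /* assumes little-endian host */
    }

    int main(void)
    {
        uint8_t buf[5] = { 0x00, 0x12, 0x34, 0x56, 0x78 };
        /* buf + 1 is deliberately misaligned. */
        printf("0x%08x\n", load_be32(buf + 1)); /* 0x12345678 */
        return 0;
    }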
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index ac2b63105afc..9459dfd7357f 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -178,9 +178,7 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
-static int do_one_async_hash_op(struct ahash_request *req,
- struct tcrypt_result *tr,
- int ret)
+static int wait_async_op(struct tcrypt_result *tr, int ret)
{
if (ret == -EINPROGRESS || ret == -EBUSY) {
ret = wait_for_completion_interruptible(&tr->completion);
@@ -264,30 +262,26 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
ahash_request_set_crypt(req, sg, result, template[i].psize);
if (use_digest) {
- ret = do_one_async_hash_op(req, &tresult,
- crypto_ahash_digest(req));
+ ret = wait_async_op(&tresult, crypto_ahash_digest(req));
if (ret) {
pr_err("alg: hash: digest failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
} else {
- ret = do_one_async_hash_op(req, &tresult,
- crypto_ahash_init(req));
+ ret = wait_async_op(&tresult, crypto_ahash_init(req));
if (ret) {
pr_err("alt: hash: init failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- ret = do_one_async_hash_op(req, &tresult,
- crypto_ahash_update(req));
+ ret = wait_async_op(&tresult, crypto_ahash_update(req));
if (ret) {
pr_err("alt: hash: update failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- ret = do_one_async_hash_op(req, &tresult,
- crypto_ahash_final(req));
+ ret = wait_async_op(&tresult, crypto_ahash_final(req));
if (ret) {
pr_err("alt: hash: final failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
@@ -311,78 +305,75 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
if (align_offset != 0)
break;
- if (template[i].np) {
- j++;
- memset(result, 0, MAX_DIGEST_SIZE);
+ if (!template[i].np)
+ continue;
- temp = 0;
- sg_init_table(sg, template[i].np);
- ret = -EINVAL;
- for (k = 0; k < template[i].np; k++) {
- if (WARN_ON(offset_in_page(IDX[k]) +
- template[i].tap[k] > PAGE_SIZE))
- goto out;
- sg_set_buf(&sg[k],
- memcpy(xbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]),
- template[i].plaintext + temp,
- template[i].tap[k]),
- template[i].tap[k]);
- temp += template[i].tap[k];
- }
-
- if (template[i].ksize) {
- if (template[i].ksize > MAX_KEYLEN) {
- pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
- j, algo, template[i].ksize,
- MAX_KEYLEN);
- ret = -EINVAL;
- goto out;
- }
- crypto_ahash_clear_flags(tfm, ~0);
- memcpy(key, template[i].key, template[i].ksize);
- ret = crypto_ahash_setkey(tfm, key,
- template[i].ksize);
-
- if (ret) {
- printk(KERN_ERR "alg: hash: setkey "
- "failed on chunking test %d "
- "for %s: ret=%d\n", j, algo,
- -ret);
- goto out;
- }
- }
-
- ahash_request_set_crypt(req, sg, result,
- template[i].psize);
- ret = crypto_ahash_digest(req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(
- &tresult.completion);
- if (!ret && !(ret = tresult.err)) {
- reinit_completion(&tresult.completion);
- break;
- }
- /* fall through */
- default:
- printk(KERN_ERR "alg: hash: digest failed "
- "on chunking test %d for %s: "
- "ret=%d\n", j, algo, -ret);
+ j++;
+ memset(result, 0, MAX_DIGEST_SIZE);
+
+ temp = 0;
+ sg_init_table(sg, template[i].np);
+ ret = -EINVAL;
+ for (k = 0; k < template[i].np; k++) {
+ if (WARN_ON(offset_in_page(IDX[k]) +
+ template[i].tap[k] > PAGE_SIZE))
goto out;
- }
+ sg_set_buf(&sg[k],
+ memcpy(xbuf[IDX[k] >> PAGE_SHIFT] +
+ offset_in_page(IDX[k]),
+ template[i].plaintext + temp,
+ template[i].tap[k]),
+ template[i].tap[k]);
+ temp += template[i].tap[k];
+ }
- if (memcmp(result, template[i].digest,
- crypto_ahash_digestsize(tfm))) {
- printk(KERN_ERR "alg: hash: Chunking test %d "
- "failed for %s\n", j, algo);
- hexdump(result, crypto_ahash_digestsize(tfm));
+ if (template[i].ksize) {
+ if (template[i].ksize > MAX_KEYLEN) {
+ pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
+ j, algo, template[i].ksize, MAX_KEYLEN);
ret = -EINVAL;
goto out;
}
+ crypto_ahash_clear_flags(tfm, ~0);
+ memcpy(key, template[i].key, template[i].ksize);
+ ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
+
+ if (ret) {
+ printk(KERN_ERR "alg: hash: setkey "
+ "failed on chunking test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ }
+
+ ahash_request_set_crypt(req, sg, result, template[i].psize);
+ ret = crypto_ahash_digest(req);
+ switch (ret) {
+ case 0:
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ ret = wait_for_completion_interruptible(
+ &tresult.completion);
+ if (!ret && !(ret = tresult.err)) {
+ reinit_completion(&tresult.completion);
+ break;
+ }
+ /* fall through */
+ default:
+ printk(KERN_ERR "alg: hash: digest failed "
+ "on chunking test %d for %s: "
+ "ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+
+ if (memcmp(result, template[i].digest,
+ crypto_ahash_digestsize(tfm))) {
+ printk(KERN_ERR "alg: hash: Chunking test %d "
+ "failed for %s\n", j, algo);
+ hexdump(result, crypto_ahash_digestsize(tfm));
+ ret = -EINVAL;
+ goto out;
}
}
@@ -492,121 +483,116 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
tcrypt_complete, &result);
for (i = 0, j = 0; i < tcount; i++) {
- if (!template[i].np) {
- j++;
+ if (template[i].np)
+ continue;
- /* some templates have no input data but they will
- * touch input
- */
- input = xbuf[0];
- input += align_offset;
- assoc = axbuf[0];
+ j++;
- ret = -EINVAL;
- if (WARN_ON(align_offset + template[i].ilen >
- PAGE_SIZE || template[i].alen > PAGE_SIZE))
- goto out;
+ /* some templates have no input data but they will
+ * touch input
+ */
+ input = xbuf[0];
+ input += align_offset;
+ assoc = axbuf[0];
- memcpy(input, template[i].input, template[i].ilen);
- memcpy(assoc, template[i].assoc, template[i].alen);
- if (template[i].iv)
- memcpy(iv, template[i].iv, MAX_IVLEN);
- else
- memset(iv, 0, MAX_IVLEN);
+ ret = -EINVAL;
+ if (WARN_ON(align_offset + template[i].ilen >
+ PAGE_SIZE || template[i].alen > PAGE_SIZE))
+ goto out;
- crypto_aead_clear_flags(tfm, ~0);
- if (template[i].wk)
- crypto_aead_set_flags(
- tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ memcpy(input, template[i].input, template[i].ilen);
+ memcpy(assoc, template[i].assoc, template[i].alen);
+ if (template[i].iv)
+ memcpy(iv, template[i].iv, MAX_IVLEN);
+ else
+ memset(iv, 0, MAX_IVLEN);
- if (template[i].klen > MAX_KEYLEN) {
- pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
- d, j, algo, template[i].klen,
- MAX_KEYLEN);
- ret = -EINVAL;
- goto out;
- }
- memcpy(key, template[i].key, template[i].klen);
+ crypto_aead_clear_flags(tfm, ~0);
+ if (template[i].wk)
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
- ret = crypto_aead_setkey(tfm, key,
- template[i].klen);
- if (!ret == template[i].fail) {
- pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
- d, j, algo, crypto_aead_get_flags(tfm));
- goto out;
- } else if (ret)
- continue;
+ if (template[i].klen > MAX_KEYLEN) {
+ pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
+ d, j, algo, template[i].klen,
+ MAX_KEYLEN);
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy(key, template[i].key, template[i].klen);
- authsize = abs(template[i].rlen - template[i].ilen);
- ret = crypto_aead_setauthsize(tfm, authsize);
- if (ret) {
- pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
- d, authsize, j, algo);
- goto out;
- }
+ ret = crypto_aead_setkey(tfm, key, template[i].klen);
+ if (!ret == template[i].fail) {
+ pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
+ d, j, algo, crypto_aead_get_flags(tfm));
+ goto out;
+ } else if (ret)
+ continue;
- if (diff_dst) {
- output = xoutbuf[0];
- output += align_offset;
- sg_init_one(&sg[0], input, template[i].ilen);
- sg_init_one(&sgout[0], output,
- template[i].rlen);
- } else {
- sg_init_one(&sg[0], input,
- template[i].ilen +
- (enc ? authsize : 0));
- output = input;
- }
+ authsize = abs(template[i].rlen - template[i].ilen);
+ ret = crypto_aead_setauthsize(tfm, authsize);
+ if (ret) {
+ pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
+ d, authsize, j, algo);
+ goto out;
+ }
- sg_init_one(&asg[0], assoc, template[i].alen);
+ if (diff_dst) {
+ output = xoutbuf[0];
+ output += align_offset;
+ sg_init_one(&sg[0], input, template[i].ilen);
+ sg_init_one(&sgout[0], output, template[i].rlen);
+ } else {
+ sg_init_one(&sg[0], input,
+ template[i].ilen + (enc ? authsize : 0));
+ output = input;
+ }
- aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
- template[i].ilen, iv);
+ sg_init_one(&asg[0], assoc, template[i].alen);
- aead_request_set_assoc(req, asg, template[i].alen);
+ aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
+ template[i].ilen, iv);
- ret = enc ?
- crypto_aead_encrypt(req) :
- crypto_aead_decrypt(req);
+ aead_request_set_assoc(req, asg, template[i].alen);
- switch (ret) {
- case 0:
- if (template[i].novrfy) {
- /* verification was supposed to fail */
- pr_err("alg: aead%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
- d, e, j, algo);
- /* so really, we got a bad message */
- ret = -EBADMSG;
- goto out;
- }
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(
- &result.completion);
- if (!ret && !(ret = result.err)) {
- reinit_completion(&result.completion);
- break;
- }
- case -EBADMSG:
- if (template[i].novrfy)
- /* verification failure was expected */
- continue;
- /* fall through */
- default:
- pr_err("alg: aead%s: %s failed on test %d for %s: ret=%d\n",
- d, e, j, algo, -ret);
- goto out;
- }
+ ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
- q = output;
- if (memcmp(q, template[i].result, template[i].rlen)) {
- pr_err("alg: aead%s: Test %d failed on %s for %s\n",
- d, j, e, algo);
- hexdump(q, template[i].rlen);
- ret = -EINVAL;
+ switch (ret) {
+ case 0:
+ if (template[i].novrfy) {
+ /* verification was supposed to fail */
+ pr_err("alg: aead%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
+ d, e, j, algo);
+ /* so really, we got a bad message */
+ ret = -EBADMSG;
goto out;
}
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ ret = wait_for_completion_interruptible(
+ &result.completion);
+ if (!ret && !(ret = result.err)) {
+ reinit_completion(&result.completion);
+ break;
+ }
+ case -EBADMSG:
+ if (template[i].novrfy)
+ /* verification failure was expected */
+ continue;
+ /* fall through */
+ default:
+ pr_err("alg: aead%s: %s failed on test %d for %s: ret=%d\n",
+ d, e, j, algo, -ret);
+ goto out;
+ }
+
+ q = output;
+ if (memcmp(q, template[i].result, template[i].rlen)) {
+ pr_err("alg: aead%s: Test %d failed on %s for %s\n",
+ d, j, e, algo);
+ hexdump(q, template[i].rlen);
+ ret = -EINVAL;
+ goto out;
}
}
@@ -615,191 +601,182 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
if (align_offset != 0)
break;
- if (template[i].np) {
- j++;
-
- if (template[i].iv)
- memcpy(iv, template[i].iv, MAX_IVLEN);
- else
- memset(iv, 0, MAX_IVLEN);
-
- crypto_aead_clear_flags(tfm, ~0);
- if (template[i].wk)
- crypto_aead_set_flags(
- tfm, CRYPTO_TFM_REQ_WEAK_KEY);
- if (template[i].klen > MAX_KEYLEN) {
- pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
- d, j, algo, template[i].klen,
- MAX_KEYLEN);
- ret = -EINVAL;
- goto out;
- }
- memcpy(key, template[i].key, template[i].klen);
+ if (!template[i].np)
+ continue;
- ret = crypto_aead_setkey(tfm, key, template[i].klen);
- if (!ret == template[i].fail) {
- pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
- d, j, algo, crypto_aead_get_flags(tfm));
- goto out;
- } else if (ret)
- continue;
+ j++;
- authsize = abs(template[i].rlen - template[i].ilen);
+ if (template[i].iv)
+ memcpy(iv, template[i].iv, MAX_IVLEN);
+ else
+ memset(iv, 0, MAX_IVLEN);
+ crypto_aead_clear_flags(tfm, ~0);
+ if (template[i].wk)
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ if (template[i].klen > MAX_KEYLEN) {
+ pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
+ d, j, algo, template[i].klen, MAX_KEYLEN);
ret = -EINVAL;
- sg_init_table(sg, template[i].np);
- if (diff_dst)
- sg_init_table(sgout, template[i].np);
- for (k = 0, temp = 0; k < template[i].np; k++) {
- if (WARN_ON(offset_in_page(IDX[k]) +
- template[i].tap[k] > PAGE_SIZE))
- goto out;
-
- q = xbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]);
+ goto out;
+ }
+ memcpy(key, template[i].key, template[i].klen);
- memcpy(q, template[i].input + temp,
- template[i].tap[k]);
+ ret = crypto_aead_setkey(tfm, key, template[i].klen);
+ if (!ret == template[i].fail) {
+ pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
+ d, j, algo, crypto_aead_get_flags(tfm));
+ goto out;
+ } else if (ret)
+ continue;
- sg_set_buf(&sg[k], q, template[i].tap[k]);
+ authsize = abs(template[i].rlen - template[i].ilen);
- if (diff_dst) {
- q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]);
+ ret = -EINVAL;
+ sg_init_table(sg, template[i].np);
+ if (diff_dst)
+ sg_init_table(sgout, template[i].np);
+ for (k = 0, temp = 0; k < template[i].np; k++) {
+ if (WARN_ON(offset_in_page(IDX[k]) +
+ template[i].tap[k] > PAGE_SIZE))
+ goto out;
- memset(q, 0, template[i].tap[k]);
+ q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]);
+ memcpy(q, template[i].input + temp, template[i].tap[k]);
+ sg_set_buf(&sg[k], q, template[i].tap[k]);
- sg_set_buf(&sgout[k], q,
- template[i].tap[k]);
- }
+ if (diff_dst) {
+ q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
+ offset_in_page(IDX[k]);
- n = template[i].tap[k];
- if (k == template[i].np - 1 && enc)
- n += authsize;
- if (offset_in_page(q) + n < PAGE_SIZE)
- q[n] = 0;
+ memset(q, 0, template[i].tap[k]);
- temp += template[i].tap[k];
+ sg_set_buf(&sgout[k], q, template[i].tap[k]);
}
- ret = crypto_aead_setauthsize(tfm, authsize);
- if (ret) {
- pr_err("alg: aead%s: Failed to set authsize to %u on chunk test %d for %s\n",
- d, authsize, j, algo);
+ n = template[i].tap[k];
+ if (k == template[i].np - 1 && enc)
+ n += authsize;
+ if (offset_in_page(q) + n < PAGE_SIZE)
+ q[n] = 0;
+
+ temp += template[i].tap[k];
+ }
+
+ ret = crypto_aead_setauthsize(tfm, authsize);
+ if (ret) {
+ pr_err("alg: aead%s: Failed to set authsize to %u on chunk test %d for %s\n",
+ d, authsize, j, algo);
+ goto out;
+ }
+
+ if (enc) {
+ if (WARN_ON(sg[k - 1].offset +
+ sg[k - 1].length + authsize >
+ PAGE_SIZE)) {
+ ret = -EINVAL;
goto out;
}
- if (enc) {
- if (WARN_ON(sg[k - 1].offset +
- sg[k - 1].length + authsize >
- PAGE_SIZE)) {
- ret = -EINVAL;
- goto out;
- }
+ if (diff_dst)
+ sgout[k - 1].length += authsize;
+ else
+ sg[k - 1].length += authsize;
+ }
- if (diff_dst)
- sgout[k - 1].length += authsize;
- else
- sg[k - 1].length += authsize;
+ sg_init_table(asg, template[i].anp);
+ ret = -EINVAL;
+ for (k = 0, temp = 0; k < template[i].anp; k++) {
+ if (WARN_ON(offset_in_page(IDX[k]) +
+ template[i].atap[k] > PAGE_SIZE))
+ goto out;
+ sg_set_buf(&asg[k],
+ memcpy(axbuf[IDX[k] >> PAGE_SHIFT] +
+ offset_in_page(IDX[k]),
+ template[i].assoc + temp,
+ template[i].atap[k]),
+ template[i].atap[k]);
+ temp += template[i].atap[k];
+ }
+
+ aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
+ template[i].ilen,
+ iv);
+
+ aead_request_set_assoc(req, asg, template[i].alen);
+
+ ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+
+ switch (ret) {
+ case 0:
+ if (template[i].novrfy) {
+ /* verification was supposed to fail */
+ pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret was 0, expected -EBADMSG\n",
+ d, e, j, algo);
+ /* so really, we got a bad message */
+ ret = -EBADMSG;
+ goto out;
}
-
- sg_init_table(asg, template[i].anp);
- ret = -EINVAL;
- for (k = 0, temp = 0; k < template[i].anp; k++) {
- if (WARN_ON(offset_in_page(IDX[k]) +
- template[i].atap[k] > PAGE_SIZE))
- goto out;
- sg_set_buf(&asg[k],
- memcpy(axbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]),
- template[i].assoc + temp,
- template[i].atap[k]),
- template[i].atap[k]);
- temp += template[i].atap[k];
- }
-
- aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
- template[i].ilen,
- iv);
-
- aead_request_set_assoc(req, asg, template[i].alen);
-
- ret = enc ?
- crypto_aead_encrypt(req) :
- crypto_aead_decrypt(req);
-
- switch (ret) {
- case 0:
- if (template[i].novrfy) {
- /* verification was supposed to fail */
- pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret was 0, expected -EBADMSG\n",
- d, e, j, algo);
- /* so really, we got a bad message */
- ret = -EBADMSG;
- goto out;
- }
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ ret = wait_for_completion_interruptible(
+ &result.completion);
+ if (!ret && !(ret = result.err)) {
+ reinit_completion(&result.completion);
break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(
- &result.completion);
- if (!ret && !(ret = result.err)) {
- reinit_completion(&result.completion);
- break;
- }
- case -EBADMSG:
- if (template[i].novrfy)
- /* verification failure was expected */
- continue;
- /* fall through */
- default:
- pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret=%d\n",
- d, e, j, algo, -ret);
- goto out;
}
+ case -EBADMSG:
+ if (template[i].novrfy)
+ /* verification failure was expected */
+ continue;
+ /* fall through */
+ default:
+ pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret=%d\n",
+ d, e, j, algo, -ret);
+ goto out;
+ }
- ret = -EINVAL;
- for (k = 0, temp = 0; k < template[i].np; k++) {
- if (diff_dst)
- q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]);
- else
- q = xbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]);
-
- n = template[i].tap[k];
- if (k == template[i].np - 1)
- n += enc ? authsize : -authsize;
+ ret = -EINVAL;
+ for (k = 0, temp = 0; k < template[i].np; k++) {
+ if (diff_dst)
+ q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
+ offset_in_page(IDX[k]);
+ else
+ q = xbuf[IDX[k] >> PAGE_SHIFT] +
+ offset_in_page(IDX[k]);
- if (memcmp(q, template[i].result + temp, n)) {
- pr_err("alg: aead%s: Chunk test %d failed on %s at page %u for %s\n",
- d, j, e, k, algo);
- hexdump(q, n);
- goto out;
- }
+ n = template[i].tap[k];
+ if (k == template[i].np - 1)
+ n += enc ? authsize : -authsize;
- q += n;
- if (k == template[i].np - 1 && !enc) {
- if (!diff_dst &&
- memcmp(q, template[i].input +
- temp + n, authsize))
- n = authsize;
- else
- n = 0;
- } else {
- for (n = 0; offset_in_page(q + n) &&
- q[n]; n++)
- ;
- }
- if (n) {
- pr_err("alg: aead%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n",
- d, j, e, k, algo, n);
- hexdump(q, n);
- goto out;
- }
+ if (memcmp(q, template[i].result + temp, n)) {
+ pr_err("alg: aead%s: Chunk test %d failed on %s at page %u for %s\n",
+ d, j, e, k, algo);
+ hexdump(q, n);
+ goto out;
+ }
- temp += template[i].tap[k];
+ q += n;
+ if (k == template[i].np - 1 && !enc) {
+ if (!diff_dst &&
+ memcmp(q, template[i].input +
+ temp + n, authsize))
+ n = authsize;
+ else
+ n = 0;
+ } else {
+ for (n = 0; offset_in_page(q + n) && q[n]; n++)
+ ;
+ }
+ if (n) {
+ pr_err("alg: aead%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n",
+ d, j, e, k, algo, n);
+ hexdump(q, n);
+ goto out;
}
+
+ temp += template[i].tap[k];
}
}
@@ -978,78 +955,73 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
j = 0;
for (i = 0; i < tcount; i++) {
+ if (template[i].np && !template[i].also_non_np)
+ continue;
+
if (template[i].iv)
memcpy(iv, template[i].iv, MAX_IVLEN);
else
memset(iv, 0, MAX_IVLEN);
- if (!(template[i].np) || (template[i].also_non_np)) {
- j++;
+ j++;
+ ret = -EINVAL;
+ if (WARN_ON(align_offset + template[i].ilen > PAGE_SIZE))
+ goto out;
- ret = -EINVAL;
- if (WARN_ON(align_offset + template[i].ilen >
- PAGE_SIZE))
- goto out;
+ data = xbuf[0];
+ data += align_offset;
+ memcpy(data, template[i].input, template[i].ilen);
- data = xbuf[0];
- data += align_offset;
- memcpy(data, template[i].input, template[i].ilen);
-
- crypto_ablkcipher_clear_flags(tfm, ~0);
- if (template[i].wk)
- crypto_ablkcipher_set_flags(
- tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-
- ret = crypto_ablkcipher_setkey(tfm, template[i].key,
- template[i].klen);
- if (!ret == template[i].fail) {
- pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
- d, j, algo,
- crypto_ablkcipher_get_flags(tfm));
- goto out;
- } else if (ret)
- continue;
+ crypto_ablkcipher_clear_flags(tfm, ~0);
+ if (template[i].wk)
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
- sg_init_one(&sg[0], data, template[i].ilen);
- if (diff_dst) {
- data = xoutbuf[0];
- data += align_offset;
- sg_init_one(&sgout[0], data, template[i].ilen);
- }
+ ret = crypto_ablkcipher_setkey(tfm, template[i].key,
+ template[i].klen);
+ if (!ret == template[i].fail) {
+ pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
+ d, j, algo, crypto_ablkcipher_get_flags(tfm));
+ goto out;
+ } else if (ret)
+ continue;
+
+ sg_init_one(&sg[0], data, template[i].ilen);
+ if (diff_dst) {
+ data = xoutbuf[0];
+ data += align_offset;
+ sg_init_one(&sgout[0], data, template[i].ilen);
+ }
- ablkcipher_request_set_crypt(req, sg,
- (diff_dst) ? sgout : sg,
- template[i].ilen, iv);
- ret = enc ?
- crypto_ablkcipher_encrypt(req) :
- crypto_ablkcipher_decrypt(req);
+ ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
+ template[i].ilen, iv);
+ ret = enc ? crypto_ablkcipher_encrypt(req) :
+ crypto_ablkcipher_decrypt(req);
- switch (ret) {
- case 0:
+ switch (ret) {
+ case 0:
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ ret = wait_for_completion_interruptible(
+ &result.completion);
+ if (!ret && !((ret = result.err))) {
+ reinit_completion(&result.completion);
break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(
- &result.completion);
- if (!ret && !((ret = result.err))) {
- reinit_completion(&result.completion);
- break;
- }
- /* fall through */
- default:
- pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
- d, e, j, algo, -ret);
- goto out;
}
+ /* fall through */
+ default:
+ pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
+ d, e, j, algo, -ret);
+ goto out;
+ }
- q = data;
- if (memcmp(q, template[i].result, template[i].rlen)) {
- pr_err("alg: skcipher%s: Test %d failed on %s for %s\n",
- d, j, e, algo);
- hexdump(q, template[i].rlen);
- ret = -EINVAL;
- goto out;
- }
+ q = data;
+ if (memcmp(q, template[i].result, template[i].rlen)) {
+ pr_err("alg: skcipher%s: Test %d failed on %s for %s\n",
+ d, j, e, algo);
+ hexdump(q, template[i].rlen);
+ ret = -EINVAL;
+ goto out;
}
}
@@ -1059,121 +1031,113 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
if (align_offset != 0)
break;
+ if (!template[i].np)
+ continue;
+
if (template[i].iv)
memcpy(iv, template[i].iv, MAX_IVLEN);
else
memset(iv, 0, MAX_IVLEN);
-		if (template[i].np) {
-			j++;
-
-			crypto_ablkcipher_clear_flags(tfm, ~0);
-			if (template[i].wk)
-				crypto_ablkcipher_set_flags(
-					tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-
-			ret = crypto_ablkcipher_setkey(tfm, template[i].key,
-						       template[i].klen);
-			if (!ret == template[i].fail) {
-				pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
-				       d, j, algo,
-				       crypto_ablkcipher_get_flags(tfm));
-				goto out;
-			} else if (ret)
-				continue;
-
-			temp = 0;
-			ret = -EINVAL;
-			sg_init_table(sg, template[i].np);
-			if (diff_dst)
-				sg_init_table(sgout, template[i].np);
-			for (k = 0; k < template[i].np; k++) {
-				if (WARN_ON(offset_in_page(IDX[k]) +
-					    template[i].tap[k] > PAGE_SIZE))
-					goto out;
-
-				q = xbuf[IDX[k] >> PAGE_SHIFT] +
-				    offset_in_page(IDX[k]);
-
-				memcpy(q, template[i].input + temp,
-				       template[i].tap[k]);
-
-				if (offset_in_page(q) + template[i].tap[k] <
-				    PAGE_SIZE)
-					q[template[i].tap[k]] = 0;
-
-				sg_set_buf(&sg[k], q, template[i].tap[k]);
-				if (diff_dst) {
-					q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
-					    offset_in_page(IDX[k]);
-
-					sg_set_buf(&sgout[k], q,
-						   template[i].tap[k]);
-
-					memset(q, 0, template[i].tap[k]);
-					if (offset_in_page(q) +
-					    template[i].tap[k] < PAGE_SIZE)
-						q[template[i].tap[k]] = 0;
-				}
-
-				temp += template[i].tap[k];
-			}
-
-			ablkcipher_request_set_crypt(req, sg,
-						     (diff_dst) ? sgout : sg,
-						     template[i].ilen, iv);
-
-			ret = enc ?
-				crypto_ablkcipher_encrypt(req) :
-				crypto_ablkcipher_decrypt(req);
-
-			switch (ret) {
-			case 0:
-				break;
-			case -EINPROGRESS:
-			case -EBUSY:
-				ret = wait_for_completion_interruptible(
-					&result.completion);
-				if (!ret && !((ret = result.err))) {
-					reinit_completion(&result.completion);
-					break;
-				}
-				/* fall through */
-			default:
-				pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
-				       d, e, j, algo, -ret);
-				goto out;
-			}
-
-			temp = 0;
-			ret = -EINVAL;
-			for (k = 0; k < template[i].np; k++) {
-				if (diff_dst)
-					q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
-					    offset_in_page(IDX[k]);
-				else
-					q = xbuf[IDX[k] >> PAGE_SHIFT] +
-					    offset_in_page(IDX[k]);
-
-				if (memcmp(q, template[i].result + temp,
-					   template[i].tap[k])) {
-					pr_err("alg: skcipher%s: Chunk test %d failed on %s at page %u for %s\n",
-					       d, j, e, k, algo);
-					hexdump(q, template[i].tap[k]);
-					goto out;
-				}
-
-				q += template[i].tap[k];
-				for (n = 0; offset_in_page(q + n) && q[n]; n++)
-					;
-				if (n) {
-					pr_err("alg: skcipher%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n",
-					       d, j, e, k, algo, n);
-					hexdump(q, n);
-					goto out;
-				}
-				temp += template[i].tap[k];
-			}
-		}
+		j++;
+		crypto_ablkcipher_clear_flags(tfm, ~0);
+		if (template[i].wk)
+			crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+
+		ret = crypto_ablkcipher_setkey(tfm, template[i].key,
+					       template[i].klen);
+		if (!ret == template[i].fail) {
+			pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
+			       d, j, algo, crypto_ablkcipher_get_flags(tfm));
+			goto out;
+		} else if (ret)
+			continue;
+
+		temp = 0;
+		ret = -EINVAL;
+		sg_init_table(sg, template[i].np);
+		if (diff_dst)
+			sg_init_table(sgout, template[i].np);
+		for (k = 0; k < template[i].np; k++) {
+			if (WARN_ON(offset_in_page(IDX[k]) +
+				    template[i].tap[k] > PAGE_SIZE))
+				goto out;
+
+			q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]);
+
+			memcpy(q, template[i].input + temp, template[i].tap[k]);
+
+			if (offset_in_page(q) + template[i].tap[k] < PAGE_SIZE)
+				q[template[i].tap[k]] = 0;
+
+			sg_set_buf(&sg[k], q, template[i].tap[k]);
+			if (diff_dst) {
+				q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
+				    offset_in_page(IDX[k]);
+
+				sg_set_buf(&sgout[k], q, template[i].tap[k]);
+
+				memset(q, 0, template[i].tap[k]);
+				if (offset_in_page(q) +
+				    template[i].tap[k] < PAGE_SIZE)
+					q[template[i].tap[k]] = 0;
+			}
+
+			temp += template[i].tap[k];
+		}
+
+		ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
+					     template[i].ilen, iv);
+
+		ret = enc ? crypto_ablkcipher_encrypt(req) :
+			    crypto_ablkcipher_decrypt(req);
+
+		switch (ret) {
+		case 0:
+			break;
+		case -EINPROGRESS:
+		case -EBUSY:
+			ret = wait_for_completion_interruptible(
+				&result.completion);
+			if (!ret && !((ret = result.err))) {
+				reinit_completion(&result.completion);
+				break;
+			}
+			/* fall through */
+		default:
+			pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
+			       d, e, j, algo, -ret);
+			goto out;
+		}
+
+		temp = 0;
+		ret = -EINVAL;
+		for (k = 0; k < template[i].np; k++) {
+			if (diff_dst)
+				q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
+				    offset_in_page(IDX[k]);
+			else
+				q = xbuf[IDX[k] >> PAGE_SHIFT] +
+				    offset_in_page(IDX[k]);
+
+			if (memcmp(q, template[i].result + temp,
+				   template[i].tap[k])) {
+				pr_err("alg: skcipher%s: Chunk test %d failed on %s at page %u for %s\n",
+				       d, j, e, k, algo);
+				hexdump(q, template[i].tap[k]);
+				goto out;
+			}
+
+			q += template[i].tap[k];
+			for (n = 0; offset_in_page(q + n) && q[n]; n++)
+				;
+			if (n) {
+				pr_err("alg: skcipher%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n",
+				       d, j, e, k, algo, n);
+				hexdump(q, n);
+				goto out;
+			}
+			temp += template[i].tap[k];
+		}
 	}
@@ -3213,6 +3177,38 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "lz4",
+ .test = alg_test_comp,
+ .fips_allowed = 1,
+ .suite = {
+ .comp = {
+ .comp = {
+ .vecs = lz4_comp_tv_template,
+ .count = LZ4_COMP_TEST_VECTORS
+ },
+ .decomp = {
+ .vecs = lz4_decomp_tv_template,
+ .count = LZ4_DECOMP_TEST_VECTORS
+ }
+ }
+ }
+ }, {
+ .alg = "lz4hc",
+ .test = alg_test_comp,
+ .fips_allowed = 1,
+ .suite = {
+ .comp = {
+ .comp = {
+ .vecs = lz4hc_comp_tv_template,
+ .count = LZ4HC_COMP_TEST_VECTORS
+ },
+ .decomp = {
+ .vecs = lz4hc_decomp_tv_template,
+ .count = LZ4HC_DECOMP_TEST_VECTORS
+ }
+ }
+ }
+ }, {
.alg = "lzo",
.test = alg_test_comp,
.fips_allowed = 1,
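
The two __test_skcipher() hunks above apply one mechanical refactor: the per-vector filter that used to wrap each loop body in an if-block is hoisted to an early "continue", removing a full level of indentation from the test logic without changing behaviour. A minimal stand-alone sketch of the guard-clause pattern; the names here (struct vec, run_linear_tests) are illustrative only and are not testmgr.c code:

#include <stdio.h>

struct vec {
	int np;			/* number of scatter-gather chunks; 0 = linear */
	int also_non_np;	/* chunked vector that should also run linearly */
};

static void run_linear_tests(const struct vec *tv, int count)
{
	int i, j = 0;

	for (i = 0; i < count; i++) {
		/* was: if (!tv[i].np || tv[i].also_non_np) { ...body... } */
		if (tv[i].np && !tv[i].also_non_np)
			continue;	/* chunked-only vector: skip here */

		j++;	/* the body now sits one indent level shallower */
		printf("running linear test %d\n", j);
	}
}

int main(void)
{
	const struct vec tv[] = { { 0, 0 }, { 3, 0 }, { 3, 1 } };

	run_linear_tests(tv, 3);	/* runs tests 1 and 2, skips { 3, 0 } */
	return 0;
}
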
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 6597203eccfa..62e2485bb428 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -29473,4 +29473,70 @@ static struct hash_testvec bfin_crc_tv_template[] = {
};
+#define LZ4_COMP_TEST_VECTORS 1
+#define LZ4_DECOMP_TEST_VECTORS 1
+
+static struct comp_testvec lz4_comp_tv_template[] = {
+ {
+ .inlen = 70,
+ .outlen = 45,
+ .input = "Join us now and share the software "
+ "Join us now and share the software ",
+ .output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
+ "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
+ "\x64\x20\x73\x68\x61\x72\x65\x20"
+ "\x74\x68\x65\x20\x73\x6f\x66\x74"
+ "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
+ "\x77\x61\x72\x65\x20",
+ },
+};
+
+static struct comp_testvec lz4_decomp_tv_template[] = {
+ {
+ .inlen = 45,
+ .outlen = 70,
+ .input = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
+ "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
+ "\x64\x20\x73\x68\x61\x72\x65\x20"
+ "\x74\x68\x65\x20\x73\x6f\x66\x74"
+ "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
+ "\x77\x61\x72\x65\x20",
+ .output = "Join us now and share the software "
+ "Join us now and share the software ",
+ },
+};
+
+#define LZ4HC_COMP_TEST_VECTORS 1
+#define LZ4HC_DECOMP_TEST_VECTORS 1
+
+static struct comp_testvec lz4hc_comp_tv_template[] = {
+ {
+ .inlen = 70,
+ .outlen = 45,
+ .input = "Join us now and share the software "
+ "Join us now and share the software ",
+ .output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
+ "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
+ "\x64\x20\x73\x68\x61\x72\x65\x20"
+ "\x74\x68\x65\x20\x73\x6f\x66\x74"
+ "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
+ "\x77\x61\x72\x65\x20",
+ },
+};
+
+static struct comp_testvec lz4hc_decomp_tv_template[] = {
+ {
+ .inlen = 45,
+ .outlen = 70,
+ .input = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
+ "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
+ "\x64\x20\x73\x68\x61\x72\x65\x20"
+ "\x74\x68\x65\x20\x73\x6f\x66\x74"
+ "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
+ "\x77\x61\x72\x65\x20",
+ .output = "Join us now and share the software "
+ "Join us now and share the software ",
+ },
+};
+
#endif /* _CRYPTO_TESTMGR_H */
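
Each of the new LZ4 suites pairs a single compression vector with its exact inverse: the 70-byte doubled phrase compresses to the 45-byte stream above, and the decompression vector feeds that stream back. A sketch of how one such comp_testvec would be exercised, modeled loosely on the alg_test_comp() path in testmgr.c; COMP_BUF_SIZE is the scratch-buffer size testmgr.c defines, and error handling is trimmed, so treat this as illustrative rather than the actual test code:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/string.h>

static int try_comp_vector(const char *alg, const struct comp_testvec *tv)
{
	struct crypto_comp *tfm;
	char result[COMP_BUF_SIZE];
	unsigned int dlen = COMP_BUF_SIZE;
	int ret;

	tfm = crypto_alloc_comp(alg, 0, 0);	/* e.g. "lz4" or "lz4hc" */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	memset(result, 0, sizeof(result));
	ret = crypto_comp_compress(tfm, tv->input, tv->inlen,
				   result, &dlen);
	if (!ret && (dlen != tv->outlen ||
		     memcmp(result, tv->output, tv->outlen)))
		ret = -EINVAL;	/* produced stream differs from the vector */

	crypto_free_comp(tfm);
	return ret;
}
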