author    Megha Dey <megha.dey@linux.intel.com>    2016-06-21 18:21:46 -0700
committer Herbert Xu <herbert@gondor.apana.org.au> 2016-06-23 18:29:55 +0800
commit    331bf739c4f9992a73547d20bd8f2378b97d386a (patch)
tree      eff59e08cb974478786c2f0906e1b302679d1bbe /arch/x86/crypto
parent    820573ebd60d85afb8bb07fa3547ebbf842c59d4 (diff)
crypto: sha1-mb - async implementation for sha1-mb
Herbert wants the sha1-mb algorithm to have an async implementation:
https://lkml.org/lkml/2016/4/5/286. Currently, sha1-mb uses an async
interface for the outer algorithm and a sync interface for the inner
algorithm. This patch introduces an async interface for the inner
algorithm as well.

Signed-off-by: Megha Dey <megha.dey@linux.intel.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
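To make the shape of this conversion concrete, here is a minimal standalone
C sketch of the calling-convention change the patch performs. This is
illustrative only, not kernel code: the names hash_request, sync_update,
async_update and done are hypothetical stand-ins for the shash and ahash
interfaces. In the old shash style, data, length and output buffer are plain
function arguments, so the caller blocks until the digest is ready; in the
new ahash style, everything, including a completion callback, lives in a
request object, so the inner algorithm is free to complete later.

/*
 * Illustrative model only -- not the kernel's shash/ahash API.
 * hash_request, sync_update, async_update and done are hypothetical.
 */
#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct hash_request {
	const unsigned char *data;             /* input carried by the request */
	size_t len;
	unsigned char out[20];                 /* SHA-1-sized result buffer */
	void (*complete)(struct hash_request *req, int err);
};

/* shash-like: synchronous, result handed back through 'out' */
static int sync_update(const unsigned char *data, size_t len,
		       unsigned char *out)
{
	(void)data; (void)len;
	memset(out, 0, 20);                    /* stand-in for real hashing */
	return 0;
}

/* ahash-like: the request object is the only argument */
static int async_update(struct hash_request *req)
{
	/* a real driver could queue the job and complete it later */
	memset(req->out, 0, sizeof(req->out)); /* stand-in for real hashing */
	req->complete(req, 0);                 /* signal completion */
	return 0;
}

static void done(struct hash_request *req, int err)
{
	printf("digest of %zu bytes ready, err=%d\n", req->len, err);
}

int main(void)
{
	unsigned char out[20];
	struct hash_request req = {
		.data = (const unsigned char *)"abc",
		.len = 3,
		.complete = done,
	};

	sync_update(req.data, req.len, out);   /* old calling convention */
	return async_update(&req);             /* new calling convention */
}

The same wiring is visible in the patch itself: sha1_mb_async_import() now
calls ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
rctx->complete, req) to attach the completion routine to the inner request,
exactly the role the hypothetical 'complete' pointer plays above.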
Diffstat (limited to 'arch/x86/crypto')
-rw-r--r--  arch/x86/crypto/sha-mb/sha1_mb.c  182
1 file changed, 101 insertions(+), 81 deletions(-)
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index 0a464919542c..669cc37268e1 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -80,10 +80,10 @@ struct sha1_mb_ctx {
static inline struct mcryptd_hash_request_ctx
*cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
{
- struct shash_desc *desc;
+ struct ahash_request *areq;
- desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
- return container_of(desc, struct mcryptd_hash_request_ctx, desc);
+ areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
+ return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}
static inline struct ahash_request
@@ -93,7 +93,7 @@ static inline struct ahash_request
}
static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct shash_desc *desc)
+ struct ahash_request *areq)
{
rctx->flag = HASH_UPDATE;
}
@@ -375,9 +375,9 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
}
}
-static int sha1_mb_init(struct shash_desc *desc)
+static int sha1_mb_init(struct ahash_request *areq)
{
- struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
hash_ctx_init(sctx);
sctx->job.result_digest[0] = SHA1_H0;
@@ -395,7 +395,7 @@ static int sha1_mb_init(struct shash_desc *desc)
static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
int i;
- struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc);
+ struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
__be32 *dst = (__be32 *) rctx->out;
for (i = 0; i < 5; ++i)
@@ -427,7 +427,7 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
}
sha_ctx = (struct sha1_hash_ctx *)
- shash_desc_ctx(&rctx->desc);
+ ahash_request_ctx(&rctx->areq);
kernel_fpu_begin();
sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
rctx->walk.data, nbytes, flag);
@@ -519,11 +519,10 @@ static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
mcryptd_arm_flusher(cstate, delay);
}
-static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int sha1_mb_update(struct ahash_request *areq)
{
struct mcryptd_hash_request_ctx *rctx =
- container_of(desc, struct mcryptd_hash_request_ctx, desc);
+ container_of(areq, struct mcryptd_hash_request_ctx, areq);
struct mcryptd_alg_cstate *cstate =
this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
@@ -539,7 +538,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
}
/* need to init context */
- req_ctx_init(rctx, desc);
+ req_ctx_init(rctx, areq);
nbytes = crypto_ahash_walk_first(req, &rctx->walk);
@@ -552,7 +551,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
rctx->flag |= HASH_DONE;
/* submit */
- sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+ sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
sha1_mb_add_list(rctx, cstate);
kernel_fpu_begin();
sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
@@ -579,11 +578,10 @@ done:
return ret;
}
-static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int sha1_mb_finup(struct ahash_request *areq)
{
struct mcryptd_hash_request_ctx *rctx =
- container_of(desc, struct mcryptd_hash_request_ctx, desc);
+ container_of(areq, struct mcryptd_hash_request_ctx, areq);
struct mcryptd_alg_cstate *cstate =
this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
@@ -598,7 +596,7 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
}
/* need to init context */
- req_ctx_init(rctx, desc);
+ req_ctx_init(rctx, areq);
nbytes = crypto_ahash_walk_first(req, &rctx->walk);
@@ -611,11 +609,10 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
rctx->flag |= HASH_DONE;
flag = HASH_LAST;
}
- rctx->out = out;
/* submit */
rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+ sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
sha1_mb_add_list(rctx, cstate);
kernel_fpu_begin();
@@ -641,10 +638,10 @@ done:
return ret;
}
-static int sha1_mb_final(struct shash_desc *desc, u8 *out)
+static int sha1_mb_final(struct ahash_request *areq)
{
struct mcryptd_hash_request_ctx *rctx =
- container_of(desc, struct mcryptd_hash_request_ctx, desc);
+ container_of(areq, struct mcryptd_hash_request_ctx, areq);
struct mcryptd_alg_cstate *cstate =
this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
@@ -659,12 +656,11 @@ static int sha1_mb_final(struct shash_desc *desc, u8 *out)
}
/* need to init context */
- req_ctx_init(rctx, desc);
+ req_ctx_init(rctx, areq);
- rctx->out = out;
rctx->flag |= HASH_DONE | HASH_FINAL;
- sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+ sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
/* flag HASH_FINAL and 0 data size */
sha1_mb_add_list(rctx, cstate);
kernel_fpu_begin();
@@ -691,48 +687,98 @@ done:
return ret;
}
-static int sha1_mb_export(struct shash_desc *desc, void *out)
+static int sha1_mb_export(struct ahash_request *areq, void *out)
{
- struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
-static int sha1_mb_import(struct shash_desc *desc, const void *in)
+static int sha1_mb_import(struct ahash_request *areq, const void *in)
{
- struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
+static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
+{
+ struct mcryptd_ahash *mcryptd_tfm;
+ struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mcryptd_hash_ctx *mctx;
-static struct shash_alg sha1_mb_shash_alg = {
- .digestsize = SHA1_DIGEST_SIZE,
+ mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
+ if (IS_ERR(mcryptd_tfm))
+ return PTR_ERR(mcryptd_tfm);
+ mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
+ mctx->alg_state = &sha1_mb_alg_state;
+ ctx->mcryptd_tfm = mcryptd_tfm;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(&mcryptd_tfm->base));
+
+ return 0;
+}
+
+static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_request) +
+ sizeof(struct sha1_hash_ctx));
+
+ return 0;
+}
+
+static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static struct ahash_alg sha1_mb_areq_alg = {
.init = sha1_mb_init,
.update = sha1_mb_update,
.final = sha1_mb_final,
.finup = sha1_mb_finup,
.export = sha1_mb_export,
.import = sha1_mb_import,
- .descsize = sizeof(struct sha1_hash_ctx),
- .statesize = sizeof(struct sha1_hash_ctx),
- .base = {
- .cra_name = "__sha1-mb",
- .cra_driver_name = "__intel_sha1-mb",
- .cra_priority = 100,
- /*
- * use ASYNC flag as some buffers in multi-buffer
- * algo may not have completed before hashing thread sleep
- */
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_hash_ctx),
+ .base = {
+ .cra_name = "__sha1-mb",
+ .cra_driver_name = "__intel_sha1-mb",
+ .cra_priority = 100,
+ /*
+ * use ASYNC flag as some buffers in multi-buffer
+ * algo may not have completed before hashing thread
+ * sleep
+ */
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT
+ (sha1_mb_areq_alg.halg.base.cra_list),
+ .cra_init = sha1_mb_areq_init_tfm,
+ .cra_exit = sha1_mb_areq_exit_tfm,
+ .cra_ctxsize = sizeof(struct sha1_hash_ctx),
+ }
}
};
@@ -817,46 +863,20 @@ static int sha1_mb_async_import(struct ahash_request *req, const void *in)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_shash *child = mcryptd_ahash_child(mcryptd_tfm);
+ struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
struct mcryptd_hash_request_ctx *rctx;
- struct shash_desc *desc;
+ struct ahash_request *areq;
memcpy(mcryptd_req, req, sizeof(*req));
ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
rctx = ahash_request_ctx(mcryptd_req);
- desc = &rctx->desc;
- desc->tfm = child;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
+ areq = &rctx->areq;
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha1_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+ ahash_request_set_tfm(areq, child);
+ ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ rctx->complete, req);
- mcryptd_free_ahash(ctx->mcryptd_tfm);
+ return crypto_ahash_import(mcryptd_req, in);
}
static struct ahash_alg sha1_mb_async_alg = {
@@ -965,7 +985,7 @@ static int __init sha1_mb_mod_init(void)
}
sha1_mb_alg_state.flusher = &sha1_mb_flusher;
- err = crypto_register_shash(&sha1_mb_shash_alg);
+ err = crypto_register_ahash(&sha1_mb_areq_alg);
if (err)
goto err2;
err = crypto_register_ahash(&sha1_mb_async_alg);
@@ -975,7 +995,7 @@ static int __init sha1_mb_mod_init(void)
return 0;
err1:
- crypto_unregister_shash(&sha1_mb_shash_alg);
+ crypto_unregister_ahash(&sha1_mb_areq_alg);
err2:
for_each_possible_cpu(cpu) {
cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
@@ -991,7 +1011,7 @@ static void __exit sha1_mb_mod_fini(void)
struct mcryptd_alg_cstate *cpu_state;
crypto_unregister_ahash(&sha1_mb_async_alg);
- crypto_unregister_shash(&sha1_mb_shash_alg);
+ crypto_unregister_ahash(&sha1_mb_areq_alg);
for_each_possible_cpu(cpu) {
cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
kfree(cpu_state->mgr);