Diffstat (limited to 'drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c')
-rw-r--r--	drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c	334
1 file changed, 174 insertions(+), 160 deletions(-)
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 8b5b9b9d04c3..d01594353d9a 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -7,65 +7,77 @@
  *
  * This file add support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
  *
- * You could find the datasheet in Documentation/arm/sunxi.rst
+ * You could find the datasheet in Documentation/arch/arm/sunxi.rst
  */
+
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
 #include <linux/bottom_half.h>
 #include <linux/dma-mapping.h>
+#include <linux/kernel.h>
 #include <linux/pm_runtime.h>
 #include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/md5.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 #include "sun8i-ce.h"
 
-int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
+static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
 {
-	struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
-	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+		struct sun8i_ce_alg_template *algt;
+		struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+		algt = container_of(alg, struct sun8i_ce_alg_template,
+				    alg.hash.base);
+		algt->stat_fb++;
+	}
+}
+
+int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
+{
+	struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
+	struct ahash_alg *alg = crypto_ahash_alg(tfm);
 	struct sun8i_ce_alg_template *algt;
 	int err;
 
-	memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));
-
-	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
 	op->ce = algt->ce;
 
-	op->enginectx.op.do_one_request = sun8i_ce_hash_run;
-	op->enginectx.op.prepare_request = NULL;
-	op->enginectx.op.unprepare_request = NULL;
-
 	/* FALLBACK */
-	op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+	op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
 					      CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(op->fallback_tfm)) {
 		dev_err(algt->ce->dev, "Fallback driver could no be loaded\n");
 		return PTR_ERR(op->fallback_tfm);
 	}
 
-	if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
-		algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
+	crypto_ahash_set_statesize(tfm,
+				   crypto_ahash_statesize(op->fallback_tfm));
 
-	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+	crypto_ahash_set_reqsize(tfm,
 				 sizeof(struct sun8i_ce_hash_reqctx) +
-				 crypto_ahash_reqsize(op->fallback_tfm));
+				 crypto_ahash_reqsize(op->fallback_tfm) +
+				 CRYPTO_DMA_PADDING);
 
-	memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base),
-	       CRYPTO_MAX_ALG_NAME);
+	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+		memcpy(algt->fbname,
+		       crypto_ahash_driver_name(op->fallback_tfm),
+		       CRYPTO_MAX_ALG_NAME);
 
-	err = pm_runtime_get_sync(op->ce->dev);
+	err = pm_runtime_resume_and_get(op->ce->dev);
 	if (err < 0)
 		goto error_pm;
 	return 0;
 error_pm:
-	pm_runtime_put_noidle(op->ce->dev);
 	crypto_free_ahash(op->fallback_tfm);
 	return err;
 }
 
-void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
+void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
 {
-	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
 
 	crypto_free_ahash(tfmctx->fallback_tfm);
 	pm_runtime_put_sync_suspend(tfmctx->ce->dev);
@@ -73,126 +85,112 @@ void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
 
 int sun8i_ce_hash_init(struct ahash_request *areq)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
 
 	memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));
 
 	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
-	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ahash_request_set_callback(&rctx->fallback_req,
+				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+				   areq->base.complete, areq->base.data);
 
 	return crypto_ahash_init(&rctx->fallback_req);
 }
 
 int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
 
 	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
-	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ahash_request_set_callback(&rctx->fallback_req,
+				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+				   areq->base.complete, areq->base.data);
 
 	return crypto_ahash_export(&rctx->fallback_req, out);
 }
 
 int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
 
 	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
-	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ahash_request_set_callback(&rctx->fallback_req,
+				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+				   areq->base.complete, areq->base.data);
 
 	return crypto_ahash_import(&rctx->fallback_req, in);
 }
 
 int sun8i_ce_hash_final(struct ahash_request *areq)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-	struct sun8i_ce_alg_template *algt;
-#endif
 
-	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
-	rctx->fallback_req.base.flags = areq->base.flags &
-					CRYPTO_TFM_REQ_MAY_SLEEP;
-	rctx->fallback_req.result = areq->result;
+	sun8i_ce_hash_stat_fb_inc(tfm);
 
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
-	algt->stat_fb++;
-#endif
+	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+	ahash_request_set_callback(&rctx->fallback_req,
+				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+				   areq->base.complete, areq->base.data);
+	ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);
 
 	return crypto_ahash_final(&rctx->fallback_req);
 }
 
 int sun8i_ce_hash_update(struct ahash_request *areq)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
 
 	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
-	rctx->fallback_req.base.flags = areq->base.flags &
-					CRYPTO_TFM_REQ_MAY_SLEEP;
-	rctx->fallback_req.nbytes = areq->nbytes;
-	rctx->fallback_req.src = areq->src;
+	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+				   areq->base.complete, areq->base.data);
+	ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);
 
 	return crypto_ahash_update(&rctx->fallback_req);
 }
 
 int sun8i_ce_hash_finup(struct ahash_request *areq)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-	struct sun8i_ce_alg_template *algt;
-#endif
 
-	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
-	rctx->fallback_req.base.flags = areq->base.flags &
-					CRYPTO_TFM_REQ_MAY_SLEEP;
+	sun8i_ce_hash_stat_fb_inc(tfm);
 
-	rctx->fallback_req.nbytes = areq->nbytes;
-	rctx->fallback_req.src = areq->src;
-	rctx->fallback_req.result = areq->result;
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
-	algt->stat_fb++;
-#endif
+	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+	ahash_request_set_callback(&rctx->fallback_req,
+				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+				   areq->base.complete, areq->base.data);
+	ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+				areq->nbytes);
 
 	return crypto_ahash_finup(&rctx->fallback_req);
 }
 
 static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-	struct sun8i_ce_alg_template *algt;
-#endif
 
-	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
-	rctx->fallback_req.base.flags = areq->base.flags &
-					CRYPTO_TFM_REQ_MAY_SLEEP;
+	sun8i_ce_hash_stat_fb_inc(tfm);
 
-	rctx->fallback_req.nbytes = areq->nbytes;
-	rctx->fallback_req.src = areq->src;
-	rctx->fallback_req.result = areq->result;
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
-	algt->stat_fb++;
-#endif
+	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+	ahash_request_set_callback(&rctx->fallback_req,
+				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+				   areq->base.complete, areq->base.data);
+	ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+				areq->nbytes);
 
 	return crypto_ahash_digest(&rctx->fallback_req);
 }
@@ -204,25 +202,33 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
 	struct sun8i_ce_alg_template *algt;
 	struct scatterlist *sg;
 
-	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
 	if (areq->nbytes == 0) {
-		algt->stat_fb_len0++;
+		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+			algt->stat_fb_len0++;
+
 		return true;
 	}
 	/* we need to reserve one SG for padding one */
 	if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
-		algt->stat_fb_maxsg++;
+		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+			algt->stat_fb_maxsg++;
+
 		return true;
 	}
 	sg = areq->src;
 	while (sg) {
 		if (sg->length % 4) {
-			algt->stat_fb_srclen++;
+			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+				algt->stat_fb_srclen++;
+
 			return true;
 		}
 		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
-			algt->stat_fb_srcali++;
+			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+				algt->stat_fb_srcali++;
+
 			return true;
 		}
 		sg = sg_next(sg);
@@ -233,29 +239,15 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
 int sun8i_ce_hash_digest(struct ahash_request *areq)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
-	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
-	struct sun8i_ce_alg_template *algt;
-	struct sun8i_ce_dev *ce;
+	struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+	struct sun8i_ce_dev *ce = ctx->ce;
 	struct crypto_engine *engine;
-	struct scatterlist *sg;
-	int nr_sgs, e, i;
+	int e;
 
 	if (sun8i_ce_hash_need_fallback(areq))
 		return sun8i_ce_hash_digest_fb(areq);
 
-	nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
-	if (nr_sgs > MAX_SG - 1)
-		return sun8i_ce_hash_digest_fb(areq);
-
-	for_each_sg(areq->src, sg, nr_sgs, i) {
-		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
-			return sun8i_ce_hash_digest_fb(areq);
-	}
-
-	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
-	ce = algt->ce;
-
 	e = sun8i_ce_get_engine_number(ce);
 	rctx->flow = e;
 	engine = ce->chanlist[e].engine;
@@ -321,66 +313,43 @@ static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count,
 	return j;
 }
 
-int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+static int sun8i_ce_hash_prepare(struct ahash_request *areq, struct ce_task *cet)
 {
-	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct sun8i_ce_alg_template *algt;
 	struct sun8i_ce_dev *ce;
-	struct sun8i_ce_flow *chan;
-	struct ce_task *cet;
 	struct scatterlist *sg;
-	int nr_sgs, flow, err;
+	int nr_sgs, err;
 	unsigned int len;
 	u32 common;
 	u64 byte_count;
 	__le32 *bf;
-	void *buf = NULL;
 	int j, i, todo;
-	void *result = NULL;
 	u64 bs;
 	int digestsize;
-	dma_addr_t addr_res, addr_pad;
-	int ns = sg_nents_for_len(areq->src, areq->nbytes);
 
-	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
 	ce = algt->ce;
 
-	bs = algt->alg.hash.halg.base.cra_blocksize;
-	digestsize = algt->alg.hash.halg.digestsize;
+	bs = crypto_ahash_blocksize(tfm);
+	digestsize = crypto_ahash_digestsize(tfm);
 	if (digestsize == SHA224_DIGEST_SIZE)
 		digestsize = SHA256_DIGEST_SIZE;
 	if (digestsize == SHA384_DIGEST_SIZE)
 		digestsize = SHA512_DIGEST_SIZE;
 
-	/* the padding could be up to two block. */
-	buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA);
-	if (!buf) {
-		err = -ENOMEM;
-		goto theend;
-	}
-	bf = (__le32 *)buf;
-
-	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
-	if (!result) {
-		err = -ENOMEM;
-		goto theend;
-	}
+	bf = (__le32 *)rctx->pad;
 
-	flow = rctx->flow;
-	chan = &ce->chanlist[flow];
+	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+		algt->stat_req++;
 
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-	algt->stat_req++;
-#endif
 	dev_dbg(ce->dev, "%s %s len=%d\n", __func__,
 		crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
 
-	cet = chan->tl;
 	memset(cet, 0, sizeof(struct ce_task));
 
-	cet->t_id = cpu_to_le32(flow);
+	cet->t_id = cpu_to_le32(rctx->flow);
 	common = ce->variant->alg_hash[algt->ce_algo_id];
 	common |= CE_COMM_INT;
 	cet->t_common_ctl = cpu_to_le32(common);
@@ -388,16 +357,17 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
 	cet->t_sym_ctl = 0;
 	cet->t_asym_ctl = 0;
 
-	nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+	rctx->nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
+	nr_sgs = dma_map_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
 	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
 		dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
 		err = -EINVAL;
-		goto theend;
+		goto err_out;
 	}
 
 	len = areq->nbytes;
 	for_each_sg(areq->src, sg, nr_sgs, i) {
-		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
+		cet->t_src[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
 		todo = min(len, sg_dma_len(sg));
 		cet->t_src[i].len = cpu_to_le32(todo / 4);
 		len -= todo;
@@ -405,15 +375,18 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
 	if (len > 0) {
 		dev_err(ce->dev, "remaining len %d\n", len);
 		err = -EINVAL;
-		goto theend;
+		goto err_unmap_src;
 	}
-	addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
-	cet->t_dst[0].addr = cpu_to_le32(addr_res);
-	cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
-	if (dma_mapping_error(ce->dev, addr_res)) {
+
+	rctx->result_len = digestsize;
+	rctx->addr_res = dma_map_single(ce->dev, rctx->result, rctx->result_len,
+					DMA_FROM_DEVICE);
+	cet->t_dst[0].addr = desc_addr_val_le32(ce, rctx->addr_res);
+	cet->t_dst[0].len = cpu_to_le32(rctx->result_len / 4);
+	if (dma_mapping_error(ce->dev, rctx->addr_res)) {
 		dev_err(ce->dev, "DMA map dest\n");
 		err = -EINVAL;
-		goto theend;
+		goto err_unmap_src;
 	}
 
 	byte_count = areq->nbytes;
@@ -435,16 +408,18 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
 	}
 	if (!j) {
 		err = -EINVAL;
-		goto theend;
+		goto err_unmap_result;
 	}
 
-	addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
-	cet->t_src[i].addr = cpu_to_le32(addr_pad);
+	rctx->pad_len = j * 4;
+	rctx->addr_pad = dma_map_single(ce->dev, rctx->pad, rctx->pad_len,
+					DMA_TO_DEVICE);
+	cet->t_src[i].addr = desc_addr_val_le32(ce, rctx->addr_pad);
 	cet->t_src[i].len = cpu_to_le32(j);
-	if (dma_mapping_error(ce->dev, addr_pad)) {
+	if (dma_mapping_error(ce->dev, rctx->addr_pad)) {
 		dev_err(ce->dev, "DMA error on padding SG\n");
 		err = -EINVAL;
-		goto theend;
+		goto err_unmap_result;
 	}
 
 	if (ce->variant->hash_t_dlen_in_bits)
@@ -452,21 +427,60 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
 	else
 		cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);
 
-	chan->timeout = areq->nbytes;
+	return 0;
+
+err_unmap_result:
+	dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+			 DMA_FROM_DEVICE);
 
-	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+err_unmap_src:
+	dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
 
-	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
-	dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
-	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+err_out:
+	return err;
+}
 
+static void sun8i_ce_hash_unprepare(struct ahash_request *areq,
+				    struct ce_task *cet)
+{
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct sun8i_ce_dev *ce = ctx->ce;
+
+	dma_unmap_single(ce->dev, rctx->addr_pad, rctx->pad_len, DMA_TO_DEVICE);
+	dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+			 DMA_FROM_DEVICE);
+	dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
+}
+
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *async_req)
+{
+	struct ahash_request *areq = ahash_request_cast(async_req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+	struct sun8i_ce_dev *ce = ctx->ce;
+	struct sun8i_ce_flow *chan;
+	int err;
+
+	chan = &ce->chanlist[rctx->flow];
+
+	err = sun8i_ce_hash_prepare(areq, chan->tl);
+	if (err)
+		return err;
+
+	err = sun8i_ce_run_task(ce, rctx->flow, crypto_ahash_alg_name(tfm));
+
+	sun8i_ce_hash_unprepare(areq, chan->tl);
+
+	if (!err)
+		memcpy(areq->result, rctx->result,
+		       crypto_ahash_digestsize(tfm));
 
-	memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
-theend:
-	kfree(buf);
-	kfree(result);
 	local_bh_disable();
-	crypto_finalize_hash_request(engine, breq, err);
+	crypto_finalize_hash_request(engine, async_req, err);
 	local_bh_enable();
+
 	return 0;
 }
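Two idioms recur throughout this patch: the single `theend:` cleanup label in sun8i_ce_hash_run() is split into a reverse-order unwind ladder (err_unmap_result -> err_unmap_src -> err_out), and the `#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG` blocks become `IS_ENABLED()` conditionals that the compiler can discard while still type-checking the guarded statistics code. The following is a minimal userspace sketch of both idioms, not kernel code; the names (demo_prepare(), fake_map(), DEBUG_STATS_ENABLED) are illustrative stand-ins for the driver's real helpers.

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for IS_ENABLED(CONFIG_..._DEBUG): a compile-time
	 * constant the optimizer can eliminate, unlike an #ifdef, while
	 * the guarded code is still seen by the compiler. */
	#define DEBUG_STATS_ENABLED 1

	static unsigned long stat_req;

	static bool fake_map(const char *what, bool ok)
	{
		printf("map   %s -> %s\n", what, ok ? "ok" : "fail");
		return ok;
	}

	static void fake_unmap(const char *what)
	{
		printf("unmap %s\n", what);
	}

	/*
	 * Mirrors the shape of sun8i_ce_hash_prepare(): each resource
	 * acquired gets a matching unwind label, and a failure jumps to
	 * the label that releases only what was already acquired, in
	 * reverse order of acquisition.
	 */
	static int demo_prepare(bool src_ok, bool res_ok, bool pad_ok)
	{
		int err;

		if (DEBUG_STATS_ENABLED)
			stat_req++;

		if (!fake_map("src", src_ok)) {
			err = -1;
			goto err_out;
		}
		if (!fake_map("result", res_ok)) {
			err = -1;
			goto err_unmap_src;
		}
		if (!fake_map("pad", pad_ok)) {
			err = -1;
			goto err_unmap_result;
		}
		return 0;

	err_unmap_result:
		fake_unmap("result");
	err_unmap_src:
		fake_unmap("src");
	err_out:
		return err;
	}

	int main(void)
	{
		/* pad mapping fails: result and src are unwound, in
		 * that order, and src alone is never unmapped twice. */
		return demo_prepare(true, true, false) ? 1 : 0;
	}

The design point is that every exit path releases exactly the resources acquired before the failure, which is why the patch can also factor the full-success teardown into a separate sun8i_ce_hash_unprepare() called after the hardware task completes.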
