Diffstat (limited to 'drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c')
-rw-r--r--	drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c	220
1 file changed, 139 insertions, 81 deletions
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 9ef1c85c4aaa..8831bcb230c2 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -8,9 +8,10 @@
  * This file add support for AES cipher with 128,192,256 bits keysize in
  * CBC and ECB mode.
  *
- * You could find a link for the datasheet in Documentation/arm/sunxi.rst
+ * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
  */
 
+#include <linux/bottom_half.h>
 #include <linux/crypto.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
@@ -21,34 +22,53 @@ static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
 {
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
 	struct scatterlist *in_sg = areq->src;
 	struct scatterlist *out_sg = areq->dst;
 	struct scatterlist *sg;
+	unsigned int todo, len;
 
-	if (areq->cryptlen == 0 || areq->cryptlen % 16)
+	if (areq->cryptlen == 0 || areq->cryptlen % 16) {
+		algt->stat_fb_len++;
 		return true;
+	}
 
-	if (sg_nents(areq->src) > 8 || sg_nents(areq->dst) > 8)
+	if (sg_nents_for_len(areq->src, areq->cryptlen) > 8 ||
+	    sg_nents_for_len(areq->dst, areq->cryptlen) > 8) {
+		algt->stat_fb_sgnum++;
 		return true;
+	}
 
+	len = areq->cryptlen;
 	sg = areq->src;
 	while (sg) {
-		if ((sg->length % 16) != 0)
-			return true;
-		if ((sg_dma_len(sg) % 16) != 0)
+		todo = min(len, sg->length);
+		if ((todo % 16) != 0) {
+			algt->stat_fb_sglen++;
 			return true;
-		if (!IS_ALIGNED(sg->offset, 16))
+		}
+		if (!IS_ALIGNED(sg->offset, 16)) {
+			algt->stat_fb_align++;
 			return true;
+		}
+		len -= todo;
 		sg = sg_next(sg);
 	}
+	len = areq->cryptlen;
 	sg = areq->dst;
 	while (sg) {
-		if ((sg->length % 16) != 0)
+		todo = min(len, sg->length);
+		if ((todo % 16) != 0) {
+			algt->stat_fb_sglen++;
 			return true;
-		if ((sg_dma_len(sg) % 16) != 0)
-			return true;
-		if (!IS_ALIGNED(sg->offset, 16))
+		}
+		if (!IS_ALIGNED(sg->offset, 16)) {
+			algt->stat_fb_align++;
 			return true;
+		}
+		len -= todo;
 		sg = sg_next(sg);
 	}
 
 	/* SS need same numbers of SG (with same length) for source and destination */
 	in_sg = areq->src;
@@ -73,13 +93,18 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
 	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
 	int err;
 
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
-	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-	struct sun8i_ss_alg_template *algt;
+	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+		struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+		struct sun8i_ss_alg_template *algt __maybe_unused;
 
-	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
-	algt->stat_fb++;
+		algt = container_of(alg, struct sun8i_ss_alg_template,
+				    alg.skcipher.base);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+		algt->stat_fb++;
 #endif
+	}
+
 	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
 	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
 				      areq->base.complete, areq->base.data);
@@ -92,6 +117,69 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
 	return err;
 }
 
+static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun8i_ss_dev *ss = op->ss;
+	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+	struct scatterlist *sg = areq->src;
+	unsigned int todo, offset;
+	unsigned int len = areq->cryptlen;
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+	struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
+	int i = 0;
+	dma_addr_t a;
+	int err;
+
+	rctx->ivlen = ivsize;
+	if (rctx->op_dir & SS_DECRYPTION) {
+		offset = areq->cryptlen - ivsize;
+		scatterwalk_map_and_copy(sf->biv, areq->src, offset,
+					 ivsize, 0);
+	}
+
+	/* we need to copy all IVs from source in case DMA is bi-directionnal */
+	while (sg && len) {
+		if (sg->length == 0) {
+			sg = sg_next(sg);
+			continue;
+		}
+		if (i == 0)
+			memcpy(sf->iv[0], areq->iv, ivsize);
+		a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(ss->dev, a)) {
+			memzero_explicit(sf->iv[i], ivsize);
+			dev_err(ss->dev, "Cannot DMA MAP IV\n");
+			err = -EFAULT;
+			goto dma_iv_error;
+		}
+		rctx->p_iv[i] = a;
+		/* we need to setup all others IVs only in the decrypt way */
+		if (rctx->op_dir == SS_ENCRYPTION)
+			return 0;
+		todo = min(len, sg_dma_len(sg));
+		len -= todo;
+		i++;
+		if (i < MAX_SG) {
+			offset = sg->length - ivsize;
+			scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0);
+		}
+		rctx->niv = i;
+		sg = sg_next(sg);
+	}
+
+	return 0;
+dma_iv_error:
+	i--;
+	while (i >= 0) {
+		dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+		memzero_explicit(sf->iv[i], ivsize);
+		i--;
+	}
+	return err;
+}
+
 static int sun8i_ss_cipher(struct skcipher_request *areq)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
@@ -100,15 +188,17 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
 	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct sun8i_ss_alg_template *algt;
+	struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
 	struct scatterlist *sg;
 	unsigned int todo, len, offset, ivsize;
-	void *backup_iv = NULL;
 	int nr_sgs = 0;
 	int nr_sgd = 0;
 	int err = 0;
+	int nsgs = sg_nents_for_len(areq->src, areq->cryptlen);
+	int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen);
 	int i;
 
-	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
 
 	dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
 		crypto_tfm_alg_name(areq->base.tfm),
@@ -133,34 +223,12 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
 
 	ivsize = crypto_skcipher_ivsize(tfm);
 	if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
-		rctx->ivlen = ivsize;
-		rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
-		if (!rctx->biv) {
-			err = -ENOMEM;
+		err = sun8i_ss_setup_ivs(areq);
+		if (err)
 			goto theend_key;
-		}
-		if (rctx->op_dir & SS_DECRYPTION) {
-			backup_iv = kzalloc(ivsize, GFP_KERNEL);
-			if (!backup_iv) {
-				err = -ENOMEM;
-				goto theend_key;
-			}
-			offset = areq->cryptlen - ivsize;
-			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
-						 ivsize, 0);
-		}
-		memcpy(rctx->biv, areq->iv, ivsize);
-		rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen,
-					    DMA_TO_DEVICE);
-		if (dma_mapping_error(ss->dev, rctx->p_iv)) {
-			dev_err(ss->dev, "Cannot DMA MAP IV\n");
-			err = -ENOMEM;
-			goto theend_iv;
-		}
 	}
 	if (areq->src == areq->dst) {
-		nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
-				    DMA_BIDIRECTIONAL);
+		nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL);
 		if (nr_sgs <= 0 || nr_sgs > 8) {
 			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
 			err = -EINVAL;
@@ -168,15 +236,13 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
 		}
 		nr_sgd = nr_sgs;
 	} else {
-		nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
-				    DMA_TO_DEVICE);
+		nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE);
 		if (nr_sgs <= 0 || nr_sgs > 8) {
 			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
 			err = -EINVAL;
 			goto theend_iv;
 		}
-		nr_sgd = dma_map_sg(ss->dev, areq->dst, sg_nents(areq->dst),
-				    DMA_FROM_DEVICE);
+		nr_sgd = dma_map_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE);
 		if (nr_sgd <= 0 || nr_sgd > 8) {
 			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd);
 			err = -EINVAL;
@@ -232,31 +298,26 @@ sgd_next:
 
 theend_sgs:
 	if (areq->src == areq->dst) {
-		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
-			     DMA_BIDIRECTIONAL);
+		dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
-			     DMA_TO_DEVICE);
-		dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst),
-			     DMA_FROM_DEVICE);
+		dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE);
+		dma_unmap_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE);
 	}
 
 theend_iv:
-	if (rctx->p_iv)
-		dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen,
-				 DMA_TO_DEVICE);
-
 	if (areq->iv && ivsize > 0) {
-		if (rctx->biv) {
-			offset = areq->cryptlen - ivsize;
-			if (rctx->op_dir & SS_DECRYPTION) {
-				memcpy(areq->iv, backup_iv, ivsize);
-				kfree_sensitive(backup_iv);
-			} else {
-				scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
-							 ivsize, 0);
-			}
-			kfree(rctx->biv);
+		for (i = 0; i < rctx->niv; i++) {
+			dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+			memzero_explicit(sf->iv[i], ivsize);
+		}
+
+		offset = areq->cryptlen - ivsize;
+		if (rctx->op_dir & SS_DECRYPTION) {
+			memcpy(areq->iv, sf->biv, ivsize);
+			memzero_explicit(sf->biv, ivsize);
+		} else {
+			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+						 ivsize, 0);
 		}
 	}
 
@@ -268,13 +329,15 @@ theend:
 	return err;
 }
 
-static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
+int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
 {
 	int err;
 	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
 
 	err = sun8i_ss_cipher(breq);
+	local_bh_disable();
 	crypto_finalize_skcipher_request(engine, breq, err);
+	local_bh_enable();
 
 	return 0;
 }
@@ -332,7 +395,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
 
 	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
 
-	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
 	op->ss = algt->ss;
 
 	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -342,17 +405,12 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
 		return PTR_ERR(op->fallback_tfm);
 	}
 
-	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
-			 crypto_skcipher_reqsize(op->fallback_tfm);
-
-
-	dev_info(op->ss->dev, "Fallback for %s is %s\n",
-		 crypto_tfm_alg_driver_name(&sktfm->base),
-		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
+	crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
+				    crypto_skcipher_reqsize(op->fallback_tfm));
 
-	op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
-	op->enginectx.op.prepare_request = NULL;
-	op->enginectx.op.unprepare_request = NULL;
+	memcpy(algt->fbname,
+	       crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
+	       CRYPTO_MAX_ALG_NAME);
 
 	err = pm_runtime_resume_and_get(op->ss->dev);
 	if (err < 0) {
@@ -394,7 +452,7 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	}
 	kfree_sensitive(op->key);
 	op->keylen = keylen;
-	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+	op->key = kmemdup(key, keylen, GFP_KERNEL);
 	if (!op->key)
 		return -ENOMEM;
 
@@ -417,7 +475,7 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
 
 	kfree_sensitive(op->key);
 	op->keylen = keylen;
-	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+	op->key = kmemdup(key, keylen, GFP_KERNEL);
 	if (!op->key)
 		return -ENOMEM;
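
Notes on the changes above. In sun8i_ss_cipher_fallback(), the debug bookkeeping moves from a bare #ifdef block to if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)): the branch is then parsed and type-checked even when the option is off, and discarded as dead code. The inner #ifdef stays because the stat_fb counter itself appears to exist only in debug builds. A generic sketch of the pattern, with CONFIG_MY_DRIVER_DEBUG and all other names hypothetical:

#include <linux/kconfig.h>

struct my_stats {
	unsigned long fallbacks;
};

static void note_fallback(struct my_stats *st)
{
	/* compiled and type-checked even with the option off, then
	 * discarded as dead code by the optimizer */
	if (IS_ENABLED(CONFIG_MY_DRIVER_DEBUG)) {
#ifdef CONFIG_MY_DRIVER_DEBUG
		/* kept under #ifdef when, as in this driver, the member
		 * is only declared in debug builds */
		st->fallbacks++;
#endif
	}
}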
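
The largest change is the IV rework: the single kzalloc'd biv buffer is replaced by sun8i_ss_setup_ivs(), which pre-copies and DMA-maps one IV per scatterlist entry into the per-flow sun8i_ss_flow buffers. The motivation, as far as the code shows, is CBC across a multi-entry scatterlist: each hardware job covers one entry, and for CBC decryption the IV of chunk i is the last ciphertext block of chunk i - 1, which must be saved before DMA starts because an in-place (src == dst) decryption would overwrite it; sf->biv likewise preserves the final ciphertext block so areq->iv can be updated afterwards. A minimal standalone sketch of that per-chunk IV selection for the decrypt case, all names hypothetical:

#include <string.h>
#include <stddef.h>

#define IVSIZE 16
#define MAX_CHUNKS 8

/* one contiguous piece of ciphertext, as a scatterlist entry would
 * present it; len is assumed to be a multiple of IVSIZE */
struct chunk {
	const unsigned char *data;
	size_t len;
};

/* fill ivs[i] with the IV that a per-chunk CBC decrypt job must use */
static int pick_ivs(const struct chunk *c, int n,
		    const unsigned char *req_iv,
		    unsigned char ivs[][IVSIZE])
{
	if (n < 1 || n > MAX_CHUNKS)
		return -1;
	/* chunk 0 decrypts with the caller-supplied IV */
	memcpy(ivs[0], req_iv, IVSIZE);
	/* chunk i decrypts with the last ciphertext block of chunk i - 1,
	 * so these blocks must be saved before any in-place DMA runs */
	for (int i = 1; i < n; i++)
		memcpy(ivs[i], c[i - 1].data + c[i - 1].len - IVSIZE, IVSIZE);
	return 0;
}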
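
The dma_map_sg()/dma_unmap_sg() pairs switch from sg_nents() to counts taken once via sg_nents_for_len(). sg_nents() counts every entry in the list, while sg_nents_for_len() counts only the entries needed to cover the given length and returns -EINVAL when the list is too short, so entries beyond cryptlen are no longer mapped and the map and unmap calls are guaranteed to use the same count. A sketch of the mapping side, kernel context assumed and the function name hypothetical:

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static int map_src_for_cipher(struct device *dev, struct scatterlist *src,
			      unsigned int cryptlen)
{
	/* only the entries actually covering cryptlen */
	int nsgs = sg_nents_for_len(src, cryptlen);

	if (nsgs < 0)
		return nsgs;	/* scatterlist shorter than cryptlen */

	return dma_map_sg(dev, src, nsgs, DMA_TO_DEVICE);
}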
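
Finally, sun8i_ss_handle_cipher_request() now brackets crypto_finalize_skcipher_request() with local_bh_disable()/local_bh_enable(), hence the new linux/bottom_half.h include. crypto_finalize_skcipher_request() invokes the request's completion callback, and such callbacks may take locks that other paths acquire from softirq context; running the callback from the crypto-engine kthread with bottom halves enabled therefore risks lock recursion, which is the usual upstream rationale for this pattern. A sketch, kernel context assumed and the wrapper name hypothetical:

#include <linux/bottom_half.h>
#include <crypto/engine.h>
#include <crypto/skcipher.h>

static void finalize_from_kthread(struct crypto_engine *engine,
				  struct skcipher_request *breq, int err)
{
	local_bh_disable();	/* give the callback the softirq-like context it expects */
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();
}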
