Diffstat (limited to 'drivers/crypto/allwinner/sun8i-ce')
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/Makefile          |   3
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 131
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c   | 405
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c   | 413
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c   | 164
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c   | 127
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h        | 139
7 files changed, 1298 insertions(+), 84 deletions(-)
diff --git a/drivers/crypto/allwinner/sun8i-ce/Makefile b/drivers/crypto/allwinner/sun8i-ce/Makefile
index 08b68c3c1ca9..0842eb2d9408 100644
--- a/drivers/crypto/allwinner/sun8i-ce/Makefile
+++ b/drivers/crypto/allwinner/sun8i-ce/Makefile
@@ -1,2 +1,5 @@
 obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce.o
 sun8i-ce-y += sun8i-ce-core.o sun8i-ce-cipher.o
+sun8i-ce-$(CONFIG_CRYPTO_DEV_SUN8I_CE_HASH) += sun8i-ce-hash.o
+sun8i-ce-$(CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG) += sun8i-ce-prng.o
+sun8i-ce-$(CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG) += sun8i-ce-trng.o
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index b4d5fea27d20..33707a2e55ff 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -75,8 +75,9 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
 	return err;
 }
 
-static int sun8i_ce_cipher(struct skcipher_request *areq)
+static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
 {
+	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
 	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
 	struct sun8i_ce_dev *ce = op->ce;
@@ -87,8 +88,6 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 	struct ce_task *cet;
 	struct scatterlist *sg;
 	unsigned int todo, len, offset, ivsize;
-	dma_addr_t addr_iv = 0, addr_key = 0;
-	void *backup_iv = NULL;
 	u32 common, sym;
 	int flow, i;
 	int nr_sgs = 0;
@@ -119,7 +118,7 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 	common |= rctx->op_dir | CE_COMM_INT;
 	cet->t_common_ctl = cpu_to_le32(common);
 	/* CTS and recent CE (H6) need length in bytes, in word otherwise */
-	if (ce->variant->has_t_dlen_in_bytes)
+	if (ce->variant->cipher_t_dlen_in_bytes)
 		cet->t_dlen = cpu_to_le32(areq->cryptlen);
 	else
 		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);
@@ -141,41 +140,41 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 	cet->t_sym_ctl = cpu_to_le32(sym);
 	cet->t_asym_ctl = 0;
 
-	addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
-	cet->t_key = cpu_to_le32(addr_key);
-	if (dma_mapping_error(ce->dev, addr_key)) {
+	rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
+	if (dma_mapping_error(ce->dev, rctx->addr_key)) {
 		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
 		err = -EFAULT;
 		goto theend;
 	}
+	cet->t_key = cpu_to_le32(rctx->addr_key);
 
 	ivsize = crypto_skcipher_ivsize(tfm);
 	if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
-		chan->ivlen = ivsize;
-		chan->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
-		if (!chan->bounce_iv) {
+		rctx->ivlen = ivsize;
+		rctx->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
+		if (!rctx->bounce_iv) {
 			err = -ENOMEM;
 			goto theend_key;
 		}
 		if (rctx->op_dir & CE_DECRYPTION) {
-			backup_iv = kzalloc(ivsize, GFP_KERNEL);
-			if (!backup_iv) {
+			rctx->backup_iv = kzalloc(ivsize, GFP_KERNEL);
+			if (!rctx->backup_iv) {
 				err = -ENOMEM;
 				goto theend_key;
 			}
 			offset = areq->cryptlen - ivsize;
-			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
-						 ivsize, 0);
+			scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
+						 offset, ivsize, 0);
 		}
-		memcpy(chan->bounce_iv, areq->iv, ivsize);
-		addr_iv = dma_map_single(ce->dev, chan->bounce_iv, chan->ivlen,
-					 DMA_TO_DEVICE);
-		cet->t_iv = cpu_to_le32(addr_iv);
-		if (dma_mapping_error(ce->dev, addr_iv)) {
+		memcpy(rctx->bounce_iv, areq->iv, ivsize);
+		rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
+					       DMA_TO_DEVICE);
+		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
 			dev_err(ce->dev, "Cannot DMA MAP IV\n");
 			err = -ENOMEM;
 			goto theend_iv;
 		}
+		cet->t_iv = cpu_to_le32(rctx->addr_iv);
 	}
 
 	if (areq->src == areq->dst) {
@@ -235,7 +234,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 	}
 
 	chan->timeout = areq->cryptlen;
-	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+	rctx->nr_sgs = nr_sgs;
+	rctx->nr_sgd = nr_sgd;
+	return 0;
 
 theend_sgs:
 	if (areq->src == areq->dst) {
@@ -248,34 +249,83 @@ theend_sgs:
 
 theend_iv:
 	if (areq->iv && ivsize > 0) {
-		if (addr_iv)
-			dma_unmap_single(ce->dev, addr_iv, chan->ivlen,
-					 DMA_TO_DEVICE);
+		if (rctx->addr_iv)
+			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
 		offset = areq->cryptlen - ivsize;
 		if (rctx->op_dir & CE_DECRYPTION) {
-			memcpy(areq->iv, backup_iv, ivsize);
-			kfree_sensitive(backup_iv);
+			memcpy(areq->iv, rctx->backup_iv, ivsize);
+			kfree_sensitive(rctx->backup_iv);
 		} else {
 			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
 						 ivsize, 0);
 		}
-		kfree(chan->bounce_iv);
+		kfree(rctx->bounce_iv);
 	}
 
 theend_key:
-	dma_unmap_single(ce->dev, addr_key, op->keylen, DMA_TO_DEVICE);
+	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
 
 theend:
 	return err;
 }
 
-static int sun8i_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
 {
-	int err;
 	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun8i_ce_dev *ce = op->ce;
+	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+	int flow, err;
 
-	err = sun8i_ce_cipher(breq);
+	flow = rctx->flow;
+	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
 	crypto_finalize_skcipher_request(engine, breq, err);
+	return 0;
+}
+
+static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
+{
+	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun8i_ce_dev *ce = op->ce;
+	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+	struct sun8i_ce_flow *chan;
+	struct ce_task *cet;
+	unsigned int ivsize, offset;
+	int nr_sgs = rctx->nr_sgs;
+	int nr_sgd = rctx->nr_sgd;
+	int flow;
+
+	flow = rctx->flow;
+	chan = &ce->chanlist[flow];
+	cet = chan->tl;
+	ivsize = crypto_skcipher_ivsize(tfm);
+
+	if (areq->src == areq->dst) {
+		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+	} else {
+		if (nr_sgs > 0)
+			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+	}
+
+	if (areq->iv && ivsize > 0) {
+		if (cet->t_iv)
+			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+		offset = areq->cryptlen - ivsize;
+		if (rctx->op_dir & CE_DECRYPTION) {
+			memcpy(areq->iv, rctx->backup_iv, ivsize);
+			kfree_sensitive(rctx->backup_iv);
+		} else {
+			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+						 ivsize, 0);
+		}
+		kfree(rctx->bounce_iv);
+	}
+
+	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -347,9 +397,9 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
 		 crypto_tfm_alg_driver_name(&sktfm->base),
 		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
 
-	op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
-	op->enginectx.op.prepare_request = NULL;
-	op->enginectx.op.unprepare_request = NULL;
+	op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
+	op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
+	op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;
 
 	err = pm_runtime_get_sync(op->ce->dev);
 	if (err < 0)
@@ -366,10 +416,7 @@ void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
 {
 	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
 
-	if (op->key) {
-		memzero_explicit(op->key, op->keylen);
-		kfree(op->key);
-	}
+	kfree_sensitive(op->key);
 	crypto_free_skcipher(op->fallback_tfm);
 	pm_runtime_put_sync_suspend(op->ce->dev);
 }
@@ -391,10 +438,7 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
 		return -EINVAL;
 	}
-	if (op->key) {
-		memzero_explicit(op->key, op->keylen);
-		kfree(op->key);
-	}
+	kfree_sensitive(op->key);
 	op->keylen = keylen;
 	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
 	if (!op->key)
@@ -416,10 +460,7 @@ int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	if (err)
 		return err;
 
-	if (op->key) {
-		memzero_explicit(op->key, op->keylen);
-		kfree(op->key);
-	}
+	kfree_sensitive(op->key);
 	op->keylen = keylen;
 	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
 	if (!op->key)
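The old monolithic sun8i_ce_cipher() is split above along the crypto_engine callback boundaries: DMA mapping and task setup move into prepare_request, the hardware kick into do_one_request, and the unmapping into unprepare_request. A minimal sketch of that pattern, with hypothetical my_* names and bodies elided:

#include <crypto/engine.h>

struct my_tfm_ctx {
	struct crypto_engine_ctx enginectx;	/* must stay first in the ctx */
};

static int my_prepare(struct crypto_engine *engine, void *async_req)
{
	/* map DMA buffers and build the hardware task descriptor */
	return 0;
}

static int my_run(struct crypto_engine *engine, void *async_req)
{
	/* start the hardware and wait for the flow completion */
	return 0;
}

static int my_unprepare(struct crypto_engine *engine, void *async_req)
{
	/* undo exactly the DMA mappings made in my_prepare() */
	return 0;
}

static void my_ctx_init(struct my_tfm_ctx *ctx)
{
	ctx->enginectx.op.prepare_request = my_prepare;
	ctx->enginectx.op.do_one_request = my_run;
	ctx->enginectx.op.unprepare_request = my_unprepare;
}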
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 138759dc8190..158422ff5695 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -22,6 +22,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
+#include <crypto/internal/rng.h>
 #include <crypto/internal/skcipher.h>
 
 #include "sun8i-ce.h"
@@ -35,73 +36,108 @@
 static const struct ce_variant ce_h3_variant = {
 	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
 	},
+	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+		CE_ALG_SHA384, CE_ALG_SHA512
+	},
 	.op_mode = { CE_OP_ECB, CE_OP_CBC
 	},
 	.ce_clks = {
 		{ "bus", 0, 200000000 },
 		{ "mod", 50000000, 0 },
-	}
+		},
+	.esr = ESR_H3,
+	.prng = CE_ALG_PRNG,
+	.trng = CE_ID_NOTSUPP,
 };
 
 static const struct ce_variant ce_h5_variant = {
 	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
 	},
+	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+		CE_ID_NOTSUPP, CE_ID_NOTSUPP
+	},
 	.op_mode = { CE_OP_ECB, CE_OP_CBC
 	},
 	.ce_clks = {
 		{ "bus", 0, 200000000 },
 		{ "mod", 300000000, 0 },
-	}
+		},
+	.esr = ESR_H5,
+	.prng = CE_ALG_PRNG,
+	.trng = CE_ID_NOTSUPP,
 };
 
 static const struct ce_variant ce_h6_variant = {
 	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
 	},
+	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+		CE_ALG_SHA384, CE_ALG_SHA512
+	},
 	.op_mode = { CE_OP_ECB, CE_OP_CBC
 	},
-	.has_t_dlen_in_bytes = true,
+	.cipher_t_dlen_in_bytes = true,
+	.hash_t_dlen_in_bits = true,
+	.prng_t_dlen_in_bytes = true,
+	.trng_t_dlen_in_bytes = true,
 	.ce_clks = {
 		{ "bus", 0, 200000000 },
 		{ "mod", 300000000, 0 },
 		{ "ram", 0, 400000000 },
-	}
+		},
+	.esr = ESR_H6,
+	.prng = CE_ALG_PRNG_V2,
+	.trng = CE_ALG_TRNG_V2,
 };
 
 static const struct ce_variant ce_a64_variant = {
 	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
 	},
+	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+		CE_ID_NOTSUPP, CE_ID_NOTSUPP
+	},
 	.op_mode = { CE_OP_ECB, CE_OP_CBC
 	},
 	.ce_clks = {
 		{ "bus", 0, 200000000 },
 		{ "mod", 300000000, 0 },
-	}
+		},
+	.esr = ESR_A64,
+	.prng = CE_ALG_PRNG,
+	.trng = CE_ID_NOTSUPP,
 };
 
 static const struct ce_variant ce_r40_variant = {
 	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
 	},
+	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+		CE_ID_NOTSUPP, CE_ID_NOTSUPP
+	},
 	.op_mode = { CE_OP_ECB, CE_OP_CBC
 	},
 	.ce_clks = {
 		{ "bus", 0, 200000000 },
 		{ "mod", 300000000, 0 },
-	}
+		},
+	.esr = ESR_R40,
+	.prng = CE_ALG_PRNG,
+	.trng = CE_ID_NOTSUPP,
 };
 
 /*
  * sun8i_ce_get_engine_number() get the next channel slot
  * This is a simple round-robin way of getting the next channel
+ * The flow 3 is reserve for xRNG operations
  */
 int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce)
 {
-	return atomic_inc_return(&ce->flow) % MAXFLOW;
+	return atomic_inc_return(&ce->flow) % (MAXFLOW - 1);
 }
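With flow 3 now dedicated to the RNGs, the round-robin above only ever yields flows 0 to MAXFLOW - 2, while the PRNG/TRNG code below addresses flow 3 directly under ce->rnglock. A small illustration, assuming MAXFLOW == 4 as defined in sun8i-ce.h:

static int example_next_flow(atomic_t *flow_counter)
{
	/* rotates over the non-reserved flows: 0, 1 or 2 */
	return atomic_inc_return(flow_counter) % (4 - 1);
}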
 int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
 {
 	u32 v;
 	int err = 0;
+	struct ce_task *cet = ce->chanlist[flow].tl;
 
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
 	ce->chanlist[flow].stat_req++;
@@ -120,7 +156,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
 	/* Be sure all data is written before enabling the task */
 	wmb();
 
-	v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
+	/* Only H6 needs to write a part of t_common_ctl along with "1", but since it is ignored
+	 * on older SoCs, we have no reason to complicate things.
+	 */
+	v = 1 | ((le32_to_cpu(ce->chanlist[flow].tl->t_common_ctl) & 0x7F) << 8);
 	writel(v, ce->base + CE_TLR);
 	mutex_unlock(&ce->mlock);
 
@@ -128,19 +167,56 @@
 			msecs_to_jiffies(ce->chanlist[flow].timeout));
 
 	if (ce->chanlist[flow].status == 0) {
-		dev_err(ce->dev, "DMA timeout for %s\n", name);
+		dev_err(ce->dev, "DMA timeout for %s (tm=%d) on flow %d\n", name,
+			ce->chanlist[flow].timeout, flow);
 		err = -EFAULT;
 	}
 	/* No need to lock for this read, the channel is locked so
 	 * nothing could modify the error value for this channel
 	 */
 	v = readl(ce->base + CE_ESR);
-	if (v) {
+	switch (ce->variant->esr) {
+	case ESR_H3:
+		/* Sadly, the error bit is not per flow */
+		if (v) {
+			dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+			err = -EFAULT;
+			print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+				       cet, sizeof(struct ce_task), false);
+		}
+		if (v & CE_ERR_ALGO_NOTSUP)
+			dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
+		if (v & CE_ERR_DATALEN)
+			dev_err(ce->dev, "CE ERROR: data length error\n");
+		if (v & CE_ERR_KEYSRAM)
+			dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
+		break;
+	case ESR_A64:
+	case ESR_H5:
+	case ESR_R40:
 		v >>= (flow * 4);
+		v &= 0xF;
+		if (v) {
+			dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+			err = -EFAULT;
+			print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+				       cet, sizeof(struct ce_task), false);
+		}
+		if (v & CE_ERR_ALGO_NOTSUP)
+			dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
+		if (v & CE_ERR_DATALEN)
+			dev_err(ce->dev, "CE ERROR: data length error\n");
+		if (v & CE_ERR_KEYSRAM)
+			dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
+		break;
+	case ESR_H6:
+		v >>= (flow * 8);
 		v &= 0xFF;
 		if (v) {
 			dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
 			err = -EFAULT;
+			print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+				       cet, sizeof(struct ce_task), false);
 		}
 		if (v & CE_ERR_ALGO_NOTSUP)
 			dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -150,7 +226,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
 			dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
 		if (v & CE_ERR_ADDR_INVALID)
 			dev_err(ce->dev, "CE ERROR: address invalid\n");
-	}
+		if (v & CE_ERR_KEYLADDER)
+			dev_err(ce->dev, "CE ERROR: key ladder configuration error\n");
+		break;
+	}
 
 	return err;
 }
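The switch above reflects three CE_ESR layouts: H3 exposes only global error bits, A64/H5/R40 give each flow a 4-bit error field, and H6 widens that to 8 bits per flow. A hedged helper sketch of the field extraction (hypothetical ce_esr_flow_bits(), not driver code):

#include <linux/types.h>

static u32 ce_esr_flow_bits(u32 esr, int flow, bool h6_layout)
{
	int shift = flow * (h6_layout ? 8 : 4);	/* per-flow field position */
	u32 mask = h6_layout ? 0xFF : 0xF;	/* per-flow field width */

	return (esr >> shift) & mask;
}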
@@ -280,13 +359,214 @@ static struct sun8i_ce_alg_template ce_algs[] = {
 		.decrypt	= sun8i_ce_skdecrypt,
 	}
 },
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_HASH
+{	.type = CRYPTO_ALG_TYPE_AHASH,
+	.ce_algo_id = CE_ID_HASH_MD5,
+	.alg.hash = {
+		.init = sun8i_ce_hash_init,
+		.update = sun8i_ce_hash_update,
+		.final = sun8i_ce_hash_final,
+		.finup = sun8i_ce_hash_finup,
+		.digest = sun8i_ce_hash_digest,
+		.export = sun8i_ce_hash_export,
+		.import = sun8i_ce_hash_import,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct md5_state),
+			.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "md5-sun8i-ce",
+				.cra_priority = 300,
+				.cra_alignmask = 3,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+				.cra_module = THIS_MODULE,
+				.cra_init = sun8i_ce_hash_crainit,
+				.cra_exit = sun8i_ce_hash_craexit,
+			}
+		}
+	}
+},
+{	.type = CRYPTO_ALG_TYPE_AHASH,
+	.ce_algo_id = CE_ID_HASH_SHA1,
+	.alg.hash = {
+		.init = sun8i_ce_hash_init,
+		.update = sun8i_ce_hash_update,
+		.final = sun8i_ce_hash_final,
+		.finup = sun8i_ce_hash_finup,
+		.digest = sun8i_ce_hash_digest,
+		.export = sun8i_ce_hash_export,
+		.import = sun8i_ce_hash_import,
+		.halg = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct sha1_state),
+			.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "sha1-sun8i-ce",
+				.cra_priority = 300,
+				.cra_alignmask = 3,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+				.cra_module = THIS_MODULE,
+				.cra_init = sun8i_ce_hash_crainit,
+				.cra_exit = sun8i_ce_hash_craexit,
+			}
+		}
+	}
+},
+{	.type = CRYPTO_ALG_TYPE_AHASH,
+	.ce_algo_id = CE_ID_HASH_SHA224,
+	.alg.hash = {
+		.init = sun8i_ce_hash_init,
+		.update = sun8i_ce_hash_update,
+		.final = sun8i_ce_hash_final,
+		.finup = sun8i_ce_hash_finup,
+		.digest = sun8i_ce_hash_digest,
+		.export = sun8i_ce_hash_export,
+		.import = sun8i_ce_hash_import,
+		.halg = {
+			.digestsize = SHA224_DIGEST_SIZE,
+			.statesize = sizeof(struct sha256_state),
+			.base = {
+				.cra_name = "sha224",
+				.cra_driver_name = "sha224-sun8i-ce",
+				.cra_priority = 300,
+				.cra_alignmask = 3,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+				.cra_module = THIS_MODULE,
+				.cra_init = sun8i_ce_hash_crainit,
+				.cra_exit = sun8i_ce_hash_craexit,
+			}
+		}
+	}
+},
+{	.type = CRYPTO_ALG_TYPE_AHASH,
+	.ce_algo_id = CE_ID_HASH_SHA256,
+	.alg.hash = {
+		.init = sun8i_ce_hash_init,
+		.update = sun8i_ce_hash_update,
+		.final = sun8i_ce_hash_final,
+		.finup = sun8i_ce_hash_finup,
+		.digest = sun8i_ce_hash_digest,
+		.export = sun8i_ce_hash_export,
+		.import = sun8i_ce_hash_import,
+		.halg = {
+			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize = sizeof(struct sha256_state),
+			.base = {
+				.cra_name = "sha256",
+				.cra_driver_name = "sha256-sun8i-ce",
+				.cra_priority = 300,
+				.cra_alignmask = 3,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+				.cra_module = THIS_MODULE,
+				.cra_init = sun8i_ce_hash_crainit,
+				.cra_exit = sun8i_ce_hash_craexit,
+			}
+		}
+	}
+},
+{	.type = CRYPTO_ALG_TYPE_AHASH,
+	.ce_algo_id = CE_ID_HASH_SHA384,
+	.alg.hash = {
+		.init = sun8i_ce_hash_init,
+		.update = sun8i_ce_hash_update,
+		.final = sun8i_ce_hash_final,
+		.finup = sun8i_ce_hash_finup,
+		.digest = sun8i_ce_hash_digest,
+		.export = sun8i_ce_hash_export,
+		.import = sun8i_ce_hash_import,
+		.halg = {
+			.digestsize = SHA384_DIGEST_SIZE,
+			.statesize = sizeof(struct sha512_state),
+			.base = {
+				.cra_name = "sha384",
+				.cra_driver_name = "sha384-sun8i-ce",
+				.cra_priority = 300,
+				.cra_alignmask = 3,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+				.cra_module = THIS_MODULE,
+				.cra_init = sun8i_ce_hash_crainit,
+				.cra_exit = sun8i_ce_hash_craexit,
+			}
+		}
+	}
+},
+{	.type = CRYPTO_ALG_TYPE_AHASH,
+	.ce_algo_id = CE_ID_HASH_SHA512,
+	.alg.hash = {
+		.init = sun8i_ce_hash_init,
+		.update = sun8i_ce_hash_update,
+		.final = sun8i_ce_hash_final,
+		.finup = sun8i_ce_hash_finup,
+		.digest = sun8i_ce_hash_digest,
+		.export = sun8i_ce_hash_export,
+		.import = sun8i_ce_hash_import,
+		.halg = {
+			.digestsize = SHA512_DIGEST_SIZE,
+			.statesize = sizeof(struct sha512_state),
+			.base = {
+				.cra_name = "sha512",
+				.cra_driver_name = "sha512-sun8i-ce",
+				.cra_priority = 300,
+				.cra_alignmask = 3,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+				.cra_module = THIS_MODULE,
+				.cra_init = sun8i_ce_hash_crainit,
+				.cra_exit = sun8i_ce_hash_craexit,
+			}
+		}
+	}
+},
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG
+{
+	.type = CRYPTO_ALG_TYPE_RNG,
+	.alg.rng = {
+		.base = {
+			.cra_name		= "stdrng",
+			.cra_driver_name	= "sun8i-ce-prng",
+			.cra_priority		= 300,
+			.cra_ctxsize		= sizeof(struct sun8i_ce_rng_tfm_ctx),
+			.cra_module		= THIS_MODULE,
+			.cra_init		= sun8i_ce_prng_init,
+			.cra_exit		= sun8i_ce_prng_exit,
+		},
+		.generate               = sun8i_ce_prng_generate,
+		.seed                   = sun8i_ce_prng_seed,
+		.seedsize               = PRNG_SEED_SIZE,
+	}
+},
+#endif
 };
 
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-static int sun8i_ce_dbgfs_read(struct seq_file *seq, void *v)
+static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
 {
 	struct sun8i_ce_dev *ce = seq->private;
-	int i;
+	unsigned int i;
 
 	for (i = 0; i < MAXFLOW; i++)
 		seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req);
@@ -301,23 +581,28 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
 				   ce_algs[i].alg.skcipher.base.cra_name,
 				   ce_algs[i].stat_req, ce_algs[i].stat_fb);
 			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			seq_printf(seq, "%s %s %lu %lu\n",
+				   ce_algs[i].alg.hash.halg.base.cra_driver_name,
+				   ce_algs[i].alg.hash.halg.base.cra_name,
+				   ce_algs[i].stat_req, ce_algs[i].stat_fb);
+			break;
+		case CRYPTO_ALG_TYPE_RNG:
+			seq_printf(seq, "%s %s %lu %lu\n",
+				   ce_algs[i].alg.rng.base.cra_driver_name,
+				   ce_algs[i].alg.rng.base.cra_name,
+				   ce_algs[i].stat_req, ce_algs[i].stat_bytes);
+			break;
 		}
 	}
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+	seq_printf(seq, "HWRNG %lu %lu\n",
+		   ce->hwrng_stat_req, ce->hwrng_stat_bytes);
+#endif
 	return 0;
 }
 
-static int sun8i_ce_dbgfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, sun8i_ce_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations sun8i_ce_debugfs_fops = {
-	.owner = THIS_MODULE,
-	.open = sun8i_ce_dbgfs_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs);
 #endif
 
 static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i)
@@ -482,7 +767,8 @@ static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
 
 static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
 {
-	int ce_method, err, id, i;
+	int ce_method, err, id;
+	unsigned int i;
 
 	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
 		ce_algs[i].ce = ce;
@@ -515,6 +801,43 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
 				return err;
 			}
 			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			id = ce_algs[i].ce_algo_id;
+			ce_method = ce->variant->alg_hash[id];
+			if (ce_method == CE_ID_NOTSUPP) {
+				dev_info(ce->dev,
+					 "DEBUG: Algo of %s not supported\n",
+					 ce_algs[i].alg.hash.halg.base.cra_name);
+				ce_algs[i].ce = NULL;
+				break;
+			}
+			dev_info(ce->dev, "Register %s\n",
+				 ce_algs[i].alg.hash.halg.base.cra_name);
+			err = crypto_register_ahash(&ce_algs[i].alg.hash);
+			if (err) {
+				dev_err(ce->dev, "ERROR: Fail to register %s\n",
+					ce_algs[i].alg.hash.halg.base.cra_name);
+				ce_algs[i].ce = NULL;
+				return err;
+			}
+			break;
+		case CRYPTO_ALG_TYPE_RNG:
+			if (ce->variant->prng == CE_ID_NOTSUPP) {
+				dev_info(ce->dev,
+					 "DEBUG: Algo of %s not supported\n",
+					 ce_algs[i].alg.rng.base.cra_name);
+				ce_algs[i].ce = NULL;
+				break;
+			}
+			dev_info(ce->dev, "Register %s\n",
+				 ce_algs[i].alg.rng.base.cra_name);
+			err = crypto_register_rng(&ce_algs[i].alg.rng);
+			if (err) {
+				dev_err(ce->dev, "Fail to register %s\n",
+					ce_algs[i].alg.rng.base.cra_name);
+				ce_algs[i].ce = NULL;
+			}
+			break;
 		default:
 			ce_algs[i].ce = NULL;
 			dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
@@ -525,7 +848,7 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
 
 static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
 {
-	int i;
+	unsigned int i;
 
 	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
 		if (!ce_algs[i].ce)
@@ -536,6 +859,16 @@ static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
 				 ce_algs[i].alg.skcipher.base.cra_name);
 			crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
 			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			dev_info(ce->dev, "Unregister %d %s\n", i,
+				 ce_algs[i].alg.hash.halg.base.cra_name);
+			crypto_unregister_ahash(&ce_algs[i].alg.hash);
+			break;
+		case CRYPTO_ALG_TYPE_RNG:
+			dev_info(ce->dev, "Unregister %d %s\n", i,
+				 ce_algs[i].alg.rng.base.cra_name);
+			crypto_unregister_rng(&ce_algs[i].alg.rng);
+			break;
 		}
 	}
 }
@@ -573,14 +906,12 @@ static int sun8i_ce_probe(struct platform_device *pdev)
 		return irq;
 
 	ce->reset = devm_reset_control_get(&pdev->dev, NULL);
-	if (IS_ERR(ce->reset)) {
-		if (PTR_ERR(ce->reset) == -EPROBE_DEFER)
-			return PTR_ERR(ce->reset);
-		dev_err(&pdev->dev, "No reset control found\n");
-		return PTR_ERR(ce->reset);
-	}
+	if (IS_ERR(ce->reset))
+		return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset),
+				     "No reset control found\n");
 
 	mutex_init(&ce->mlock);
+	mutex_init(&ce->rnglock);
 
 	err = sun8i_ce_allocate_chanlist(ce);
 	if (err)
@@ -605,6 +936,10 @@ static int sun8i_ce_probe(struct platform_device *pdev)
 	if (err < 0)
 		goto error_alg;
 
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+	sun8i_ce_hwrng_register(ce);
+#endif
+
 	v = readl(ce->base + CE_CTR);
 	v >>= CE_DIE_ID_SHIFT;
 	v &= CE_DIE_ID_MASK;
@@ -634,6 +969,10 @@ static int sun8i_ce_remove(struct platform_device *pdev)
 {
 	struct sun8i_ce_dev *ce = platform_get_drvdata(pdev);
 
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+	sun8i_ce_hwrng_unregister(ce);
+#endif
+
 	sun8i_ce_unregister_algs(ce);
 
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
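sun8i_ce_register_algs() above only registers a template when the variant table maps its CE_ID_* index to a real CE_ALG_* value. The gating pattern, condensed into a hypothetical helper:

static bool ce_variant_supports_hash(const struct ce_variant *variant, int ce_id)
{
	/* alg_hash[] holds CE_ALG_* values, CE_ID_NOTSUPP (0xFF) marks a hole */
	return (variant->alg_hash[ce_id] & 0xFF) != CE_ID_NOTSUPP;
}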
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
new file mode 100644
index 000000000000..fa2f1b4fad7b
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-hash.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file add support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
+ *
+ * You could find the datasheet in Documentation/arm/sunxi/README
+ */
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include "sun8i-ce.h"
+
+int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
+{
+	struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+	struct sun8i_ce_alg_template *algt;
+	int err;
+
+	memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));
+
+	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+	op->ce = algt->ce;
+
+	op->enginectx.op.do_one_request = sun8i_ce_hash_run;
+	op->enginectx.op.prepare_request = NULL;
+	op->enginectx.op.unprepare_request = NULL;
+
+	/* FALLBACK */
+	op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+					      CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(op->fallback_tfm)) {
+		dev_err(algt->ce->dev, "Fallback driver could no be loaded\n");
+		return PTR_ERR(op->fallback_tfm);
+	}
+
+	if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
+		algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct sun8i_ce_hash_reqctx) +
+				 crypto_ahash_reqsize(op->fallback_tfm));
+
+	dev_info(op->ce->dev, "Fallback for %s is %s\n",
+		 crypto_tfm_alg_driver_name(tfm),
+		 crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
+	err = pm_runtime_get_sync(op->ce->dev);
+	if (err < 0)
+		goto error_pm;
+	return 0;
+error_pm:
+	pm_runtime_put_noidle(op->ce->dev);
+	crypto_free_ahash(op->fallback_tfm);
+	return err;
+}
+
+void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
+{
+	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_ahash(tfmctx->fallback_tfm);
+	pm_runtime_put_sync_suspend(tfmctx->ce->dev);
+}
+
+int sun8i_ce_hash_init(struct ahash_request *areq)
+{
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+	memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));
+
+	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_init(&rctx->fallback_req);
+}
+
+int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
+{
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
+{
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+int sun8i_ce_hash_final(struct ahash_request *areq)
+{
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+	struct sun8i_ce_alg_template *algt;
+#endif
+
+	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+	rctx->fallback_req.base.flags = areq->base.flags &
+					CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.result = areq->result;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+	algt->stat_fb++;
+#endif
+
+	return crypto_ahash_final(&rctx->fallback_req);
+}
+
+int sun8i_ce_hash_update(struct ahash_request *areq)
+{
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+	rctx->fallback_req.base.flags = areq->base.flags &
+					CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = areq->nbytes;
+	rctx->fallback_req.src = areq->src;
+
+	return crypto_ahash_update(&rctx->fallback_req);
+}
+
+int sun8i_ce_hash_finup(struct ahash_request *areq)
+{
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+	struct sun8i_ce_alg_template *algt;
+#endif
+
+	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+	rctx->fallback_req.base.flags = areq->base.flags &
+					CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	rctx->fallback_req.nbytes = areq->nbytes;
+	rctx->fallback_req.src = areq->src;
+	rctx->fallback_req.result = areq->result;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+	algt->stat_fb++;
+#endif
+
+	return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
+{
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+	struct sun8i_ce_alg_template *algt;
+#endif
+
+	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+	rctx->fallback_req.base.flags = areq->base.flags &
+					CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	rctx->fallback_req.nbytes = areq->nbytes;
+	rctx->fallback_req.src = areq->src;
+	rctx->fallback_req.result = areq->result;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+	algt->stat_fb++;
+#endif
+
+	return crypto_ahash_digest(&rctx->fallback_req);
+}
+
+static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
+{
+	struct scatterlist *sg;
+
+	if (areq->nbytes == 0)
+		return true;
+	/* we need to reserve one SG for padding one */
+	if (sg_nents(areq->src) > MAX_SG - 1)
+		return true;
+	sg = areq->src;
+	while (sg) {
+		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+			return true;
+		sg = sg_next(sg);
+	}
+	return false;
+}
+
+int sun8i_ce_hash_digest(struct ahash_request *areq)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_alg_template *algt;
+	struct sun8i_ce_dev *ce;
+	struct crypto_engine *engine;
+	struct scatterlist *sg;
+	int nr_sgs, e, i;
+
+	if (sun8i_ce_hash_need_fallback(areq))
+		return sun8i_ce_hash_digest_fb(areq);
+
+	nr_sgs = sg_nents(areq->src);
+	if (nr_sgs > MAX_SG - 1)
+		return sun8i_ce_hash_digest_fb(areq);
+
+	for_each_sg(areq->src, sg, nr_sgs, i) {
+		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+			return sun8i_ce_hash_digest_fb(areq);
+	}
+
+	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+	ce = algt->ce;
+
+	e = sun8i_ce_get_engine_number(ce);
+	rctx->flow = e;
+	engine = ce->chanlist[e].engine;
+
+	return crypto_transfer_hash_request_to_engine(engine, areq);
+}
+
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+{
+	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_alg_template *algt;
+	struct sun8i_ce_dev *ce;
+	struct sun8i_ce_flow *chan;
+	struct ce_task *cet;
+	struct scatterlist *sg;
+	int nr_sgs, flow, err;
+	unsigned int len;
+	u32 common;
+	u64 byte_count;
+	__le32 *bf;
+	void *buf;
+	int j, i, todo;
+	int nbw = 0;
+	u64 fill, min_fill;
+	__be64 *bebits;
+	__le64 *lebits;
+	void *result;
+	u64 bs;
+	int digestsize;
+	dma_addr_t addr_res, addr_pad;
+
+	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+	ce = algt->ce;
+
+	bs = algt->alg.hash.halg.base.cra_blocksize;
+	digestsize = algt->alg.hash.halg.digestsize;
+	if (digestsize == SHA224_DIGEST_SIZE)
+		digestsize = SHA256_DIGEST_SIZE;
+	if (digestsize == SHA384_DIGEST_SIZE)
+		digestsize = SHA512_DIGEST_SIZE;
+
+	/* the padding could be up to two block. */
+	buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA);
+	if (!buf)
+		return -ENOMEM;
+	bf = (__le32 *)buf;
+
+	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
+	if (!result)
+		return -ENOMEM;
+
+	flow = rctx->flow;
+	chan = &ce->chanlist[flow];
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+	algt->stat_req++;
+#endif
+	dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
+
+	cet = chan->tl;
+	memset(cet, 0, sizeof(struct ce_task));
+
+	cet->t_id = cpu_to_le32(flow);
+	common = ce->variant->alg_hash[algt->ce_algo_id];
+	common |= CE_COMM_INT;
+	cet->t_common_ctl = cpu_to_le32(common);
+
+	cet->t_sym_ctl = 0;
+	cet->t_asym_ctl = 0;
+
+	nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+		dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+		err = -EINVAL;
+		goto theend;
+	}
+
+	len = areq->nbytes;
+	for_each_sg(areq->src, sg, nr_sgs, i) {
+		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
+		todo = min(len, sg_dma_len(sg));
+		cet->t_src[i].len = cpu_to_le32(todo / 4);
+		len -= todo;
+	}
+	if (len > 0) {
+		dev_err(ce->dev, "remaining len %d\n", len);
+		err = -EINVAL;
+		goto theend;
+	}
+	addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
+	cet->t_dst[0].addr = cpu_to_le32(addr_res);
+	cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
+	if (dma_mapping_error(ce->dev, addr_res)) {
+		dev_err(ce->dev, "DMA map dest\n");
+		err = -EINVAL;
+		goto theend;
+	}
+
+	byte_count = areq->nbytes;
+	j = 0;
+	bf[j++] = cpu_to_le32(0x80);
+
+	if (bs == 64) {
+		fill = 64 - (byte_count % 64);
+		min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
+	} else {
+		fill = 128 - (byte_count % 128);
+		min_fill = 4 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
+	}
+
+	if (fill < min_fill)
+		fill += bs;
+
+	j += (fill - min_fill) / sizeof(u32);
+
+	switch (algt->ce_algo_id) {
+	case CE_ID_HASH_MD5:
+		lebits = (__le64 *)&bf[j];
+		*lebits = cpu_to_le64(byte_count << 3);
+		j += 2;
+		break;
+	case CE_ID_HASH_SHA1:
+	case CE_ID_HASH_SHA224:
+	case CE_ID_HASH_SHA256:
+		bebits = (__be64 *)&bf[j];
+		*bebits = cpu_to_be64(byte_count << 3);
+		j += 2;
+		break;
+	case CE_ID_HASH_SHA384:
+	case CE_ID_HASH_SHA512:
+		bebits = (__be64 *)&bf[j];
+		*bebits = cpu_to_be64(byte_count >> 61);
+		j += 2;
+		bebits = (__be64 *)&bf[j];
+		*bebits = cpu_to_be64(byte_count << 3);
+		j += 2;
+		break;
+	}
+
+	addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
+	cet->t_src[i].addr = cpu_to_le32(addr_pad);
+	cet->t_src[i].len = cpu_to_le32(j);
+	if (dma_mapping_error(ce->dev, addr_pad)) {
+		dev_err(ce->dev, "DMA error on padding SG\n");
+		err = -EINVAL;
+		goto theend;
+	}
+
+	if (ce->variant->hash_t_dlen_in_bits)
+		cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
+	else
+		cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);
+
+	chan->timeout = areq->nbytes;
+
+	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+
+	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
+	dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+
+	kfree(buf);
+
+	memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+	kfree(result);
+theend:
+	crypto_finalize_hash_request(engine, breq, err);
+	return 0;
+}
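The padding built by sun8i_ce_hash_run() above follows the usual MD5/SHA construction: one 0x80 byte, zero fill, then the message bit-length placed at the end of the final block (little-endian for MD5, big-endian for the SHA family, two 64-bit words for SHA-384/512). A self-contained worked example for a 64-byte-block hash, using a hypothetical sha256_pad() helper rather than the driver's word-based code:

#include <linux/string.h>
#include <linux/types.h>

static size_t sha256_pad(u64 byte_count, u8 pad[128])
{
	size_t fill = 64 - (byte_count % 64);
	int i;

	if (fill < 9)		/* need room for 0x80 plus the 8-byte length */
		fill += 64;

	memset(pad, 0, fill);
	pad[0] = 0x80;
	for (i = 0; i < 8; i++)	/* big-endian bit count in the last 8 bytes */
		pad[fill - 1 - i] = (u8)((byte_count << 3) >> (8 * i));
	return fill;		/* number of pad bytes to append */
}

For example, a 56-byte message leaves only 8 free bytes in its block, so the helper returns 72 and the padded message spans two blocks, which is why the driver allocates a two-block bounce buffer.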
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
new file mode 100644
index 000000000000..78503006949c
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-prng.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file handle the PRNG
+ *
+ * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ */
+#include "sun8i-ce.h"
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <crypto/internal/rng.h>
+
+int sun8i_ce_prng_init(struct crypto_tfm *tfm)
+{
+	struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	memset(ctx, 0, sizeof(struct sun8i_ce_rng_tfm_ctx));
+	return 0;
+}
+
+void sun8i_ce_prng_exit(struct crypto_tfm *tfm)
+{
+	struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	memzero_explicit(ctx->seed, ctx->slen);
+	kfree(ctx->seed);
+	ctx->seed = NULL;
+	ctx->slen = 0;
+}
+
+int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+		       unsigned int slen)
+{
+	struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
+
+	if (ctx->seed && ctx->slen != slen) {
+		memzero_explicit(ctx->seed, ctx->slen);
+		kfree(ctx->seed);
+		ctx->slen = 0;
+		ctx->seed = NULL;
+	}
+	if (!ctx->seed)
+		ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA);
+	if (!ctx->seed)
+		return -ENOMEM;
+
+	memcpy(ctx->seed, seed, slen);
+	ctx->slen = slen;
+
+	return 0;
+}
+
+int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
+			   unsigned int slen, u8 *dst, unsigned int dlen)
+{
+	struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
+	struct rng_alg *alg = crypto_rng_alg(tfm);
+	struct sun8i_ce_alg_template *algt;
+	struct sun8i_ce_dev *ce;
+	dma_addr_t dma_iv, dma_dst;
+	int err = 0;
+	int flow = 3;
+	unsigned int todo;
+	struct sun8i_ce_flow *chan;
+	struct ce_task *cet;
+	u32 common, sym;
+	void *d;
+
+	algt = container_of(alg, struct sun8i_ce_alg_template, alg.rng);
+	ce = algt->ce;
+
+	if (ctx->slen == 0) {
+		dev_err(ce->dev, "not seeded\n");
+		return -EINVAL;
+	}
+
+	/* we want dlen + seedsize rounded up to a multiple of PRNG_DATA_SIZE */
+	todo = dlen + ctx->slen + PRNG_DATA_SIZE * 2;
+	todo -= todo % PRNG_DATA_SIZE;
+
+	d = kzalloc(todo, GFP_KERNEL | GFP_DMA);
+	if (!d) {
+		err = -ENOMEM;
+		goto err_mem;
+	}
+
+	dev_dbg(ce->dev, "%s PRNG slen=%u dlen=%u todo=%u multi=%u\n", __func__,
+		slen, dlen, todo, todo / PRNG_DATA_SIZE);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+	algt->stat_req++;
+	algt->stat_bytes += todo;
+#endif
+
+	dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
+	if (dma_mapping_error(ce->dev, dma_iv)) {
+		dev_err(ce->dev, "Cannot DMA MAP IV\n");
+		goto err_iv;
+	}
+
+	dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE);
+	if (dma_mapping_error(ce->dev, dma_dst)) {
+		dev_err(ce->dev, "Cannot DMA MAP DST\n");
+		err = -EFAULT;
+		goto err_dst;
+	}
+
+	err = pm_runtime_get_sync(ce->dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(ce->dev);
+		goto err_pm;
+	}
+
+	mutex_lock(&ce->rnglock);
+	chan = &ce->chanlist[flow];
+
+	cet = &chan->tl[0];
+	memset(cet, 0, sizeof(struct ce_task));
+
+	cet->t_id = cpu_to_le32(flow);
+	common = ce->variant->prng | CE_COMM_INT;
+	cet->t_common_ctl = cpu_to_le32(common);
+
+	/* recent CE (H6) need length in bytes, in word otherwise */
+	if (ce->variant->prng_t_dlen_in_bytes)
+		cet->t_dlen = cpu_to_le32(todo);
+	else
+		cet->t_dlen = cpu_to_le32(todo / 4);
+
+	sym = PRNG_LD;
+	cet->t_sym_ctl = cpu_to_le32(sym);
+	cet->t_asym_ctl = 0;
+
+	cet->t_key = cpu_to_le32(dma_iv);
+	cet->t_iv = cpu_to_le32(dma_iv);
+
+	cet->t_dst[0].addr = cpu_to_le32(dma_dst);
+	cet->t_dst[0].len = cpu_to_le32(todo / 4);
+	ce->chanlist[flow].timeout = 2000;
+
+	err = sun8i_ce_run_task(ce, 3, "PRNG");
+	mutex_unlock(&ce->rnglock);
+
+	pm_runtime_put(ce->dev);
+
+err_pm:
+	dma_unmap_single(ce->dev, dma_dst, todo, DMA_FROM_DEVICE);
+err_dst:
+	dma_unmap_single(ce->dev, dma_iv, ctx->slen, DMA_TO_DEVICE);
+
+	if (!err) {
+		memcpy(dst, d, dlen);
+		memcpy(ctx->seed, d + dlen, ctx->slen);
+	}
+	memzero_explicit(d, todo);
+err_iv:
+	kfree(d);
+err_mem:
+	return err;
+}
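Since the PRNG registers as "stdrng", generic kernel code can drive it through the crypto_rng API. A hedged usage sketch (the allocator may select any stdrng provider, not necessarily this driver; the hypothetical example_stdrng_read() seeds with zeroes only for brevity, real users must supply entropy):

#include <crypto/rng.h>
#include <linux/slab.h>

static int example_stdrng_read(u8 *dst, unsigned int dlen)
{
	struct crypto_rng *rng = crypto_alloc_rng("stdrng", 0, 0);
	unsigned int slen;
	u8 *seed;
	int err;

	if (IS_ERR(rng))
		return PTR_ERR(rng);

	slen = crypto_rng_seedsize(rng);	/* PRNG_SEED_SIZE for this driver */
	seed = kzalloc(slen, GFP_KERNEL);
	if (!seed) {
		crypto_free_rng(rng);
		return -ENOMEM;
	}

	err = crypto_rng_reset(rng, seed, slen);	/* ends up in ->seed() */
	if (!err)
		err = crypto_rng_get_bytes(rng, dst, dlen); /* ends up in ->generate() */

	kfree(seed);
	crypto_free_rng(rng);
	return err;
}

Note how sun8i_ce_prng_generate() above asks the hardware for dlen plus one extra seed-sized chunk: the tail of the generated stream becomes the next seed, so the internal state advances on every call.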
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
new file mode 100644
index 000000000000..654328160d19
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-trng.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file handle the TRNG
+ *
+ * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ */
+#include "sun8i-ce.h"
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/hw_random.h>
+/*
+ * Note that according to the algorithm ID, 2 versions of the TRNG exists,
+ * The first present in H3/H5/R40/A64 and the second present in H6.
+ * This file adds support for both, but only the second is working
+ * reliabily according to rngtest.
+ **/
+
+static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct sun8i_ce_dev *ce;
+	dma_addr_t dma_dst;
+	int err = 0;
+	int flow = 3;
+	unsigned int todo;
+	struct sun8i_ce_flow *chan;
+	struct ce_task *cet;
+	u32 common;
+	void *d;
+
+	ce = container_of(rng, struct sun8i_ce_dev, trng);
+
+	/* round the data length to a multiple of 32*/
+	todo = max + 32;
+	todo -= todo % 32;
+
+	d = kzalloc(todo, GFP_KERNEL | GFP_DMA);
+	if (!d)
+		return -ENOMEM;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+	ce->hwrng_stat_req++;
+	ce->hwrng_stat_bytes += todo;
+#endif
+
+	dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE);
+	if (dma_mapping_error(ce->dev, dma_dst)) {
+		dev_err(ce->dev, "Cannot DMA MAP DST\n");
+		err = -EFAULT;
+		goto err_dst;
+	}
+
+	err = pm_runtime_get_sync(ce->dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(ce->dev);
+		goto err_pm;
+	}
+
+	mutex_lock(&ce->rnglock);
+	chan = &ce->chanlist[flow];
+
+	cet = &chan->tl[0];
+	memset(cet, 0, sizeof(struct ce_task));
+
+	cet->t_id = cpu_to_le32(flow);
+	common = ce->variant->trng | CE_COMM_INT;
+	cet->t_common_ctl = cpu_to_le32(common);
+
+	/* recent CE (H6) need length in bytes, in word otherwise */
+	if (ce->variant->trng_t_dlen_in_bytes)
+		cet->t_dlen = cpu_to_le32(todo);
+	else
+		cet->t_dlen = cpu_to_le32(todo / 4);
+
+	cet->t_sym_ctl = 0;
+	cet->t_asym_ctl = 0;
+
+	cet->t_dst[0].addr = cpu_to_le32(dma_dst);
+	cet->t_dst[0].len = cpu_to_le32(todo / 4);
+	ce->chanlist[flow].timeout = todo;
+
+	err = sun8i_ce_run_task(ce, 3, "TRNG");
+	mutex_unlock(&ce->rnglock);
+
+	pm_runtime_put(ce->dev);
+
+err_pm:
+	dma_unmap_single(ce->dev, dma_dst, todo, DMA_FROM_DEVICE);
+
+	if (!err) {
+		memcpy(data, d, max);
+		err = max;
+	}
+	memzero_explicit(d, todo);
+err_dst:
+	kfree(d);
+	return err;
+}
+
+int sun8i_ce_hwrng_register(struct sun8i_ce_dev *ce)
+{
+	int ret;
+
+	if (ce->variant->trng == CE_ID_NOTSUPP) {
+		dev_info(ce->dev, "TRNG not supported\n");
+		return 0;
+	}
+	ce->trng.name = "sun8i Crypto Engine TRNG";
+	ce->trng.read = sun8i_ce_trng_read;
+	ce->trng.quality = 1000;
+
+	ret = hwrng_register(&ce->trng);
+	if (ret)
+		dev_err(ce->dev, "Fail to register the TRNG\n");
+	return ret;
+}
+
+void sun8i_ce_hwrng_unregister(struct sun8i_ce_dev *ce)
+{
+	if (ce->variant->trng == CE_ID_NOTSUPP)
+		return;
+	hwrng_unregister(&ce->trng);
+}
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 963645fe4adb..558027516aed 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -12,6 +12,11 @@
 #include <linux/atomic.h>
 #include <linux/debugfs.h>
 #include <linux/crypto.h>
+#include <linux/hw_random.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/rng.h>
+#include <crypto/sha.h>
 
 /* CE Registers */
 #define CE_TDQ	0x00
@@ -45,6 +50,16 @@
 #define CE_ALG_AES		0
 #define CE_ALG_DES		1
 #define CE_ALG_3DES		2
+#define CE_ALG_MD5		16
+#define CE_ALG_SHA1		17
+#define CE_ALG_SHA224		18
+#define CE_ALG_SHA256		19
+#define CE_ALG_SHA384		20
+#define CE_ALG_SHA512		21
+#define CE_ALG_TRNG		48
+#define CE_ALG_PRNG		49
+#define CE_ALG_TRNG_V2		0x1c
+#define CE_ALG_PRNG_V2		0x1d
 
 /* Used in ce_variant */
 #define CE_ID_NOTSUPP		0xFF
@@ -54,6 +69,14 @@
 #define CE_ID_CIPHER_DES3	2
 #define CE_ID_CIPHER_MAX	3
 
+#define CE_ID_HASH_MD5		0
+#define CE_ID_HASH_SHA1		1
+#define CE_ID_HASH_SHA224	2
+#define CE_ID_HASH_SHA256	3
+#define CE_ID_HASH_SHA384	4
+#define CE_ID_HASH_SHA512	5
+#define CE_ID_HASH_MAX		6
+
 #define CE_ID_OP_ECB	0
 #define CE_ID_OP_CBC	1
 #define CE_ID_OP_MAX	2
@@ -65,6 +88,16 @@
 #define CE_ERR_ADDR_INVALID	BIT(5)
 #define CE_ERR_KEYLADDER	BIT(6)
 
+#define ESR_H3	0
+#define ESR_A64	1
+#define ESR_R40	2
+#define ESR_H5	3
+#define ESR_H6	4
+
+#define PRNG_DATA_SIZE (160 / 8)
+#define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8)
+#define PRNG_LD BIT(17)
+
 #define CE_DIE_ID_SHIFT	16
 #define CE_DIE_ID_MASK	0x07
 
@@ -90,16 +123,34 @@ struct ce_clock {
  * struct ce_variant - Describe CE capability for each variant hardware
  * @alg_cipher:	list of supported ciphers. for each CE_ID_ this will give the
  *              coresponding CE_ALG_XXX value
+ * @alg_hash:	list of supported hashes. for each CE_ID_ this will give the
+ *              corresponding CE_ALG_XXX value
  * @op_mode:	list of supported block modes
- * @has_t_dlen_in_bytes:	Does the request size for cipher is in
+ * @cipher_t_dlen_in_bytes:	Does the request size for cipher is in
+ *				bytes or words
+ * @hash_t_dlen_in_bytes:	Does the request size for hash is in
+ *				bits or words
+ * @prng_t_dlen_in_bytes:	Does the request size for PRNG is in
+ *				bytes or words
+ * @trng_t_dlen_in_bytes:	Does the request size for TRNG is in
  *				bytes or words
  * @ce_clks:	list of clocks needed by this variant
+ * @esr:	The type of error register
+ * @prng:	The CE_ALG_XXX value for the PRNG
+ * @trng:	The CE_ALG_XXX value for the TRNG
  */
 struct ce_variant {
 	char alg_cipher[CE_ID_CIPHER_MAX];
+	char alg_hash[CE_ID_HASH_MAX];
 	u32 op_mode[CE_ID_OP_MAX];
-	bool has_t_dlen_in_bytes;
+	bool cipher_t_dlen_in_bytes;
+	bool hash_t_dlen_in_bits;
+	bool prng_t_dlen_in_bytes;
+	bool trng_t_dlen_in_bytes;
 	struct ce_clock ce_clks[CE_MAX_CLOCKS];
+	int esr;
+	unsigned char prng;
+	unsigned char trng;
 };
 
 struct sginfo {
@@ -129,8 +180,6 @@ struct ce_task {
 /*
  * struct sun8i_ce_flow - Information used by each flow
  * @engine:	ptr to the crypto_engine for this flow
- * @bounce_iv:	buffer which contain the IV
- * @ivlen:	size of bounce_iv
  * @complete:	completion for the current task on this flow
  * @status:	set to 1 by interrupt if task is done
  * @t_phy:	Physical address of task
@@ -139,8 +188,6 @@ struct ce_task {
  */
 struct sun8i_ce_flow {
 	struct crypto_engine *engine;
-	void *bounce_iv;
-	unsigned int ivlen;
 	struct completion complete;
 	int status;
 	dma_addr_t t_phy;
@@ -158,6 +205,7 @@ struct sun8i_ce_flow {
 * @reset:	pointer to reset controller
 * @dev:	the platform device
 * @mlock:	Control access to device registers
+ * @rnglock:	Control access to the RNG (dedicated channel 3)
 * @chanlist:	array of all flow
 * @flow:	flow to use in next request
 * @variant:	pointer to variant specific data
@@ -170,6 +218,7 @@ struct sun8i_ce_dev {
 	struct reset_control *reset;
 	struct device *dev;
 	struct mutex mlock;
+	struct mutex rnglock;
 	struct sun8i_ce_flow *chanlist;
 	atomic_t flow;
 	const struct ce_variant *variant;
@@ -177,17 +226,38 @@ struct sun8i_ce_dev {
 	struct dentry *dbgfs_dir;
 	struct dentry *dbgfs_stats;
 #endif
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+	struct hwrng trng;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+	unsigned long hwrng_stat_req;
+	unsigned long hwrng_stat_bytes;
+#endif
+#endif
 };
 
 /*
  * struct sun8i_cipher_req_ctx - context for a skcipher request
  * @op_dir:	direction (encrypt vs decrypt) for this request
  * @flow:	the flow to use for this request
+ * @backup_iv:	buffer which contain the next IV to store
+ * @bounce_iv:	buffer which contain the IV
+ * @ivlen:	size of bounce_iv
+ * @nr_sgs:	The number of source SG (as given by dma_map_sg())
+ * @nr_sgd:	The number of destination SG (as given by dma_map_sg())
+ * @addr_iv:	The IV addr returned by dma_map_single, need to unmap later
+ * @addr_key:	The key addr returned by dma_map_single, need to unmap later
  * @fallback_req:	request struct for invoking the fallback skcipher TFM
  */
 struct sun8i_cipher_req_ctx {
 	u32 op_dir;
 	int flow;
+	void *backup_iv;
+	void *bounce_iv;
+	unsigned int ivlen;
+	int nr_sgs;
+	int nr_sgd;
+	dma_addr_t addr_iv;
+	dma_addr_t addr_key;
 	struct skcipher_request fallback_req; // keep at the end
 };
 
@@ -208,6 +278,38 @@ struct sun8i_cipher_tfm_ctx {
 };
 
 /*
+ * struct sun8i_ce_hash_tfm_ctx - context for an ahash TFM
+ * @enginectx:	crypto_engine used by this TFM
+ * @ce:	pointer to the private data of driver handling this TFM
+ * @fallback_tfm:	pointer to the fallback TFM
+ */
+struct sun8i_ce_hash_tfm_ctx {
+	struct crypto_engine_ctx enginectx;
+	struct sun8i_ce_dev *ce;
+	struct crypto_ahash *fallback_tfm;
+};
+
+/*
+ * struct sun8i_ce_hash_reqctx - context for an ahash request
+ * @fallback_req:	pre-allocated fallback request
+ * @flow:	the flow to use for this request
+ */
+struct sun8i_ce_hash_reqctx {
+	struct ahash_request fallback_req;
+	int flow;
+};
+
+/*
+ * struct sun8i_ce_prng_ctx - context for PRNG TFM
+ * @seed:	The seed to use
+ * @slen:	The size of the seed
+ */
+struct sun8i_ce_rng_tfm_ctx {
+	void *seed;
+	unsigned int slen;
+};
+
+/*
  * struct sun8i_ce_alg_template - crypto_alg template
  * @type:	the CRYPTO_ALG_TYPE for this template
  * @ce_algo_id:	the CE_ID for this template
@@ -217,6 +319,7 @@ struct sun8i_cipher_tfm_ctx {
  * @alg:	one of sub struct must be used
  * @stat_req:	number of request done on this template
  * @stat_fb:	number of request which has fallbacked
+ * @stat_bytes:	total data size done by this template
  */
 struct sun8i_ce_alg_template {
 	u32 type;
@@ -225,10 +328,13 @@ struct sun8i_ce_alg_template {
 	struct sun8i_ce_dev *ce;
 	union {
 		struct skcipher_alg skcipher;
+		struct ahash_alg hash;
+		struct rng_alg rng;
 	} alg;
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
 	unsigned long stat_req;
 	unsigned long stat_fb;
+	unsigned long stat_bytes;
 #endif
 };
 
@@ -246,3 +352,24 @@ int sun8i_ce_skencrypt(struct skcipher_request *areq);
 
 int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce);
 int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name);
+
+int sun8i_ce_hash_crainit(struct crypto_tfm *tfm);
+void sun8i_ce_hash_craexit(struct crypto_tfm *tfm);
+int sun8i_ce_hash_init(struct ahash_request *areq);
+int sun8i_ce_hash_export(struct ahash_request *areq, void *out);
+int sun8i_ce_hash_import(struct ahash_request *areq, const void *in);
+int sun8i_ce_hash(struct ahash_request *areq);
+int sun8i_ce_hash_final(struct ahash_request *areq);
+int sun8i_ce_hash_update(struct ahash_request *areq);
+int sun8i_ce_hash_finup(struct ahash_request *areq);
+int sun8i_ce_hash_digest(struct ahash_request *areq);
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq);
+
+int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
+			   unsigned int slen, u8 *dst, unsigned int dlen);
+int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
+void sun8i_ce_prng_exit(struct crypto_tfm *tfm);
+int sun8i_ce_prng_init(struct crypto_tfm *tfm);
+
+int sun8i_ce_hwrng_register(struct sun8i_ce_dev *ce);
+void sun8i_ce_hwrng_unregister(struct sun8i_ce_dev *ce);
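Putting the extended struct ce_variant together, a hypothetical entry for a SoC with the first-generation PRNG, no TRNG and only the 32-bit hashes could look like the sketch below (not a real SoC entry; values are chosen purely for illustration from the constants added in this header):

static const struct ce_variant ce_example_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, },
	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
		      CE_ID_NOTSUPP, CE_ID_NOTSUPP },	/* no SHA-384/512 */
	.op_mode = { CE_OP_ECB, CE_OP_CBC },
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 300000000, 0 },
	},
	.esr = ESR_A64,		/* 4-bit per-flow error fields */
	.prng = CE_ALG_PRNG,	/* V1 PRNG algorithm ID */
	.trng = CE_ID_NOTSUPP,	/* sun8i_ce_hwrng_register() will skip it */
};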