Diffstat (limited to 'drivers/crypto/mxs-dcp.c')
-rw-r--r--	drivers/crypto/mxs-dcp.c	625
1 file changed, 395 insertions, 230 deletions
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 625ee50fd78b..133ebc998236 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Freescale i.MX23/i.MX28 Data Co-Processor driver
  *
  * Copyright (C) 2013 Marek Vasut <marex@denx.de>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
  */
 
 #include <linux/dma-mapping.h>
@@ -20,17 +14,36 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/stmp_device.h>
+#include <linux/clk.h>
+#include <soc/fsl/dcp.h>
 
 #include <crypto/aes.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
 
 #define DCP_MAX_CHANS	4
 #define DCP_BUF_SZ	PAGE_SIZE
+#define DCP_SHA_PAY_SZ	64
 
 #define DCP_ALIGNMENT	64
 
+/*
+ * Null hashes to align with hw behavior on imx6sl and ull
+ * these are flipped for consistency with hw output
+ */
+static const uint8_t sha1_null_hash[] =
+	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
+	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
+
+static const uint8_t sha256_null_hash[] =
+	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
+	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
+	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
+	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
+
 /* DCP DMA descriptor. */
 struct dcp_dma_desc {
 	uint32_t	next_cmd_addr;
@@ -48,6 +61,7 @@ struct dcp_coherent_block {
 	uint8_t			aes_in_buf[DCP_BUF_SZ];
 	uint8_t			aes_out_buf[DCP_BUF_SZ];
 	uint8_t			sha_in_buf[DCP_BUF_SZ];
+	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];
 
 	uint8_t			aes_key[2 * AES_KEYSIZE_128];
 
@@ -63,9 +77,10 @@ struct dcp {
 	struct dcp_coherent_block	*coh;
 
 	struct completion		completion[DCP_MAX_CHANS];
-	struct mutex			mutex[DCP_MAX_CHANS];
+	spinlock_t			lock[DCP_MAX_CHANS];
 	struct task_struct		*thread[DCP_MAX_CHANS];
 	struct crypto_queue		queue[DCP_MAX_CHANS];
+	struct clk			*dcp_clk;
 };
 
 enum dcp_chan {
@@ -87,11 +102,13 @@ struct dcp_async_ctx {
 	struct crypto_skcipher		*fallback;
 	unsigned int			key_len;
 	uint8_t				key[AES_KEYSIZE_128];
+	bool				key_referenced;
 };
 
 struct dcp_aes_req_ctx {
 	unsigned int	enc:1;
 	unsigned int	ecb:1;
+	struct skcipher_request fallback_req;	// keep at the end
 };
 
 struct dcp_sha_req_ctx {
@@ -99,6 +116,11 @@ struct dcp_sha_req_ctx {
 	unsigned int	fini:1;
 };
 
+struct dcp_export_state {
+	struct dcp_sha_req_ctx	req_ctx;
+	struct dcp_async_ctx	async_ctx;
+};
+
 /*
  * There can even be only one instance of the MXS DCP due to the
  * design of Linux Crypto API.
@@ -135,6 +157,7 @@ static struct dcp *global_sdcp;
 #define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
 #define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
 #define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
+#define MXS_DCP_CONTROL0_OTP_KEY		(1 << 10)
 #define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
 #define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
 #define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
@@ -148,17 +171,23 @@ static struct dcp *global_sdcp;
 #define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
 #define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
 
+#define MXS_DCP_CONTROL1_KEY_SELECT_SHIFT	8
+
 static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 {
+	int dma_err;
 	struct dcp *sdcp = global_sdcp;
 	const int chan = actx->chan;
 	uint32_t stat;
 	unsigned long ret;
 	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
-
 	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
 					      DMA_TO_DEVICE);
 
+	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
+	if (dma_err)
+		return dma_err;
+
 	reinit_completion(&sdcp->completion[chan]);
 
 	/* Clear status register. */
@@ -194,28 +223,54 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
  * Encryption (AES128)
  */
 static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
-			   struct ablkcipher_request *req, int init)
+			   struct skcipher_request *req, int init)
 {
+	dma_addr_t key_phys, src_phys, dst_phys;
 	struct dcp *sdcp = global_sdcp;
 	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
-	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+	bool key_referenced = actx->key_referenced;
 	int ret;
 
-	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
-					     2 * AES_KEYSIZE_128,
-					     DMA_TO_DEVICE);
-	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
-					     DCP_BUF_SZ, DMA_TO_DEVICE);
-	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
-					     DCP_BUF_SZ, DMA_FROM_DEVICE);
+	if (key_referenced)
+		key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key + AES_KEYSIZE_128,
+					  AES_KEYSIZE_128, DMA_TO_DEVICE);
+	else
+		key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+					  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
+	ret = dma_mapping_error(sdcp->dev, key_phys);
+	if (ret)
+		return ret;
+
+	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
+				  DCP_BUF_SZ, DMA_TO_DEVICE);
+	ret = dma_mapping_error(sdcp->dev, src_phys);
+	if (ret)
+		goto err_src;
+
+	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
+				  DCP_BUF_SZ, DMA_FROM_DEVICE);
+	ret = dma_mapping_error(sdcp->dev, dst_phys);
+	if (ret)
+		goto err_dst;
+
+	if (actx->fill % AES_BLOCK_SIZE) {
+		dev_err(sdcp->dev, "Invalid block size!\n");
+		ret = -EINVAL;
+		goto aes_done_run;
+	}
 
 	/* Fill in the DMA descriptor. */
 	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 			 MXS_DCP_CONTROL0_INTERRUPT |
 			 MXS_DCP_CONTROL0_ENABLE_CIPHER;
 
-	/* Payload contains the key. */
-	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+	if (!key_referenced)
+		/* Payload contains the key. */
+		desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+	else if (actx->key[0] == DCP_PAES_KEY_OTP)
+		/* Set OTP key bit to select the key via KEY_SELECT. */
+		desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
 
 	if (rctx->enc)
 		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
@@ -229,6 +284,9 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
 	else
 		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 
+	if (key_referenced)
+		desc->control1 |= sdcp->coh->aes_key[0] << MXS_DCP_CONTROL1_KEY_SELECT_SHIFT;
+
 	desc->next_cmd_addr = 0;
 	desc->source = src_phys;
 	desc->destination = dst_phys;
@@ -238,11 +296,17 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
 
 	ret = mxs_dcp_start_dma(actx);
 
-	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
-			 DMA_TO_DEVICE);
-	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+aes_done_run:
 	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
-
+err_dst:
+	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+err_src:
+	if (key_referenced)
+		dma_unmap_single(sdcp->dev, key_phys, AES_KEYSIZE_128,
+				 DMA_TO_DEVICE);
+	else
+		dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
+				 DMA_TO_DEVICE);
 	return ret;
 }
 
@@ -250,27 +314,28 @@
 static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 {
 	struct dcp *sdcp = global_sdcp;
 
-	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
+	struct skcipher_request *req = skcipher_request_cast(arq);
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
-	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 
 	struct scatterlist *dst = req->dst;
 	struct scatterlist *src = req->src;
-	const int nents = sg_nents(req->src);
+	int dst_nents = sg_nents(dst);
 
 	const int out_off = DCP_BUF_SZ;
 	uint8_t *in_buf = sdcp->coh->aes_in_buf;
 	uint8_t *out_buf = sdcp->coh->aes_out_buf;
 
-	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
 	uint32_t dst_off = 0;
+	uint8_t *src_buf = NULL;
+	uint32_t last_out_len = 0;
 	uint8_t *key = sdcp->coh->aes_key;
 
 	int ret = 0;
-	int split = 0;
-	unsigned int i, len, clen, rem = 0;
+	unsigned int i, len, clen, tlen = 0;
 	int init = 0;
+	bool limit_hit = false;
 
 	actx->fill = 0;
 
@@ -279,16 +344,21 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 
 	if (!rctx->ecb) {
 		/* Copy the CBC IV just past the key. */
-		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
+		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
 		/* CBC needs the INIT set. */
 		init = 1;
 	} else {
 		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
 	}
 
-	for_each_sg(req->src, src, nents, i) {
+	for_each_sg(req->src, src, sg_nents(req->src), i) {
 		src_buf = sg_virt(src);
 		len = sg_dma_len(src);
+		tlen += len;
+		limit_hit = tlen > req->cryptlen;
+
+		if (limit_hit)
+			len = req->cryptlen - (tlen - len);
 
 		do {
 			if (actx->fill + len > out_off)
@@ -305,35 +375,33 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 			 * If we filled the buffer or this is the last SG,
 			 * submit the buffer.
 			 */
-			if (actx->fill == out_off || sg_is_last(src)) {
+			if (actx->fill == out_off || sg_is_last(src) ||
+			    limit_hit) {
 				ret = mxs_dcp_run_aes(actx, req, init);
 				if (ret)
 					return ret;
 				init = 0;
 
-				out_tmp = out_buf;
-				while (dst && actx->fill) {
-					if (!split) {
-						dst_buf = sg_virt(dst);
-						dst_off = 0;
-					}
-					rem = min(sg_dma_len(dst) - dst_off,
-						  actx->fill);
-
-					memcpy(dst_buf + dst_off, out_tmp, rem);
-					out_tmp += rem;
-					dst_off += rem;
-					actx->fill -= rem;
-
-					if (dst_off == sg_dma_len(dst)) {
-						dst = sg_next(dst);
-						split = 0;
-					} else {
-						split = 1;
-					}
-				}
+				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
+						     actx->fill, dst_off);
+				dst_off += actx->fill;
+				last_out_len = actx->fill;
+				actx->fill = 0;
 			}
 		} while (len);
+
+		if (limit_hit)
+			break;
+	}
+
+	/* Copy the IV for CBC for chaining */
+	if (!rctx->ecb) {
+		if (rctx->enc)
+			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
+			       AES_BLOCK_SIZE);
+		else
+			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
+			       AES_BLOCK_SIZE);
 	}
 
 	return ret;
@@ -349,100 +417,102 @@ static int dcp_chan_thread_aes(void *data)
 
 	int ret;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
-			backlog->complete(backlog, -EINPROGRESS);
+			crypto_request_complete(backlog, -EINPROGRESS);
 
 		if (arq) {
 			ret = mxs_dcp_aes_block_crypt(arq);
-			arq->complete(arq, ret);
-			continue;
+			crypto_request_complete(arq, ret);
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
 
-static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
+static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int ret;
 
-	skcipher_request_set_tfm(subreq, ctx->fallback);
-	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
-	skcipher_request_set_crypt(subreq, req->src, req->dst,
-				   req->nbytes, req->info);
+	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
+				      req->base.complete, req->base.data);
+	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);
 
 	if (enc)
-		ret = crypto_skcipher_encrypt(subreq);
+		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
 	else
-		ret = crypto_skcipher_decrypt(subreq);
-
-	skcipher_request_zero(subreq);
+		ret = crypto_skcipher_decrypt(&rctx->fallback_req);
 
 	return ret;
 }
 
-static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
+static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
 {
 	struct dcp *sdcp = global_sdcp;
 	struct crypto_async_request *arq = &req->base;
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
-	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 	int ret;
 
-	if (unlikely(actx->key_len != AES_KEYSIZE_128))
+	if (unlikely(actx->key_len != AES_KEYSIZE_128 && !actx->key_referenced))
 		return mxs_dcp_block_fallback(req, enc);
 
 	rctx->enc = enc;
 	rctx->ecb = ecb;
 	actx->chan = DCP_CHAN_CRYPTO;
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
-	return -EINPROGRESS;
+	return ret;
 }
 
-static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 0, 1);
 }
 
-static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 1, 1);
 }
 
-static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 0, 0);
 }
 
-static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 1, 0);
 }
 
-static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			      unsigned int len)
 {
-	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
-	unsigned int ret;
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 
 	/*
 	 * AES 128 is supposed by the hardware, store key into temporary
@@ -450,6 +520,7 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 	 * there can still be an operation in progress.
 	 */
 	actx->key_len = len;
+	actx->key_referenced = false;
 	if (len == AES_KEYSIZE_128) {
 		memcpy(actx->key, key, len);
 		return 0;
@@ -463,41 +534,65 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
 	crypto_skcipher_set_flags(actx->fallback,
 				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+	return crypto_skcipher_setkey(actx->fallback, key, len);
+}
 
-	ret = crypto_skcipher_setkey(actx->fallback, key, len);
-	if (!ret)
-		return 0;
+static int mxs_dcp_aes_setrefkey(struct crypto_skcipher *tfm, const u8 *key,
+				 unsigned int len)
+{
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 
-	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
-	tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
-			       CRYPTO_TFM_RES_MASK;
+	if (len != DCP_PAES_KEYSIZE)
+		return -EINVAL;
 
-	return ret;
+	switch (key[0]) {
+	case DCP_PAES_KEY_SLOT0:
+	case DCP_PAES_KEY_SLOT1:
+	case DCP_PAES_KEY_SLOT2:
+	case DCP_PAES_KEY_SLOT3:
+	case DCP_PAES_KEY_UNIQUE:
+	case DCP_PAES_KEY_OTP:
+		memcpy(actx->key, key, len);
+		actx->key_len = len;
+		actx->key_referenced = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
-static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
+static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
 {
-	const char *name = crypto_tfm_alg_name(tfm);
-	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
-	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 	struct crypto_skcipher *blk;
 
-	blk = crypto_alloc_skcipher(name, 0, flags);
+	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(blk))
 		return PTR_ERR(blk);
 
 	actx->fallback = blk;
-	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
+					 crypto_skcipher_reqsize(blk));
 	return 0;
 }
 
-static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
+static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 
 	crypto_free_skcipher(actx->fallback);
 }
 
+static int mxs_dcp_paes_init_tfm(struct crypto_skcipher *tfm)
+{
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
+
+	return 0;
+}
+
 /*
  * Hashing (SHA1/SHA256)
  */
@@ -509,14 +604,16 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
-	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
-
 	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 
 	dma_addr_t digest_phys = 0;
 	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
 					     DCP_BUF_SZ, DMA_TO_DEVICE);
 
+	ret = dma_mapping_error(sdcp->dev, buf_phys);
+	if (ret)
+		return ret;
+
 	/* Fill in the DMA descriptor. */
 	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 			 MXS_DCP_CONTROL0_INTERRUPT |
@@ -532,10 +629,27 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
 	desc->payload = 0;
 	desc->status = 0;
 
+	/*
+	 * Align driver with hw behavior when generating null hashes
+	 */
+	if (rctx->init && rctx->fini && desc->size == 0) {
+		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+		const uint8_t *sha_buf =
+			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
+			sha1_null_hash : sha256_null_hash;
+		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
+		ret = 0;
+		goto done_run;
+	}
+
 	/* Set HASH_TERM bit for last transfer block. */
 	if (rctx->fini) {
-		digest_phys = dma_map_single(sdcp->dev, req->result,
-					     halg->digestsize, DMA_FROM_DEVICE);
+		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
+					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
+		ret = dma_mapping_error(sdcp->dev, digest_phys);
+		if (ret)
+			goto done_run;
+
 		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
 		desc->payload = digest_phys;
 	}
@@ -543,9 +657,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
 	ret = mxs_dcp_start_dma(actx);
 
 	if (rctx->fini)
-		dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
+		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
 				 DMA_FROM_DEVICE);
 
+done_run:
 	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 
 	return ret;
@@ -560,48 +675,46 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
 	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
-	const int nents = sg_nents(req->src);
 
 	uint8_t *in_buf = sdcp->coh->sha_in_buf;
-
-	uint8_t *src_buf;
+	uint8_t *out_buf = sdcp->coh->sha_out_buf;
 
 	struct scatterlist *src;
 
-	unsigned int i, len, clen;
+	unsigned int i, len, clen, oft = 0;
 	int ret;
 
 	int fin = rctx->fini;
 	if (fin)
 		rctx->fini = 0;
 
-	for_each_sg(req->src, src, nents, i) {
-		src_buf = sg_virt(src);
-		len = sg_dma_len(src);
-
-		do {
-			if (actx->fill + len > DCP_BUF_SZ)
-				clen = DCP_BUF_SZ - actx->fill;
-			else
-				clen = len;
-
-			memcpy(in_buf + actx->fill, src_buf, clen);
-			len -= clen;
-			src_buf += clen;
-			actx->fill += clen;
+	src = req->src;
+	len = req->nbytes;
 
-			/*
-			 * If we filled the buffer and still have some
-			 * more data, submit the buffer.
-			 */
-			if (len && actx->fill == DCP_BUF_SZ) {
-				ret = mxs_dcp_run_sha(req);
-				if (ret)
-					return ret;
-				actx->fill = 0;
-				rctx->init = 0;
-			}
-		} while (len);
+	while (len) {
+		if (actx->fill + len > DCP_BUF_SZ)
+			clen = DCP_BUF_SZ - actx->fill;
+		else
+			clen = len;
+
+		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
+					 0);
+
+		len -= clen;
+		oft += clen;
+		actx->fill += clen;
+
+		/*
+		 * If we filled the buffer and still have some
+		 * more data, submit the buffer.
+		 */
+		if (len && actx->fill == DCP_BUF_SZ) {
+			ret = mxs_dcp_run_sha(req);
+			if (ret)
+				return ret;
+			actx->fill = 0;
+			rctx->init = 0;
+		}
 	}
 
 	if (fin) {
@@ -617,11 +730,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
 
 		actx->fill = 0;
 
-		/* For some reason, the result is flipped. */
-		for (i = 0; i < halg->digestsize / 2; i++) {
-			swap(req->result[i],
-			     req->result[halg->digestsize - i - 1]);
-		}
+		/* For some reason the result is flipped */
+		for (i = 0; i < halg->digestsize; i++)
+			req->result[i] = out_buf[halg->digestsize - i - 1];
 	}
 
 	return 0;
@@ -634,36 +745,31 @@ static int dcp_chan_thread_sha(void *data)
 
 	struct crypto_async_request *backlog;
 	struct crypto_async_request *arq;
+	int ret;
 
-	struct dcp_sha_req_ctx *rctx;
-
-	struct ahash_request *req;
-	int ret, fini;
-
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
-			backlog->complete(backlog, -EINPROGRESS);
+			crypto_request_complete(backlog, -EINPROGRESS);
 
 		if (arq) {
-			req = ahash_request_cast(arq);
-			rctx = ahash_request_ctx(req);
-
 			ret = dcp_sha_req_to_buf(arq);
-			fini = rctx->fini;
-			arq->complete(arq, ret);
-			if (!fini)
-				continue;
+			crypto_request_complete(arq, ret);
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -721,14 +827,14 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 		rctx->init = 1;
 	}
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
 	mutex_unlock(&actx->mutex);
 
-	return -EINPROGRESS;
+	return ret;
 }
 
 static int dcp_sha_update(struct ahash_request *req)
@@ -759,6 +865,34 @@ static int dcp_sha_digest(struct ahash_request *req)
 	return dcp_sha_finup(req);
 }
 
+static int dcp_sha_import(struct ahash_request *req, const void *in)
+{
+	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+	const struct dcp_export_state *export = in;
+
+	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
+	memset(actx, 0, sizeof(struct dcp_async_ctx));
+	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
+	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
+
+	return 0;
+}
+
+static int dcp_sha_export(struct ahash_request *req, void *out)
+{
+	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
+	struct dcp_export_state *export = out;
+
+	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
+	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
+
+	return 0;
+}
+
 static int dcp_sha_cra_init(struct crypto_tfm *tfm)
 {
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -771,54 +905,77 @@ static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
 }
 
 /* AES 128 ECB and AES 128 CBC */
-static struct crypto_alg dcp_aes_algs[] = {
+static struct skcipher_alg dcp_aes_algs[] = {
 	{
-		.cra_name		= "ecb(aes)",
-		.cra_driver_name	= "ecb-aes-dcp",
-		.cra_priority		= 400,
-		.cra_alignmask		= 15,
-		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-					  CRYPTO_ALG_ASYNC |
+		.base.cra_name		= "ecb(aes)",
+		.base.cra_driver_name	= "ecb-aes-dcp",
+		.base.cra_priority	= 400,
+		.base.cra_alignmask	= 15,
+		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 					  CRYPTO_ALG_NEED_FALLBACK,
-		.cra_init		= mxs_dcp_aes_fallback_init,
-		.cra_exit		= mxs_dcp_aes_fallback_exit,
-		.cra_blocksize		= AES_BLOCK_SIZE,
-		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
-		.cra_type		= &crypto_ablkcipher_type,
-		.cra_module		= THIS_MODULE,
-		.cra_u	= {
-			.ablkcipher = {
-				.min_keysize	= AES_MIN_KEY_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE,
-				.setkey		= mxs_dcp_aes_setkey,
-				.encrypt	= mxs_dcp_aes_ecb_encrypt,
-				.decrypt	= mxs_dcp_aes_ecb_decrypt
-			},
-		},
+		.base.cra_blocksize	= AES_BLOCK_SIZE,
+		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= AES_MIN_KEY_SIZE,
+		.max_keysize		= AES_MAX_KEY_SIZE,
+		.setkey			= mxs_dcp_aes_setkey,
+		.encrypt		= mxs_dcp_aes_ecb_encrypt,
+		.decrypt		= mxs_dcp_aes_ecb_decrypt,
+		.init			= mxs_dcp_aes_fallback_init_tfm,
+		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 	}, {
-		.cra_name		= "cbc(aes)",
-		.cra_driver_name	= "cbc-aes-dcp",
-		.cra_priority		= 400,
-		.cra_alignmask		= 15,
-		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-					  CRYPTO_ALG_ASYNC |
+		.base.cra_name		= "cbc(aes)",
+		.base.cra_driver_name	= "cbc-aes-dcp",
+		.base.cra_priority	= 400,
+		.base.cra_alignmask	= 15,
+		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 					  CRYPTO_ALG_NEED_FALLBACK,
-		.cra_init		= mxs_dcp_aes_fallback_init,
-		.cra_exit		= mxs_dcp_aes_fallback_exit,
-		.cra_blocksize		= AES_BLOCK_SIZE,
-		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
-		.cra_type		= &crypto_ablkcipher_type,
-		.cra_module		= THIS_MODULE,
-		.cra_u = {
-			.ablkcipher = {
-				.min_keysize	= AES_MIN_KEY_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE,
-				.setkey		= mxs_dcp_aes_setkey,
-				.encrypt	= mxs_dcp_aes_cbc_encrypt,
-				.decrypt	= mxs_dcp_aes_cbc_decrypt,
-				.ivsize		= AES_BLOCK_SIZE,
-			},
-		},
+		.base.cra_blocksize	= AES_BLOCK_SIZE,
+		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= AES_MIN_KEY_SIZE,
+		.max_keysize		= AES_MAX_KEY_SIZE,
+		.setkey			= mxs_dcp_aes_setkey,
+		.encrypt		= mxs_dcp_aes_cbc_encrypt,
+		.decrypt		= mxs_dcp_aes_cbc_decrypt,
+		.ivsize			= AES_BLOCK_SIZE,
+		.init			= mxs_dcp_aes_fallback_init_tfm,
+		.exit			= mxs_dcp_aes_fallback_exit_tfm,
+	}, {
+		.base.cra_name		= "ecb(paes)",
+		.base.cra_driver_name	= "ecb-paes-dcp",
+		.base.cra_priority	= 401,
+		.base.cra_alignmask	= 15,
+		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
+		.base.cra_blocksize	= AES_BLOCK_SIZE,
+		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= DCP_PAES_KEYSIZE,
+		.max_keysize		= DCP_PAES_KEYSIZE,
+		.setkey			= mxs_dcp_aes_setrefkey,
+		.encrypt		= mxs_dcp_aes_ecb_encrypt,
+		.decrypt		= mxs_dcp_aes_ecb_decrypt,
+		.init			= mxs_dcp_paes_init_tfm,
+	}, {
+		.base.cra_name		= "cbc(paes)",
+		.base.cra_driver_name	= "cbc-paes-dcp",
+		.base.cra_priority	= 401,
+		.base.cra_alignmask	= 15,
+		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
+		.base.cra_blocksize	= AES_BLOCK_SIZE,
+		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= DCP_PAES_KEYSIZE,
+		.max_keysize		= DCP_PAES_KEYSIZE,
+		.setkey			= mxs_dcp_aes_setrefkey,
+		.encrypt		= mxs_dcp_aes_cbc_encrypt,
+		.decrypt		= mxs_dcp_aes_cbc_decrypt,
+		.ivsize			= AES_BLOCK_SIZE,
+		.init			= mxs_dcp_paes_init_tfm,
 	},
 };
 
@@ -829,13 +986,15 @@ static struct ahash_alg dcp_sha1_alg = {
 	.final	= dcp_sha_final,
 	.finup	= dcp_sha_finup,
 	.digest	= dcp_sha_digest,
+	.import = dcp_sha_import,
+	.export = dcp_sha_export,
 	.halg	= {
 		.digestsize	= SHA1_DIGEST_SIZE,
+		.statesize	= sizeof(struct dcp_export_state),
 		.base		= {
 			.cra_name		= "sha1",
 			.cra_driver_name	= "sha1-dcp",
 			.cra_priority		= 400,
-			.cra_alignmask		= 63,
 			.cra_flags		= CRYPTO_ALG_ASYNC,
 			.cra_blocksize		= SHA1_BLOCK_SIZE,
 			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
@@ -853,13 +1012,15 @@ static struct ahash_alg dcp_sha256_alg = {
 	.final	= dcp_sha_final,
 	.finup	= dcp_sha_finup,
 	.digest	= dcp_sha_digest,
+	.import = dcp_sha_import,
+	.export = dcp_sha_export,
 	.halg	= {
 		.digestsize	= SHA256_DIGEST_SIZE,
+		.statesize	= sizeof(struct dcp_export_state),
 		.base		= {
 			.cra_name		= "sha256",
 			.cra_driver_name	= "sha256-dcp",
 			.cra_priority		= 400,
-			.cra_alignmask		= 63,
 			.cra_flags		= CRYPTO_ALG_ASYNC,
 			.cra_blocksize		= SHA256_BLOCK_SIZE,
 			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
@@ -897,8 +1058,6 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct dcp *sdcp = NULL;
 	int i, ret;
-
-	struct resource *iores;
 	int dcp_vmi_irq, dcp_irq;
 
 	if (global_sdcp) {
@@ -906,7 +1065,6 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	dcp_vmi_irq = platform_get_irq(pdev, 0);
 	if (dcp_vmi_irq < 0)
 		return dcp_vmi_irq;
@@ -920,7 +1078,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	sdcp->dev = dev;
-	sdcp->base = devm_ioremap_resource(dev, iores);
+	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(sdcp->base))
 		return PTR_ERR(sdcp->base);
 
@@ -948,10 +1106,17 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	/* Re-align the structure so it fits the DCP constraints. */
 	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
 
+	/* DCP clock is optional, only used on some SOCs */
+	sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
+	if (IS_ERR(sdcp->dcp_clk))
+		return PTR_ERR(sdcp->dcp_clk);
+
 	/* Restart the DCP block. */
 	ret = stmp_reset_block(sdcp->base);
-	if (ret)
+	if (ret) {
+		dev_err(dev, "Failed reset\n");
 		return ret;
+	}
 
 	/* Initialize control register. */
 	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
@@ -979,7 +1144,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sdcp);
 
 	for (i = 0; i < DCP_MAX_CHANS; i++) {
-		mutex_init(&sdcp->mutex[i]);
+		spin_lock_init(&sdcp->lock[i]);
 		init_completion(&sdcp->completion[i]);
 		crypto_init_queue(&sdcp->queue[i], 50);
 	}
@@ -989,7 +1154,8 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 						      NULL, "mxs_dcp_chan/sha");
 	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
 		dev_err(dev, "Error starting SHA thread!\n");
-		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+		return ret;
 	}
 
 	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
@@ -1004,8 +1170,8 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
 
 	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
-		ret = crypto_register_algs(dcp_aes_algs,
-					   ARRAY_SIZE(dcp_aes_algs));
+		ret = crypto_register_skciphers(dcp_aes_algs,
+						ARRAY_SIZE(dcp_aes_algs));
 		if (ret) {
 			/* Failed to register algorithm. */
 			dev_err(dev, "Failed to register AES crypto!\n");
@@ -1039,17 +1205,18 @@ err_unregister_sha1:
 
 err_unregister_aes:
 	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
-		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
 
 err_destroy_aes_thread:
 	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
 
 err_destroy_sha_thread:
 	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
+
 	return ret;
 }
 
-static int mxs_dcp_remove(struct platform_device *pdev)
+static void mxs_dcp_remove(struct platform_device *pdev)
 {
 	struct dcp *sdcp = platform_get_drvdata(pdev);
 
@@ -1060,7 +1227,7 @@
 		crypto_unregister_ahash(&dcp_sha1_alg);
 
 	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
-		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
 
 	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
 	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
@@ -1068,8 +1235,6 @@
 
 	platform_set_drvdata(pdev, NULL);
 	global_sdcp = NULL;
-
-	return 0;
 }
 
 static const struct of_device_id mxs_dcp_dt_ids[] = {
@@ -1082,7 +1247,7 @@ MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
 
 static struct platform_driver mxs_dcp_driver = {
 	.probe	= mxs_dcp_probe,
-	.remove	= mxs_dcp_remove,
+	.remove = mxs_dcp_remove,
 	.driver	= {
 		.name		= "mxs-dcp",
 		.of_match_table	= mxs_dcp_dt_ids,
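
Usage sketch (not part of the commit): the new "ecb(paes)"/"cbc(paes)" transforms take a
one-byte key blob that names a hardware key instead of carrying key material, as
mxs_dcp_aes_setrefkey() above shows. Below is a minimal, hedged in-kernel example of
encrypting one block with the OTP key; it assumes the DCP_PAES_KEYSIZE and DCP_PAES_KEY_*
constants come from the new <soc/fsl/dcp.h> header, and the helper name
dcp_otp_encrypt_block() is hypothetical. Because the paes algorithms are registered with
CRYPTO_ALG_INTERNAL, a caller must request them with the internal type/mask.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <soc/fsl/dcp.h>

/* Hypothetical helper: encrypt AES_BLOCK_SIZE bytes in place with the OTP key. */
static int dcp_otp_encrypt_block(u8 *buf)
{
	u8 key[DCP_PAES_KEYSIZE] = { DCP_PAES_KEY_OTP };
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* paes is CRYPTO_ALG_INTERNAL, so ask for it explicitly. */
	tfm = crypto_alloc_skcipher("ecb(paes)", CRYPTO_ALG_INTERNAL,
				    CRYPTO_ALG_INTERNAL);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The "key" is only a one-byte reference selecting a hardware key. */
	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, NULL);

	/* The driver is async (channel kthread); wait for completion. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}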
