Diffstat (limited to 'drivers/crypto/stm32/stm32-hash.c')
-rw-r--r--	drivers/crypto/stm32/stm32-hash.c | 1922 ++++++++++++++++++++++++++++---------
1 file changed, 1415 insertions(+), 507 deletions(-)
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index d33006d43f76..a4436728b0db 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -6,32 +6,31 @@
  * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
  */
 
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/sha3.h>
 #include <linux/clk.h>
-#include <linux/crypto.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 #include <linux/interrupt.h>
-#include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
-
-#include <crypto/engine.h>
-#include <crypto/hash.h>
-#include <crypto/md5.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/internal/hash.h>
+#include <linux/string.h>
 
 #define HASH_CR			0x00
 #define HASH_DIN		0x04
 #define HASH_STR		0x08
+#define HASH_UX500_HREG(x)	(0x0c + ((x) * 0x04))
 #define HASH_IMR		0x20
 #define HASH_SR			0x24
 #define HASH_CSR(x)		(0x0F8 + ((x) * 0x04))
@@ -45,15 +44,11 @@
 #define HASH_CR_DMAE		BIT(3)
 #define HASH_CR_DATATYPE_POS	4
 #define HASH_CR_MODE		BIT(6)
+#define HASH_CR_ALGO_POS	7
 #define HASH_CR_MDMAT		BIT(13)
 #define HASH_CR_DMAA		BIT(14)
 #define HASH_CR_LKEY		BIT(16)
 
-#define HASH_CR_ALGO_SHA1	0x0
-#define HASH_CR_ALGO_MD5	0x80
-#define HASH_CR_ALGO_SHA224	0x40000
-#define HASH_CR_ALGO_SHA256	0x40080
-
 /* Interrupt */
 #define HASH_DINIE		BIT(0)
 #define HASH_DCIE		BIT(1)
@@ -62,9 +57,6 @@
 #define HASH_MASK_CALC_COMPLETION	BIT(0)
 #define HASH_MASK_DATA_INPUT		BIT(1)
 
-/* Context swap register */
-#define HASH_CSR_REGISTER_NUMBER	53
-
 /* Status Flags */
 #define HASH_SR_DATA_INPUT_READY	BIT(0)
 #define HASH_SR_OUTPUT_READY		BIT(1)
@@ -75,28 +67,40 @@
 #define HASH_STR_NBLW_MASK	GENMASK(4, 0)
 #define HASH_STR_DCAL		BIT(8)
 
+/* HWCFGR Register */
+#define HASH_HWCFG_DMA_MASK	GENMASK(3, 0)
+
+/* Context swap register */
+#define HASH_CSR_NB_SHA256_HMAC		54
+#define HASH_CSR_NB_SHA256		38
+#define HASH_CSR_NB_SHA512_HMAC		103
+#define HASH_CSR_NB_SHA512		91
+#define HASH_CSR_NB_SHA3_HMAC		88
+#define HASH_CSR_NB_SHA3		72
+#define HASH_CSR_NB_MAX			HASH_CSR_NB_SHA512_HMAC
+
 #define HASH_FLAGS_INIT			BIT(0)
 #define HASH_FLAGS_OUTPUT_READY		BIT(1)
 #define HASH_FLAGS_CPU			BIT(2)
-#define HASH_FLAGS_DMA_READY		BIT(3)
-#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
-#define HASH_FLAGS_HMAC_INIT		BIT(5)
-#define HASH_FLAGS_HMAC_FINAL		BIT(6)
-#define HASH_FLAGS_HMAC_KEY		BIT(7)
-
+#define HASH_FLAGS_DMA_ACTIVE		BIT(3)
+#define HASH_FLAGS_HMAC_INIT		BIT(4)
+#define HASH_FLAGS_HMAC_FINAL		BIT(5)
+#define HASH_FLAGS_HMAC_KEY		BIT(6)
+#define HASH_FLAGS_SHA3_MODE		BIT(7)
 #define HASH_FLAGS_FINAL		BIT(15)
 #define HASH_FLAGS_FINUP		BIT(16)
-#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
-#define HASH_FLAGS_MD5			BIT(18)
-#define HASH_FLAGS_SHA1			BIT(19)
-#define HASH_FLAGS_SHA224		BIT(20)
-#define HASH_FLAGS_SHA256		BIT(21)
-#define HASH_FLAGS_ERRORS		BIT(22)
+#define HASH_FLAGS_ALGO_MASK		GENMASK(20, 17)
+#define HASH_FLAGS_ALGO_SHIFT		17
+#define HASH_FLAGS_ERRORS		BIT(21)
+#define HASH_FLAGS_EMPTY		BIT(22)
 #define HASH_FLAGS_HMAC			BIT(23)
+#define HASH_FLAGS_SGS_COPIED		BIT(24)
 
 #define HASH_OP_UPDATE			1
 #define HASH_OP_FINAL			2
 
+#define HASH_BURST_LEVEL		4
+
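The per-algorithm flag bits (HASH_FLAGS_MD5 .. HASH_FLAGS_SHA256) are replaced here by a 4-bit algorithm id packed into bits 20:17 of the state flags. A minimal standalone sketch of the encode/decode round-trip; the helper names and main() harness are illustrative, not part of the driver:

#include <assert.h>
#include <stdint.h>

#define FLAGS_ALGO_SHIFT 17
#define FLAGS_ALGO_MASK  (0xfu << FLAGS_ALGO_SHIFT)	/* GENMASK(20, 17) */

/* Pack/unpack a 4-bit algorithm id into the flags word. */
static uint32_t set_algo(uint32_t flags, uint32_t algo)
{
	return (flags & ~FLAGS_ALGO_MASK) | (algo << FLAGS_ALGO_SHIFT);
}

static uint32_t get_algo(uint32_t flags)
{
	return (flags & FLAGS_ALGO_MASK) >> FLAGS_ALGO_SHIFT;
}

int main(void)
{
	uint32_t flags = 0;

	flags = set_algo(flags, 15);	/* HASH_SHA512 = 15 still fits in 4 bits */
	assert(get_algo(flags) == 15);
	return 0;
}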
 enum stm32_hash_data_format {
 	HASH_DATA_32_BITS = 0x0,
 	HASH_DATA_16_BITS = 0x1,
@@ -104,35 +108,59 @@ enum stm32_hash_data_format {
 	HASH_DATA_1_BIT = 0x3
 };
 
-#define HASH_BUFLEN		256
-#define HASH_LONG_KEY		64
-#define HASH_MAX_KEY_SIZE	(SHA256_BLOCK_SIZE * 8)
-#define HASH_QUEUE_LENGTH	16
-#define HASH_DMA_THRESHOLD	50
+#define HASH_BUFLEN		(SHA3_224_BLOCK_SIZE + 4)
+#define HASH_MAX_KEY_SIZE	(SHA512_BLOCK_SIZE * 8)
+
+enum stm32_hash_algo {
+	HASH_SHA1		= 0,
+	HASH_MD5		= 1,
+	HASH_SHA224		= 2,
+	HASH_SHA256		= 3,
+	HASH_SHA3_224		= 4,
+	HASH_SHA3_256		= 5,
+	HASH_SHA3_384		= 6,
+	HASH_SHA3_512		= 7,
+	HASH_SHA384		= 12,
+	HASH_SHA512		= 15,
+};
+
+enum ux500_hash_algo {
+	HASH_SHA256_UX500	= 0,
+	HASH_SHA1_UX500		= 1,
+};
 
 #define HASH_AUTOSUSPEND_DELAY	50
 
 struct stm32_hash_ctx {
-	struct crypto_engine_ctx enginectx;
 	struct stm32_hash_dev	*hdev;
+	struct crypto_shash	*xtfm;
 	unsigned long		flags;
 
 	u8			key[HASH_MAX_KEY_SIZE];
 	int			keylen;
 };
 
+struct stm32_hash_state {
+	u32			flags;
+
+	u16			bufcnt;
+	u16			blocklen;
+
+	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
+
+	/* hash state */
+	u32			hw_context[3 + HASH_CSR_NB_MAX];
+};
+
 struct stm32_hash_request_ctx {
 	struct stm32_hash_dev	*hdev;
-	unsigned long		flags;
 	unsigned long		op;
 
-	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+	u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
 	size_t			digcnt;
-	size_t			bufcnt;
-	size_t			buflen;
-
 	/* DMA */
 	struct scatterlist	*sg;
+	struct scatterlist	sgl[2]; /* scatterlist used to realize alignment */
 	unsigned int		offset;
 	unsigned int		total;
 	struct scatterlist	sg_key;
@@ -143,20 +171,23 @@ struct stm32_hash_request_ctx {
 	u8			data_type;
 
-	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
-
-	/* Export Context */
-	u32			*hw_context;
+	struct stm32_hash_state state;
 };
 
 struct stm32_hash_algs_info {
-	struct ahash_alg	*algs_list;
+	struct ahash_engine_alg	*algs_list;
 	size_t			size;
 };
 
 struct stm32_hash_pdata {
-	struct stm32_hash_algs_info	*algs_info;
-	size_t				algs_info_size;
+	const int			alg_shift;
+	const struct stm32_hash_algs_info	*algs_info;
+	size_t				algs_info_size;
+	bool				has_sr;
+	bool				has_mdmat;
+	bool				context_secured;
+	bool				broken_emptymsg;
+	bool				ux500;
 };
 
 struct stm32_hash_dev {
@@ -166,13 +197,13 @@ struct stm32_hash_dev {
 	struct reset_control	*rst;
 	void __iomem		*io_base;
 	phys_addr_t		phys_base;
+	u8			xmit_buf[HASH_BUFLEN] __aligned(sizeof(u32));
 	u32			dma_mode;
-	u32			dma_maxburst;
+	bool			polled;
 
 	struct ahash_request	*req;
 	struct crypto_engine	*engine;
 
-	int			err;
 	unsigned long		flags;
 
 	struct dma_chan		*dma_lch;
@@ -192,6 +223,8 @@ static struct stm32_hash_drv stm32_hash = {
 };
 
 static void stm32_hash_dma_callback(void *param);
+static int stm32_hash_prepare_request(struct ahash_request *req);
+static void stm32_hash_unprepare_request(struct ahash_request *req);
 
 static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
 {
@@ -204,14 +237,29 @@ static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
 	writel_relaxed(value, hdev->io_base + offset);
 }
 
+/**
+ * stm32_hash_wait_busy - wait until the hash processor is available. It returns
+ * an error if the hash core is processing a block of data for more than 10 ms.
+ * @hdev: the stm32_hash_dev device.
+ */
 static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
 {
 	u32 status;
 
+	/* The Ux500 lacks the special status register; we poll the DCAL bit instead */
+	if (!hdev->pdata->has_sr)
+		return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
+						  !(status & HASH_STR_DCAL), 10, 10000);
+
 	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
 				   !(status & HASH_SR_BUSY), 10, 10000);
 }
 
+/**
+ * stm32_hash_set_nblw - set the number of valid bytes in the last word.
+ * @hdev: the stm32_hash_dev device.
+ * @length: the length of the final word.
+ */
 static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
 {
 	u32 reg;
@@ -249,58 +297,66 @@ static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
 	return 0;
 }
 
+/**
+ * stm32_hash_write_ctrl - Initialize the hash processor, only if
+ * HASH_FLAGS_INIT is set.
+ * @hdev: the stm32_hash_dev device
+ */
 static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
 {
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct stm32_hash_state *state = &rctx->state;
+	u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT;
 	u32 reg = HASH_CR_INIT;
 
 	if (!(hdev->flags & HASH_FLAGS_INIT)) {
-		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
-		case HASH_FLAGS_MD5:
-			reg |= HASH_CR_ALGO_MD5;
-			break;
-		case HASH_FLAGS_SHA1:
-			reg |= HASH_CR_ALGO_SHA1;
-			break;
-		case HASH_FLAGS_SHA224:
-			reg |= HASH_CR_ALGO_SHA224;
-			break;
-		case HASH_FLAGS_SHA256:
-			reg |= HASH_CR_ALGO_SHA256;
-			break;
-		default:
-			reg |= HASH_CR_ALGO_MD5;
+		if (hdev->pdata->ux500) {
+			reg |= ((alg & BIT(0)) << HASH_CR_ALGO_POS);
+		} else {
+			if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS)
+				reg |= ((alg & BIT(1)) << 17) |
+				       ((alg & BIT(0)) << HASH_CR_ALGO_POS);
+			else
+				reg |= alg << hdev->pdata->alg_shift;
 		}
 
 		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
 
-		if (rctx->flags & HASH_FLAGS_HMAC) {
+		if (state->flags & HASH_FLAGS_HMAC) {
 			hdev->flags |= HASH_FLAGS_HMAC;
 			reg |= HASH_CR_MODE;
-			if (ctx->keylen > HASH_LONG_KEY)
+			if (ctx->keylen > crypto_ahash_blocksize(tfm))
 				reg |= HASH_CR_LKEY;
 		}
 
-		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
+		if (!hdev->polled)
+			stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
 
 		stm32_hash_write(hdev, HASH_CR, reg);
 
 		hdev->flags |= HASH_FLAGS_INIT;
 
+		/*
+		 * After the first block + 1 words are filled up,
+		 * we only need to fill one block to start a partial computation.
+		 */
+		rctx->state.blocklen -= sizeof(u32);
+
 		dev_dbg(hdev->dev, "Write Control %x\n", reg);
 	}
 }
 
 static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
 {
+	struct stm32_hash_state *state = &rctx->state;
 	size_t count;
 
-	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
+	while ((state->bufcnt < state->blocklen) && rctx->total) {
 		count = min(rctx->sg->length - rctx->offset, rctx->total);
-		count = min(count, rctx->buflen - rctx->bufcnt);
+		count = min_t(size_t, count, state->blocklen - state->bufcnt);
 
 		if (count <= 0) {
 			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
@@ -311,10 +367,10 @@ static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
 			}
 		}
 
-		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
-					 rctx->offset, count, 0);
+		scatterwalk_map_and_copy(state->buffer + state->bufcnt,
+					 rctx->sg, rctx->offset, count, 0);
 
-		rctx->bufcnt += count;
+		state->bufcnt += count;
 		rctx->offset += count;
 		rctx->total -= count;
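The control-register algorithm encoding above is the subtle part of this hunk: on Ux500 only one selector bit exists at CR[7]; on STM32F4/F7-style IPs (where alg_shift == HASH_CR_ALGO_POS) the two selector bits are split across CR[7] and CR[18]; newer IPs take the whole enum value at alg_shift. A standalone sketch of that bit placement, which reproduces the legacy HASH_CR_ALGO_* constants this patch removes (harness names are mine):

#include <stdint.h>
#include <stdio.h>

#define HASH_CR_ALGO_POS 7

/* Model of the selector placement in stm32_hash_write_ctrl(). */
static uint32_t cr_algo_bits(uint32_t alg, int alg_shift)
{
	if (alg_shift == HASH_CR_ALGO_POS)
		return ((alg & 0x2u) << 17) |			/* bit 1 -> CR[18] */
		       ((alg & 0x1u) << HASH_CR_ALGO_POS);	/* bit 0 -> CR[7] */
	return alg << alg_shift;
}

int main(void)
{
	/* HASH_SHA256 = 3 -> 0x40080, the old HASH_CR_ALGO_SHA256 define;
	 * HASH_MD5 = 1 -> 0x80, the old HASH_CR_ALGO_MD5 define. */
	printf("sha256: 0x%x md5: 0x%x\n",
	       cr_algo_bits(3, HASH_CR_ALGO_POS),
	       cr_algo_bits(1, HASH_CR_ALGO_POS));
	return 0;
}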
@@ -331,13 +387,23 @@ static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
 static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
 			       const u8 *buf, size_t length, int final)
 {
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+	struct stm32_hash_state *state = &rctx->state;
 	unsigned int count, len32;
 	const u32 *buffer = (const u32 *)buf;
 	u32 reg;
 
-	if (final)
+	if (final) {
 		hdev->flags |= HASH_FLAGS_FINAL;
 
+		/* Do not process empty messages if hw is buggy. */
+		if (!(hdev->flags & HASH_FLAGS_INIT) && !length &&
+		    hdev->pdata->broken_emptymsg) {
+			state->flags |= HASH_FLAGS_EMPTY;
+			return 0;
+		}
+	}
+
 	len32 = DIV_ROUND_UP(length, sizeof(u32));
 
 	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
@@ -362,6 +428,9 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
 		stm32_hash_write(hdev, HASH_DIN, buffer[count]);
 
 	if (final) {
+		if (stm32_hash_wait_busy(hdev))
+			return -ETIMEDOUT;
+
 		stm32_hash_set_nblw(hdev, length);
 		reg = stm32_hash_read(hdev, HASH_STR);
 		reg |= HASH_STR_DCAL;
@@ -377,43 +446,90 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
 	return 0;
 }
 
+static int hash_swap_reg(struct stm32_hash_request_ctx *rctx)
+{
+	struct stm32_hash_state *state = &rctx->state;
+
+	switch ((state->flags & HASH_FLAGS_ALGO_MASK) >>
+		HASH_FLAGS_ALGO_SHIFT) {
+	case HASH_MD5:
+	case HASH_SHA1:
+	case HASH_SHA224:
+	case HASH_SHA256:
+		if (state->flags & HASH_FLAGS_HMAC)
+			return HASH_CSR_NB_SHA256_HMAC;
+		else
+			return HASH_CSR_NB_SHA256;
+		break;
+
+	case HASH_SHA384:
+	case HASH_SHA512:
+		if (state->flags & HASH_FLAGS_HMAC)
+			return HASH_CSR_NB_SHA512_HMAC;
+		else
+			return HASH_CSR_NB_SHA512;
+		break;
+
+	case HASH_SHA3_224:
+	case HASH_SHA3_256:
+	case HASH_SHA3_384:
+	case HASH_SHA3_512:
+		if (state->flags & HASH_FLAGS_HMAC)
+			return HASH_CSR_NB_SHA3_HMAC;
+		else
+			return HASH_CSR_NB_SHA3;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+}
+
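hash_swap_reg() tells the suspend/resume paths how many HASH_CSR context-swap registers a given algorithm occupies; together with IMR, STR and CR this bounds the hw_context array (3 + HASH_CSR_NB_MAX words). A worked arithmetic sketch of the saved-context size, using the counts defined earlier in the patch:

#include <stdio.h>

/* CSR window sizes from the HASH_CSR_NB_* defines above. */
enum { CSR_SHA256 = 38, CSR_SHA256_HMAC = 54, CSR_SHA512 = 91,
       CSR_SHA512_HMAC = 103, CSR_SHA3 = 72, CSR_SHA3_HMAC = 88 };

int main(void)
{
	/* Worked example: a suspended HMAC-SHA512 request saves IMR + STR + CR
	 * plus its CSR window, 4 bytes per register. */
	int words = 3 + CSR_SHA512_HMAC;

	printf("context: %d words = %d bytes\n", words, words * 4);	/* 106, 424 */
	return 0;
}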
 static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
 {
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+	struct stm32_hash_state *state = &rctx->state;
 	int bufcnt, err = 0, final;
 
-	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
+	dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
 
-	final = (rctx->flags & HASH_FLAGS_FINUP);
+	final = state->flags & HASH_FLAGS_FINAL;
 
-	while ((rctx->total >= rctx->buflen) ||
-	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
+	while ((rctx->total >= state->blocklen) ||
+	       (state->bufcnt + rctx->total >= state->blocklen)) {
 		stm32_hash_append_sg(rctx);
-		bufcnt = rctx->bufcnt;
-		rctx->bufcnt = 0;
-		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
+		bufcnt = state->bufcnt;
+		state->bufcnt = 0;
+		err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
+		if (err)
+			return err;
 	}
 
 	stm32_hash_append_sg(rctx);
 
 	if (final) {
-		bufcnt = rctx->bufcnt;
-		rctx->bufcnt = 0;
-		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
-					  (rctx->flags & HASH_FLAGS_FINUP));
+		bufcnt = state->bufcnt;
+		state->bufcnt = 0;
+		return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
 	}
 
 	return err;
 }
 
 static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
-			       struct scatterlist *sg, int length, int mdma)
+			       struct scatterlist *sg, int length, int mdmat)
 {
 	struct dma_async_tx_descriptor *in_desc;
 	dma_cookie_t cookie;
 	u32 reg;
 	int err;
 
+	dev_dbg(hdev->dev, "%s mdmat: %x length: %d\n", __func__, mdmat, length);
+
+	/* do not use dma if there is no data to send */
+	if (length <= 0)
+		return 0;
+
 	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1, DMA_MEM_TO_DEV,
 					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -426,21 +542,20 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
 	in_desc->callback = stm32_hash_dma_callback;
 	in_desc->callback_param = hdev;
 
-	hdev->flags |= HASH_FLAGS_FINAL;
 	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
 
 	reg = stm32_hash_read(hdev, HASH_CR);
 
-	if (mdma)
-		reg |= HASH_CR_MDMAT;
-	else
-		reg &= ~HASH_CR_MDMAT;
-
+	if (hdev->pdata->has_mdmat) {
+		if (mdmat)
+			reg |= HASH_CR_MDMAT;
+		else
+			reg &= ~HASH_CR_MDMAT;
+	}
 	reg |= HASH_CR_DMAE;
 
 	stm32_hash_write(hdev, HASH_CR, reg);
 
-	stm32_hash_set_nblw(hdev, length);
-
 	cookie = dmaengine_submit(in_desc);
 	err = dma_submit_error(cookie);
@@ -471,8 +586,6 @@ static void stm32_hash_dma_callback(void *param)
 	struct stm32_hash_dev *hdev = param;
 
 	complete(&hdev->dma_completion);
-
-	hdev->flags |= HASH_FLAGS_DMA_READY;
 }
 
 static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
@@ -482,7 +595,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 	int err;
 
-	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
+	if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode > 0) {
 		err = stm32_hash_write_key(hdev);
 		if (stm32_hash_wait_busy(hdev))
 			return -ETIMEDOUT;
@@ -517,8 +630,8 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
 	dma_conf.direction = DMA_MEM_TO_DEV;
 	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
 	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	dma_conf.src_maxburst = hdev->dma_maxburst;
-	dma_conf.dst_maxburst = hdev->dma_maxburst;
+	dma_conf.src_maxburst = HASH_BURST_LEVEL;
+	dma_conf.dst_maxburst = HASH_BURST_LEVEL;
 	dma_conf.device_fc = false;
 
 	chan = dma_request_chan(hdev->dev, "in");
@@ -543,46 +656,64 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
 static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 {
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+	u32 *buffer = (void *)rctx->state.buffer;
 	struct scatterlist sg[1], *tsg;
-	int err = 0, len = 0, reg, ncp = 0;
-	unsigned int i;
-	u32 *buffer = (void *)rctx->buffer;
+	int err = 0, reg, ncp = 0;
+	unsigned int i, len = 0, bufcnt = 0;
+	bool final = hdev->flags & HASH_FLAGS_FINAL;
+	bool is_last = false;
+	u32 last_word;
 
-	rctx->sg = hdev->req->src;
-	rctx->total = hdev->req->nbytes;
-
-	rctx->nents = sg_nents(rctx->sg);
+	dev_dbg(hdev->dev, "%s total: %d bufcnt: %d final: %d\n",
+		__func__, rctx->total, rctx->state.bufcnt, final);
 
 	if (rctx->nents < 0)
 		return -EINVAL;
 
 	stm32_hash_write_ctrl(hdev);
 
-	if (hdev->flags & HASH_FLAGS_HMAC) {
+	if (hdev->flags & HASH_FLAGS_HMAC && (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
+		hdev->flags |= HASH_FLAGS_HMAC_KEY;
 		err = stm32_hash_hmac_dma_send(hdev);
 		if (err != -EINPROGRESS)
 			return err;
 	}
 
 	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
-		len = sg->length;
-
 		sg[0] = *tsg;
-		if (sg_is_last(sg)) {
-			if (hdev->dma_mode == 1) {
-				len = (ALIGN(sg->length, 16) - 16);
-
-				ncp = sg_pcopy_to_buffer(
-					rctx->sg, rctx->nents,
-					rctx->buffer, sg->length - len,
-					rctx->total - sg->length + len);
+		len = sg->length;
 
-				sg->length = len;
+		if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) {
+			if (!final) {
+				/*
+				 * Always manually put the last word of a non-final transfer.
+				 */
+				len -= sizeof(u32);
+				sg_pcopy_to_buffer(rctx->sg, rctx->nents, &last_word, 4, len);
+				sg->length -= sizeof(u32);
 			} else {
-			if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
-				len = sg->length;
-				sg->length = ALIGN(sg->length,
-						   sizeof(u32));
+				/*
+				 * In Multiple DMA mode, DMA must be aborted before the final
+				 * transfer.
+				 */
+				sg->length = rctx->total - bufcnt;
+				if (hdev->dma_mode > 0) {
+					len = (ALIGN(sg->length, 16) - 16);
+
+					ncp = sg_pcopy_to_buffer(rctx->sg, rctx->nents,
+								 rctx->state.buffer,
+								 sg->length - len,
+								 rctx->total - sg->length + len);
+
+					if (!len)
+						break;
+
+					sg->length = len;
+				} else {
+					is_last = true;
+					if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
+						len = sg->length;
+						sg->length = ALIGN(sg->length,
								   sizeof(u32));
+					}
+				}
 			}
 		}
@@ -594,43 +725,69 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 			return -ENOMEM;
 		}
 
-		err = stm32_hash_xmit_dma(hdev, sg, len,
-					  !sg_is_last(sg));
+		err = stm32_hash_xmit_dma(hdev, sg, len, !is_last);
 
+		/* The last word of a non final transfer is sent manually. */
+		if (!final) {
+			stm32_hash_write(hdev, HASH_DIN, last_word);
+			len += sizeof(u32);
+		}
+
+		rctx->total -= len;
+
+		bufcnt += sg[0].length;
 		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
 
-		if (err == -ENOMEM)
+		if (err == -ENOMEM || err == -ETIMEDOUT)
 			return err;
+		if (is_last)
+			break;
 	}
 
-	if (hdev->dma_mode == 1) {
-		if (stm32_hash_wait_busy(hdev))
-			return -ETIMEDOUT;
-		reg = stm32_hash_read(hdev, HASH_CR);
-		reg &= ~HASH_CR_DMAE;
-		reg |= HASH_CR_DMAA;
-		stm32_hash_write(hdev, HASH_CR, reg);
-
-		if (ncp) {
-			memset(buffer + ncp, 0,
-			       DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
-			writesl(hdev->io_base + HASH_DIN, buffer,
-				DIV_ROUND_UP(ncp, sizeof(u32)));
-		}
-		stm32_hash_set_nblw(hdev, ncp);
-		reg = stm32_hash_read(hdev, HASH_STR);
-		reg |= HASH_STR_DCAL;
-		stm32_hash_write(hdev, HASH_STR, reg);
-		err = -EINPROGRESS;
-	}
-	if (hdev->flags & HASH_FLAGS_HMAC) {
-		if (stm32_hash_wait_busy(hdev))
-			return -ETIMEDOUT;
-		err = stm32_hash_hmac_dma_send(hdev);
+	/*
+	 * When the second last block transfer of 4 words is performed by the DMA,
+	 * the software must set the DMA Abort bit (DMAA) to 1 before completing the
	 * last transfer of 4 words or less.
+	 */
+	if (final) {
+		if (hdev->dma_mode > 0) {
+			if (stm32_hash_wait_busy(hdev))
+				return -ETIMEDOUT;
+			reg = stm32_hash_read(hdev, HASH_CR);
+			reg &= ~HASH_CR_DMAE;
+			reg |= HASH_CR_DMAA;
+			stm32_hash_write(hdev, HASH_CR, reg);
+
+			if (ncp) {
+				memset(buffer + ncp, 0, 4 - DIV_ROUND_UP(ncp, sizeof(u32)));
+				writesl(hdev->io_base + HASH_DIN, buffer,
+					DIV_ROUND_UP(ncp, sizeof(u32)));
+			}
+
+			stm32_hash_set_nblw(hdev, ncp);
+			reg = stm32_hash_read(hdev, HASH_STR);
+			reg |= HASH_STR_DCAL;
+			stm32_hash_write(hdev, HASH_STR, reg);
+			err = -EINPROGRESS;
+		}
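The final-transfer sequence above has a fixed ordering: stop DMA feeding (clear DMAE, set DMAA), push the unaligned tail by CPU, then declare the valid bits of the last word (NBLW) and kick the digest with DCAL. A hedged pseudo-register sketch of that ordering; the MMIO accessors are stubs and the NBLW formula (8 bits per residual byte) mirrors stm32_hash_set_nblw() as I read it:

#include <stdint.h>

#define CR_DMAE   (1u << 3)
#define CR_DMAA   (1u << 14)
#define STR_DCAL  (1u << 8)
#define STR_NBLW_MASK 0x1fu

/* Toy registers standing in for the MMIO block; illustration only. */
static uint32_t hash_cr, hash_str;
static uint32_t rd(uint32_t *r) { return *r; }
static void wr(uint32_t *r, uint32_t v) { *r = v; }

static void finish_dma_transfer(unsigned int ncp)
{
	uint32_t reg = rd(&hash_cr);

	reg &= ~CR_DMAE;	/* stop the DMA interface */
	reg |= CR_DMAA;		/* signal DMA abort to the hash core */
	wr(&hash_cr, reg);

	/* ... CPU-feed the remaining ncp bytes to HASH_DIN here ... */

	/* NBLW = number of valid bits in the last word */
	wr(&hash_str, (rd(&hash_str) & ~STR_NBLW_MASK) | 8u * (ncp % 4u));
	wr(&hash_str, rd(&hash_str) | STR_DCAL);	/* start calculation */
}

int main(void) { finish_dma_transfer(5); return 0; }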
+ */ + if (hdev->flags & HASH_FLAGS_HMAC) { + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + err = stm32_hash_hmac_dma_send(hdev); + } + + return err; } - return err; + if (err != -EINPROGRESS) + return err; + + return 0; } static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx) @@ -653,79 +810,97 @@ static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx) return hdev; } -static bool stm32_hash_dma_aligned_data(struct ahash_request *req) -{ - struct scatterlist *sg; - struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); - int i; - - if (req->nbytes <= HASH_DMA_THRESHOLD) - return false; - - if (sg_nents(req->src) > 1) { - if (hdev->dma_mode == 1) - return false; - for_each_sg(req->src, sg, sg_nents(req->src), i) { - if ((!IS_ALIGNED(sg->length, sizeof(u32))) && - (!sg_is_last(sg))) - return false; - } - } - - if (req->src->offset % 4) - return false; - - return true; -} - static int stm32_hash_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + struct stm32_hash_state *state = &rctx->state; + bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE; rctx->hdev = hdev; + state->flags = 0; - rctx->flags = HASH_FLAGS_CPU; + if (!(hdev->dma_lch && hdev->pdata->has_mdmat)) + state->flags |= HASH_FLAGS_CPU; + + if (sha3_mode) + state->flags |= HASH_FLAGS_SHA3_MODE; rctx->digcnt = crypto_ahash_digestsize(tfm); switch (rctx->digcnt) { case MD5_DIGEST_SIZE: - rctx->flags |= HASH_FLAGS_MD5; + state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT; break; case SHA1_DIGEST_SIZE: - rctx->flags |= HASH_FLAGS_SHA1; + if (hdev->pdata->ux500) + state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT; + else + state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT; break; case SHA224_DIGEST_SIZE: - rctx->flags |= HASH_FLAGS_SHA224; + if (sha3_mode) + state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT; + else + state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT; break; case SHA256_DIGEST_SIZE: - rctx->flags |= HASH_FLAGS_SHA256; + if (sha3_mode) { + state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT; + } else { + if (hdev->pdata->ux500) + state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT; + else + state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT; + } + break; + case SHA384_DIGEST_SIZE: + if (sha3_mode) + state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT; + else + state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT; + break; + case SHA512_DIGEST_SIZE: + if (sha3_mode) + state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT; + else + state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT; break; default: return -EINVAL; } - rctx->bufcnt = 0; - rctx->buflen = HASH_BUFLEN; + rctx->state.bufcnt = 0; + rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32); + if (rctx->state.blocklen > HASH_BUFLEN) { + dev_err(hdev->dev, "Error, block too large"); + return -EINVAL; + } + rctx->nents = 0; rctx->total = 0; rctx->offset = 0; rctx->data_type = HASH_DATA_8_BITS; - memset(rctx->buffer, 0, HASH_BUFLEN); - if (ctx->flags & HASH_FLAGS_HMAC) - rctx->flags |= HASH_FLAGS_HMAC; + state->flags |= HASH_FLAGS_HMAC; - dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags); + dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags); return 0; } static int 
 static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
 {
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+	struct stm32_hash_state *state = &rctx->state;
+
+	dev_dbg(hdev->dev, "update_req: total: %u, digcnt: %zd, final: 0",
+		rctx->total, rctx->digcnt);
+
+	if (!(state->flags & HASH_FLAGS_CPU))
+		return stm32_hash_dma_send(hdev);
+
 	return stm32_hash_update_cpu(hdev);
 }
 
@@ -733,51 +908,83 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
 {
 	struct ahash_request *req = hdev->req;
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
-	int err;
-	int buflen = rctx->bufcnt;
+	struct stm32_hash_state *state = &rctx->state;
+	int buflen = state->bufcnt;
+
+	if (!(state->flags & HASH_FLAGS_CPU)) {
+		hdev->flags |= HASH_FLAGS_FINAL;
+		return stm32_hash_dma_send(hdev);
+	}
 
-	rctx->bufcnt = 0;
+	if (state->flags & HASH_FLAGS_FINUP)
+		return stm32_hash_update_req(hdev);
 
-	if (!(rctx->flags & HASH_FLAGS_CPU))
-		err = stm32_hash_dma_send(hdev);
-	else
-		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
+	state->bufcnt = 0;
+	return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
+}
 
-	return err;
+static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_dev *hdev = rctx->hdev;
+	int ret;
+
+	dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
+		ctx->keylen);
+
+	if (!ctx->xtfm) {
+		dev_err(hdev->dev, "no fallback engine\n");
+		return;
+	}
+
+	if (ctx->keylen) {
+		ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
+		if (ret) {
+			dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
+			return;
+		}
+	}
+
+	ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
+	if (ret)
+		dev_err(hdev->dev, "shash digest error\n");
 }
 
 static void stm32_hash_copy_hash(struct ahash_request *req)
 {
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_state *state = &rctx->state;
+	struct stm32_hash_dev *hdev = rctx->hdev;
 	__be32 *hash = (void *)rctx->digest;
 	unsigned int i, hashsize;
 
-	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
-	case HASH_FLAGS_MD5:
-		hashsize = MD5_DIGEST_SIZE;
-		break;
-	case HASH_FLAGS_SHA1:
-		hashsize = SHA1_DIGEST_SIZE;
-		break;
-	case HASH_FLAGS_SHA224:
-		hashsize = SHA224_DIGEST_SIZE;
-		break;
-	case HASH_FLAGS_SHA256:
-		hashsize = SHA256_DIGEST_SIZE;
-		break;
-	default:
-		return;
-	}
+	if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
+		return stm32_hash_emptymsg_fallback(req);
 
-	for (i = 0; i < hashsize / sizeof(u32); i++)
-		hash[i] = cpu_to_be32(stm32_hash_read(rctx->hdev,
-						      HASH_HREG(i)));
+	hashsize = crypto_ahash_digestsize(tfm);
+
+	for (i = 0; i < hashsize / sizeof(u32); i++) {
+		if (hdev->pdata->ux500)
+			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
+					      HASH_UX500_HREG(i)));
+		else
+			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
+					      HASH_HREG(i)));
+	}
 }
 
 static int stm32_hash_finish(struct ahash_request *req)
 {
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	u32 reg;
+
+	reg = stm32_hash_read(rctx->hdev, HASH_SR);
+	reg &= ~HASH_SR_OUTPUT_READY;
+	stm32_hash_write(rctx->hdev, HASH_SR, reg);
 
 	if (!req->result)
 		return -EINVAL;
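On IPs flagged broken_emptymsg (the Ux500), the hardware cannot digest a zero-length message, so the fallback shash computes it in software. For reference, the well-known empty-message digests that path must produce; the table and harness below are mine:

#include <stdio.h>

/* Expected digests of the empty message for the fallback path. */
static const struct { const char *alg, *hex; } empty_vectors[] = {
	{ "md5",    "d41d8cd98f00b204e9800998ecf8427e" },
	{ "sha1",   "da39a3ee5e6b4b0d3255bfef95601890afd80709" },
	{ "sha256", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(empty_vectors) / sizeof(*empty_vectors); i++)
		printf("%s(\"\") = %s\n", empty_vectors[i].alg, empty_vectors[i].hex);
	return 0;
}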
@@ -790,93 +997,101 @@ static int stm32_hash_finish(struct ahash_request *req)
 
 static void stm32_hash_finish_req(struct ahash_request *req, int err)
 {
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_state *state = &rctx->state;
 	struct stm32_hash_dev *hdev = rctx->hdev;
 
+	if (hdev->flags & HASH_FLAGS_DMA_ACTIVE)
+		state->flags |= HASH_FLAGS_DMA_ACTIVE;
+	else
+		state->flags &= ~HASH_FLAGS_DMA_ACTIVE;
+
 	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
 		stm32_hash_copy_hash(req);
 		err = stm32_hash_finish(req);
-		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
-				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
-				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
-				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
-				 HASH_FLAGS_HMAC_KEY);
-	} else {
-		rctx->flags |= HASH_FLAGS_ERRORS;
 	}
 
-	pm_runtime_mark_last_busy(hdev->dev);
-	pm_runtime_put_autosuspend(hdev->dev);
+	/* Finalized requests must be unprepared here */
+	stm32_hash_unprepare_request(req);
 
 	crypto_finalize_hash_request(hdev->engine, req, err);
 }
 
-static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
-			      struct stm32_hash_request_ctx *rctx)
-{
-	pm_runtime_get_sync(hdev->dev);
-
-	if (!(HASH_FLAGS_INIT & hdev->flags)) {
-		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
-		stm32_hash_write(hdev, HASH_STR, 0);
-		stm32_hash_write(hdev, HASH_DIN, 0);
-		stm32_hash_write(hdev, HASH_IMR, 0);
-		hdev->err = 0;
-	}
-
-	return 0;
-}
-
-static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
-static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
-
 static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
 				   struct ahash_request *req)
 {
 	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
 }
 
-static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
+static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
 {
 	struct ahash_request *req = container_of(areq, struct ahash_request,
 						 base);
 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
-	struct stm32_hash_request_ctx *rctx;
+	struct stm32_hash_state *state = &rctx->state;
+	int swap_reg;
+	int err = 0;
 
 	if (!hdev)
 		return -ENODEV;
 
-	hdev->req = req;
-
-	rctx = ahash_request_ctx(req);
-
 	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
 		rctx->op, req->nbytes);
 
-	return stm32_hash_hw_init(hdev, rctx);
-}
-
-static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
-{
-	struct ahash_request *req = container_of(areq, struct ahash_request,
-						 base);
-	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
-	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
-	struct stm32_hash_request_ctx *rctx;
-	int err = 0;
+	pm_runtime_get_sync(hdev->dev);
 
-	if (!hdev)
-		return -ENODEV;
+	err = stm32_hash_prepare_request(req);
+	if (err)
+		return err;
 
 	hdev->req = req;
+	hdev->flags = 0;
+	swap_reg = hash_swap_reg(rctx);
+
+	if (state->flags & HASH_FLAGS_INIT) {
+		u32 *preg = rctx->state.hw_context;
+		u32 reg;
+		int i;
+
+		if (!hdev->pdata->ux500)
+			stm32_hash_write(hdev, HASH_IMR, *preg++);
+		stm32_hash_write(hdev, HASH_STR, *preg++);
+		stm32_hash_write(hdev, HASH_CR, *preg);
+		reg = *preg++ | HASH_CR_INIT;
+		stm32_hash_write(hdev, HASH_CR, reg);
+
+		for (i = 0; i < swap_reg; i++)
+			stm32_hash_write(hdev, HASH_CSR(i), *preg++);
+
+		hdev->flags |= HASH_FLAGS_INIT;
 
-	rctx = ahash_request_ctx(req);
+		if (state->flags & HASH_FLAGS_HMAC)
+			hdev->flags |= HASH_FLAGS_HMAC |
+				       HASH_FLAGS_HMAC_KEY;
+
+		if (state->flags & HASH_FLAGS_CPU)
+			hdev->flags |= HASH_FLAGS_CPU;
+
+		if (state->flags & HASH_FLAGS_DMA_ACTIVE)
+			hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
+	}
 
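Each request now carries its own hardware context: stm32_hash_unprepare_request() saves IMR, STR, CR and the CSR window before the request is swapped out, and the block above replays them (CR is rewritten with HASH_CR_INIT or'ed back in). A standalone model of that save/restore round-trip over a plain array; the register file is a toy and the INIT bit position (bit 2, per my reading of the driver's defines) is an assumption:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define CSR_MAX 103	/* HASH_CSR_NB_SHA512_HMAC */

/* Toy register file standing in for the MMIO block. */
struct hw { uint32_t imr, str, cr, csr[CSR_MAX]; };

static void save(const struct hw *h, uint32_t *ctx, int nr_csr)
{
	*ctx++ = h->imr;	/* same order the restore path reads them back */
	*ctx++ = h->str;
	*ctx++ = h->cr;
	memcpy(ctx, h->csr, nr_csr * sizeof(uint32_t));
}

static void restore(struct hw *h, const uint32_t *ctx, int nr_csr)
{
	h->imr = *ctx++;
	h->str = *ctx++;
	h->cr  = *ctx++ | (1u << 2);	/* assumed HASH_CR_INIT, restarts the core */
	memcpy(h->csr, ctx, nr_csr * sizeof(uint32_t));
}

int main(void)
{
	struct hw a = { .imr = 2, .cr = 0x40080, .csr = { 1, 2, 3 } }, b = { 0 };
	uint32_t ctx[3 + CSR_MAX];

	save(&a, ctx, 54);	/* HASH_CSR_NB_SHA256_HMAC */
	restore(&b, ctx, 54);
	assert(b.csr[2] == 3);
	return 0;
}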
 	if (rctx->op == HASH_OP_UPDATE)
 		err = stm32_hash_update_req(hdev);
 	else if (rctx->op == HASH_OP_FINAL)
 		err = stm32_hash_final_req(hdev);
 
+	/* If we have an IRQ, wait for that, else poll for completion */
+	if (err == -EINPROGRESS && hdev->polled) {
+		if (stm32_hash_wait_busy(hdev))
+			err = -ETIMEDOUT;
+		else {
+			hdev->flags |= HASH_FLAGS_OUTPUT_READY;
+			err = 0;
+		}
+	}
+
 	if (err != -EINPROGRESS)
 	/* done task will not finish it, so do it here */
 		stm32_hash_finish_req(req, err);
@@ -884,137 +1099,374 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
 	return 0;
 }
 
-static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
+static int stm32_hash_copy_sgs(struct stm32_hash_request_ctx *rctx,
+			       struct scatterlist *sg, int bs,
+			       unsigned int new_len)
 {
-	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
-	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-	struct stm32_hash_dev *hdev = ctx->hdev;
+	struct stm32_hash_state *state = &rctx->state;
+	int pages;
+	void *buf;
 
-	rctx->op = op;
+	pages = get_order(new_len);
 
-	return stm32_hash_handle_queue(hdev, req);
+	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
+	if (!buf) {
+		pr_err("Couldn't allocate pages for unaligned cases.\n");
+		return -ENOMEM;
+	}
+
+	if (state->bufcnt)
+		memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt);
+
+	scatterwalk_map_and_copy(buf + state->bufcnt, sg, rctx->offset,
+				 min(new_len, rctx->total) - state->bufcnt, 0);
+	sg_init_table(rctx->sgl, 1);
+	sg_set_buf(rctx->sgl, buf, new_len);
+	rctx->sg = rctx->sgl;
+	state->flags |= HASH_FLAGS_SGS_COPIED;
+	rctx->nents = 1;
+	rctx->offset += new_len - state->bufcnt;
+	state->bufcnt = 0;
+	rctx->total = new_len;
+
+	return 0;
 }
 
-static int stm32_hash_update(struct ahash_request *req)
+static int stm32_hash_align_sgs(struct scatterlist *sg,
+				int nbytes, int bs, bool init, bool final,
+				struct stm32_hash_request_ctx *rctx)
 {
-	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_state *state = &rctx->state;
+	struct stm32_hash_dev *hdev = rctx->hdev;
+	struct scatterlist *sg_tmp = sg;
+	int offset = rctx->offset;
+	int new_len;
+	int n = 0;
+	int bufcnt = state->bufcnt;
+	bool secure_ctx = hdev->pdata->context_secured;
+	bool aligned = true;
+
+	if (!sg || !sg->length || !nbytes) {
+		if (bufcnt) {
+			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
+			sg_init_table(rctx->sgl, 1);
+			sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, bufcnt);
+			rctx->sg = rctx->sgl;
+			rctx->nents = 1;
+		}
 
-	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
 		return 0;
+	}
 
-	rctx->total = req->nbytes;
-	rctx->sg = req->src;
-	rctx->offset = 0;
+	new_len = nbytes;
 
-	if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
-		stm32_hash_append_sg(rctx);
+	if (offset)
+		aligned = false;
+
+	if (final) {
+		new_len = DIV_ROUND_UP(new_len, bs) * bs;
+	} else {
+		new_len = (new_len - 1) / bs * bs; // return n block - 1 block
+
+		/*
+		 * Context save in some versions of the HASH IP can only be done
+		 * when the FIFO is ready to get a new block. This implies sending
+		 * n blocks plus a 32-bit word in the first DMA send.
+		 */
+		if (init && secure_ctx) {
+			new_len += sizeof(u32);
+			if (unlikely(new_len > nbytes))
+				new_len -= bs;
+		}
+	}
+
+	if (!new_len)
+		return 0;
+
+	if (nbytes != new_len)
+		aligned = false;
+
+	while (nbytes > 0 && sg_tmp) {
+		n++;
+
+		if (bufcnt) {
+			if (!IS_ALIGNED(bufcnt, bs)) {
+				aligned = false;
+				break;
+			}
+			nbytes -= bufcnt;
+			bufcnt = 0;
+			if (!nbytes)
+				aligned = false;
+
+			continue;
+		}
+
+		if (offset < sg_tmp->length) {
+			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
+				aligned = false;
+				break;
+			}
+
+			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
+				aligned = false;
+				break;
+			}
+		}
+
+		if (offset) {
+			offset -= sg_tmp->length;
+			if (offset < 0) {
+				nbytes += offset;
+				offset = 0;
+			}
+		} else {
+			nbytes -= sg_tmp->length;
+		}
+
+		sg_tmp = sg_next(sg_tmp);
+
+		if (nbytes < 0) {
+			aligned = false;
+			break;
+		}
 	}
 
-	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
+	if (!aligned)
+		return stm32_hash_copy_sgs(rctx, sg, bs, new_len);
+
+	rctx->total = new_len;
+	rctx->offset += new_len;
+	rctx->nents = n;
+
+	if (state->bufcnt) {
+		sg_init_table(rctx->sgl, 2);
+		sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, state->bufcnt);
+		sg_chain(rctx->sgl, 2, sg);
+		rctx->sg = rctx->sgl;
+	} else {
+		rctx->sg = sg;
+	}
+
+	return 0;
 }
 
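stm32_hash_align_sgs() accepts a scatterlist for direct DMA only when every chunk starts 4-byte aligned and the non-final chunks are whole block multiples; anything else goes through the stm32_hash_copy_sgs() bounce buffer. A reduced standalone predicate sketching those rules; this is a loose model under my reading of the loop above, not the driver's exact logic:

#include <stdbool.h>
#include <stdio.h>

struct chunk { unsigned int offset, length; };

/*
 * Reduced model of the aligned-path test: every chunk must begin on a
 * 4-byte boundary (DIN is fed 32-bit words), and all chunks before the
 * last must be an exact multiple of the block size bs.
 */
static bool dma_friendly(const struct chunk *c, int n, unsigned int bs)
{
	for (int i = 0; i < n; i++) {
		if (c[i].offset & 3)
			return false;
		if (i < n - 1 && c[i].length % bs)
			return false;
	}
	return true;
}

int main(void)
{
	struct chunk ok[]  = { { 0, 128 }, { 4, 61 } };
	struct chunk bad[] = { { 2, 64 } };

	printf("%d %d\n", dma_friendly(ok, 2, 64), dma_friendly(bad, 1, 64));
	return 0;
}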
-static int stm32_hash_final(struct ahash_request *req)
+static int stm32_hash_prepare_request(struct ahash_request *req)
 {
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+	struct stm32_hash_state *state = &rctx->state;
+	unsigned int nbytes;
+	int ret, hash_later, bs;
+	bool update = rctx->op & HASH_OP_UPDATE;
+	bool init = !(state->flags & HASH_FLAGS_INIT);
+	bool finup = state->flags & HASH_FLAGS_FINUP;
+	bool final = state->flags & HASH_FLAGS_FINAL;
+
+	if (!hdev->dma_lch || state->flags & HASH_FLAGS_CPU)
+		return 0;
 
-	rctx->flags |= HASH_FLAGS_FINUP;
+	bs = crypto_ahash_blocksize(tfm);
 
-	return stm32_hash_enqueue(req, HASH_OP_FINAL);
+	nbytes = state->bufcnt;
+
+	/*
+	 * In the case of an update request, nbytes must correspond to the
+	 * content of the buffer plus the offset, minus the content of the
+	 * request already in the buffer.
+	 */
+	if (update || finup)
+		nbytes += req->nbytes - rctx->offset;
+
+	dev_dbg(hdev->dev,
+		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
+		__func__, nbytes, bs, rctx->total, rctx->offset, state->bufcnt);
+
+	if (!nbytes)
+		return 0;
+
+	rctx->total = nbytes;
+
+	if (update && req->nbytes && (!IS_ALIGNED(state->bufcnt, bs))) {
+		int len = bs - state->bufcnt % bs;
+
+		if (len > req->nbytes)
+			len = req->nbytes;
+		scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
+					 0, len, 0);
+		state->bufcnt += len;
+		rctx->offset = len;
+	}
+
+	/* copy buffer in a temporary one that is used for sg alignment */
+	if (state->bufcnt)
+		memcpy(hdev->xmit_buf, state->buffer, state->bufcnt);
+
+	ret = stm32_hash_align_sgs(req->src, nbytes, bs, init, final, rctx);
+	if (ret)
+		return ret;
+
+	hash_later = nbytes - rctx->total;
+	if (hash_later < 0)
+		hash_later = 0;
+
+	if (hash_later && hash_later <= state->blocklen) {
+		scatterwalk_map_and_copy(state->buffer,
+					 req->src,
+					 req->nbytes - hash_later,
+					 hash_later, 0);
+
+		state->bufcnt = hash_later;
+	} else {
+		state->bufcnt = 0;
+	}
+
+	if (hash_later > state->blocklen) {
+		/* FIXME: add support of this case */
+		pr_err("Buffer contains more than one block.\n");
+		return -ENOMEM;
+	}
+
+	rctx->total = min(nbytes, rctx->total);
+
+	return 0;
 }
 
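For a non-final update, prepare_request rounds the DMA payload down to whole blocks and keeps the remainder ("hash_later") in state->buffer for the next call; a final transfer is rounded up instead. A worked arithmetic sketch of that split, with example values of my choosing:

#include <stdio.h>

int main(void)
{
	unsigned int bufcnt = 10, req_nbytes = 200, bs = 64;
	unsigned int nbytes = bufcnt + req_nbytes;	/* 210 total pending */
	unsigned int sent = (nbytes - 1) / bs * bs;	/* 192: n - 1 whole blocks */
	unsigned int hash_later = nbytes - sent;	/* 18 bytes kept back */

	printf("send %u by DMA, buffer %u for later\n", sent, hash_later);
	return 0;
}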
-static int stm32_hash_finup(struct ahash_request *req)
+static void stm32_hash_unprepare_request(struct ahash_request *req)
 {
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_state *state = &rctx->state;
 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
-	int err1, err2;
+	u32 *preg = state->hw_context;
+	int swap_reg, i;
 
-	rctx->flags |= HASH_FLAGS_FINUP;
+	if (hdev->dma_lch)
+		dmaengine_terminate_sync(hdev->dma_lch);
 
-	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
-		rctx->flags &= ~HASH_FLAGS_CPU;
+	if (state->flags & HASH_FLAGS_SGS_COPIED)
+		free_pages((unsigned long)sg_virt(rctx->sg), get_order(rctx->sg->length));
 
-	err1 = stm32_hash_update(req);
+	rctx->sg = NULL;
+	rctx->offset = 0;
 
-	if (err1 == -EINPROGRESS || err1 == -EBUSY)
-		return err1;
+	state->flags &= ~(HASH_FLAGS_SGS_COPIED);
 
-	/*
-	 * final() has to be always called to cleanup resources
-	 * even if update() failed, except EINPROGRESS
-	 */
-	err2 = stm32_hash_final(req);
+	if (!(hdev->flags & HASH_FLAGS_INIT))
+		goto pm_runtime;
+
+	state->flags |= HASH_FLAGS_INIT;
+
+	if (stm32_hash_wait_busy(hdev)) {
+		dev_warn(hdev->dev, "Wait busy failed.");
+		return;
+	}
 
-	return err1 ?: err2;
+	swap_reg = hash_swap_reg(rctx);
+
+	if (!hdev->pdata->ux500)
+		*preg++ = stm32_hash_read(hdev, HASH_IMR);
+	*preg++ = stm32_hash_read(hdev, HASH_STR);
+	*preg++ = stm32_hash_read(hdev, HASH_CR);
+	for (i = 0; i < swap_reg; i++)
+		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+
+pm_runtime:
+	pm_runtime_put_autosuspend(hdev->dev);
 }
 
-static int stm32_hash_digest(struct ahash_request *req)
+static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
 {
-	return stm32_hash_init(req) ?: stm32_hash_finup(req);
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct stm32_hash_dev *hdev = ctx->hdev;
+
+	rctx->op = op;
+
+	return stm32_hash_handle_queue(hdev, req);
 }
 
-static int stm32_hash_export(struct ahash_request *req, void *out)
+static int stm32_hash_update(struct ahash_request *req)
 {
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
-	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
-	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
-	u32 *preg;
-	unsigned int i;
+	struct stm32_hash_state *state = &rctx->state;
 
-	pm_runtime_get_sync(hdev->dev);
+	if (!req->nbytes)
+		return 0;
 
-	while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
-		cpu_relax();
-
-	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
-					 sizeof(u32),
-					 GFP_KERNEL);
+	if (state->flags & HASH_FLAGS_CPU) {
+		rctx->total = req->nbytes;
+		rctx->sg = req->src;
+		rctx->offset = 0;
 
-	preg = rctx->hw_context;
+		if ((state->bufcnt + rctx->total < state->blocklen)) {
+			stm32_hash_append_sg(rctx);
+			return 0;
+		}
+	} else { /* DMA mode */
+		if (state->bufcnt + req->nbytes <= state->blocklen) {
+			scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
+						 0, req->nbytes, 0);
+			state->bufcnt += req->nbytes;
+			return 0;
+		}
+	}
 
-	*preg++ = stm32_hash_read(hdev, HASH_IMR);
-	*preg++ = stm32_hash_read(hdev, HASH_STR);
-	*preg++ = stm32_hash_read(hdev, HASH_CR);
-	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
-		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
+}
 
-	pm_runtime_mark_last_busy(hdev->dev);
-	pm_runtime_put_autosuspend(hdev->dev);
+static int stm32_hash_final(struct ahash_request *req)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_state *state = &rctx->state;
 
-	memcpy(out, rctx, sizeof(*rctx));
+	state->flags |= HASH_FLAGS_FINAL;
 
-	return 0;
+	return stm32_hash_enqueue(req, HASH_OP_FINAL);
 }
 
-static int stm32_hash_import(struct ahash_request *req, const void *in)
+static int stm32_hash_finup(struct ahash_request *req)
 {
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
-	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
-	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
-	const u32 *preg = in;
-	u32 reg;
-	unsigned int i;
+	struct stm32_hash_state *state = &rctx->state;
 
-	memcpy(rctx, in, sizeof(*rctx));
+	if (!req->nbytes)
+		goto out;
 
-	preg = rctx->hw_context;
+	state->flags |= HASH_FLAGS_FINUP;
 
-	pm_runtime_get_sync(hdev->dev);
+	if ((state->flags & HASH_FLAGS_CPU)) {
+		rctx->total = req->nbytes;
+		rctx->sg = req->src;
+		rctx->offset = 0;
+	}
 
-	stm32_hash_write(hdev, HASH_IMR, *preg++);
-	stm32_hash_write(hdev, HASH_STR, *preg++);
-	stm32_hash_write(hdev, HASH_CR, *preg);
-	reg = *preg++ | HASH_CR_INIT;
-	stm32_hash_write(hdev, HASH_CR, reg);
+out:
+	return stm32_hash_final(req);
+}
 
-	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
-		stm32_hash_write(hdev, HASH_CSR(i), *preg++);
+static int stm32_hash_digest(struct ahash_request *req)
+{
+	return stm32_hash_init(req) ?: stm32_hash_finup(req);
+}
 
-	pm_runtime_mark_last_busy(hdev->dev);
-	pm_runtime_put_autosuspend(hdev->dev);
+static int stm32_hash_export(struct ahash_request *req, void *out)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+	memcpy(out, &rctx->state, sizeof(rctx->state));
+
+	return 0;
+}
+
+static int stm32_hash_import(struct ahash_request *req, const void *in)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 
-	kfree(rctx->hw_context);
+	stm32_hash_init(req);
+	memcpy(&rctx->state, in, sizeof(rctx->state));
 
 	return 0;
 }
@@ -1034,8 +1486,30 @@ static int stm32_hash_setkey(struct crypto_ahash *tfm,
 	return 0;
 }
 
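export/import now just copy the fixed-size stm32_hash_state blob (flags, byte counters, partial block, saved hardware context) instead of snapshotting MMIO registers, which is why every .statesize below becomes sizeof(struct stm32_hash_state). A minimal standalone model of that contract; the struct shape mirrors the state definition earlier in the patch but the sizes are my reading of the defines:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Shape of the exported state (mirrors struct stm32_hash_state above). */
struct state {
	uint32_t flags;
	uint16_t bufcnt, blocklen;
	uint8_t  buffer[144 + 4];	/* HASH_BUFLEN = SHA3-224 block + 4 */
	uint32_t hw_context[3 + 103];	/* 3 + HASH_CSR_NB_MAX */
};

static void export_state(void *out, const struct state *s) { memcpy(out, s, sizeof(*s)); }
static void import_state(struct state *s, const void *in)  { memcpy(s, in, sizeof(*s)); }

int main(void)
{
	struct state a = { .flags = 1, .bufcnt = 7 }, b;
	unsigned char blob[sizeof(struct state)];

	export_state(blob, &a);		/* e.g. before migrating a request */
	import_state(&b, blob);
	assert(b.bufcnt == 7);
	return 0;
}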
-static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
-				    const char *algs_hmac_name)
+static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
+{
+	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+	const char *name = crypto_tfm_alg_name(tfm);
+	struct crypto_shash *xtfm;
+
+	/* The fallback is only needed on Ux500 */
+	if (!hdev->pdata->ux500)
+		return 0;
+
+	xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(xtfm)) {
+		dev_err(hdev->dev, "failed to allocate %s fallback\n",
+			name);
+		return PTR_ERR(xtfm);
+	}
+	dev_info(hdev->dev, "allocated %s fallback\n", name);
+	ctx->xtfm = xtfm;
+
+	return 0;
+}
+
+static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, u32 algs_flags)
 {
 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 
@@ -1044,54 +1518,48 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
 
 	ctx->keylen = 0;
 
-	if (algs_hmac_name)
-		ctx->flags |= HASH_FLAGS_HMAC;
+	if (algs_flags)
+		ctx->flags |= algs_flags;
 
-	ctx->enginectx.op.do_one_request = stm32_hash_one_request;
-	ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
-	ctx->enginectx.op.unprepare_request = NULL;
-
-	return 0;
+	return stm32_hash_init_fallback(tfm);
 }
 
 static int stm32_hash_cra_init(struct crypto_tfm *tfm)
 {
-	return stm32_hash_cra_init_algs(tfm, NULL);
+	return stm32_hash_cra_init_algs(tfm, 0);
 }
 
-static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
+static int stm32_hash_cra_hmac_init(struct crypto_tfm *tfm)
 {
-	return stm32_hash_cra_init_algs(tfm, "md5");
+	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_HMAC);
 }
 
-static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
+static int stm32_hash_cra_sha3_init(struct crypto_tfm *tfm)
 {
-	return stm32_hash_cra_init_algs(tfm, "sha1");
+	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE);
 }
 
-static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
+static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm)
 {
-	return stm32_hash_cra_init_algs(tfm, "sha224");
+	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE |
+					HASH_FLAGS_HMAC);
 }
 
-static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
+static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
 {
-	return stm32_hash_cra_init_algs(tfm, "sha256");
+	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (ctx->xtfm)
+		crypto_free_shash(ctx->xtfm);
 }
 
 static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
 {
 	struct stm32_hash_dev *hdev = dev_id;
 
-	if (HASH_FLAGS_CPU & hdev->flags) {
-		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
-			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
-			goto finish;
-		}
-	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
-		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
-			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
-			goto finish;
-		}
+	if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
+		hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
+		goto finish;
 	}
 
 	return IRQ_HANDLED;
@@ -1110,8 +1578,6 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
 	reg = stm32_hash_read(hdev, HASH_SR);
 	if (reg & HASH_SR_OUTPUT_READY) {
-		reg &= ~HASH_SR_OUTPUT_READY;
-		stm32_hash_write(hdev, HASH_SR, reg);
 		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
 		/* Disable IT*/
 		stm32_hash_write(hdev, HASH_IMR, 0);
@@ -1121,18 +1587,18 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static struct ahash_alg algs_md5_sha1[] = {
+static struct ahash_engine_alg algs_md5[] = {
 	{
-		.init = stm32_hash_init,
-		.update = stm32_hash_update,
-		.final = stm32_hash_final,
-		.finup = stm32_hash_finup,
-		.digest = stm32_hash_digest,
-		.export = stm32_hash_export,
-		.import = stm32_hash_import,
-		.halg = {
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.halg = {
 			.digestsize = MD5_DIGEST_SIZE,
-			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.statesize = sizeof(struct stm32_hash_state),
 			.base = {
 				.cra_name = "md5",
 				.cra_driver_name = "stm32-md5",
@@ -1141,24 +1607,27 @@ static struct ahash_alg algs_md5_sha1[] = {
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
-				.cra_alignmask = 3,
 				.cra_init = stm32_hash_cra_init,
+				.cra_exit = stm32_hash_cra_exit,
 				.cra_module = THIS_MODULE,
 			}
-		}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
 	},
 	{
-		.init = stm32_hash_init,
-		.update = stm32_hash_update,
-		.final = stm32_hash_final,
-		.finup = stm32_hash_finup,
-		.digest = stm32_hash_digest,
-		.export = stm32_hash_export,
-		.import = stm32_hash_import,
-		.setkey = stm32_hash_setkey,
-		.halg = {
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.setkey = stm32_hash_setkey,
+		.base.halg = {
 			.digestsize = MD5_DIGEST_SIZE,
-			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.statesize = sizeof(struct stm32_hash_state),
 			.base = {
 				.cra_name = "hmac(md5)",
 				.cra_driver_name = "stm32-hmac-md5",
@@ -1167,23 +1636,29 @@ static struct ahash_alg algs_md5_sha1[] = {
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
-				.cra_alignmask = 3,
-				.cra_init = stm32_hash_cra_md5_init,
+				.cra_init = stm32_hash_cra_hmac_init,
+				.cra_exit = stm32_hash_cra_exit,
 				.cra_module = THIS_MODULE,
 			}
-		}
-	},
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	}
+};
+
+static struct ahash_engine_alg algs_sha1[] = {
 	{
-		.init = stm32_hash_init,
-		.update = stm32_hash_update,
-		.final = stm32_hash_final,
-		.finup = stm32_hash_finup,
-		.digest = stm32_hash_digest,
-		.export = stm32_hash_export,
-		.import = stm32_hash_import,
-		.halg = {
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.halg = {
 			.digestsize = SHA1_DIGEST_SIZE,
-			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.statesize = sizeof(struct stm32_hash_state),
 			.base = {
 				.cra_name = "sha1",
 				.cra_driver_name = "stm32-sha1",
@@ -1192,24 +1667,27 @@ static struct ahash_alg algs_md5_sha1[] = {
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = SHA1_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
-				.cra_alignmask = 3,
 				.cra_init = stm32_hash_cra_init,
+				.cra_exit = stm32_hash_cra_exit,
 				.cra_module = THIS_MODULE,
 			}
-		}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
 	},
 	{
-		.init = stm32_hash_init,
-		.update = stm32_hash_update,
-		.final = stm32_hash_final,
-		.finup = stm32_hash_finup,
-		.digest = stm32_hash_digest,
-		.export = stm32_hash_export,
-		.import = stm32_hash_import,
-		.setkey = stm32_hash_setkey,
-		.halg = {
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.setkey = stm32_hash_setkey,
+		.base.halg = {
 			.digestsize = SHA1_DIGEST_SIZE,
-			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.statesize = sizeof(struct stm32_hash_state),
 			.base = {
 				.cra_name = "hmac(sha1)",
 				.cra_driver_name = "stm32-hmac-sha1",
@@ -1218,26 +1696,29 @@ static struct ahash_alg algs_md5_sha1[] = {
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = SHA1_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
-				.cra_alignmask = 3,
-				.cra_init = stm32_hash_cra_sha1_init,
+				.cra_init = stm32_hash_cra_hmac_init,
+				.cra_exit = stm32_hash_cra_exit,
 				.cra_module = THIS_MODULE,
 			}
-		}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
 	},
 };
 
-static struct ahash_alg algs_sha224_sha256[] = {
+static struct ahash_engine_alg algs_sha224[] = {
 	{
-		.init = stm32_hash_init,
-		.update = stm32_hash_update,
-		.final = stm32_hash_final,
-		.finup = stm32_hash_finup,
-		.digest = stm32_hash_digest,
-		.export = stm32_hash_export,
-		.import = stm32_hash_import,
-		.halg = {
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.halg = {
 			.digestsize = SHA224_DIGEST_SIZE,
-			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.statesize = sizeof(struct stm32_hash_state),
 			.base = {
 				.cra_name = "sha224",
 				.cra_driver_name = "stm32-sha224",
@@ -1246,24 +1727,27 @@ static struct ahash_alg algs_sha224_sha256[] = {
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = SHA224_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
-				.cra_alignmask = 3,
 				.cra_init = stm32_hash_cra_init,
+				.cra_exit = stm32_hash_cra_exit,
 				.cra_module = THIS_MODULE,
 			}
-		}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
 	},
 	{
-		.init = stm32_hash_init,
-		.update = stm32_hash_update,
-		.final = stm32_hash_final,
-		.finup = stm32_hash_finup,
-		.digest = stm32_hash_digest,
-		.setkey = stm32_hash_setkey,
-		.export = stm32_hash_export,
-		.import = stm32_hash_import,
-		.halg = {
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.setkey = stm32_hash_setkey,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.halg = {
 			.digestsize = SHA224_DIGEST_SIZE,
-			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.statesize = sizeof(struct stm32_hash_state),
 			.base = {
 				.cra_name = "hmac(sha224)",
 				.cra_driver_name = "stm32-hmac-sha224",
@@ -1272,23 +1756,29 @@ static struct ahash_alg algs_sha224_sha256[] = {
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = SHA224_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
-				.cra_alignmask = 3,
-				.cra_init = stm32_hash_cra_sha224_init,
+				.cra_init = stm32_hash_cra_hmac_init,
+				.cra_exit = stm32_hash_cra_exit,
 				.cra_module = THIS_MODULE,
 			}
-		}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
 	},
+};
+
+static struct ahash_engine_alg algs_sha256[] = {
 	{
-		.init = stm32_hash_init,
-		.update = stm32_hash_update,
-		.final = stm32_hash_final,
-		.finup = stm32_hash_finup,
-		.digest = stm32_hash_digest,
-		.export = stm32_hash_export,
-		.import = stm32_hash_import,
-		.halg = {
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.halg = {
 			.digestsize = SHA256_DIGEST_SIZE,
-			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.statesize = sizeof(struct stm32_hash_state),
 			.base = {
 				.cra_name = "sha256",
 				.cra_driver_name = "stm32-sha256",
@@ -1297,24 +1787,27 @@ static struct ahash_alg algs_sha224_sha256[] = {
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = SHA256_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
-				.cra_alignmask = 3,
 				.cra_init = stm32_hash_cra_init,
+				.cra_exit = stm32_hash_cra_exit,
 				.cra_module = THIS_MODULE,
 			}
-		}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
 	},
 	{
-		.init = stm32_hash_init,
-		.update = stm32_hash_update,
-		.final = stm32_hash_final,
-		.finup = stm32_hash_finup,
-		.digest = stm32_hash_digest,
-		.export = stm32_hash_export,
-		.import = stm32_hash_import,
-		.setkey = stm32_hash_setkey,
-		.halg = {
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.setkey = stm32_hash_setkey,
+		.base.halg = {
 			.digestsize = SHA256_DIGEST_SIZE,
-			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.statesize = sizeof(struct stm32_hash_state),
 			.base = {
 				.cra_name = "hmac(sha256)",
 				.cra_driver_name = "stm32-hmac-sha256",
@@ -1323,12 +1816,363 @@ static struct ahash_alg algs_sha224_sha256[] = {
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = SHA256_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
-				.cra_alignmask = 3,
-				.cra_init = stm32_hash_cra_sha256_init,
+				.cra_init = stm32_hash_cra_hmac_init,
+				.cra_exit = stm32_hash_cra_exit,
 				.cra_module = THIS_MODULE,
 			}
-		}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	},
+};
+
+static struct ahash_engine_alg algs_sha384_sha512[] = {
+	{
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.halg = {
+			.digestsize = SHA384_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_state),
+			.base = {
+				.cra_name = "sha384",
+				.cra_driver_name = "stm32-sha384",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_init = stm32_hash_cra_init,
+				.cra_exit = stm32_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	},
+	{
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.setkey = stm32_hash_setkey,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.halg = {
+			.digestsize = SHA384_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_state),
+			.base = {
+				.cra_name = "hmac(sha384)",
+				.cra_driver_name = "stm32-hmac-sha384",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_init = stm32_hash_cra_hmac_init,
+				.cra_exit = stm32_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	},
+	{
+		.base.init = stm32_hash_init,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.halg = {
+			.digestsize = SHA512_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_state),
+			.base = {
+				.cra_name = "sha512",
+				.cra_driver_name = "stm32-sha512",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_init = stm32_hash_cra_init,
+				.cra_exit = stm32_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	},
+	{
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.setkey = stm32_hash_setkey,
+		.base.halg = {
+			.digestsize = SHA512_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_state),
+			.base = {
+				.cra_name = "hmac(sha512)",
+				.cra_driver_name = "stm32-hmac-sha512",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_init = stm32_hash_cra_hmac_init,
+				.cra_exit = stm32_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	},
+};
+
+static struct ahash_engine_alg algs_sha3[] = {
+	{
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.halg = {
+			.digestsize = SHA3_224_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_state),
+			.base = {
+				.cra_name = "sha3-224",
+				.cra_driver_name = "stm32-sha3-224",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA3_224_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_init = stm32_hash_cra_sha3_init,
+				.cra_exit = stm32_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	},
+	{
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.setkey = stm32_hash_setkey,
+		.base.halg = {
+			.digestsize = SHA3_224_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_state),
+			.base = {
+				.cra_name = "hmac(sha3-224)",
+				.cra_driver_name = "stm32-hmac-sha3-224",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA3_224_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_init = stm32_hash_cra_sha3_hmac_init,
+				.cra_exit = stm32_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	},
+	{
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.halg = {
+			.digestsize = SHA3_256_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_state),
+			.base = {
+				.cra_name = "sha3-256",
+				.cra_driver_name = "stm32-sha3-256",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA3_256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_init = stm32_hash_cra_sha3_init,
+				.cra_exit = stm32_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	},
+	{
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.setkey = stm32_hash_setkey,
+		.base.halg = {
+			.digestsize = SHA3_256_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_state),
+			.base = {
+				.cra_name = "hmac(sha3-256)",
+				.cra_driver_name = "stm32-hmac-sha3-256",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA3_256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_init = stm32_hash_cra_sha3_hmac_init,
+				.cra_exit = stm32_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	},
+	{
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.halg = {
+			.digestsize = SHA3_384_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_state),
+			.base = {
+				.cra_name = "sha3-384",
+				.cra_driver_name = "stm32-sha3-384",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA3_384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_init = stm32_hash_cra_sha3_init,
+				.cra_exit = stm32_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
 	},
+	{
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.setkey = stm32_hash_setkey,
+		.base.halg = {
+			.digestsize = SHA3_384_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_state),
+			.base = {
+				.cra_name = "hmac(sha3-384)",
+				.cra_driver_name = "stm32-hmac-sha3-384",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA3_384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_init = stm32_hash_cra_sha3_hmac_init,
+				.cra_exit = stm32_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	},
+	{
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.halg = {
+			.digestsize = SHA3_512_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_state),
+			.base = {
+				.cra_name = "sha3-512",
+				.cra_driver_name = "stm32-sha3-512",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA3_512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_init = stm32_hash_cra_sha3_init,
+				.cra_exit = stm32_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	},
+	{
+		.base.init = stm32_hash_init,
+		.base.update = stm32_hash_update,
+		.base.final = stm32_hash_final,
+		.base.finup = stm32_hash_finup,
+		.base.digest = stm32_hash_digest,
+		.base.export = stm32_hash_export,
+		.base.import = stm32_hash_import,
+		.base.setkey = stm32_hash_setkey,
+		.base.halg = {
+			.digestsize = SHA3_512_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_state),
+			.base = {
+				.cra_name = "hmac(sha3-512)",
+				.cra_driver_name = "stm32-hmac-sha3-512",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA3_512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_init = stm32_hash_cra_sha3_hmac_init,
+				.cra_exit = stm32_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.op = {
+			.do_one_request = stm32_hash_one_request,
+		},
+	}
 };
 
 static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
@@ -1338,7 +2182,7 @@ static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
 
 	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
 		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
-			err = crypto_register_ahash(
+			err = crypto_engine_register_ahash(
 				&hdev->pdata->algs_info[i].algs_list[j]);
 			if (err)
 				goto err_algs;
@@ -1350,7 +2194,7 @@ err_algs:
 	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
 	for (; i--; ) {
 		for (; j--;)
-			crypto_unregister_ahash(
+			crypto_engine_unregister_ahash(
 				&hdev->pdata->algs_info[i].algs_list[j]);
 	}
 
@@ -1363,50 +2207,115 @@ static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
 
 	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
 		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
-			crypto_unregister_ahash(
+			crypto_engine_unregister_ahash(
 				&hdev->pdata->algs_info[i].algs_list[j]);
 	}
 
 	return 0;
 }
 
+static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
+	{
+		.algs_list = algs_sha1,
+		.size = ARRAY_SIZE(algs_sha1),
+	},
+	{
+		.algs_list = algs_sha256,
+		.size = ARRAY_SIZE(algs_sha256),
+	},
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
+	.alg_shift = 7,
+	.algs_info = stm32_hash_algs_info_ux500,
+	.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_ux500),
+	.broken_emptymsg = true,
+	.ux500 = true,
+};
+
 static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
 	{
-		.algs_list = algs_md5_sha1,
-		.size = ARRAY_SIZE(algs_md5_sha1),
+		.algs_list = algs_md5,
+		.size = ARRAY_SIZE(algs_md5),
+	},
+	{
+		.algs_list = algs_sha1,
+		.size = ARRAY_SIZE(algs_sha1),
 	},
 };
 
 static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
+	.alg_shift = 7,
 	.algs_info = stm32_hash_algs_info_stm32f4,
 	.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
+	.has_sr = true,
+	.has_mdmat = true,
 };
 
 static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
 	{
-		.algs_list = algs_md5_sha1,
-		.size = ARRAY_SIZE(algs_md5_sha1),
+		.algs_list = algs_md5,
+		.size = ARRAY_SIZE(algs_md5),
+	},
+	{
+		.algs_list = algs_sha1,
+		.size = ARRAY_SIZE(algs_sha1),
 	},
 	{
-		.algs_list = algs_sha224_sha256,
-		.size = ARRAY_SIZE(algs_sha224_sha256),
+		.algs_list = algs_sha224,
+		.size = ARRAY_SIZE(algs_sha224),
+	},
+	{
+		.algs_list = algs_sha256,
+		.size = ARRAY_SIZE(algs_sha256),
 	},
 };
 
 static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
+	.alg_shift = 7,
 	.algs_info = stm32_hash_algs_info_stm32f7,
 	.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
+	.has_sr = true,
+	.has_mdmat = true,
 };
 
-static const struct of_device_id stm32_hash_of_match[] = {
+static struct stm32_hash_algs_info stm32_hash_algs_info_stm32mp13[] = {
 	{
-		.compatible = "st,stm32f456-hash",
-		.data = &stm32_hash_pdata_stm32f4,
+		.algs_list = algs_sha1,
+		.size = ARRAY_SIZE(algs_sha1),
 	},
 	{
-		.compatible = "st,stm32f756-hash",
-		.data = &stm32_hash_pdata_stm32f7,
+		.algs_list = algs_sha224,
+		.size = ARRAY_SIZE(algs_sha224),
 	},
+	{
+		.algs_list = algs_sha256,
+		.size = ARRAY_SIZE(algs_sha256),
+	},
+	{
+		.algs_list = algs_sha384_sha512,
+		.size = ARRAY_SIZE(algs_sha384_sha512),
+	},
+	{
+		.algs_list = algs_sha3,
+		.size = ARRAY_SIZE(algs_sha3),
+	},
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = {
+	.alg_shift = 17,
+	.algs_info = stm32_hash_algs_info_stm32mp13,
+	.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32mp13),
+	.has_sr = true,
+	.has_mdmat = true,
+	.context_secured = true,
+};
+
+static const struct of_device_id stm32_hash_of_match[] = {
+	{ .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 },
+	{ .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 },
+	{ .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 },
+	{ .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 },
 	{},
 };
 
@@ -1421,12 +2330,6 @@ static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
 		return -EINVAL;
 	}
 
-	if (of_property_read_u32(dev->of_node, "dma-maxburst",
-				 &hdev->dma_maxburst)) {
-		dev_info(dev, "dma-maxburst not specified, using 0\n");
-		hdev->dma_maxburst = 0;
-	}
-
 	return 0;
 }
 
@@ -1441,8 +2344,7 @@ static int stm32_hash_probe(struct platform_device *pdev)
 	if (!hdev)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	hdev->io_base = devm_ioremap_resource(dev, res);
+	hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
 	if (IS_ERR(hdev->io_base))
 		return PTR_ERR(hdev->io_base);
 
@@ -1452,16 +2354,23 @@ static int stm32_hash_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0)
+	irq = platform_get_irq_optional(pdev, 0);
+	if (irq < 0 && irq != -ENXIO)
 		return irq;
 
-	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
-					stm32_hash_irq_thread, IRQF_ONESHOT,
-					dev_name(dev), hdev);
-	if (ret) {
-		dev_err(dev, "Cannot grab IRQ\n");
-		return ret;
+	if (irq > 0) {
+		ret = devm_request_threaded_irq(dev, irq,
+						stm32_hash_irq_handler,
+						stm32_hash_irq_thread,
+						IRQF_ONESHOT,
+						dev_name(dev), hdev);
+		if (ret) {
+			dev_err(dev, "Cannot grab IRQ\n");
+			return ret;
+		}
+	} else {
+		dev_info(dev, "No IRQ, use polling mode\n");
+		hdev->polled = true;
 	}
 
 	hdev->clk = devm_clk_get(&pdev->dev, NULL);
@@ -1503,9 +2412,11 @@ static int stm32_hash_probe(struct platform_device *pdev)
 	case 0:
 		break;
 	case -ENOENT:
-		dev_dbg(dev, "DMA mode not available\n");
+	case -ENODEV:
+		dev_info(dev, "DMA mode not available\n");
 		break;
 	default:
+		dev_err(dev, "DMA init error %d\n", ret);
 		goto err_dma;
 	}
 
@@ -1524,7 +2435,11 @@ static int stm32_hash_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_engine_start;
 
-	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
+	if (hdev->pdata->ux500)
+		/* FIXME: implement DMA mode for Ux500 */
+		hdev->dma_mode = 0;
+	else
+		hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK;
 
 	/* Register algos */
 	ret = stm32_hash_register_algs(hdev);
@@ -1557,18 +2472,12 @@ err_reset:
 	return ret;
 }
 
-static int stm32_hash_remove(struct platform_device *pdev)
+static void stm32_hash_remove(struct platform_device *pdev)
 {
-	struct stm32_hash_dev *hdev;
+	struct stm32_hash_dev *hdev = platform_get_drvdata(pdev);
 	int ret;
 
-	hdev = platform_get_drvdata(pdev);
-	if (!hdev)
-		return -ENODEV;
-
-	ret = pm_runtime_resume_and_get(hdev->dev);
-	if (ret < 0)
-		return ret;
+	ret = pm_runtime_get_sync(hdev->dev);
 
 	stm32_hash_unregister_algs(hdev);
 
@@ -1584,9 +2493,8 @@ static int stm32_hash_remove(struct platform_device *pdev)
 	pm_runtime_disable(hdev->dev);
 	pm_runtime_put_noidle(hdev->dev);
 
-	clk_disable_unprepare(hdev->clk);
-
-	return 0;
+	if (ret >= 0)
+		clk_disable_unprepare(hdev->clk);
 }
 
 #ifdef CONFIG_PM
@@ -1633,6 +2541,6 @@ static struct platform_driver stm32_hash_driver = {
 
 module_platform_driver(stm32_hash_driver);
 
-MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
+MODULE_DESCRIPTION("STM32 SHA1/SHA2/SHA3 & MD5 (HMAC) hw accelerator driver");
 MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
 MODULE_LICENSE("GPL v2");
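Note on the registration change above: switching from crypto_register_ahash() to crypto_engine_register_ahash() is invisible to consumers, which still reach the driver through the generic ahash API by algorithm name and priority. A minimal sketch of such a consumer follows; it is illustrative only and not part of this diff, and the helper name and the kmalloc'd bounce buffer are our own assumptions:

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical helper: hash 'len' bytes of 'data' into 'out' (out must
 * hold SHA256_DIGEST_SIZE bytes). "sha256" resolves to "stm32-sha256"
 * when this driver provides the highest-priority implementation.
 */
static int example_sha256(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	void *buf;
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	/* The input is fed through a scatterlist, so it must not live on
	 * the stack; copy it into a kmalloc'd buffer to stay DMA-safe. */
	buf = kmemdup(data, len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto free_req;
	}
	sg_init_one(&sg, buf, len);

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* crypto_wait_req() hides the CRYPTO_ALG_ASYNC completion, so the
	 * same code works whether the request is served through this
	 * driver's engine queue or by a software fallback. */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	kfree(buf);
free_req:
	ahash_request_free(req);
free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}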
