Diffstat (limited to 'drivers/crypto/stm32/stm32-cryp.c')
-rw-r--r--   drivers/crypto/stm32/stm32-cryp.c | 2491
1 file changed, 1576 insertions(+), 915 deletions(-)
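
The patch below converts the driver from the legacy ablkcipher API to skcipher, adds DMA support, and reuses the IP on Ux500, where the AES key must be bit-mirrored and word-swizzled before being written to the key registers. As a point of reference for the ux500_swap_bits_in_byte() hunk further down, here is a standalone sketch (plain C, not part of the patch; mirror_bits() is a hypothetical name) computing the same bit mirror one bit at a time, handy for sanity-checking the masked-shift version:

#include <stdint.h>
#include <stdio.h>

/* Reference model: mirror bits 0..7 of a byte one bit at a time.
 * ux500_swap_bits_in_byte() in the hunk below computes the same
 * permutation with masked shifts, one nibble at a time. */
static uint8_t mirror_bits(uint8_t b)
{
	uint8_t r = 0;
	int i;

	for (i = 0; i < 8; i++)
		r |= (uint8_t)(((b >> i) & 1u) << (7 - i));
	return r;
}

int main(void)
{
	/* 0x01 -> 0x80 (bit 0 moves to bit 7), 0xc0 -> 0x03 */
	printf("%02x %02x\n", mirror_bits(0x01), mirror_bits(0xc0));
	return 0;
}
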
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 23b0b7bd64c7..5e82e8a1f71a 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -1,24 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) STMicroelectronics SA 2017 * Author: Fabien Dessenne <fabien.dessenne@st.com> - * License terms: GNU General Public License (GPL), version 2 + * Ux500 support taken from snippets in the old Ux500 cryp driver */ +#include <crypto/aes.h> +#include <crypto/engine.h> +#include <crypto/internal/aead.h> +#include <crypto/internal/des.h> +#include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> +#include <linux/bottom_half.h> #include <linux/clk.h> #include <linux/delay.h> -#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/err.h> #include <linux/iopoll.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> - -#include <crypto/aes.h> -#include <crypto/des.h> -#include <crypto/engine.h> -#include <crypto/scatterwalk.h> -#include <crypto/internal/aead.h> +#include <linux/string.h> #define DRIVER_NAME "stm32-cryp" @@ -36,7 +43,8 @@ /* Mode mask = bits [15..0] */ #define FLG_MODE_MASK GENMASK(15, 0) /* Bit [31..16] status */ -#define FLG_CCM_PADDED_WA BIT(16) +#define FLG_IN_OUT_DMA BIT(16) +#define FLG_HEADER_DMA BIT(17) /* Registers */ #define CRYP_CR 0x00000000 @@ -62,6 +70,29 @@ #define CRYP_CSGCMCCM0R 0x00000050 #define CRYP_CSGCM0R 0x00000070 +#define UX500_CRYP_CR 0x00000000 +#define UX500_CRYP_SR 0x00000004 +#define UX500_CRYP_DIN 0x00000008 +#define UX500_CRYP_DINSIZE 0x0000000C +#define UX500_CRYP_DOUT 0x00000010 +#define UX500_CRYP_DOUSIZE 0x00000014 +#define UX500_CRYP_DMACR 0x00000018 +#define UX500_CRYP_IMSC 0x0000001C +#define UX500_CRYP_RIS 0x00000020 +#define UX500_CRYP_MIS 0x00000024 +#define UX500_CRYP_K1L 0x00000028 +#define UX500_CRYP_K1R 0x0000002C +#define UX500_CRYP_K2L 0x00000030 +#define UX500_CRYP_K2R 0x00000034 +#define UX500_CRYP_K3L 0x00000038 +#define UX500_CRYP_K3R 0x0000003C +#define UX500_CRYP_K4L 0x00000040 +#define UX500_CRYP_K4R 0x00000044 +#define UX500_CRYP_IV0L 0x00000048 +#define UX500_CRYP_IV0R 0x0000004C +#define UX500_CRYP_IV1L 0x00000050 +#define UX500_CRYP_IV1R 0x00000054 + /* Registers values */ #define CR_DEC_NOT_ENC 0x00000004 #define CR_TDES_ECB 0x00000000 @@ -71,7 +102,8 @@ #define CR_AES_ECB 0x00000020 #define CR_AES_CBC 0x00000028 #define CR_AES_CTR 0x00000030 -#define CR_AES_KP 0x00000038 +#define CR_AES_KP 0x00000038 /* Not on Ux500 */ +#define CR_AES_XTS 0x00000038 /* Only on Ux500 */ #define CR_AES_GCM 0x00080000 #define CR_AES_CCM 0x00080008 #define CR_AES_UNKNOWN 0xFFFFFFFF @@ -83,6 +115,8 @@ #define CR_KEY128 0x00000000 #define CR_KEY192 0x00000100 #define CR_KEY256 0x00000200 +#define CR_KEYRDEN 0x00000400 /* Only on Ux500 */ +#define CR_KSE 0x00000800 /* Only on Ux500 */ #define CR_FFLUSH 0x00004000 #define CR_CRYPEN 0x00008000 #define CR_PH_INIT 0x00000000 @@ -92,8 +126,12 @@ #define CR_PH_MASK 0x00030000 #define CR_NBPBL_SHIFT 20 -#define SR_BUSY 0x00000010 -#define SR_OFNE 0x00000004 +#define SR_IFNF BIT(1) +#define SR_OFNE BIT(2) +#define SR_BUSY BIT(8) + +#define DMACR_DIEN BIT(0) +#define DMACR_DOEN BIT(1) #define IMSCR_IN BIT(0) #define IMSCR_OUT BIT(1) @@ -104,20 +142,43 @@ /* Misc */ #define AES_BLOCK_32 
(AES_BLOCK_SIZE / sizeof(u32)) #define GCM_CTR_INIT 2 -#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset) -#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset) -#define CRYP_AUTOSUSPEND_DELAY 50 +#define CRYP_AUTOSUSPEND_DELAY 50 + +#define CRYP_DMA_BURST_REG 4 + +enum stm32_dma_mode { + NO_DMA, + DMA_PLAIN_SG, + DMA_NEED_SG_TRUNC +}; struct stm32_cryp_caps { - bool swap_final; - bool padding_wa; + bool aeads_support; + bool linear_aes_key; + bool kp_mode; + bool iv_protection; + bool swap_final; + bool padding_wa; + u32 cr; + u32 sr; + u32 din; + u32 dout; + u32 dmacr; + u32 imsc; + u32 mis; + u32 k1l; + u32 k1r; + u32 k3r; + u32 iv0l; + u32 iv0r; + u32 iv1l; + u32 iv1r; }; struct stm32_cryp_ctx { - struct crypto_engine_ctx enginectx; struct stm32_cryp *cryp; int keylen; - u32 key[AES_KEYSIZE_256 / sizeof(u32)]; + __be32 key[AES_KEYSIZE_256 / sizeof(u32)]; unsigned long flags; }; @@ -129,6 +190,7 @@ struct stm32_cryp { struct list_head list; struct device *dev; void __iomem *regs; + phys_addr_t phys_base; struct clk *clk; unsigned long flags; u32 irq_status; @@ -137,33 +199,34 @@ struct stm32_cryp { struct crypto_engine *engine; - struct mutex lock; /* protects req / areq */ - struct ablkcipher_request *req; + struct skcipher_request *req; struct aead_request *areq; size_t authsize; size_t hw_blocksize; - size_t total_in; - size_t total_in_save; - size_t total_out; - size_t total_out_save; + size_t payload_in; + size_t header_in; + size_t payload_out; + /* DMA process fields */ struct scatterlist *in_sg; + struct scatterlist *header_sg; struct scatterlist *out_sg; - struct scatterlist *out_sg_save; - - struct scatterlist in_sgl; - struct scatterlist out_sgl; - bool sgs_copied; + size_t in_sg_len; + size_t header_sg_len; + size_t out_sg_len; + struct completion dma_completion; - int in_sg_len; - int out_sg_len; + struct dma_chan *dma_lch_in; + struct dma_chan *dma_lch_out; + enum stm32_dma_mode dma_mode; + /* IT process fields */ struct scatter_walk in_walk; struct scatter_walk out_walk; - u32 last_ctr[4]; + __be32 last_ctr[4]; u32 gcm_ctr; }; @@ -241,27 +304,59 @@ static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp) { u32 status; - return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status, + return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->sr, status, !(status & SR_BUSY), 10, 100000); } +static inline void stm32_cryp_enable(struct stm32_cryp *cryp) +{ + writel_relaxed(readl_relaxed(cryp->regs + cryp->caps->cr) | CR_CRYPEN, + cryp->regs + cryp->caps->cr); +} + static inline int stm32_cryp_wait_enable(struct stm32_cryp *cryp) { u32 status; - return readl_relaxed_poll_timeout(cryp->regs + CRYP_CR, status, + return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->cr, status, !(status & CR_CRYPEN), 10, 100000); } +static inline int stm32_cryp_wait_input(struct stm32_cryp *cryp) +{ + u32 status; + + return readl_relaxed_poll_timeout_atomic(cryp->regs + cryp->caps->sr, status, + status & SR_IFNF, 1, 10); +} + static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp) { u32 status; - return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status, - status & SR_OFNE, 10, 100000); + return readl_relaxed_poll_timeout_atomic(cryp->regs + cryp->caps->sr, status, + status & SR_OFNE, 1, 10); +} + +static inline void stm32_cryp_key_read_enable(struct stm32_cryp *cryp) +{ + writel_relaxed(readl_relaxed(cryp->regs + cryp->caps->cr) | CR_KEYRDEN, + cryp->regs + cryp->caps->cr); +} + +static inline void stm32_cryp_key_read_disable(struct 
stm32_cryp *cryp) +{ + writel_relaxed(readl_relaxed(cryp->regs + cryp->caps->cr) & ~CR_KEYRDEN, + cryp->regs + cryp->caps->cr); } +static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp); +static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp); +static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp); static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp); +static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err); +static int stm32_cryp_dma_start(struct stm32_cryp *cryp); +static int stm32_cryp_it_start(struct stm32_cryp *cryp); static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) { @@ -283,114 +378,124 @@ static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) return cryp; } -static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total, - size_t align) +static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, __be32 *iv) { - int len = 0; - - if (!total) - return 0; - - if (!IS_ALIGNED(total, align)) - return -EINVAL; - - while (sg) { - if (!IS_ALIGNED(sg->offset, sizeof(u32))) - return -EINVAL; + if (!iv) + return; - if (!IS_ALIGNED(sg->length, align)) - return -EINVAL; + stm32_cryp_write(cryp, cryp->caps->iv0l, be32_to_cpu(*iv++)); + stm32_cryp_write(cryp, cryp->caps->iv0r, be32_to_cpu(*iv++)); - len += sg->length; - sg = sg_next(sg); + if (is_aes(cryp)) { + stm32_cryp_write(cryp, cryp->caps->iv1l, be32_to_cpu(*iv++)); + stm32_cryp_write(cryp, cryp->caps->iv1r, be32_to_cpu(*iv++)); } - - if (len != total) - return -EINVAL; - - return 0; -} - -static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp) -{ - int ret; - - ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in, - cryp->hw_blocksize); - if (ret) - return ret; - - ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out, - cryp->hw_blocksize); - - return ret; } -static void sg_copy_buf(void *buf, struct scatterlist *sg, - unsigned int start, unsigned int nbytes, int out) +static void stm32_cryp_get_iv(struct stm32_cryp *cryp) { - struct scatter_walk walk; + struct skcipher_request *req = cryp->req; + __be32 *tmp = (void *)req->iv; - if (!nbytes) + if (!tmp) return; - scatterwalk_start(&walk, sg); - scatterwalk_advance(&walk, start); - scatterwalk_copychunks(buf, &walk, nbytes, out); - scatterwalk_done(&walk, out, 0); -} - -static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp) -{ - void *buf_in, *buf_out; - int pages, total_in, total_out; - - if (!stm32_cryp_check_io_aligned(cryp)) { - cryp->sgs_copied = 0; - return 0; - } + if (cryp->caps->iv_protection) + stm32_cryp_key_read_enable(cryp); - total_in = ALIGN(cryp->total_in, cryp->hw_blocksize); - pages = total_in ? get_order(total_in) : 1; - buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); + *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0l)); + *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0r)); - total_out = ALIGN(cryp->total_out, cryp->hw_blocksize); - pages = total_out ? 
get_order(total_out) : 1; - buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); - - if (!buf_in || !buf_out) { - dev_err(cryp->dev, "Can't allocate pages when unaligned\n"); - cryp->sgs_copied = 0; - return -EFAULT; + if (is_aes(cryp)) { + *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1l)); + *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1r)); } - sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0); - - sg_init_one(&cryp->in_sgl, buf_in, total_in); - cryp->in_sg = &cryp->in_sgl; - cryp->in_sg_len = 1; - - sg_init_one(&cryp->out_sgl, buf_out, total_out); - cryp->out_sg_save = cryp->out_sg; - cryp->out_sg = &cryp->out_sgl; - cryp->out_sg_len = 1; - - cryp->sgs_copied = 1; - - return 0; -} - -static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv) -{ - if (!iv) - return; - - stm32_cryp_write(cryp, CRYP_IV0LR, cpu_to_be32(*iv++)); - stm32_cryp_write(cryp, CRYP_IV0RR, cpu_to_be32(*iv++)); - - if (is_aes(cryp)) { - stm32_cryp_write(cryp, CRYP_IV1LR, cpu_to_be32(*iv++)); - stm32_cryp_write(cryp, CRYP_IV1RR, cpu_to_be32(*iv++)); + if (cryp->caps->iv_protection) + stm32_cryp_key_read_disable(cryp); +} + +/** + * ux500_swap_bits_in_byte() - mirror the bits in a byte + * @b: the byte to be mirrored + * + * The bits are swapped the following way: + * Byte b include bits 0-7, nibble 1 (n1) include bits 0-3 and + * nibble 2 (n2) bits 4-7. + * + * Nibble 1 (n1): + * (The "old" (moved) bit is replaced with a zero) + * 1. Move bit 6 and 7, 4 positions to the left. + * 2. Move bit 3 and 5, 2 positions to the left. + * 3. Move bit 1-4, 1 position to the left. + * + * Nibble 2 (n2): + * 1. Move bit 0 and 1, 4 positions to the right. + * 2. Move bit 2 and 4, 2 positions to the right. + * 3. Move bit 3-6, 1 position to the right. + * + * Combine the two nibbles to a complete and swapped byte. + */ +static inline u8 ux500_swap_bits_in_byte(u8 b) +{ +#define R_SHIFT_4_MASK 0xc0 /* Bits 6 and 7, right shift 4 */ +#define R_SHIFT_2_MASK 0x28 /* (After right shift 4) Bits 3 and 5, + right shift 2 */ +#define R_SHIFT_1_MASK 0x1e /* (After right shift 2) Bits 1-4, + right shift 1 */ +#define L_SHIFT_4_MASK 0x03 /* Bits 0 and 1, left shift 4 */ +#define L_SHIFT_2_MASK 0x14 /* (After left shift 4) Bits 2 and 4, + left shift 2 */ +#define L_SHIFT_1_MASK 0x78 /* (After left shift 1) Bits 3-6, + left shift 1 */ + + u8 n1; + u8 n2; + + /* Swap most significant nibble */ + /* Right shift 4, bits 6 and 7 */ + n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4)); + /* Right shift 2, bits 3 and 5 */ + n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2)); + /* Right shift 1, bits 1-4 */ + n1 = (n1 & R_SHIFT_1_MASK) >> 1; + + /* Swap least significant nibble */ + /* Left shift 4, bits 0 and 1 */ + n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4)); + /* Left shift 2, bits 2 and 4 */ + n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2)); + /* Left shift 1, bits 3-6 */ + n2 = (n2 & L_SHIFT_1_MASK) << 1; + + return n1 | n2; +} + +/** + * ux500_swizzle_key() - Shuffle around words and bits in the AES key + * @in: key to swizzle + * @out: swizzled key + * @len: length of key, in bytes + * + * This "key swizzling procedure" is described in the examples in the + * DB8500 design specification. There is no real description of why + * the bits have been arranged like this in the hardware. 
+ */ +static inline void ux500_swizzle_key(const u8 *in, u8 *out, u32 len) +{ + int i = 0; + int bpw = sizeof(u32); + int j; + int index = 0; + + j = len - bpw; + while (j >= 0) { + for (i = 0; i < bpw; i++) { + index = len - j - bpw + i; + out[j + i] = + ux500_swap_bits_in_byte(in[index]); + } + j -= bpw; } } @@ -400,14 +505,33 @@ static void stm32_cryp_hw_write_key(struct stm32_cryp *c) int r_id; if (is_des(c)) { - stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0])); - stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1])); - } else { - r_id = CRYP_K3RR; - for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4) - stm32_cryp_write(c, r_id, - cpu_to_be32(c->ctx->key[i - 1])); + stm32_cryp_write(c, c->caps->k1l, be32_to_cpu(c->ctx->key[0])); + stm32_cryp_write(c, c->caps->k1r, be32_to_cpu(c->ctx->key[1])); + return; + } + + /* + * On the Ux500 the AES key is considered as a single bit sequence + * of 128, 192 or 256 bits length. It is written linearly into the + * registers from K1L and down, and need to be processed to become + * a proper big-endian bit sequence. + */ + if (is_aes(c) && c->caps->linear_aes_key) { + u32 tmpkey[8]; + + ux500_swizzle_key((u8 *)c->ctx->key, + (u8 *)tmpkey, c->ctx->keylen); + + r_id = c->caps->k1l; + for (i = 0; i < c->ctx->keylen / sizeof(u32); i++, r_id += 4) + stm32_cryp_write(c, r_id, tmpkey[i]); + + return; } + + r_id = c->caps->k3r; + for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4) + stm32_cryp_write(c, r_id, be32_to_cpu(c->ctx->key[i - 1])); } static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp) @@ -452,7 +576,7 @@ static unsigned int stm32_cryp_get_input_text_len(struct stm32_cryp *cryp) static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg) { int ret; - u32 iv[4]; + __be32 iv[4]; /* Phase 1 : init */ memcpy(iv, cryp->areq->iv, 12); @@ -460,20 +584,103 @@ static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg) cryp->gcm_ctr = GCM_CTR_INIT; stm32_cryp_hw_write_iv(cryp, iv); - stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN); + stm32_cryp_write(cryp, cryp->caps->cr, cfg | CR_PH_INIT | CR_CRYPEN); /* Wait for end of processing */ ret = stm32_cryp_wait_enable(cryp); - if (ret) + if (ret) { dev_err(cryp->dev, "Timeout (gcm init)\n"); + return ret; + } - return ret; + /* Prepare next phase */ + if (cryp->areq->assoclen) { + cfg |= CR_PH_HEADER; + stm32_cryp_write(cryp, cryp->caps->cr, cfg); + } else if (stm32_cryp_get_input_text_len(cryp)) { + cfg |= CR_PH_PAYLOAD; + stm32_cryp_write(cryp, cryp->caps->cr, cfg); + } + + return 0; +} + +static void stm32_crypt_gcmccm_end_header(struct stm32_cryp *cryp) +{ + u32 cfg; + int err; + + /* Check if whole header written */ + if (!cryp->header_in) { + /* Wait for completion */ + err = stm32_cryp_wait_busy(cryp); + if (err) { + dev_err(cryp->dev, "Timeout (gcm/ccm header)\n"); + stm32_cryp_write(cryp, cryp->caps->imsc, 0); + stm32_cryp_finish_req(cryp, err); + return; + } + + if (stm32_cryp_get_input_text_len(cryp)) { + /* Phase 3 : payload */ + cfg = stm32_cryp_read(cryp, cryp->caps->cr); + cfg &= ~CR_CRYPEN; + stm32_cryp_write(cryp, cryp->caps->cr, cfg); + + cfg &= ~CR_PH_MASK; + cfg |= CR_PH_PAYLOAD | CR_CRYPEN; + stm32_cryp_write(cryp, cryp->caps->cr, cfg); + } else { + /* + * Phase 4 : tag. 
+ * Nothing to read, nothing to write, caller have to + * end request + */ + } + } +} + +static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp) +{ + size_t written; + size_t len; + u32 alen = cryp->areq->assoclen; + u32 block[AES_BLOCK_32] = {0}; + u8 *b8 = (u8 *)block; + + if (alen <= 65280) { + /* Write first u32 of B1 */ + b8[0] = (alen >> 8) & 0xFF; + b8[1] = alen & 0xFF; + len = 2; + } else { + /* Build the two first u32 of B1 */ + b8[0] = 0xFF; + b8[1] = 0xFE; + b8[2] = (alen & 0xFF000000) >> 24; + b8[3] = (alen & 0x00FF0000) >> 16; + b8[4] = (alen & 0x0000FF00) >> 8; + b8[5] = alen & 0x000000FF; + len = 6; + } + + written = min_t(size_t, AES_BLOCK_SIZE - len, alen); + + memcpy_from_scatterwalk((char *)block + len, &cryp->in_walk, written); + + writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32); + + cryp->header_in -= written; + + stm32_crypt_gcmccm_end_header(cryp); } static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) { int ret; - u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE]; + u32 iv_32[AES_BLOCK_32], b0_32[AES_BLOCK_32]; + u8 *iv = (u8 *)iv_32, *b0 = (u8 *)b0_32; + __be32 *bd; u32 *d; unsigned int i, textlen; @@ -481,7 +688,7 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE); memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1); iv[AES_BLOCK_SIZE - 1] = 1; - stm32_cryp_hw_write_iv(cryp, (u32 *)iv); + stm32_cryp_hw_write_iv(cryp, (__be32 *)iv); /* Build B0 */ memcpy(b0, iv, AES_BLOCK_SIZE); @@ -497,23 +704,40 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF; /* Enable HW */ - stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN); + stm32_cryp_write(cryp, cryp->caps->cr, cfg | CR_PH_INIT | CR_CRYPEN); /* Write B0 */ d = (u32 *)b0; + bd = (__be32 *)b0; for (i = 0; i < AES_BLOCK_32; i++) { + u32 xd = d[i]; + if (!cryp->caps->padding_wa) - *d = cpu_to_be32(*d); - stm32_cryp_write(cryp, CRYP_DIN, *d++); + xd = be32_to_cpu(bd[i]); + stm32_cryp_write(cryp, cryp->caps->din, xd); } /* Wait for end of processing */ ret = stm32_cryp_wait_enable(cryp); - if (ret) + if (ret) { dev_err(cryp->dev, "Timeout (ccm init)\n"); + return ret; + } - return ret; + /* Prepare next phase */ + if (cryp->areq->assoclen) { + cfg |= CR_PH_HEADER | CR_CRYPEN; + stm32_cryp_write(cryp, cryp->caps->cr, cfg); + + /* Write first (special) block (may move to next phase [payload]) */ + stm32_cryp_write_ccm_first_header(cryp); + } else if (stm32_cryp_get_input_text_len(cryp)) { + cfg |= CR_PH_PAYLOAD; + stm32_cryp_write(cryp, cryp->caps->cr, cfg); + } + + return 0; } static int stm32_cryp_hw_init(struct stm32_cryp *cryp) @@ -524,10 +748,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) pm_runtime_get_sync(cryp->dev); /* Disable interrupt */ - stm32_cryp_write(cryp, CRYP_IMSCR, 0); - - /* Set key */ - stm32_cryp_hw_write_key(cryp); + stm32_cryp_write(cryp, cryp->caps->imsc, 0); /* Set configuration */ cfg = CR_DATA8 | CR_FFLUSH; @@ -554,23 +775,41 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) /* AES ECB/CBC decrypt: run key preparation first */ if (is_decrypt(cryp) && ((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) { - stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP | CR_CRYPEN); + /* Configure in key preparation mode */ + if (cryp->caps->kp_mode) + stm32_cryp_write(cryp, cryp->caps->cr, + cfg | CR_AES_KP); + else + stm32_cryp_write(cryp, + cryp->caps->cr, cfg | CR_AES_ECB | CR_KSE); + + /* Set key only after full configuration done 
*/ + stm32_cryp_hw_write_key(cryp); + /* Start prepare key */ + stm32_cryp_enable(cryp); /* Wait for end of processing */ ret = stm32_cryp_wait_busy(cryp); if (ret) { dev_err(cryp->dev, "Timeout (key preparation)\n"); return ret; } - } - cfg |= hw_mode; + cfg |= hw_mode | CR_DEC_NOT_ENC; + + /* Apply updated config (Decrypt + algo) and flush */ + stm32_cryp_write(cryp, cryp->caps->cr, cfg); + } else { + cfg |= hw_mode; + if (is_decrypt(cryp)) + cfg |= CR_DEC_NOT_ENC; - if (is_decrypt(cryp)) - cfg |= CR_DEC_NOT_ENC; + /* Apply config and flush */ + stm32_cryp_write(cryp, cryp->caps->cr, cfg); - /* Apply config and flush (valid when CRYPEN = 0) */ - stm32_cryp_write(cryp, CRYP_CR, cfg); + /* Set key only after configuration done */ + stm32_cryp_hw_write_key(cryp); + } switch (hw_mode) { case CR_AES_GCM: @@ -584,23 +823,13 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) if (ret) return ret; - /* Phase 2 : header (authenticated data) */ - if (cryp->areq->assoclen) { - cfg |= CR_PH_HEADER; - } else if (stm32_cryp_get_input_text_len(cryp)) { - cfg |= CR_PH_PAYLOAD; - stm32_cryp_write(cryp, CRYP_CR, cfg); - } else { - cfg |= CR_PH_INIT; - } - break; case CR_DES_CBC: case CR_TDES_CBC: case CR_AES_CBC: case CR_AES_CTR: - stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info); + stm32_cryp_hw_write_iv(cryp, (__be32 *)cryp->req->iv); break; default: @@ -608,11 +837,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) } /* Enable now */ - cfg |= CR_CRYPEN; - - stm32_cryp_write(cryp, CRYP_CR, cfg); - - cryp->flags &= ~FLG_CCM_PADDED_WA; + stm32_cryp_enable(cryp); return 0; } @@ -623,88 +848,276 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err) /* Phase 4 : output tag */ err = stm32_cryp_read_auth_tag(cryp); - if (cryp->sgs_copied) { - void *buf_in, *buf_out; - int pages, len; + if (!err && (!(is_gcm(cryp) || is_ccm(cryp) || is_ecb(cryp)))) + stm32_cryp_get_iv(cryp); + + pm_runtime_put_autosuspend(cryp->dev); + + if (is_gcm(cryp) || is_ccm(cryp)) + crypto_finalize_aead_request(cryp->engine, cryp->areq, err); + else + crypto_finalize_skcipher_request(cryp->engine, cryp->req, err); +} - buf_in = sg_virt(&cryp->in_sgl); - buf_out = sg_virt(&cryp->out_sgl); +static void stm32_cryp_header_dma_callback(void *param) +{ + struct stm32_cryp *cryp = (struct stm32_cryp *)param; + int ret; + u32 reg; - sg_copy_buf(buf_out, cryp->out_sg_save, 0, - cryp->total_out_save, 1); + dma_unmap_sg(cryp->dev, cryp->header_sg, cryp->header_sg_len, DMA_TO_DEVICE); - len = ALIGN(cryp->total_in_save, cryp->hw_blocksize); - pages = len ? get_order(len) : 1; - free_pages((unsigned long)buf_in, pages); + reg = stm32_cryp_read(cryp, cryp->caps->dmacr); + stm32_cryp_write(cryp, cryp->caps->dmacr, reg & ~(DMACR_DOEN | DMACR_DIEN)); - len = ALIGN(cryp->total_out_save, cryp->hw_blocksize); - pages = len ? 
get_order(len) : 1; - free_pages((unsigned long)buf_out, pages); + kfree(cryp->header_sg); + + reg = stm32_cryp_read(cryp, cryp->caps->cr); + + if (cryp->header_in) { + stm32_cryp_write(cryp, cryp->caps->cr, reg | CR_CRYPEN); + + ret = stm32_cryp_wait_input(cryp); + if (ret) { + dev_err(cryp->dev, "input header ready timeout after dma\n"); + stm32_cryp_finish_req(cryp, ret); + return; + } + stm32_cryp_irq_write_gcmccm_header(cryp); + WARN_ON(cryp->header_in); } - pm_runtime_mark_last_busy(cryp->dev); - pm_runtime_put_autosuspend(cryp->dev); + if (stm32_cryp_get_input_text_len(cryp)) { + /* Phase 3 : payload */ + reg = stm32_cryp_read(cryp, cryp->caps->cr); + stm32_cryp_write(cryp, cryp->caps->cr, reg & ~CR_CRYPEN); + + reg &= ~CR_PH_MASK; + reg |= CR_PH_PAYLOAD | CR_CRYPEN; + stm32_cryp_write(cryp, cryp->caps->cr, reg); + + if (cryp->flags & FLG_IN_OUT_DMA) { + ret = stm32_cryp_dma_start(cryp); + if (ret) + stm32_cryp_finish_req(cryp, ret); + } else { + stm32_cryp_it_start(cryp); + } + } else { + /* + * Phase 4 : tag. + * Nothing to read, nothing to write => end request + */ + stm32_cryp_finish_req(cryp, 0); + } +} + +static void stm32_cryp_dma_callback(void *param) +{ + struct stm32_cryp *cryp = (struct stm32_cryp *)param; + int ret; + u32 reg; + + complete(&cryp->dma_completion); /* completion to indicate no timeout */ + + dma_sync_sg_for_device(cryp->dev, cryp->out_sg, cryp->out_sg_len, DMA_FROM_DEVICE); + + if (cryp->in_sg != cryp->out_sg) + dma_unmap_sg(cryp->dev, cryp->in_sg, cryp->in_sg_len, DMA_TO_DEVICE); + + dma_unmap_sg(cryp->dev, cryp->out_sg, cryp->out_sg_len, DMA_FROM_DEVICE); + + reg = stm32_cryp_read(cryp, cryp->caps->dmacr); + stm32_cryp_write(cryp, cryp->caps->dmacr, reg & ~(DMACR_DOEN | DMACR_DIEN)); + + reg = stm32_cryp_read(cryp, cryp->caps->cr); if (is_gcm(cryp) || is_ccm(cryp)) { - crypto_finalize_aead_request(cryp->engine, cryp->areq, err); - cryp->areq = NULL; + kfree(cryp->in_sg); + kfree(cryp->out_sg); } else { - crypto_finalize_ablkcipher_request(cryp->engine, cryp->req, - err); - cryp->req = NULL; + if (cryp->in_sg != cryp->req->src) + kfree(cryp->in_sg); + if (cryp->out_sg != cryp->req->dst) + kfree(cryp->out_sg); } - memset(cryp->ctx->key, 0, cryp->ctx->keylen); + if (cryp->payload_in) { + stm32_cryp_write(cryp, cryp->caps->cr, reg | CR_CRYPEN); + + ret = stm32_cryp_wait_input(cryp); + if (ret) { + dev_err(cryp->dev, "input ready timeout after dma\n"); + stm32_cryp_finish_req(cryp, ret); + return; + } + stm32_cryp_irq_write_data(cryp); + + ret = stm32_cryp_wait_output(cryp); + if (ret) { + dev_err(cryp->dev, "output ready timeout after dma\n"); + stm32_cryp_finish_req(cryp, ret); + return; + } + stm32_cryp_irq_read_data(cryp); + } - mutex_unlock(&cryp->lock); + stm32_cryp_finish_req(cryp, 0); } -static int stm32_cryp_cpu_start(struct stm32_cryp *cryp) +static int stm32_cryp_header_dma_start(struct stm32_cryp *cryp) +{ + int ret; + struct dma_async_tx_descriptor *tx_in; + u32 reg; + size_t align_size; + + ret = dma_map_sg(cryp->dev, cryp->header_sg, cryp->header_sg_len, DMA_TO_DEVICE); + if (!ret) { + dev_err(cryp->dev, "dma_map_sg() error\n"); + return -ENOMEM; + } + + dma_sync_sg_for_device(cryp->dev, cryp->header_sg, cryp->header_sg_len, DMA_TO_DEVICE); + + tx_in = dmaengine_prep_slave_sg(cryp->dma_lch_in, cryp->header_sg, cryp->header_sg_len, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx_in) { + dev_err(cryp->dev, "IN prep_slave_sg() failed\n"); + return -EINVAL; + } + + tx_in->callback_param = cryp; + tx_in->callback = 
stm32_cryp_header_dma_callback; + + /* Advance scatterwalk to not DMA'ed data */ + align_size = ALIGN_DOWN(cryp->header_in, cryp->hw_blocksize); + scatterwalk_skip(&cryp->in_walk, align_size); + cryp->header_in -= align_size; + + ret = dma_submit_error(dmaengine_submit(tx_in)); + if (ret < 0) { + dev_err(cryp->dev, "DMA in submit failed\n"); + return ret; + } + dma_async_issue_pending(cryp->dma_lch_in); + + reg = stm32_cryp_read(cryp, cryp->caps->dmacr); + stm32_cryp_write(cryp, cryp->caps->dmacr, reg | DMACR_DIEN); + + return 0; +} + +static int stm32_cryp_dma_start(struct stm32_cryp *cryp) +{ + int ret; + size_t align_size; + struct dma_async_tx_descriptor *tx_in, *tx_out; + u32 reg; + + if (cryp->in_sg != cryp->out_sg) { + ret = dma_map_sg(cryp->dev, cryp->in_sg, cryp->in_sg_len, DMA_TO_DEVICE); + if (!ret) { + dev_err(cryp->dev, "dma_map_sg() error\n"); + return -ENOMEM; + } + } + + ret = dma_map_sg(cryp->dev, cryp->out_sg, cryp->out_sg_len, DMA_FROM_DEVICE); + if (!ret) { + dev_err(cryp->dev, "dma_map_sg() error\n"); + return -ENOMEM; + } + + dma_sync_sg_for_device(cryp->dev, cryp->in_sg, cryp->in_sg_len, DMA_TO_DEVICE); + + tx_in = dmaengine_prep_slave_sg(cryp->dma_lch_in, cryp->in_sg, cryp->in_sg_len, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx_in) { + dev_err(cryp->dev, "IN prep_slave_sg() failed\n"); + return -EINVAL; + } + + /* No callback necessary */ + tx_in->callback_param = cryp; + tx_in->callback = NULL; + + tx_out = dmaengine_prep_slave_sg(cryp->dma_lch_out, cryp->out_sg, cryp->out_sg_len, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx_out) { + dev_err(cryp->dev, "OUT prep_slave_sg() failed\n"); + return -EINVAL; + } + + reinit_completion(&cryp->dma_completion); + tx_out->callback = stm32_cryp_dma_callback; + tx_out->callback_param = cryp; + + /* Advance scatterwalk to not DMA'ed data */ + align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize); + scatterwalk_skip(&cryp->in_walk, align_size); + cryp->payload_in -= align_size; + + ret = dma_submit_error(dmaengine_submit(tx_in)); + if (ret < 0) { + dev_err(cryp->dev, "DMA in submit failed\n"); + return ret; + } + dma_async_issue_pending(cryp->dma_lch_in); + + /* Advance scatterwalk to not DMA'ed data */ + scatterwalk_skip(&cryp->out_walk, align_size); + cryp->payload_out -= align_size; + ret = dma_submit_error(dmaengine_submit(tx_out)); + if (ret < 0) { + dev_err(cryp->dev, "DMA out submit failed\n"); + return ret; + } + dma_async_issue_pending(cryp->dma_lch_out); + + reg = stm32_cryp_read(cryp, cryp->caps->dmacr); + stm32_cryp_write(cryp, cryp->caps->dmacr, reg | DMACR_DOEN | DMACR_DIEN); + + if (!wait_for_completion_timeout(&cryp->dma_completion, msecs_to_jiffies(1000))) { + dev_err(cryp->dev, "DMA out timed out\n"); + dmaengine_terminate_sync(cryp->dma_lch_out); + return -ETIMEDOUT; + } + + return 0; +} + +static int stm32_cryp_it_start(struct stm32_cryp *cryp) { /* Enable interrupt and let the IRQ handler do everything */ - stm32_cryp_write(cryp, CRYP_IMSCR, IMSCR_IN | IMSCR_OUT); + stm32_cryp_write(cryp, cryp->caps->imsc, IMSCR_IN | IMSCR_OUT); return 0; } static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq); -static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine, - void *areq); -static int stm32_cryp_cra_init(struct crypto_tfm *tfm) +static int stm32_cryp_init_tfm(struct crypto_skcipher *tfm) { - struct stm32_cryp_ctx *ctx = crypto_tfm_ctx(tfm); + crypto_skcipher_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx)); - 
tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx); - - ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req; - ctx->enginectx.op.prepare_request = stm32_cryp_prepare_cipher_req; - ctx->enginectx.op.unprepare_request = NULL; return 0; } static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq); -static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine, - void *areq); static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm) { - struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm); - - tfm->reqsize = sizeof(struct stm32_cryp_reqctx); - - ctx->enginectx.op.do_one_request = stm32_cryp_aead_one_req; - ctx->enginectx.op.prepare_request = stm32_cryp_prepare_aead_req; - ctx->enginectx.op.unprepare_request = NULL; + crypto_aead_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx)); return 0; } -static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode) +static int stm32_cryp_crypt(struct skcipher_request *req, unsigned long mode) { - struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx( - crypto_ablkcipher_reqtfm(req)); - struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req); + struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx( + crypto_skcipher_reqtfm(req)); + struct stm32_cryp_reqctx *rctx = skcipher_request_ctx(req); struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx); if (!cryp) @@ -712,7 +1125,7 @@ static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode) rctx->mode = mode; - return crypto_transfer_ablkcipher_request_to_engine(cryp->engine, req); + return crypto_transfer_skcipher_request_to_engine(cryp->engine, req); } static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode) @@ -729,10 +1142,10 @@ static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode) return crypto_transfer_aead_request_to_engine(cryp->engine, req); } -static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key, +static int stm32_cryp_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); memcpy(ctx->key, key, keylen); ctx->keylen = keylen; @@ -740,7 +1153,7 @@ static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key, return 0; } -static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, +static int stm32_cryp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && @@ -750,22 +1163,18 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, return stm32_cryp_setkey(tfm, key, keylen); } -static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, +static int stm32_cryp_des_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - if (keylen != DES_KEY_SIZE) - return -EINVAL; - else - return stm32_cryp_setkey(tfm, key, keylen); + return verify_skcipher_des_key(tfm, key) ?: + stm32_cryp_setkey(tfm, key, keylen); } -static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, +static int stm32_cryp_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - if (keylen != (3 * DES_KEY_SIZE)) - return -EINVAL; - else - return stm32_cryp_setkey(tfm, key, keylen); + return verify_skcipher_des3_key(tfm, key) ?: + stm32_cryp_setkey(tfm, key, keylen); } static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, @@ 
-786,7 +1195,20 @@ static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, static int stm32_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { - return authsize == AES_BLOCK_SIZE ? 0 : -EINVAL; + switch (authsize) { + case 4: + case 8: + case 12: + case 13: + case 14: + case 15: + case 16: + break; + default: + return -EINVAL; + } + + return 0; } static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm, @@ -808,33 +1230,63 @@ static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm, return 0; } -static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req) +static int stm32_cryp_aes_ecb_encrypt(struct skcipher_request *req) { + if (req->cryptlen % AES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT); } -static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req) +static int stm32_cryp_aes_ecb_decrypt(struct skcipher_request *req) { + if (req->cryptlen % AES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_ECB); } -static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req) +static int stm32_cryp_aes_cbc_encrypt(struct skcipher_request *req) { + if (req->cryptlen % AES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT); } -static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req) +static int stm32_cryp_aes_cbc_decrypt(struct skcipher_request *req) { + if (req->cryptlen % AES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_CBC); } -static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req) +static int stm32_cryp_aes_ctr_encrypt(struct skcipher_request *req) { + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT); } -static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req) +static int stm32_cryp_aes_ctr_decrypt(struct skcipher_request *req) { + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_CTR); } @@ -848,196 +1300,482 @@ static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req) return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM); } +static inline int crypto_ccm_check_iv(const u8 *iv) +{ + /* 2 <= L <= 8, so 1 <= L' <= 7. 
*/ + if (iv[0] < 1 || iv[0] > 7) + return -EINVAL; + + return 0; +} + static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req) { + int err; + + err = crypto_ccm_check_iv(req->iv); + if (err) + return err; + return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT); } static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req) { + int err; + + err = crypto_ccm_check_iv(req->iv); + if (err) + return err; + return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM); } -static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req) +static int stm32_cryp_des_ecb_encrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT); } -static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req) +static int stm32_cryp_des_ecb_decrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_DES | FLG_ECB); } -static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req) +static int stm32_cryp_des_cbc_encrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT); } -static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req) +static int stm32_cryp_des_cbc_decrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_DES | FLG_CBC); } -static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req) +static int stm32_cryp_tdes_ecb_encrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT); } -static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req) +static int stm32_cryp_tdes_ecb_decrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB); } -static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req) +static int stm32_cryp_tdes_cbc_encrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT); } -static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req) +static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC); } -static int stm32_cryp_prepare_req(struct ablkcipher_request *req, +static enum stm32_dma_mode stm32_cryp_dma_check_sg(struct scatterlist *test_sg, size_t len, + size_t block_size) +{ + struct scatterlist *sg; + int i; + + if (len <= 16) + return NO_DMA; /* Faster */ + + for_each_sg(test_sg, sg, sg_nents(test_sg), i) { + if (!IS_ALIGNED(sg->length, block_size) && !sg_is_last(sg)) + return NO_DMA; + + if (sg->offset % sizeof(u32)) + return NO_DMA; + + if (sg_is_last(sg) && !IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) + return DMA_NEED_SG_TRUNC; + } + + return DMA_PLAIN_SG; +} + +static enum stm32_dma_mode stm32_cryp_dma_check(struct stm32_cryp *cryp, struct scatterlist 
*in_sg, + struct scatterlist *out_sg) +{ + enum stm32_dma_mode ret = DMA_PLAIN_SG; + + if (!is_aes(cryp)) + return NO_DMA; + + if (!cryp->dma_lch_in || !cryp->dma_lch_out) + return NO_DMA; + + ret = stm32_cryp_dma_check_sg(in_sg, cryp->payload_in, AES_BLOCK_SIZE); + if (ret == NO_DMA) + return ret; + + ret = stm32_cryp_dma_check_sg(out_sg, cryp->payload_out, AES_BLOCK_SIZE); + if (ret == NO_DMA) + return ret; + + /* Check CTR counter overflow */ + if (is_aes(cryp) && is_ctr(cryp)) { + u32 c; + __be32 iv3; + + memcpy(&iv3, &cryp->req->iv[3 * sizeof(u32)], sizeof(iv3)); + c = be32_to_cpu(iv3); + if ((c + cryp->payload_in) < cryp->payload_in) + return NO_DMA; + } + + /* Workaround */ + if (is_aes(cryp) && is_ctr(cryp) && ret == DMA_NEED_SG_TRUNC) + return NO_DMA; + + return ret; +} + +static int stm32_cryp_truncate_sg(struct scatterlist **new_sg, size_t *new_sg_len, + struct scatterlist *sg, off_t skip, size_t size) +{ + struct scatterlist *cur; + int alloc_sg_len; + + *new_sg_len = 0; + + if (!sg || !size) { + *new_sg = NULL; + return 0; + } + + alloc_sg_len = sg_nents_for_len(sg, skip + size); + if (alloc_sg_len < 0) + return alloc_sg_len; + + /* We allocate to much sg entry, but it is easier */ + *new_sg = kmalloc_array((size_t)alloc_sg_len, sizeof(struct scatterlist), GFP_KERNEL); + if (!*new_sg) + return -ENOMEM; + + sg_init_table(*new_sg, (unsigned int)alloc_sg_len); + + cur = *new_sg; + while (sg && size) { + unsigned int len = sg->length; + unsigned int offset = sg->offset; + + if (skip > len) { + skip -= len; + sg = sg_next(sg); + continue; + } + + if (skip) { + len -= skip; + offset += skip; + skip = 0; + } + + if (size < len) + len = size; + + if (len > 0) { + (*new_sg_len)++; + size -= len; + sg_set_page(cur, sg_page(sg), len, offset); + if (size == 0) + sg_mark_end(cur); + cur = sg_next(cur); + } + + sg = sg_next(sg); + } + + return 0; +} + +static int stm32_cryp_cipher_prepare(struct stm32_cryp *cryp, struct scatterlist *in_sg, + struct scatterlist *out_sg) +{ + size_t align_size; + int ret; + + cryp->dma_mode = stm32_cryp_dma_check(cryp, in_sg, out_sg); + + scatterwalk_start(&cryp->in_walk, in_sg); + scatterwalk_start(&cryp->out_walk, out_sg); + + if (cryp->dma_mode == NO_DMA) { + cryp->flags &= ~FLG_IN_OUT_DMA; + + if (is_ctr(cryp)) + memset(cryp->last_ctr, 0, sizeof(cryp->last_ctr)); + + } else if (cryp->dma_mode == DMA_NEED_SG_TRUNC) { + + cryp->flags |= FLG_IN_OUT_DMA; + + align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize); + ret = stm32_cryp_truncate_sg(&cryp->in_sg, &cryp->in_sg_len, in_sg, 0, align_size); + if (ret) + return ret; + + ret = stm32_cryp_truncate_sg(&cryp->out_sg, &cryp->out_sg_len, out_sg, 0, + align_size); + if (ret) { + kfree(cryp->in_sg); + return ret; + } + } else { + cryp->flags |= FLG_IN_OUT_DMA; + + cryp->in_sg = in_sg; + cryp->out_sg = out_sg; + + ret = sg_nents_for_len(cryp->in_sg, cryp->payload_in); + if (ret < 0) + return ret; + cryp->in_sg_len = (size_t)ret; + + ret = sg_nents_for_len(out_sg, cryp->payload_out); + if (ret < 0) + return ret; + cryp->out_sg_len = (size_t)ret; + } + + return 0; +} + +static int stm32_cryp_aead_prepare(struct stm32_cryp *cryp, struct scatterlist *in_sg, + struct scatterlist *out_sg) +{ + size_t align_size; + off_t skip; + int ret, ret2; + + cryp->header_sg = NULL; + cryp->in_sg = NULL; + cryp->out_sg = NULL; + + if (!cryp->dma_lch_in || !cryp->dma_lch_out) { + cryp->dma_mode = NO_DMA; + cryp->flags &= ~(FLG_IN_OUT_DMA | FLG_HEADER_DMA); + + return 0; + } + + /* CCM hw_init may have advanced in header */ + 
skip = cryp->areq->assoclen - cryp->header_in; + + align_size = ALIGN_DOWN(cryp->header_in, cryp->hw_blocksize); + ret = stm32_cryp_truncate_sg(&cryp->header_sg, &cryp->header_sg_len, in_sg, skip, + align_size); + if (ret) + return ret; + + ret = stm32_cryp_dma_check_sg(cryp->header_sg, align_size, AES_BLOCK_SIZE); + if (ret == NO_DMA) { + /* We cannot DMA the header */ + kfree(cryp->header_sg); + cryp->header_sg = NULL; + + cryp->flags &= ~FLG_HEADER_DMA; + } else { + cryp->flags |= FLG_HEADER_DMA; + } + + /* Now skip all header to be at payload start */ + skip = cryp->areq->assoclen; + align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize); + ret = stm32_cryp_truncate_sg(&cryp->in_sg, &cryp->in_sg_len, in_sg, skip, align_size); + if (ret) { + kfree(cryp->header_sg); + return ret; + } + + /* For out buffer align_size is same as in buffer */ + ret = stm32_cryp_truncate_sg(&cryp->out_sg, &cryp->out_sg_len, out_sg, skip, align_size); + if (ret) { + kfree(cryp->header_sg); + kfree(cryp->in_sg); + return ret; + } + + ret = stm32_cryp_dma_check_sg(cryp->in_sg, align_size, AES_BLOCK_SIZE); + ret2 = stm32_cryp_dma_check_sg(cryp->out_sg, align_size, AES_BLOCK_SIZE); + if (ret == NO_DMA || ret2 == NO_DMA) { + kfree(cryp->in_sg); + cryp->in_sg = NULL; + + kfree(cryp->out_sg); + cryp->out_sg = NULL; + + cryp->flags &= ~FLG_IN_OUT_DMA; + } else { + cryp->flags |= FLG_IN_OUT_DMA; + } + + return 0; +} + +static int stm32_cryp_prepare_req(struct skcipher_request *req, struct aead_request *areq) { struct stm32_cryp_ctx *ctx; struct stm32_cryp *cryp; struct stm32_cryp_reqctx *rctx; + struct scatterlist *in_sg, *out_sg; int ret; if (!req && !areq) return -EINVAL; - ctx = req ? crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)) : + ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) : crypto_aead_ctx(crypto_aead_reqtfm(areq)); cryp = ctx->cryp; - if (!cryp) - return -ENODEV; - - mutex_lock(&cryp->lock); - - rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq); + rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq); rctx->mode &= FLG_MODE_MASK; - ctx->cryp = cryp; - cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode; cryp->hw_blocksize = is_aes(cryp) ? 
AES_BLOCK_SIZE : DES_BLOCK_SIZE; cryp->ctx = ctx; if (req) { cryp->req = req; - cryp->total_in = req->nbytes; - cryp->total_out = cryp->total_in; + cryp->areq = NULL; + cryp->header_in = 0; + cryp->payload_in = req->cryptlen; + cryp->payload_out = req->cryptlen; + cryp->authsize = 0; + + in_sg = req->src; + out_sg = req->dst; + + ret = stm32_cryp_cipher_prepare(cryp, in_sg, out_sg); + if (ret) + return ret; + + ret = stm32_cryp_hw_init(cryp); } else { /* * Length of input and output data: * Encryption case: - * INPUT = AssocData || PlainText + * INPUT = AssocData || PlainText * <- assoclen -> <- cryptlen -> - * <------- total_in -----------> * - * OUTPUT = AssocData || CipherText || AuthTag - * <- assoclen -> <- cryptlen -> <- authsize -> - * <---------------- total_out -----------------> + * OUTPUT = AssocData || CipherText || AuthTag + * <- assoclen -> <-- cryptlen --> <- authsize -> * * Decryption case: - * INPUT = AssocData || CipherText || AuthTag - * <- assoclen -> <--------- cryptlen ---------> - * <- authsize -> - * <---------------- total_in ------------------> + * INPUT = AssocData || CipherTex || AuthTag + * <- assoclen ---> <---------- cryptlen ----------> * - * OUTPUT = AssocData || PlainText - * <- assoclen -> <- crypten - authsize -> - * <---------- total_out -----------------> + * OUTPUT = AssocData || PlainText + * <- assoclen -> <- cryptlen - authsize -> */ cryp->areq = areq; + cryp->req = NULL; cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq)); - cryp->total_in = areq->assoclen + areq->cryptlen; - if (is_encrypt(cryp)) - /* Append auth tag to output */ - cryp->total_out = cryp->total_in + cryp->authsize; - else - /* No auth tag in output */ - cryp->total_out = cryp->total_in - cryp->authsize; - } - - cryp->total_in_save = cryp->total_in; - cryp->total_out_save = cryp->total_out; - - cryp->in_sg = req ? req->src : areq->src; - cryp->out_sg = req ? 
req->dst : areq->dst; - cryp->out_sg_save = cryp->out_sg; - - cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in); - if (cryp->in_sg_len < 0) { - dev_err(cryp->dev, "Cannot get in_sg_len\n"); - ret = cryp->in_sg_len; - goto out; - } + if (is_encrypt(cryp)) { + cryp->payload_in = areq->cryptlen; + cryp->header_in = areq->assoclen; + cryp->payload_out = areq->cryptlen; + } else { + cryp->payload_in = areq->cryptlen - cryp->authsize; + cryp->header_in = areq->assoclen; + cryp->payload_out = cryp->payload_in; + } - cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out); - if (cryp->out_sg_len < 0) { - dev_err(cryp->dev, "Cannot get out_sg_len\n"); - ret = cryp->out_sg_len; - goto out; - } + in_sg = areq->src; + out_sg = areq->dst; - ret = stm32_cryp_copy_sgs(cryp); - if (ret) - goto out; + scatterwalk_start(&cryp->in_walk, in_sg); + /* In output, jump after assoc data */ + scatterwalk_start_at_pos(&cryp->out_walk, out_sg, + areq->assoclen); - scatterwalk_start(&cryp->in_walk, cryp->in_sg); - scatterwalk_start(&cryp->out_walk, cryp->out_sg); + ret = stm32_cryp_hw_init(cryp); + if (ret) + return ret; - if (is_gcm(cryp) || is_ccm(cryp)) { - /* In output, jump after assoc data */ - scatterwalk_advance(&cryp->out_walk, cryp->areq->assoclen); - cryp->total_out -= cryp->areq->assoclen; + ret = stm32_cryp_aead_prepare(cryp, in_sg, out_sg); } - ret = stm32_cryp_hw_init(cryp); -out: - if (ret) - mutex_unlock(&cryp->lock); - return ret; } -static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine, - void *areq) -{ - struct ablkcipher_request *req = container_of(areq, - struct ablkcipher_request, - base); - - return stm32_cryp_prepare_req(req, NULL); -} - static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq) { - struct ablkcipher_request *req = container_of(areq, - struct ablkcipher_request, + struct skcipher_request *req = container_of(areq, + struct skcipher_request, base); - struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx( - crypto_ablkcipher_reqtfm(req)); + struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx( + crypto_skcipher_reqtfm(req)); struct stm32_cryp *cryp = ctx->cryp; + int ret; if (!cryp) return -ENODEV; - return stm32_cryp_cpu_start(cryp); -} + ret = stm32_cryp_prepare_req(req, NULL); + if (ret) + return ret; -static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine, void *areq) -{ - struct aead_request *req = container_of(areq, struct aead_request, - base); + if (cryp->flags & FLG_IN_OUT_DMA) + ret = stm32_cryp_dma_start(cryp); + else + ret = stm32_cryp_it_start(cryp); - return stm32_cryp_prepare_req(NULL, req); + if (ret == -ETIMEDOUT) + stm32_cryp_finish_req(cryp, ret); + + return ret; } static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq) @@ -1046,98 +1784,79 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq) base); struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct stm32_cryp *cryp = ctx->cryp; + int err; if (!cryp) return -ENODEV; - if (unlikely(!cryp->areq->assoclen && - !stm32_cryp_get_input_text_len(cryp))) { + err = stm32_cryp_prepare_req(NULL, req); + if (err) + return err; + + if (!stm32_cryp_get_input_text_len(cryp) && !cryp->header_in && + !(cryp->flags & FLG_HEADER_DMA)) { /* No input data to process: get tag and finish */ stm32_cryp_finish_req(cryp, 0); return 0; } - return stm32_cryp_cpu_start(cryp); -} - -static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst, - unsigned int n) -{ - 
scatterwalk_advance(&cryp->out_walk, n); - - if (unlikely(cryp->out_sg->length == _walked_out)) { - cryp->out_sg = sg_next(cryp->out_sg); - if (cryp->out_sg) { - scatterwalk_start(&cryp->out_walk, cryp->out_sg); - return (sg_virt(cryp->out_sg) + _walked_out); - } - } - - return (u32 *)((u8 *)dst + n); -} - -static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src, - unsigned int n) -{ - scatterwalk_advance(&cryp->in_walk, n); + if (cryp->flags & FLG_HEADER_DMA) + return stm32_cryp_header_dma_start(cryp); - if (unlikely(cryp->in_sg->length == _walked_in)) { - cryp->in_sg = sg_next(cryp->in_sg); - if (cryp->in_sg) { - scatterwalk_start(&cryp->in_walk, cryp->in_sg); - return (sg_virt(cryp->in_sg) + _walked_in); - } - } + if (!cryp->header_in && cryp->flags & FLG_IN_OUT_DMA) + return stm32_cryp_dma_start(cryp); - return (u32 *)((u8 *)src + n); + return stm32_cryp_it_start(cryp); } static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) { - u32 cfg, size_bit, *dst, d32; - u8 *d8; - unsigned int i, j; + u32 cfg, size_bit; + unsigned int i; int ret = 0; /* Update Config */ - cfg = stm32_cryp_read(cryp, CRYP_CR); + cfg = stm32_cryp_read(cryp, cryp->caps->cr); cfg &= ~CR_PH_MASK; cfg |= CR_PH_FINAL; cfg &= ~CR_DEC_NOT_ENC; cfg |= CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); if (is_gcm(cryp)) { /* GCM: write aad and payload size (in bits) */ size_bit = cryp->areq->assoclen * 8; if (cryp->caps->swap_final) - size_bit = cpu_to_be32(size_bit); + size_bit = (__force u32)cpu_to_be32(size_bit); - stm32_cryp_write(cryp, CRYP_DIN, 0); - stm32_cryp_write(cryp, CRYP_DIN, size_bit); + stm32_cryp_write(cryp, cryp->caps->din, 0); + stm32_cryp_write(cryp, cryp->caps->din, size_bit); size_bit = is_encrypt(cryp) ? 
cryp->areq->cryptlen : - cryp->areq->cryptlen - AES_BLOCK_SIZE; + cryp->areq->cryptlen - cryp->authsize; size_bit *= 8; if (cryp->caps->swap_final) - size_bit = cpu_to_be32(size_bit); + size_bit = (__force u32)cpu_to_be32(size_bit); - stm32_cryp_write(cryp, CRYP_DIN, 0); - stm32_cryp_write(cryp, CRYP_DIN, size_bit); + stm32_cryp_write(cryp, cryp->caps->din, 0); + stm32_cryp_write(cryp, cryp->caps->din, size_bit); } else { /* CCM: write CTR0 */ - u8 iv[AES_BLOCK_SIZE]; - u32 *iv32 = (u32 *)iv; + u32 iv32[AES_BLOCK_32]; + u8 *iv = (u8 *)iv32; + __be32 *biv = (__be32 *)iv32; memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE); memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1); for (i = 0; i < AES_BLOCK_32; i++) { + u32 xiv = iv32[i]; + if (!cryp->caps->padding_wa) - *iv32 = cpu_to_be32(*iv32); - stm32_cryp_write(cryp, CRYP_DIN, *iv32++); + xiv = be32_to_cpu(biv[i]); + stm32_cryp_write(cryp, cryp->caps->din, xiv); } } @@ -1149,42 +1868,17 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) } if (is_encrypt(cryp)) { - /* Get and write tag */ - dst = sg_virt(cryp->out_sg) + _walked_out; + u32 out_tag[AES_BLOCK_32]; - for (i = 0; i < AES_BLOCK_32; i++) { - if (cryp->total_out >= sizeof(u32)) { - /* Read a full u32 */ - *dst = stm32_cryp_read(cryp, CRYP_DOUT); - - dst = stm32_cryp_next_out(cryp, dst, - sizeof(u32)); - cryp->total_out -= sizeof(u32); - } else if (!cryp->total_out) { - /* Empty fifo out (data from input padding) */ - stm32_cryp_read(cryp, CRYP_DOUT); - } else { - /* Read less than an u32 */ - d32 = stm32_cryp_read(cryp, CRYP_DOUT); - d8 = (u8 *)&d32; - - for (j = 0; j < cryp->total_out; j++) { - *((u8 *)dst) = *(d8++); - dst = stm32_cryp_next_out(cryp, dst, 1); - } - cryp->total_out = 0; - } - } + /* Get and write tag */ + readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32); + memcpy_to_scatterwalk(&cryp->out_walk, out_tag, cryp->authsize); } else { /* Get and check tag */ u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32]; - scatterwalk_map_and_copy(in_tag, cryp->in_sg, - cryp->total_in_save - cryp->authsize, - cryp->authsize, 0); - - for (i = 0; i < AES_BLOCK_32; i++) - out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT); + memcpy_from_scatterwalk(in_tag, &cryp->in_walk, cryp->authsize); + readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32); if (crypto_memneq(in_tag, out_tag, cryp->authsize)) ret = -EBADMSG; @@ -1192,7 +1886,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) /* Disable cryp */ cfg &= ~CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); return ret; } @@ -1201,181 +1895,118 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp) { u32 cr; - if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) { - cryp->last_ctr[3] = 0; - cryp->last_ctr[2]++; - if (!cryp->last_ctr[2]) { - cryp->last_ctr[1]++; - if (!cryp->last_ctr[1]) - cryp->last_ctr[0]++; - } + if (unlikely(cryp->last_ctr[3] == cpu_to_be32(0xFFFFFFFF))) { + /* + * In this case, we need to increment manually the ctr counter, + * as HW doesn't handle the U32 carry. 
+ */ + crypto_inc((u8 *)cryp->last_ctr, sizeof(cryp->last_ctr)); - cr = stm32_cryp_read(cryp, CRYP_CR); - stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN); + cr = stm32_cryp_read(cryp, cryp->caps->cr); + stm32_cryp_write(cryp, cryp->caps->cr, cr & ~CR_CRYPEN); - stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr); + stm32_cryp_hw_write_iv(cryp, cryp->last_ctr); - stm32_cryp_write(cryp, CRYP_CR, cr); + stm32_cryp_write(cryp, cryp->caps->cr, cr); } - cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR); - cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR); - cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR); - cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR); + /* The IV registers are BE */ + cryp->last_ctr[0] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0l)); + cryp->last_ctr[1] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0r)); + cryp->last_ctr[2] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1l)); + cryp->last_ctr[3] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1r)); } -static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp) +static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp) { - unsigned int i, j; - u32 d32, *dst; - u8 *d8; - size_t tag_size; - - /* Do no read tag now (if any) */ - if (is_encrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp))) - tag_size = cryp->authsize; - else - tag_size = 0; - - dst = sg_virt(cryp->out_sg) + _walked_out; - - for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { - if (likely(cryp->total_out - tag_size >= sizeof(u32))) { - /* Read a full u32 */ - *dst = stm32_cryp_read(cryp, CRYP_DOUT); - - dst = stm32_cryp_next_out(cryp, dst, sizeof(u32)); - cryp->total_out -= sizeof(u32); - } else if (cryp->total_out == tag_size) { - /* Empty fifo out (data from input padding) */ - d32 = stm32_cryp_read(cryp, CRYP_DOUT); - } else { - /* Read less than an u32 */ - d32 = stm32_cryp_read(cryp, CRYP_DOUT); - d8 = (u8 *)&d32; - - for (j = 0; j < cryp->total_out - tag_size; j++) { - *((u8 *)dst) = *(d8++); - dst = stm32_cryp_next_out(cryp, dst, 1); - } - cryp->total_out = tag_size; - } - } + u32 block[AES_BLOCK_32]; - return !(cryp->total_out - tag_size) || !cryp->total_in; + readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); + memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out)); + cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, + cryp->payload_out); } static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp) { - unsigned int i, j; - u32 *src; - u8 d8[4]; - size_t tag_size; - - /* Do no write tag (if any) */ - if (is_decrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp))) - tag_size = cryp->authsize; - else - tag_size = 0; - - src = sg_virt(cryp->in_sg) + _walked_in; - - for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { - if (likely(cryp->total_in - tag_size >= sizeof(u32))) { - /* Write a full u32 */ - stm32_cryp_write(cryp, CRYP_DIN, *src); + u32 block[AES_BLOCK_32] = {0}; - src = stm32_cryp_next_in(cryp, src, sizeof(u32)); - cryp->total_in -= sizeof(u32); - } else if (cryp->total_in == tag_size) { - /* Write padding data */ - stm32_cryp_write(cryp, CRYP_DIN, 0); - } else { - /* Write less than an u32 */ - memset(d8, 0, sizeof(u32)); - for (j = 0; j < cryp->total_in - tag_size; j++) { - d8[j] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - } - - stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); - cryp->total_in = tag_size; - } - } + memcpy_from_scatterwalk(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize, + 
static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
 {
 	int err;
-	u32 cfg, tmp[AES_BLOCK_32];
-	size_t total_in_ori = cryp->total_in;
-	struct scatterlist *out_sg_ori = cryp->out_sg;
+	u32 cfg, block[AES_BLOCK_32] = {0};
 	unsigned int i;
 	/* 'Special workaround' procedure described in the datasheet */
 	/* a) disable ip */
-	stm32_cryp_write(cryp, CRYP_IMSCR, 0);
-	cfg = stm32_cryp_read(cryp, CRYP_CR);
+	stm32_cryp_write(cryp, cryp->caps->imsc, 0);
+	cfg = stm32_cryp_read(cryp, cryp->caps->cr);
 	cfg &= ~CR_CRYPEN;
-	stm32_cryp_write(cryp, CRYP_CR, cfg);
+	stm32_cryp_write(cryp, cryp->caps->cr, cfg);
 	/* b) Update IV1R */
-	stm32_cryp_write(cryp, CRYP_IV1RR, cryp->gcm_ctr - 2);
+	stm32_cryp_write(cryp, cryp->caps->iv1r, cryp->gcm_ctr - 2);
 	/* c) change mode to CTR */
 	cfg &= ~CR_ALGO_MASK;
 	cfg |= CR_AES_CTR;
-	stm32_cryp_write(cryp, CRYP_CR, cfg);
+	stm32_cryp_write(cryp, cryp->caps->cr, cfg);
 	/* a) enable IP */
 	cfg |= CR_CRYPEN;
-	stm32_cryp_write(cryp, CRYP_CR, cfg);
+	stm32_cryp_write(cryp, cryp->caps->cr, cfg);
 	/* b) pad and write the last block */
 	stm32_cryp_irq_write_block(cryp);
-	cryp->total_in = total_in_ori;
+	/* wait end of process */
 	err = stm32_cryp_wait_output(cryp);
 	if (err) {
-		dev_err(cryp->dev, "Timeout (write gcm header)\n");
+		dev_err(cryp->dev, "Timeout (write gcm last data)\n");
 		return stm32_cryp_finish_req(cryp, err);
 	}
 	/* c) get and store encrypted data */
-	stm32_cryp_irq_read_data(cryp);
-	scatterwalk_map_and_copy(tmp, out_sg_ori,
-				 cryp->total_in_save - total_in_ori,
-				 total_in_ori, 0);
+	/*
+	 * Same code as stm32_cryp_irq_read_data(), but we want to store
+	 * block value
+	 */
+	readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
+
+	memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
+							    cryp->payload_out));
+	cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
+				   cryp->payload_out);
 	/* d) change mode back to AES GCM */
 	cfg &= ~CR_ALGO_MASK;
 	cfg |= CR_AES_GCM;
-	stm32_cryp_write(cryp, CRYP_CR, cfg);
+	stm32_cryp_write(cryp, cryp->caps->cr, cfg);
 	/* e) change phase to Final */
 	cfg &= ~CR_PH_MASK;
 	cfg |= CR_PH_FINAL;
-	stm32_cryp_write(cryp, CRYP_CR, cfg);
+	stm32_cryp_write(cryp, cryp->caps->cr, cfg);
 	/* f) write padded data */
-	for (i = 0; i < AES_BLOCK_32; i++) {
-		if (cryp->total_in)
-			stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
-		else
-			stm32_cryp_write(cryp, CRYP_DIN, 0);
-
-		cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
-	}
+	writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32);
 	/* g) Empty fifo out */
 	err = stm32_cryp_wait_output(cryp);
 	if (err) {
-		dev_err(cryp->dev, "Timeout (write gcm header)\n");
+		dev_err(cryp->dev, "Timeout (write gcm padded data)\n");
 		return stm32_cryp_finish_req(cryp, err);
 	}
 	for (i = 0; i < AES_BLOCK_32; i++)
-		stm32_cryp_read(cryp, CRYP_DOUT);
+		stm32_cryp_read(cryp, cryp->caps->dout);
 	/* h) run the normal Final phase */
 	stm32_cryp_finish_req(cryp, 0);
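On IP revisions without this erratum the driver instead programs the number of padding bytes, as stm32_cryp_irq_set_npblb() does just after this. The arithmetic as a standalone, checkable sketch (cr_with_npblb() is illustrative; the constants mirror the driver's):

	#include <assert.h>
	#include <stdint.h>

	#define AES_BLOCK_BYTES	16u
	#define CR_NBPBL_SHIFT	20

	static uint32_t cr_with_npblb(uint32_t cr, uint32_t payload_in)
	{
		/* a short final block needs 1..15 bytes of padding */
		uint32_t pad = AES_BLOCK_BYTES - payload_in;

		assert(payload_in > 0 && payload_in < AES_BLOCK_BYTES);
		return cr | (pad << CR_NBPBL_SHIFT);
	}

With the padding byte count declared, the engine can discount those bytes itself and no CTR-mode detour is needed.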
@@ -1383,38 +2014,34 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
 static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp)
 {
-	u32 cfg, payload_bytes;
+	u32 cfg;
 	/* disable ip, set NPBLB and re-enable ip */
-	cfg = stm32_cryp_read(cryp, CRYP_CR);
+	cfg = stm32_cryp_read(cryp, cryp->caps->cr);
 	cfg &= ~CR_CRYPEN;
-	stm32_cryp_write(cryp, CRYP_CR, cfg);
+	stm32_cryp_write(cryp, cryp->caps->cr, cfg);
-	payload_bytes = is_decrypt(cryp) ? cryp->total_in - cryp->authsize :
-					   cryp->total_in;
-	cfg |= (cryp->hw_blocksize - payload_bytes) << CR_NBPBL_SHIFT;
+	cfg |= (cryp->hw_blocksize - cryp->payload_in) << CR_NBPBL_SHIFT;
 	cfg |= CR_CRYPEN;
-	stm32_cryp_write(cryp, CRYP_CR, cfg);
+	stm32_cryp_write(cryp, cryp->caps->cr, cfg);
 }
 static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
 {
 	int err = 0;
 	u32 cfg, iv1tmp;
-	u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32], tmp[AES_BLOCK_32];
-	size_t last_total_out, total_in_ori = cryp->total_in;
-	struct scatterlist *out_sg_ori = cryp->out_sg;
+	u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32];
+	u32 block[AES_BLOCK_32] = {0};
 	unsigned int i;
 	/* 'Special workaround' procedure described in the datasheet */
-	cryp->flags |= FLG_CCM_PADDED_WA;
 	/* a) disable ip */
-	stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+	stm32_cryp_write(cryp, cryp->caps->imsc, 0);
-	cfg = stm32_cryp_read(cryp, CRYP_CR);
+	cfg = stm32_cryp_read(cryp, cryp->caps->cr);
 	cfg &= ~CR_CRYPEN;
-	stm32_cryp_write(cryp, CRYP_CR, cfg);
+	stm32_cryp_write(cryp, cryp->caps->cr, cfg);
 	/* b) get IV1 from CRYP_CSGCMCCM7 */
 	iv1tmp = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + 7 * 4);
@@ -1424,34 +2051,36 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
 		cstmp1[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4);
 	/* d) Write IV1R */
-	stm32_cryp_write(cryp, CRYP_IV1RR, iv1tmp);
+	stm32_cryp_write(cryp, cryp->caps->iv1r, iv1tmp);
 	/* e) change mode to CTR */
 	cfg &= ~CR_ALGO_MASK;
 	cfg |= CR_AES_CTR;
-	stm32_cryp_write(cryp, CRYP_CR, cfg);
+	stm32_cryp_write(cryp, cryp->caps->cr, cfg);
 	/* a) enable IP */
 	cfg |= CR_CRYPEN;
-	stm32_cryp_write(cryp, CRYP_CR, cfg);
+	stm32_cryp_write(cryp, cryp->caps->cr, cfg);
 	/* b) pad and write the last block */
 	stm32_cryp_irq_write_block(cryp);
-	cryp->total_in = total_in_ori;
+	/* wait end of process */
 	err = stm32_cryp_wait_output(cryp);
 	if (err) {
-		dev_err(cryp->dev, "Timeout (wite ccm padded data)\n");
+		dev_err(cryp->dev, "Timeout (write ccm padded data)\n");
 		return stm32_cryp_finish_req(cryp, err);
 	}
 	/* c) get and store decrypted data */
-	last_total_out = cryp->total_out;
-	stm32_cryp_irq_read_data(cryp);
+	/*
+	 * Same code as stm32_cryp_irq_read_data(), but we want to store
+	 * block value
+	 */
+	readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
-	memset(tmp, 0, sizeof(tmp));
-	scatterwalk_map_and_copy(tmp, out_sg_ori,
-				 cryp->total_out_save - last_total_out,
-				 last_total_out, 0);
+	memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
+							    cryp->payload_out));
+	cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out);
 	/* d) Load again CRYP_CSGCMCCMxR */
 	for (i = 0; i < ARRAY_SIZE(cstmp2); i++)
@@ -1460,24 +2089,24 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
 	/* e) change mode back to AES CCM */
 	cfg &= ~CR_ALGO_MASK;
 	cfg |= CR_AES_CCM;
-	stm32_cryp_write(cryp, CRYP_CR, cfg);
+	stm32_cryp_write(cryp, cryp->caps->cr, cfg);
 	/* f) change phase to header */
 	cfg &= ~CR_PH_MASK;
 	cfg |= CR_PH_HEADER;
-	stm32_cryp_write(cryp, CRYP_CR, cfg);
+	stm32_cryp_write(cryp, cryp->caps->cr, cfg);
 	/* g) XOR and write padded data */
-	for (i = 0; i < ARRAY_SIZE(tmp); i++) {
-		tmp[i] ^= cstmp1[i];
-		tmp[i] ^= cstmp2[i];
-		stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
+	for (i = 0; i < ARRAY_SIZE(block); i++) {
+		block[i] ^= cstmp1[i];
+		block[i] ^= cstmp2[i];
+		stm32_cryp_write(cryp, cryp->caps->din, block[i]);
 	}
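Step h), just below, waits for the BUSY flag to clear. In-kernel, a bounded wait such as stm32_cryp_wait_busy() is conventionally built on readl_poll_timeout() from <linux/iopoll.h>; a sketch under that assumption (the polling interval and 100 ms budget are illustrative, not necessarily the driver's actual values):

	static int stm32_cryp_wait_busy_sketch(struct stm32_cryp *cryp)
	{
		u32 status;

		/* poll SR every 10 us, give up after 100 ms */
		return readl_poll_timeout(cryp->regs + cryp->caps->sr, status,
					  !(status & SR_BUSY), 10, 100 * 1000);
	}

Sleeping while polling is acceptable here because the caller runs in the threaded half of the interrupt handler, not in hard-IRQ context.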
 	/* h) wait for completion */
 	err = stm32_cryp_wait_busy(cryp);
 	if (err)
-		dev_err(cryp->dev, "Timeout (wite ccm padded data)\n");
+		dev_err(cryp->dev, "Timeout (write ccm padded data)\n");
 	/* i) run the normal Final phase */
 	stm32_cryp_finish_req(cryp, err);
@@ -1485,30 +2114,34 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
 static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
 {
-	if (unlikely(!cryp->total_in)) {
+	if (unlikely(!cryp->payload_in)) {
 		dev_warn(cryp->dev, "No more data to process\n");
 		return;
 	}
-	if (unlikely(cryp->total_in < AES_BLOCK_SIZE &&
+	if (unlikely(cryp->payload_in < AES_BLOCK_SIZE &&
 		     (stm32_cryp_get_hw_mode(cryp) == CR_AES_GCM) &&
 		     is_encrypt(cryp))) {
 		/* Padding for AES GCM encryption */
-		if (cryp->caps->padding_wa)
+		if (cryp->caps->padding_wa) {
 			/* Special case 1 */
-			return stm32_cryp_irq_write_gcm_padded_data(cryp);
+			stm32_cryp_irq_write_gcm_padded_data(cryp);
+			return;
+		}
 		/* Setting padding bytes (NPBLB) */
 		stm32_cryp_irq_set_npblb(cryp);
 	}
-	if (unlikely((cryp->total_in - cryp->authsize < AES_BLOCK_SIZE) &&
+	if (unlikely((cryp->payload_in < AES_BLOCK_SIZE) &&
 		     (stm32_cryp_get_hw_mode(cryp) == CR_AES_CCM) &&
 		     is_decrypt(cryp))) {
 		/* Padding for AES CCM decryption */
-		if (cryp->caps->padding_wa)
+		if (cryp->caps->padding_wa) {
 			/* Special case 2 */
-			return stm32_cryp_irq_write_ccm_padded_data(cryp);
+			stm32_cryp_irq_write_ccm_padded_data(cryp);
+			return;
+		}
 		/* Setting padding bytes (NPBLB) */
 		stm32_cryp_irq_set_npblb(cryp);
@@ -1520,192 +2153,62 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
 	stm32_cryp_irq_write_block(cryp);
 }
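The removed stm32_cryp_irq_write_ccm_header() below hand-rolled the CCM associated-data length encoding from NIST SP 800-38C: lengths below 0xFF00 use a 2-byte field, larger 32-bit lengths the 0xFF 0xFE marker plus a 4-byte big-endian field. Note in passing that byte extractions in the style of the removed d8[2] = alen & 0xFF000000 truncate to zero when stored in a u8; shifts are required, as in this illustrative sketch (ccm_encode_alen() is not a kernel function):

	#include <stddef.h>
	#include <stdint.h>

	static size_t ccm_encode_alen(uint8_t *out, uint32_t alen)
	{
		if (alen < 0xFF00) {		/* short form: 2 bytes */
			out[0] = alen >> 8;
			out[1] = alen & 0xFF;
			return 2;
		}
		out[0] = 0xFF;			/* long form marker */
		out[1] = 0xFE;
		out[2] = alen >> 24;		/* big-endian 32-bit length */
		out[3] = (alen >> 16) & 0xFF;
		out[4] = (alen >> 8) & 0xFF;
		out[5] = alen & 0xFF;
		return 6;
	}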
-static void stm32_cryp_irq_write_gcm_header(struct stm32_cryp *cryp)
+static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp)
 {
-	int err;
-	unsigned int i, j;
-	u32 cfg, *src;
-
-	src = sg_virt(cryp->in_sg) + _walked_in;
+	u32 block[AES_BLOCK_32] = {0};
+	size_t written;
-	for (i = 0; i < AES_BLOCK_32; i++) {
-		stm32_cryp_write(cryp, CRYP_DIN, *src);
-
-		src = stm32_cryp_next_in(cryp, src, sizeof(u32));
-		cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
-
-		/* Check if whole header written */
-		if ((cryp->total_in_save - cryp->total_in) ==
-				cryp->areq->assoclen) {
-			/* Write padding if needed */
-			for (j = i + 1; j < AES_BLOCK_32; j++)
-				stm32_cryp_write(cryp, CRYP_DIN, 0);
-
-			/* Wait for completion */
-			err = stm32_cryp_wait_busy(cryp);
-			if (err) {
-				dev_err(cryp->dev, "Timeout (gcm header)\n");
-				return stm32_cryp_finish_req(cryp, err);
-			}
-
-			if (stm32_cryp_get_input_text_len(cryp)) {
-				/* Phase 3 : payload */
-				cfg = stm32_cryp_read(cryp, CRYP_CR);
-				cfg &= ~CR_CRYPEN;
-				stm32_cryp_write(cryp, CRYP_CR, cfg);
-
-				cfg &= ~CR_PH_MASK;
-				cfg |= CR_PH_PAYLOAD;
-				cfg |= CR_CRYPEN;
-				stm32_cryp_write(cryp, CRYP_CR, cfg);
-			} else {
-				/* Phase 4 : tag */
-				stm32_cryp_write(cryp, CRYP_IMSCR, 0);
-				stm32_cryp_finish_req(cryp, 0);
-			}
-
-			break;
-		}
+	written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in);
-		if (!cryp->total_in)
-			break;
-	}
-}
+	memcpy_from_scatterwalk(block, &cryp->in_walk, written);
-static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp)
-{
-	int err;
-	unsigned int i = 0, j, k;
-	u32 alen, cfg, *src;
-	u8 d8[4];
-
-	src = sg_virt(cryp->in_sg) + _walked_in;
-	alen = cryp->areq->assoclen;
-
-	if (!_walked_in) {
-		if (cryp->areq->assoclen <= 65280) {
-			/* Write first u32 of B1 */
-			d8[0] = (alen >> 8) & 0xFF;
-			d8[1] = alen & 0xFF;
-			d8[2] = *((u8 *)src);
-			src = stm32_cryp_next_in(cryp, src, 1);
-			d8[3] = *((u8 *)src);
-			src = stm32_cryp_next_in(cryp, src, 1);
-
-			stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
-			i++;
-
-			cryp->total_in -= min_t(size_t, 2, cryp->total_in);
-		} else {
-			/* Build the two first u32 of B1 */
-			d8[0] = 0xFF;
-			d8[1] = 0xFE;
-			d8[2] = alen & 0xFF000000;
-			d8[3] = alen & 0x00FF0000;
-
-			stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
-			i++;
-
-			d8[0] = alen & 0x0000FF00;
-			d8[1] = alen & 0x000000FF;
-			d8[2] = *((u8 *)src);
-			src = stm32_cryp_next_in(cryp, src, 1);
-			d8[3] = *((u8 *)src);
-			src = stm32_cryp_next_in(cryp, src, 1);
-
-			stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
-			i++;
-
-			cryp->total_in -= min_t(size_t, 2, cryp->total_in);
-		}
-	}
+	writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32);
-	/* Write next u32 */
-	for (; i < AES_BLOCK_32; i++) {
-		/* Build an u32 */
-		memset(d8, 0, sizeof(u32));
-		for (k = 0; k < sizeof(u32); k++) {
-			d8[k] = *((u8 *)src);
-			src = stm32_cryp_next_in(cryp, src, 1);
+	cryp->header_in -= written;
-			cryp->total_in -= min_t(size_t, 1, cryp->total_in);
-			if ((cryp->total_in_save - cryp->total_in) == alen)
-				break;
-		}
-
-		stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
-
-		if ((cryp->total_in_save - cryp->total_in) == alen) {
-			/* Write padding if needed */
-			for (j = i + 1; j < AES_BLOCK_32; j++)
-				stm32_cryp_write(cryp, CRYP_DIN, 0);
-
-			/* Wait for completion */
-			err = stm32_cryp_wait_busy(cryp);
-			if (err) {
-				dev_err(cryp->dev, "Timeout (ccm header)\n");
-				return stm32_cryp_finish_req(cryp, err);
-			}
-
-			if (stm32_cryp_get_input_text_len(cryp)) {
-				/* Phase 3 : payload */
-				cfg = stm32_cryp_read(cryp, CRYP_CR);
-				cfg &= ~CR_CRYPEN;
-				stm32_cryp_write(cryp, CRYP_CR, cfg);
-
-				cfg &= ~CR_PH_MASK;
-				cfg |= CR_PH_PAYLOAD;
-				cfg |= CR_CRYPEN;
-				stm32_cryp_write(cryp, CRYP_CR, cfg);
-			} else {
-				/* Phase 4 : tag */
-				stm32_cryp_write(cryp, CRYP_IMSCR, 0);
-				stm32_cryp_finish_req(cryp, 0);
-			}
-
-			break;
-		}
-	}
+	stm32_crypt_gcmccm_end_header(cryp);
 }
 static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
 {
 	struct stm32_cryp *cryp = arg;
 	u32 ph;
+	u32 it_mask = stm32_cryp_read(cryp, cryp->caps->imsc);
 	if (cryp->irq_status & MISR_OUT)
 		/* Output FIFO IRQ: read data */
-		if (unlikely(stm32_cryp_irq_read_data(cryp))) {
-			/* All bytes processed, finish */
-			stm32_cryp_write(cryp, CRYP_IMSCR, 0);
-			stm32_cryp_finish_req(cryp, 0);
-			return IRQ_HANDLED;
-		}
+		stm32_cryp_irq_read_data(cryp);
 	if (cryp->irq_status & MISR_IN) {
-		if (is_gcm(cryp)) {
-			ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
+		if (is_gcm(cryp) || is_ccm(cryp)) {
+			ph = stm32_cryp_read(cryp, cryp->caps->cr) & CR_PH_MASK;
 			if (unlikely(ph == CR_PH_HEADER))
 				/* Write Header */
-				stm32_cryp_irq_write_gcm_header(cryp);
-			else
-				/* Input FIFO IRQ: write data */
-				stm32_cryp_irq_write_data(cryp);
-			cryp->gcm_ctr++;
-		} else if (is_ccm(cryp)) {
-			ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
-			if (unlikely(ph == CR_PH_HEADER))
-				/* Write Header */
-				stm32_cryp_irq_write_ccm_header(cryp);
+				stm32_cryp_irq_write_gcmccm_header(cryp);
 			else
 				/* Input FIFO IRQ: write data */
 				stm32_cryp_irq_write_data(cryp);
+			if (is_gcm(cryp))
+				cryp->gcm_ctr++;
 		} else {
 			/* Input FIFO IRQ: write data */
 			stm32_cryp_irq_write_data(cryp);
 		}
 	}
+	/* Mask useless interrupts */
+	if (!cryp->payload_in && !cryp->header_in)
+		it_mask &= ~IMSCR_IN;
+	if (!cryp->payload_out)
+		it_mask &= ~IMSCR_OUT;
+	stm32_cryp_write(cryp, cryp->caps->imsc, it_mask);
+
+	if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out) {
+		local_bh_disable();
+		stm32_cryp_finish_req(cryp, 0);
+		local_bh_enable();
+	}
+
 	return IRQ_HANDLED;
 }
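The thread above is the bottom half of a split handler: the hard-IRQ entry point that follows only snapshots the masked interrupt status and returns IRQ_WAKE_THREAD. The two halves are tied together by the devm_request_threaded_irq() call visible later in this patch; reproduced here for orientation, with the trailing name/cookie arguments assumed since the hunk cuts them off:

	ret = devm_request_threaded_irq(dev, irq,
					stm32_cryp_irq,        /* hard-IRQ half */
					stm32_cryp_irq_thread, /* threaded half */
					IRQF_ONESHOT,
					dev_name(dev), cryp);  /* assumed args */

IRQF_ONESHOT keeps the interrupt line masked until the threaded half completes, which is what makes the FIFO draining in the thread safe against re-entry.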
@@ -1713,212 +2216,351 @@ static irqreturn_t stm32_cryp_irq(int irq, void *arg)
 {
 	struct stm32_cryp *cryp = arg;
-	cryp->irq_status = stm32_cryp_read(cryp, CRYP_MISR);
+	cryp->irq_status = stm32_cryp_read(cryp, cryp->caps->mis);
 	return IRQ_WAKE_THREAD;
 }
-static struct crypto_alg crypto_algs[] = {
-{
-	.cra_name		= "ecb(aes)",
-	.cra_driver_name	= "stm32-ecb-aes",
-	.cra_priority		= 200,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-				  CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
-	.cra_alignmask		= 0xf,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= stm32_cryp_cra_init,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.setkey		= stm32_cryp_aes_setkey,
-		.encrypt	= stm32_cryp_aes_ecb_encrypt,
-		.decrypt	= stm32_cryp_aes_ecb_decrypt,
+static int stm32_cryp_dma_init(struct stm32_cryp *cryp)
+{
+	struct dma_slave_config dma_conf;
+	struct dma_chan *chan;
+	int ret;
+
+	memset(&dma_conf, 0, sizeof(dma_conf));
+
+	dma_conf.direction = DMA_MEM_TO_DEV;
+	dma_conf.dst_addr = cryp->phys_base + cryp->caps->din;
+	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dma_conf.dst_maxburst = CRYP_DMA_BURST_REG;
+	dma_conf.device_fc = false;
+
+	chan = dma_request_chan(cryp->dev, "in");
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
+
+	cryp->dma_lch_in = chan;
+	ret = dmaengine_slave_config(cryp->dma_lch_in, &dma_conf);
+	if (ret) {
+		dma_release_channel(cryp->dma_lch_in);
+		cryp->dma_lch_in = NULL;
+		dev_err(cryp->dev, "Couldn't configure DMA in slave.\n");
+		return ret;
+	}
+
+	memset(&dma_conf, 0, sizeof(dma_conf));
+
+	dma_conf.direction = DMA_DEV_TO_MEM;
+	dma_conf.src_addr = cryp->phys_base + cryp->caps->dout;
+	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dma_conf.src_maxburst = CRYP_DMA_BURST_REG;
+	dma_conf.device_fc = false;
+
+	chan = dma_request_chan(cryp->dev, "out");
+	if (IS_ERR(chan)) {
+		dma_release_channel(cryp->dma_lch_in);
+		cryp->dma_lch_in = NULL;
+		return PTR_ERR(chan);
+	}
+
+	cryp->dma_lch_out = chan;
+
+	ret = dmaengine_slave_config(cryp->dma_lch_out, &dma_conf);
+	if (ret) {
+		dma_release_channel(cryp->dma_lch_out);
+		cryp->dma_lch_out = NULL;
+		dev_err(cryp->dev, "Couldn't configure DMA out slave.\n");
+		dma_release_channel(cryp->dma_lch_in);
+		cryp->dma_lch_in = NULL;
+		return ret;
 	}
+
+	init_completion(&cryp->dma_completion);
+
+	return 0;
+}
+
+static struct skcipher_engine_alg crypto_algs[] = {
+{
+	.base = {
+		.base.cra_name		= "ecb(aes)",
+		.base.cra_driver_name	= "stm32-ecb-aes",
+		.base.cra_priority	= 300,
+		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
+		.base.cra_blocksize	= AES_BLOCK_SIZE,
+		.base.cra_ctxsize	= sizeof(struct stm32_cryp_ctx),
+		.base.cra_alignmask	= 0,
+		.base.cra_module	= THIS_MODULE,
+
+		.init			= stm32_cryp_init_tfm,
+		.min_keysize		= AES_MIN_KEY_SIZE,
+		.max_keysize		= AES_MAX_KEY_SIZE,
+		.setkey			= stm32_cryp_aes_setkey,
+		.encrypt		= stm32_cryp_aes_ecb_encrypt,
+		.decrypt		= stm32_cryp_aes_ecb_decrypt,
+	},
+	.op = {
+		.do_one_request = stm32_cryp_cipher_one_req,
+	},
 },
 {
-	.cra_name		= "cbc(aes)",
-	.cra_driver_name	= "stm32-cbc-aes",
-	.cra_priority		= 200,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-				  CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
-	.cra_alignmask		= 0xf,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= stm32_cryp_cra_init,
-	.cra_ablkcipher = {
-		.min_keysize	= 
AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = stm32_cryp_aes_setkey, - .encrypt = stm32_cryp_aes_cbc_encrypt, - .decrypt = stm32_cryp_aes_cbc_decrypt, - } + .base = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "stm32-cbc-aes", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask = 0, + .base.cra_module = THIS_MODULE, + + .init = stm32_cryp_init_tfm, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = stm32_cryp_aes_setkey, + .encrypt = stm32_cryp_aes_cbc_encrypt, + .decrypt = stm32_cryp_aes_cbc_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, { - .cra_name = "ctr(aes)", - .cra_driver_name = "stm32-ctr-aes", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | - CRYPTO_ALG_ASYNC, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .cra_alignmask = 0xf, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = stm32_cryp_cra_init, - .cra_ablkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = stm32_cryp_aes_setkey, - .encrypt = stm32_cryp_aes_ctr_encrypt, - .decrypt = stm32_cryp_aes_ctr_decrypt, - } + .base = { + .base.cra_name = "ctr(aes)", + .base.cra_driver_name = "stm32-ctr-aes", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask = 0, + .base.cra_module = THIS_MODULE, + + .init = stm32_cryp_init_tfm, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = stm32_cryp_aes_setkey, + .encrypt = stm32_cryp_aes_ctr_encrypt, + .decrypt = stm32_cryp_aes_ctr_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, { - .cra_name = "ecb(des)", - .cra_driver_name = "stm32-ecb-des", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | - CRYPTO_ALG_ASYNC, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .cra_alignmask = 0xf, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = stm32_cryp_cra_init, - .cra_ablkcipher = { - .min_keysize = DES_BLOCK_SIZE, - .max_keysize = DES_BLOCK_SIZE, - .setkey = stm32_cryp_des_setkey, - .encrypt = stm32_cryp_des_ecb_encrypt, - .decrypt = stm32_cryp_des_ecb_decrypt, - } + .base = { + .base.cra_name = "ecb(des)", + .base.cra_driver_name = "stm32-ecb-des", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask = 0, + .base.cra_module = THIS_MODULE, + + .init = stm32_cryp_init_tfm, + .min_keysize = DES_BLOCK_SIZE, + .max_keysize = DES_BLOCK_SIZE, + .setkey = stm32_cryp_des_setkey, + .encrypt = stm32_cryp_des_ecb_encrypt, + .decrypt = stm32_cryp_des_ecb_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, { - .cra_name = "cbc(des)", - .cra_driver_name = "stm32-cbc-des", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | - CRYPTO_ALG_ASYNC, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .cra_alignmask = 0xf, - .cra_type 
= &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = stm32_cryp_cra_init, - .cra_ablkcipher = { - .min_keysize = DES_BLOCK_SIZE, - .max_keysize = DES_BLOCK_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = stm32_cryp_des_setkey, - .encrypt = stm32_cryp_des_cbc_encrypt, - .decrypt = stm32_cryp_des_cbc_decrypt, - } + .base = { + .base.cra_name = "cbc(des)", + .base.cra_driver_name = "stm32-cbc-des", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask = 0, + .base.cra_module = THIS_MODULE, + + .init = stm32_cryp_init_tfm, + .min_keysize = DES_BLOCK_SIZE, + .max_keysize = DES_BLOCK_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = stm32_cryp_des_setkey, + .encrypt = stm32_cryp_des_cbc_encrypt, + .decrypt = stm32_cryp_des_cbc_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, { - .cra_name = "ecb(des3_ede)", - .cra_driver_name = "stm32-ecb-des3", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | - CRYPTO_ALG_ASYNC, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .cra_alignmask = 0xf, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = stm32_cryp_cra_init, - .cra_ablkcipher = { - .min_keysize = 3 * DES_BLOCK_SIZE, - .max_keysize = 3 * DES_BLOCK_SIZE, - .setkey = stm32_cryp_tdes_setkey, - .encrypt = stm32_cryp_tdes_ecb_encrypt, - .decrypt = stm32_cryp_tdes_ecb_decrypt, - } + .base = { + .base.cra_name = "ecb(des3_ede)", + .base.cra_driver_name = "stm32-ecb-des3", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask = 0, + .base.cra_module = THIS_MODULE, + + .init = stm32_cryp_init_tfm, + .min_keysize = 3 * DES_BLOCK_SIZE, + .max_keysize = 3 * DES_BLOCK_SIZE, + .setkey = stm32_cryp_tdes_setkey, + .encrypt = stm32_cryp_tdes_ecb_encrypt, + .decrypt = stm32_cryp_tdes_ecb_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, { - .cra_name = "cbc(des3_ede)", - .cra_driver_name = "stm32-cbc-des3", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | - CRYPTO_ALG_ASYNC, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .cra_alignmask = 0xf, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = stm32_cryp_cra_init, - .cra_ablkcipher = { - .min_keysize = 3 * DES_BLOCK_SIZE, - .max_keysize = 3 * DES_BLOCK_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = stm32_cryp_tdes_setkey, - .encrypt = stm32_cryp_tdes_cbc_encrypt, - .decrypt = stm32_cryp_tdes_cbc_decrypt, - } + .base = { + .base.cra_name = "cbc(des3_ede)", + .base.cra_driver_name = "stm32-cbc-des3", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask = 0, + .base.cra_module = THIS_MODULE, + + .init = stm32_cryp_init_tfm, + .min_keysize = 3 * DES_BLOCK_SIZE, + .max_keysize = 3 * DES_BLOCK_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = stm32_cryp_tdes_setkey, + .encrypt = stm32_cryp_tdes_cbc_encrypt, + .decrypt = stm32_cryp_tdes_cbc_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, }; -static struct aead_alg aead_algs[] = { +static struct 
aead_engine_alg aead_algs[] = {
{
-	.setkey		= stm32_cryp_aes_aead_setkey,
-	.setauthsize	= stm32_cryp_aes_gcm_setauthsize,
-	.encrypt	= stm32_cryp_aes_gcm_encrypt,
-	.decrypt	= stm32_cryp_aes_gcm_decrypt,
-	.init		= stm32_cryp_aes_aead_init,
-	.ivsize		= 12,
-	.maxauthsize	= AES_BLOCK_SIZE,
+	.base.setkey		= stm32_cryp_aes_aead_setkey,
+	.base.setauthsize	= stm32_cryp_aes_gcm_setauthsize,
+	.base.encrypt		= stm32_cryp_aes_gcm_encrypt,
+	.base.decrypt		= stm32_cryp_aes_gcm_decrypt,
+	.base.init		= stm32_cryp_aes_aead_init,
+	.base.ivsize		= 12,
+	.base.maxauthsize	= AES_BLOCK_SIZE,
-	.base = {
+	.base.base = {
 		.cra_name		= "gcm(aes)",
 		.cra_driver_name	= "stm32-gcm-aes",
-		.cra_priority		= 200,
-		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.cra_blocksize		= 1,
 		.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
-		.cra_alignmask		= 0xf,
+		.cra_alignmask		= 0,
 		.cra_module		= THIS_MODULE,
 	},
+	.op = {
+		.do_one_request = stm32_cryp_aead_one_req,
+	},
},
{
-	.setkey		= stm32_cryp_aes_aead_setkey,
-	.setauthsize	= stm32_cryp_aes_ccm_setauthsize,
-	.encrypt	= stm32_cryp_aes_ccm_encrypt,
-	.decrypt	= stm32_cryp_aes_ccm_decrypt,
-	.init		= stm32_cryp_aes_aead_init,
-	.ivsize		= AES_BLOCK_SIZE,
-	.maxauthsize	= AES_BLOCK_SIZE,
+	.base.setkey		= stm32_cryp_aes_aead_setkey,
+	.base.setauthsize	= stm32_cryp_aes_ccm_setauthsize,
+	.base.encrypt		= stm32_cryp_aes_ccm_encrypt,
+	.base.decrypt		= stm32_cryp_aes_ccm_decrypt,
+	.base.init		= stm32_cryp_aes_aead_init,
+	.base.ivsize		= AES_BLOCK_SIZE,
+	.base.maxauthsize	= AES_BLOCK_SIZE,
-	.base = {
+	.base.base = {
 		.cra_name		= "ccm(aes)",
 		.cra_driver_name	= "stm32-ccm-aes",
-		.cra_priority		= 200,
-		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.cra_blocksize		= 1,
 		.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
-		.cra_alignmask		= 0xf,
+		.cra_alignmask		= 0,
 		.cra_module		= THIS_MODULE,
 	},
+	.op = {
+		.do_one_request = stm32_cryp_aead_one_req,
+	},
},
};
+static const struct stm32_cryp_caps ux500_data = {
+	.aeads_support = false,
+	.linear_aes_key = true,
+	.kp_mode = false,
+	.iv_protection = true,
+	.swap_final = true,
+	.padding_wa = true,
+	.cr = UX500_CRYP_CR,
+	.sr = UX500_CRYP_SR,
+	.din = UX500_CRYP_DIN,
+	.dout = UX500_CRYP_DOUT,
+	.dmacr = UX500_CRYP_DMACR,
+	.imsc = UX500_CRYP_IMSC,
+	.mis = UX500_CRYP_MIS,
+	.k1l = UX500_CRYP_K1L,
+	.k1r = UX500_CRYP_K1R,
+	.k3r = UX500_CRYP_K3R,
+	.iv0l = UX500_CRYP_IV0L,
+	.iv0r = UX500_CRYP_IV0R,
+	.iv1l = UX500_CRYP_IV1L,
+	.iv1r = UX500_CRYP_IV1R,
+};
+
 static const struct stm32_cryp_caps f7_data = {
+	.aeads_support = true,
+	.linear_aes_key = false,
+	.kp_mode = true,
+	.iv_protection = false,
 	.swap_final = true,
 	.padding_wa = true,
+	.cr = CRYP_CR,
+	.sr = CRYP_SR,
+	.din = CRYP_DIN,
+	.dout = CRYP_DOUT,
+	.dmacr = CRYP_DMACR,
+	.imsc = CRYP_IMSCR,
+	.mis = CRYP_MISR,
+	.k1l = CRYP_K1LR,
+	.k1r = CRYP_K1RR,
+	.k3r = CRYP_K3RR,
+	.iv0l = CRYP_IV0LR,
+	.iv0r = CRYP_IV0RR,
+	.iv1l = CRYP_IV1LR,
+	.iv1r = CRYP_IV1RR,
 };
 static const struct stm32_cryp_caps mp1_data = {
+	.aeads_support = true,
+	.linear_aes_key = false,
+	.kp_mode = true,
+	.iv_protection = false,
 	.swap_final = false,
 	.padding_wa = false,
+	.cr = CRYP_CR,
+	.sr = CRYP_SR,
+	.din = CRYP_DIN,
+	.dout = CRYP_DOUT,
+	.dmacr = CRYP_DMACR,
+	.imsc = CRYP_IMSCR,
+	.mis = CRYP_MISR,
+	.k1l = CRYP_K1LR,
+	.k1r = CRYP_K1RR,
+	.k3r = CRYP_K3RR,
+	.iv0l = CRYP_IV0LR,
+	.iv0r = CRYP_IV0RR,
+	.iv1l = CRYP_IV1LR,
+	.iv1r = CRYP_IV1RR,
 };
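Every compatible string in the table below carries one of the capability blocks above, and the shared code paths never hard-code an offset; they go through the indirection, e.g. stm32_cryp_read(cryp, cryp->caps->cr). A sketch of how probe typically binds the matched data, assuming the usual OF helper (the surrounding lines are illustrative, not quoted from this driver):

	/* in probe(), before any register access: */
	cryp->caps = of_device_get_match_data(dev);
	if (!cryp->caps)
		return -ENODEV;

	/* from here on, Ux500/F7/MP1 register layouts look identical */
	cfg = stm32_cryp_read(cryp, cryp->caps->cr);

This indirection is what lets one driver serve the Ux500 block, whose registers sit at different offsets, alongside the STM32 variants.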
static const struct of_device_id stm32_dt_ids[] = {
+	{ .compatible = "stericsson,ux500-cryp", .data = &ux500_data},
 	{ .compatible = "st,stm32f756-cryp", .data = &f7_data},
 	{ .compatible = "st,stm32mp1-cryp", .data = &mp1_data},
 	{},
@@ -1929,7 +2571,6 @@ static int stm32_cryp_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct stm32_cryp *cryp;
-	struct resource *res;
 	struct reset_control *rst;
 	int irq, ret;
@@ -1943,18 +2584,15 @@ static int stm32_cryp_probe(struct platform_device *pdev)
 	cryp->dev = dev;
-	mutex_init(&cryp->lock);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	cryp->regs = devm_ioremap_resource(dev, res);
+	cryp->regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(cryp->regs))
 		return PTR_ERR(cryp->regs);
+	cryp->phys_base = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
+
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "Cannot get IRQ resource\n");
+	if (irq < 0)
 		return irq;
-	}
 	ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq,
 					stm32_cryp_irq_thread, IRQF_ONESHOT,
@@ -1966,7 +2604,8 @@ static int stm32_cryp_probe(struct platform_device *pdev)
 	cryp->clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(cryp->clk)) {
-		dev_err(dev, "Could not get clock\n");
+		dev_err_probe(dev, PTR_ERR(cryp->clk), "Could not get clock\n");
+
 		return PTR_ERR(cryp->clk);
 	}
@@ -1984,7 +2623,11 @@ static int stm32_cryp_probe(struct platform_device *pdev)
 	pm_runtime_enable(dev);
 	rst = devm_reset_control_get(dev, NULL);
-	if (!IS_ERR(rst)) {
+	if (IS_ERR(rst)) {
+		ret = PTR_ERR(rst);
+		if (ret == -EPROBE_DEFER)
+			goto err_rst;
+	} else {
 		reset_control_assert(rst);
 		udelay(2);
 		reset_control_deassert(rst);
@@ -1992,6 +2635,17 @@ static int stm32_cryp_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, cryp);
+	ret = stm32_cryp_dma_init(cryp);
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		dev_dbg(dev, "DMA mode not available\n");
+		break;
+	default:
+		goto err_dma;
+	}
+
 	spin_lock(&cryp_list.lock);
 	list_add(&cryp->list, &cryp_list.dev_list);
 	spin_unlock(&cryp_list.lock);
@@ -2010,15 +2664,17 @@ static int stm32_cryp_probe(struct platform_device *pdev)
 		goto err_engine2;
 	}
-	ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+	ret = crypto_engine_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
 	if (ret) {
 		dev_err(dev, "Could not register algs\n");
 		goto err_algs;
 	}
-	ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
-	if (ret)
-		goto err_aead_algs;
+	if (cryp->caps->aeads_support) {
+		ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+		if (ret)
+			goto err_aead_algs;
+	}
 	dev_info(dev, "Initialized\n");
@@ -2027,7 +2683,7 @@ static int stm32_cryp_probe(struct platform_device *pdev)
 	return 0;
 err_aead_algs:
-	crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+	crypto_engine_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
 err_algs:
 err_engine2:
 	crypto_engine_exit(cryp->engine);
@@ -2036,8 +2692,12 @@ err_engine1:
 	list_del(&cryp->list);
 	spin_unlock(&cryp_list.lock);
-	pm_runtime_disable(dev);
-	pm_runtime_put_noidle(dev);
+	if (cryp->dma_lch_in)
+		dma_release_channel(cryp->dma_lch_in);
+	if (cryp->dma_lch_out)
+		dma_release_channel(cryp->dma_lch_out);
+err_dma:
+err_rst:
 	pm_runtime_disable(dev);
 	pm_runtime_put_noidle(dev);
@@ -2046,20 +2706,16 @@ err_engine1:
 	return ret;
 }
-static int stm32_cryp_remove(struct platform_device *pdev)
+static void stm32_cryp_remove(struct platform_device *pdev)
 {
 	struct stm32_cryp *cryp = platform_get_drvdata(pdev);
 	int 
ret;
-	if (!cryp)
-		return -ENODEV;
-
 	ret = pm_runtime_get_sync(cryp->dev);
-	if (ret < 0)
-		return ret;
-	crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
-	crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+	if (cryp->caps->aeads_support)
+		crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+	crypto_engine_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
 	crypto_engine_exit(cryp->engine);
@@ -2067,12 +2723,17 @@ static void stm32_cryp_remove(struct platform_device *pdev)
 	list_del(&cryp->list);
 	spin_unlock(&cryp_list.lock);
+	if (cryp->dma_lch_in)
+		dma_release_channel(cryp->dma_lch_in);
+
+	if (cryp->dma_lch_out)
+		dma_release_channel(cryp->dma_lch_out);
+
 	pm_runtime_disable(cryp->dev);
 	pm_runtime_put_noidle(cryp->dev);
-	clk_disable_unprepare(cryp->clk);
-
-	return 0;
+	if (ret >= 0)
+		clk_disable_unprepare(cryp->clk);
 }
 #ifdef CONFIG_PM
@@ -2120,5 +2781,5 @@ static struct platform_driver stm32_cryp_driver = {
 module_platform_driver(stm32_cryp_driver);
 MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
-MODULE_DESCRIPTION("STMicrolectronics STM32 CRYP hardware driver");
+MODULE_DESCRIPTION("STMicroelectronics STM32 CRYP hardware driver");
 MODULE_LICENSE("GPL");
